chore: review docs

This commit is contained in:
Jesús Pérez 2026-01-14 01:56:30 +00:00
parent bc51cd4113
commit db7ba4969b
Signed by: jesus
GPG Key ID: 9F243E355E0BC939
238 changed files with 27432 additions and 21427 deletions

View File

@ -1,6 +1,6 @@
[book] [book]
authors = ["Provisioning Platform Team"] authors = ["Provisioning Platform Team"]
description = "Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust" description = "Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust"
language = "en" language = "en"
multilingual = false multilingual = false
src = "src" src = "src"
@ -54,7 +54,7 @@ smart-punctuation = true
use-boolean-and = true use-boolean-and = true
[output.html.code.highlightjs] [output.html.code.highlightjs]
additional-languages = ["nushell", "toml", "yaml", "bash", "rust", "kcl"] additional-languages = ["nushell", "toml", "yaml", "bash", "rust", "nickel"]
[output.html.code] [output.html.code]
hidelines = {} hidelines = {}

View File

@ -9,7 +9,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">

File diff suppressed because it is too large Load Diff

Before

Width:  |  Height:  |  Size: 433 KiB

After

Width:  |  Height:  |  Size: 434 KiB

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -178,7 +178,8 @@
<strong>Decision Makers</strong>: Architecture Team</p> <strong>Decision Makers</strong>: Architecture Team</p>
<hr /> <hr />
<h2 id="context"><a class="header" href="#context">Context</a></h2> <h2 id="context"><a class="header" href="#context">Context</a></h2>
<p>The Provisioning platform required a comprehensive, enterprise-grade security system covering authentication, authorization, secrets management, MFA, compliance, and emergency access. The system needed to be production-ready, scalable, and compliant with GDPR, SOC2, and ISO 27001.</p> <p>The Provisioning platform required a comprehensive, enterprise-grade security system covering authentication, authorization, secrets management, MFA,
compliance, and emergency access. The system needed to be production-ready, scalable, and compliant with GDPR, SOC2, and ISO 27001.</p>
<hr /> <hr />
<h2 id="decision"><a class="header" href="#decision">Decision</a></h2> <h2 id="decision"><a class="header" href="#decision">Decision</a></h2>
<p>Implement a complete security architecture using 12 specialized components organized in 4 implementation groups.</p> <p>Implement a complete security architecture using 12 specialized components organized in 4 implementation groups.</p>
@ -734,7 +735,7 @@ cargo test --test break_glass_integration_tests
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../../architecture/adr/ADR-010-configuration-format-strategy.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../../architecture/adr/adr-010-configuration-format-strategy.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
@ -748,7 +749,7 @@ cargo test --test break_glass_integration_tests
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../../architecture/adr/ADR-010-configuration-format-strategy.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../../architecture/adr/adr-010-configuration-format-strategy.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
</nav> </nav>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -174,7 +174,8 @@
<main> <main>
<h1 id="integration-patterns"><a class="header" href="#integration-patterns">Integration Patterns</a></h1> <h1 id="integration-patterns"><a class="header" href="#integration-patterns">Integration Patterns</a></h1>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2> <h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>Provisioning implements sophisticated integration patterns to coordinate between its hybrid Rust/Nushell architecture, manage multi-provider workflows, and enable extensible functionality. This document outlines the key integration patterns, their implementations, and best practices.</p> <p>Provisioning implements sophisticated integration patterns to coordinate between its hybrid Rust/Nushell architecture, manage multi-provider
workflows, and enable extensible functionality. This document outlines the key integration patterns, their implementations, and best practices.</p>
<h2 id="core-integration-patterns"><a class="header" href="#core-integration-patterns">Core Integration Patterns</a></h2> <h2 id="core-integration-patterns"><a class="header" href="#core-integration-patterns">Core Integration Patterns</a></h2>
<h3 id="1-hybrid-language-integration"><a class="header" href="#1-hybrid-language-integration">1. Hybrid Language Integration</a></h3> <h3 id="1-hybrid-language-integration"><a class="header" href="#1-hybrid-language-integration">1. Hybrid Language Integration</a></h3>
<h4 id="rust-to-nushell-communication-pattern"><a class="header" href="#rust-to-nushell-communication-pattern">Rust-to-Nushell Communication Pattern</a></h4> <h4 id="rust-to-nushell-communication-pattern"><a class="header" href="#rust-to-nushell-communication-pattern">Rust-to-Nushell Communication Pattern</a></h4>
@ -680,7 +681,8 @@ mod integration_tests {
assert_eq!(result.unwrap().status, WorkflowStatus::Completed); assert_eq!(result.unwrap().status, WorkflowStatus::Completed);
} }
}</code></pre> }</code></pre>
<p>These integration patterns provide the foundation for the systems sophisticated multi-component architecture, enabling reliable, scalable, and maintainable infrastructure automation.</p> <p>These integration patterns provide the foundation for the systems sophisticated multi-component architecture, enabling reliable, scalable, and
maintainable infrastructure automation.</p>
</main> </main>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -177,7 +177,9 @@
<strong>Status:</strong> Strategic Analysis <strong>Status:</strong> Strategic Analysis
<strong>Related:</strong> <a href="repo-dist-analysis.html">Repository Distribution Analysis</a></p> <strong>Related:</strong> <a href="repo-dist-analysis.html">Repository Distribution Analysis</a></p>
<h2 id="executive-summary"><a class="header" href="#executive-summary">Executive Summary</a></h2> <h2 id="executive-summary"><a class="header" href="#executive-summary">Executive Summary</a></h2>
<p>This document analyzes a <strong>multi-repository strategy</strong> as an alternative to the monorepo approach. After careful consideration of the provisioning systems architecture, a <strong>hybrid approach with 4 core repositories</strong> is recommended, avoiding submodules in favor of a cleaner package-based dependency model.</p> <p>This document analyzes a <strong>multi-repository strategy</strong> as an alternative to the monorepo approach. After careful consideration of the provisioning
systems architecture, a <strong>hybrid approach with 4 core repositories</strong> is recommended, avoiding submodules in favor of a cleaner package-based
dependency model.</p>
<hr /> <hr />
<h2 id="repository-architecture-options"><a class="header" href="#repository-architecture-options">Repository Architecture Options</a></h2> <h2 id="repository-architecture-options"><a class="header" href="#repository-architecture-options">Repository Architecture Options</a></h2>
<h3 id="option-a-pure-monorepo-original-recommendation"><a class="header" href="#option-a-pure-monorepo-original-recommendation">Option A: Pure Monorepo (Original Recommendation)</a></h3> <h3 id="option-a-pure-monorepo-original-recommendation"><a class="header" href="#option-a-pure-monorepo-original-recommendation">Option A: Pure Monorepo (Original Recommendation)</a></h3>
@ -1041,7 +1043,8 @@ provisioning-distribution/ (Repo 5, ~30 MB)
</ul> </ul>
<p><strong>Avoid:</strong> Submodules (complexity nightmare)</p> <p><strong>Avoid:</strong> Submodules (complexity nightmare)</p>
<p><strong>Use:</strong> Package-based dependencies with version compatibility matrix</p> <p><strong>Use:</strong> Package-based dependencies with version compatibility matrix</p>
<p>This architecture scales better for your projects growth, supports a community extension ecosystem, and provides professional-grade separation of concerns while maintaining integration through a well-designed package system.</p> <p>This architecture scales better for your projects growth, supports a community extension ecosystem, and provides professional-grade separation of
concerns while maintaining integration through a well-designed package system.</p>
<hr /> <hr />
<h2 id="next-steps"><a class="header" href="#next-steps">Next Steps</a></h2> <h2 id="next-steps"><a class="header" href="#next-steps">Next Steps</a></h2>
<ol> <ol>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -177,7 +177,8 @@
<strong>Date</strong>: 2025-10-08 <strong>Date</strong>: 2025-10-08
<strong>Status</strong>: Implemented</p> <strong>Status</strong>: Implemented</p>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2> <h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>Complete authentication and authorization flow integration for the Provisioning Orchestrator, connecting all security components (JWT validation, MFA verification, Cedar authorization, rate limiting, and audit logging) into a cohesive security middleware chain.</p> <p>Complete authentication and authorization flow integration for the Provisioning Orchestrator, connecting all security components (JWT validation, MFA
verification, Cedar authorization, rate limiting, and audit logging) into a cohesive security middleware chain.</p>
<h2 id="architecture"><a class="header" href="#architecture">Architecture</a></h2> <h2 id="architecture"><a class="header" href="#architecture">Architecture</a></h2>
<h3 id="security-middleware-chain"><a class="header" href="#security-middleware-chain">Security Middleware Chain</a></h3> <h3 id="security-middleware-chain"><a class="header" href="#security-middleware-chain">Security Middleware Chain</a></h3>
<p>The middleware chain is applied in this specific order to ensure proper security:</p> <p>The middleware chain is applied in this specific order to ensure proper security:</p>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -177,7 +177,9 @@
<strong>Status:</strong> Clarification Document <strong>Status:</strong> Clarification Document
<strong>Related:</strong> <a href="multi-repo-strategy.html">Multi-Repo Strategy</a>, <a href="../user/hybrid-orchestrator.html">Hybrid Orchestrator v3.0</a></p> <strong>Related:</strong> <a href="multi-repo-strategy.html">Multi-Repo Strategy</a>, <a href="../user/hybrid-orchestrator.html">Hybrid Orchestrator v3.0</a></p>
<h2 id="executive-summary"><a class="header" href="#executive-summary">Executive Summary</a></h2> <h2 id="executive-summary"><a class="header" href="#executive-summary">Executive Summary</a></h2>
<p>This document clarifies <strong>how the Rust orchestrator integrates with Nushell core</strong> in both monorepo and multi-repo architectures. The orchestrator is a <strong>critical performance layer</strong> that coordinates Nushell business logic execution, solving deep call stack limitations while preserving all existing functionality.</p> <p>This document clarifies <strong>how the Rust orchestrator integrates with Nushell core</strong> in both monorepo and multi-repo architectures. The orchestrator is
a <strong>critical performance layer</strong> that coordinates Nushell business logic execution, solving deep call stack limitations while preserving all existing
functionality.</p>
<hr /> <hr />
<h2 id="current-architecture-hybrid-orchestrator-v30"><a class="header" href="#current-architecture-hybrid-orchestrator-v30">Current Architecture (Hybrid Orchestrator v3.0)</a></h2> <h2 id="current-architecture-hybrid-orchestrator-v30"><a class="header" href="#current-architecture-hybrid-orchestrator-v30">Current Architecture (Hybrid Orchestrator v3.0)</a></h2>
<h3 id="the-problem-being-solved"><a class="header" href="#the-problem-being-solved">The Problem Being Solved</a></h3> <h3 id="the-problem-being-solved"><a class="header" href="#the-problem-being-solved">The Problem Being Solved</a></h3>
@ -606,7 +608,7 @@ CLI → servers/list.nu → Query state → Return results
<ol> <ol>
<li> <li>
<p><strong>Eliminates Deep Call Stack Issues</strong></p> <p><strong>Eliminates Deep Call Stack Issues</strong></p>
<pre><code> <pre><code class="language-text">
Without Orchestrator: Without Orchestrator:
template.nu → calls → cluster.nu → calls → taskserv.nu → calls → provider.nu template.nu → calls → cluster.nu → calls → taskserv.nu → calls → provider.nu
(Deep nesting causes "Type not supported" errors) (Deep nesting causes "Type not supported" errors)
@ -617,22 +619,20 @@ Orchestrator → spawns → Nushell subprocess (flat execution)
</code></pre> </code></pre>
</li> </li>
</ol> <li>
<pre><code> <p><strong>Performance Optimization</strong></p>
2. **Performance Optimization** <pre><code class="language-rust">// Orchestrator executes tasks in parallel
let tasks = vec![task1, task2, task3, task4, task5];
```rust let results = futures::future::join_all(
// Orchestrator executes tasks in parallel tasks.iter().map(|t| execute_task(t))
let tasks = vec![task1, task2, task3, task4, task5]; ).await;
let results = futures::future::join_all( // 5 Nushell subprocesses run concurrently</code></pre>
tasks.iter().map(|t| execute_task(t)) </li>
).await; <li>
<p><strong>Reliable State Management</strong></p>
// 5 Nushell subprocesses run concurrently </li>
</code></pre>
<ol>
<li><strong>Reliable State Management</strong></li>
</ol> </ol>
<pre><code class="language-plaintext"> Orchestrator maintains: <pre><code class="language-plaintext"> Orchestrator maintains:
- Task queue (survives crashes) - Task queue (survives crashes)

File diff suppressed because one or more lines are too long

View File

@ -1,221 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="ayu sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Workspace Config Architecture - Provisioning Platform Documentation</title>
<!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">
<link rel="icon" href="../favicon.svg">
<link rel="shortcut icon" href="../favicon.png">
<link rel="stylesheet" href="../css/variables.css">
<link rel="stylesheet" href="../css/general.css">
<link rel="stylesheet" href="../css/chrome.css">
<link rel="stylesheet" href="../css/print.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="../FontAwesome/css/font-awesome.css">
<link rel="stylesheet" href="../fonts/fonts.css">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" id="highlight-css" href="../highlight.css">
<link rel="stylesheet" id="tomorrow-night-css" href="../tomorrow-night.css">
<link rel="stylesheet" id="ayu-highlight-css" href="../ayu-highlight.css">
<!-- Custom theme stylesheets -->
<!-- Provide site root and default themes to javascript -->
<script>
const path_to_root = "../";
const default_light_theme = "ayu";
const default_dark_theme = "navy";
</script>
<!-- Start loading toc.js asap -->
<script src="../toc.js"></script>
</head>
<body>
<div id="mdbook-help-container">
<div id="mdbook-help-popup">
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
<div>
<p>Press <kbd></kbd> or <kbd></kbd> to navigate between chapters</p>
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
<p>Press <kbd>?</kbd> to show this help</p>
<p>Press <kbd>Esc</kbd> to hide this help</p>
</div>
</div>
</div>
<div id="body-container">
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script>
try {
let theme = localStorage.getItem('mdbook-theme');
let sidebar = localStorage.getItem('mdbook-sidebar');
if (theme.startsWith('"') && theme.endsWith('"')) {
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
}
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
}
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script>
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
let theme;
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = default_theme; }
const html = document.documentElement;
html.classList.remove('ayu')
html.classList.add(theme);
html.classList.add("js");
</script>
<input type="checkbox" id="sidebar-toggle-anchor" class="hidden">
<!-- Hide / unhide sidebar before it is displayed -->
<script>
let sidebar = null;
const sidebar_toggle = document.getElementById("sidebar-toggle-anchor");
if (document.body.clientWidth >= 1080) {
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
sidebar = sidebar || 'visible';
} else {
sidebar = 'hidden';
}
sidebar_toggle.checked = sidebar === 'visible';
html.classList.remove('sidebar-visible');
html.classList.add("sidebar-" + sidebar);
</script>
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
<!-- populated by js -->
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
<noscript>
<iframe class="sidebar-iframe-outer" src="../toc.html"></iframe>
</noscript>
<div id="sidebar-resize-handle" class="sidebar-resize-handle">
<div class="sidebar-resize-indicator"></div>
</div>
</nav>
<div id="page-wrapper" class="page-wrapper">
<div class="page">
<div id="menu-bar-hover-placeholder"></div>
<div id="menu-bar" class="menu-bar sticky">
<div class="left-buttons">
<label id="sidebar-toggle" class="icon-button" for="sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
<i class="fa fa-bars"></i>
</label>
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
<i class="fa fa-paint-brush"></i>
</button>
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="default_theme">Auto</button></li>
<li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
<li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
</ul>
<button id="search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="searchbar">
<i class="fa fa-search"></i>
</button>
</div>
<h1 class="menu-title">Provisioning Platform Documentation</h1>
<div class="right-buttons">
<a href="../print.html" title="Print this book" aria-label="Print this book">
<i id="print-button" class="fa fa-print"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform" title="Git repository" aria-label="Git repository">
<i id="git-repository-button" class="fa fa-github"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform/edit/main/provisioning/docs/src/configuration/workspace-config-architecture.md" title="Suggest an edit" aria-label="Suggest an edit">
<i id="git-edit-button" class="fa fa-edit"></i>
</a>
</div>
</div>
<div id="search-wrapper" class="hidden">
<form id="searchbar-outer" class="searchbar-outer">
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
</form>
<div id="searchresults-outer" class="searchresults-outer hidden">
<div id="searchresults-header" class="searchresults-header"></div>
<ul id="searchresults">
</ul>
</div>
</div>
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script>
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
});
</script>
<div id="content" class="content">
<main>
<h1 id="workspace-config-architecture"><a class="header" href="#workspace-config-architecture">Workspace Config Architecture</a></h1>
</main>
<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../configuration/config-validation.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<div style="clear: both"></div>
</nav>
</div>
</div>
<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../configuration/config-validation.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
</nav>
</div>
<script>
window.playground_copyable = true;
</script>
<script src="../elasticlunr.min.js"></script>
<script src="../mark.min.js"></script>
<script src="../searcher.js"></script>
<script src="../clipboard.min.js"></script>
<script src="../highlight.js"></script>
<script src="../book.js"></script>
<!-- Custom JS scripts -->
</div>
</body>
</html>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -173,7 +173,8 @@
<div id="content" class="content"> <div id="content" class="content">
<main> <main>
<h1 id="build-system-documentation"><a class="header" href="#build-system-documentation">Build System Documentation</a></h1> <h1 id="build-system-documentation"><a class="header" href="#build-system-documentation">Build System Documentation</a></h1>
<p>This document provides comprehensive documentation for the provisioning projects build system, including the complete Makefile reference with 40+ targets, build tools, compilation instructions, and troubleshooting.</p> <p>This document provides comprehensive documentation for the provisioning projects build system, including the complete Makefile reference with 40+
targets, build tools, compilation instructions, and troubleshooting.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2> <h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol> <ol>
<li><a href="#overview">Overview</a></li> <li><a href="#overview">Overview</a></li>
@ -1022,7 +1023,8 @@ make ci-test
# Full CI/CD pipeline # Full CI/CD pipeline
make ci-release make ci-release
</code></pre> </code></pre>
<p>This build system provides a comprehensive, maintainable foundation for the provisioning projects development lifecycle, from local development to production releases.</p> <p>This build system provides a comprehensive, maintainable foundation for the provisioning projects development lifecycle, from local development to
production releases.</p>
</main> </main>
@ -1032,7 +1034,7 @@ make ci-release
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../development/extensions.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../development/distribution-process.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
@ -1046,7 +1048,7 @@ make ci-release
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../development/extensions.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../development/distribution-process.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
</nav> </nav>

View File

@ -1,227 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="ayu sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Configuration - Provisioning Platform Documentation</title>
<!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">
<link rel="icon" href="../favicon.svg">
<link rel="shortcut icon" href="../favicon.png">
<link rel="stylesheet" href="../css/variables.css">
<link rel="stylesheet" href="../css/general.css">
<link rel="stylesheet" href="../css/chrome.css">
<link rel="stylesheet" href="../css/print.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="../FontAwesome/css/font-awesome.css">
<link rel="stylesheet" href="../fonts/fonts.css">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" id="highlight-css" href="../highlight.css">
<link rel="stylesheet" id="tomorrow-night-css" href="../tomorrow-night.css">
<link rel="stylesheet" id="ayu-highlight-css" href="../ayu-highlight.css">
<!-- Custom theme stylesheets -->
<!-- Provide site root and default themes to javascript -->
<script>
const path_to_root = "../";
const default_light_theme = "ayu";
const default_dark_theme = "navy";
</script>
<!-- Start loading toc.js asap -->
<script src="../toc.js"></script>
</head>
<body>
<div id="mdbook-help-container">
<div id="mdbook-help-popup">
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
<div>
<p>Press <kbd></kbd> or <kbd></kbd> to navigate between chapters</p>
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
<p>Press <kbd>?</kbd> to show this help</p>
<p>Press <kbd>Esc</kbd> to hide this help</p>
</div>
</div>
</div>
<div id="body-container">
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script>
try {
let theme = localStorage.getItem('mdbook-theme');
let sidebar = localStorage.getItem('mdbook-sidebar');
if (theme.startsWith('"') && theme.endsWith('"')) {
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
}
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
}
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script>
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
let theme;
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = default_theme; }
const html = document.documentElement;
html.classList.remove('ayu')
html.classList.add(theme);
html.classList.add("js");
</script>
<input type="checkbox" id="sidebar-toggle-anchor" class="hidden">
<!-- Hide / unhide sidebar before it is displayed -->
<script>
let sidebar = null;
const sidebar_toggle = document.getElementById("sidebar-toggle-anchor");
if (document.body.clientWidth >= 1080) {
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
sidebar = sidebar || 'visible';
} else {
sidebar = 'hidden';
}
sidebar_toggle.checked = sidebar === 'visible';
html.classList.remove('sidebar-visible');
html.classList.add("sidebar-" + sidebar);
</script>
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
<!-- populated by js -->
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
<noscript>
<iframe class="sidebar-iframe-outer" src="../toc.html"></iframe>
</noscript>
<div id="sidebar-resize-handle" class="sidebar-resize-handle">
<div class="sidebar-resize-indicator"></div>
</div>
</nav>
<div id="page-wrapper" class="page-wrapper">
<div class="page">
<div id="menu-bar-hover-placeholder"></div>
<div id="menu-bar" class="menu-bar sticky">
<div class="left-buttons">
<label id="sidebar-toggle" class="icon-button" for="sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
<i class="fa fa-bars"></i>
</label>
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
<i class="fa fa-paint-brush"></i>
</button>
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="default_theme">Auto</button></li>
<li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
<li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
</ul>
<button id="search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="searchbar">
<i class="fa fa-search"></i>
</button>
</div>
<h1 class="menu-title">Provisioning Platform Documentation</h1>
<div class="right-buttons">
<a href="../print.html" title="Print this book" aria-label="Print this book">
<i id="print-button" class="fa fa-print"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform" title="Git repository" aria-label="Git repository">
<i id="git-repository-button" class="fa fa-github"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform/edit/main/provisioning/docs/src/development/configuration.md" title="Suggest an edit" aria-label="Suggest an edit">
<i id="git-edit-button" class="fa fa-edit"></i>
</a>
</div>
</div>
<div id="search-wrapper" class="hidden">
<form id="searchbar-outer" class="searchbar-outer">
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
</form>
<div id="searchresults-outer" class="searchresults-outer hidden">
<div id="searchresults-header" class="searchresults-header"></div>
<ul id="searchresults">
</ul>
</div>
</div>
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script>
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
});
</script>
<div id="content" class="content">
<main>
<h1 id="configuration"><a class="header" href="#configuration">Configuration</a></h1>
</main>
<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../development/command-handler-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../development/workflow.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
<div style="clear: both"></div>
</nav>
</div>
</div>
<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../development/command-handler-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../development/workflow.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>
</div>
<script>
window.playground_copyable = true;
</script>
<script src="../elasticlunr.min.js"></script>
<script src="../mark.min.js"></script>
<script src="../searcher.js"></script>
<script src="../clipboard.min.js"></script>
<script src="../highlight.js"></script>
<script src="../book.js"></script>
<!-- Custom JS scripts -->
</div>
</body>
</html>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -173,7 +173,8 @@
<div id="content" class="content"> <div id="content" class="content">
<main> <main>
<h1 id="distribution-process-documentation"><a class="header" href="#distribution-process-documentation">Distribution Process Documentation</a></h1> <h1 id="distribution-process-documentation"><a class="header" href="#distribution-process-documentation">Distribution Process Documentation</a></h1>
<p>This document provides comprehensive documentation for the provisioning projects distribution process, covering release workflows, package generation, multi-platform distribution, and rollback procedures.</p> <p>This document provides comprehensive documentation for the provisioning projects distribution process, covering release workflows, package
generation, multi-platform distribution, and rollback procedures.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2> <h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol> <ol>
<li><a href="#overview">Overview</a></li> <li><a href="#overview">Overview</a></li>
@ -188,7 +189,8 @@
<li><a href="#troubleshooting">Troubleshooting</a></li> <li><a href="#troubleshooting">Troubleshooting</a></li>
</ol> </ol>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2> <h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>The distribution system provides a comprehensive solution for creating, packaging, and distributing provisioning across multiple platforms with automated release management.</p> <p>The distribution system provides a comprehensive solution for creating, packaging, and distributing provisioning across multiple platforms with
automated release management.</p>
<p><strong>Key Features</strong>:</p> <p><strong>Key Features</strong>:</p>
<ul> <ul>
<li><strong>Multi-Platform Support</strong>: Linux, macOS, Windows with multiple architectures</li> <li><strong>Multi-Platform Support</strong>: Linux, macOS, Windows with multiple architectures</li>
@ -988,13 +990,14 @@ make status
top top
df -h df -h
</code></pre> </code></pre>
<p>This distribution process provides a robust, automated pipeline for creating, validating, and distributing provisioning across multiple platforms while maintaining high quality and reliability standards.</p> <p>This distribution process provides a robust, automated pipeline for creating, validating, and distributing provisioning across multiple platforms
while maintaining high quality and reliability standards.</p>
</main> </main>
<nav class="nav-wrapper" aria-label="Page navigation"> <nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons --> <!-- Mobile navigation buttons -->
<a rel="prev" href="../development/extensions.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left"> <a rel="prev" href="../development/build-system.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
@ -1008,7 +1011,7 @@ df -h
</div> </div>
<nav class="nav-wide-wrapper" aria-label="Page navigation"> <nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../development/extensions.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left"> <a rel="prev" href="../development/build-system.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>

File diff suppressed because it is too large Load Diff

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -178,7 +178,8 @@
<strong>Priority:</strong> High <strong>Priority:</strong> High
<strong>Related:</strong> <a href="../architecture/repo-dist-analysis.html">Architecture Analysis</a></p> <strong>Related:</strong> <a href="../architecture/repo-dist-analysis.html">Architecture Analysis</a></p>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2> <h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>This guide provides step-by-step instructions for implementing the repository restructuring and distribution system improvements. Each phase includes specific commands, validation steps, and rollback procedures.</p> <p>This guide provides step-by-step instructions for implementing the repository restructuring and distribution system improvements. Each phase includes
specific commands, validation steps, and rollback procedures.</p>
<hr /> <hr />
<h2 id="prerequisites"><a class="header" href="#prerequisites">Prerequisites</a></h2> <h2 id="prerequisites"><a class="header" href="#prerequisites">Prerequisites</a></h2>
<h3 id="required-tools"><a class="header" href="#required-tools">Required Tools</a></h3> <h3 id="required-tools"><a class="header" href="#required-tools">Required Tools</a></h3>
@ -974,7 +975,7 @@ Day 16: Release prepared</li>
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../development/taskserv-developer-guide.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../development/project-structure.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
@ -988,7 +989,7 @@ Day 16: Release prepared</li>
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../development/taskserv-developer-guide.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../development/project-structure.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
</nav> </nav>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -173,7 +173,8 @@
<div id="content" class="content"> <div id="content" class="content">
<main> <main>
<h1 id="integration-guide"><a class="header" href="#integration-guide">Integration Guide</a></h1> <h1 id="integration-guide"><a class="header" href="#integration-guide">Integration Guide</a></h1>
<p>This document explains how the new project structure integrates with existing systems, API compatibility and versioning, database migration strategies, deployment considerations, and monitoring and observability.</p> <p>This document explains how the new project structure integrates with existing systems, API compatibility and versioning, database migration
strategies, deployment considerations, and monitoring and observability.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2> <h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol> <ol>
<li><a href="#overview">Overview</a></li> <li><a href="#overview">Overview</a></li>
@ -187,7 +188,8 @@
<li><a href="#troubleshooting-integration-issues">Troubleshooting Integration Issues</a></li> <li><a href="#troubleshooting-integration-issues">Troubleshooting Integration Issues</a></li>
</ol> </ol>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2> <h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>Provisioning has been designed with integration as a core principle, ensuring seamless compatibility between new development-focused components and existing production systems while providing clear migration pathways.</p> <p>Provisioning has been designed with integration as a core principle, ensuring seamless compatibility between new development-focused components and
existing production systems while providing clear migration pathways.</p>
<p><strong>Integration Principles</strong>:</p> <p><strong>Integration Principles</strong>:</p>
<ul> <ul>
<li><strong>Backward Compatibility</strong>: All existing APIs and interfaces remain functional</li> <li><strong>Backward Compatibility</strong>: All existing APIs and interfaces remain functional</li>
@ -1244,7 +1246,8 @@ provisioning server create test-server 2xCPU-4 GB --debug-integration
} }
} }
</code></pre> </code></pre>
<p>This integration guide provides a comprehensive framework for seamlessly integrating new development components with existing production systems while maintaining reliability, compatibility, and clear migration pathways.</p> <p>This integration guide provides a comprehensive framework for seamlessly integrating new development components with existing production systems while
maintaining reliability, compatibility, and clear migration pathways.</p>
</main> </main>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -173,7 +173,8 @@
<div id="content" class="content"> <div id="content" class="content">
<main> <main>
<h1 id="project-structure-guide"><a class="header" href="#project-structure-guide">Project Structure Guide</a></h1> <h1 id="project-structure-guide"><a class="header" href="#project-structure-guide">Project Structure Guide</a></h1>
<p>This document provides a comprehensive overview of the provisioning projects structure after the major reorganization, explaining both the new development-focused organization and the preserved existing functionality.</p> <p>This document provides a comprehensive overview of the provisioning projects structure after the major reorganization, explaining both the new
development-focused organization and the preserved existing functionality.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2> <h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol> <ol>
<li><a href="#overview">Overview</a></li> <li><a href="#overview">Overview</a></li>
@ -501,17 +502,18 @@ nu workspace/extensions/providers/my-provider/nulib/provider.nu test
<li><strong>Documentation</strong>: Comprehensive documentation and examples</li> <li><strong>Documentation</strong>: Comprehensive documentation and examples</li>
<li><strong>Testing Framework</strong>: Built-in testing and validation tools</li> <li><strong>Testing Framework</strong>: Built-in testing and validation tools</li>
</ul> </ul>
<p>This structure represents a significant evolution in the projects organization while maintaining complete backward compatibility and providing powerful new development capabilities.</p> <p>This structure represents a significant evolution in the projects organization while maintaining complete backward compatibility and providing
powerful new development capabilities.</p>
</main> </main>
<nav class="nav-wrapper" aria-label="Page navigation"> <nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons --> <!-- Mobile navigation buttons -->
<a rel="prev" href="../development/taskserv-quick-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left"> <a rel="prev" href="../development/implementation-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../development/provider-agnostic-architecture.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../development/ctrl-c-implementation-notes.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
@ -521,11 +523,11 @@ nu workspace/extensions/providers/my-provider/nulib/provider.nu test
</div> </div>
<nav class="nav-wide-wrapper" aria-label="Page navigation"> <nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../development/taskserv-quick-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left"> <a rel="prev" href="../development/implementation-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
<a rel="next prefetch" href="../development/provider-agnostic-architecture.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right"> <a rel="next prefetch" href="../development/ctrl-c-implementation-notes.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i> <i class="fa fa-angle-right"></i>
</a> </a>
</nav> </nav>

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -173,7 +173,8 @@
<div id="content" class="content"> <div id="content" class="content">
<main> <main>
<h1 id="development-workflow-guide"><a class="header" href="#development-workflow-guide">Development Workflow Guide</a></h1> <h1 id="development-workflow-guide"><a class="header" href="#development-workflow-guide">Development Workflow Guide</a></h1>
<p>This document outlines the recommended development workflows, coding practices, testing strategies, and debugging techniques for the provisioning project.</p> <p>This document outlines the recommended development workflows, coding practices, testing strategies, and debugging techniques for the provisioning
project.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2> <h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol> <ol>
<li><a href="#overview">Overview</a></li> <li><a href="#overview">Overview</a></li>
@ -188,7 +189,8 @@
<li><a href="#best-practices">Best Practices</a></li> <li><a href="#best-practices">Best Practices</a></li>
</ol> </ol>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2> <h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>The provisioning project employs a multi-language, multi-component architecture requiring specific development workflows to maintain consistency, quality, and efficiency.</p> <p>The provisioning project employs a multi-language, multi-component architecture requiring specific development workflows to maintain consistency,
quality, and efficiency.</p>
<p><strong>Key Technologies</strong>:</p> <p><strong>Key Technologies</strong>:</p>
<ul> <ul>
<li><strong>Nushell</strong>: Primary scripting and automation language</li> <li><strong>Nushell</strong>: Primary scripting and automation language</li>
@ -1041,13 +1043,14 @@ def get-api-url [] {
} }
} }
</code></pre> </code></pre>
<p>This development workflow provides a comprehensive framework for efficient, quality-focused development while maintaining the projects architectural principles and ensuring smooth collaboration across the team.</p> <p>This development workflow provides a comprehensive framework for efficient, quality-focused development while maintaining the projects architectural
principles and ensuring smooth collaboration across the team.</p>
</main> </main>
<nav class="nav-wrapper" aria-label="Page navigation"> <nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons --> <!-- Mobile navigation buttons -->
<a rel="prev" href="../development/configuration.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left"> <a rel="prev" href="../development/command-handler-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>
@ -1061,7 +1064,7 @@ def get-api-url [] {
</div> </div>
<nav class="nav-wide-wrapper" aria-label="Page navigation"> <nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../development/configuration.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left"> <a rel="prev" href="../development/command-handler-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i> <i class="fa fa-angle-left"></i>
</a> </a>

File diff suppressed because one or more lines are too long

View File

@ -18,7 +18,7 @@ with others.
The OFL allows the licensed fonts to be used, studied, modified and The OFL allows the licensed fonts to be used, studied, modified and
redistributed freely as long as they are not sold by themselves. The redistributed freely as long as they are not sold by themselves. The
fonts, including any derivative works, can be bundled, embedded, fonts, including any derivative works, can be bundled, embedded,
redistributed and/or sold with any software provided that any reserved redistributed and/or sold with any software provided that any reserved
names are not used by derivative works. The fonts and derivatives, names are not used by derivative works. The fonts and derivatives,
however, cannot be released under any other type of license. The however, cannot be released under any other type of license. The

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">

File diff suppressed because one or more lines are too long

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
@ -181,9 +181,11 @@
<h1 id="provisioning-platform-documentation"><a class="header" href="#provisioning-platform-documentation">Provisioning Platform Documentation</a></h1> <h1 id="provisioning-platform-documentation"><a class="header" href="#provisioning-platform-documentation">Provisioning Platform Documentation</a></h1>
<p><strong>Last Updated</strong>: 2025-01-02 (Phase 3.A Cleanup Complete) <p><strong>Last Updated</strong>: 2025-01-02 (Phase 3.A Cleanup Complete)
<strong>Status</strong>: ✅ Primary documentation source (145 files consolidated)</p> <strong>Status</strong>: ✅ Primary documentation source (145 files consolidated)</p>
<p>Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell, KCL, and Rust.</p> <p>Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell,
Nickel, and Rust.</p>
<blockquote> <blockquote>
<p><strong>Note</strong>: Architecture Decision Records (ADRs) and high-level design documentation are in <code>docs/</code> directory. This location contains all user-facing, operational, and product documentation.</p> <p><strong>Note</strong>: Architecture Decision Records (ADRs) and design documentation are in <code>docs/</code>
directory. This location contains user-facing, operational, and product documentation.</p>
</blockquote> </blockquote>
<hr /> <hr />
<h2 id="quick-navigation"><a class="header" href="#quick-navigation">Quick Navigation</a></h2> <h2 id="quick-navigation"><a class="header" href="#quick-navigation">Quick Navigation</a></h2>
@ -318,7 +320,8 @@
<hr /> <hr />
<h2 id="key-concepts"><a class="header" href="#key-concepts">Key Concepts</a></h2> <h2 id="key-concepts"><a class="header" href="#key-concepts">Key Concepts</a></h2>
<h3 id="infrastructure-as-code-iac"><a class="header" href="#infrastructure-as-code-iac">Infrastructure as Code (IaC)</a></h3> <h3 id="infrastructure-as-code-iac"><a class="header" href="#infrastructure-as-code-iac">Infrastructure as Code (IaC)</a></h3>
<p>The provisioning platform uses <strong>declarative configuration</strong> to manage infrastructure. Instead of manually creating resources, you define what you want in Nickel configuration files, and the system makes it happen.</p> <p>The provisioning platform uses <strong>declarative configuration</strong> to manage infrastructure. Instead of manually creating resources, you define what you
want in Nickel configuration files, and the system makes it happen.</p>
<h3 id="mode-based-architecture"><a class="header" href="#mode-based-architecture">Mode-Based Architecture</a></h3> <h3 id="mode-based-architecture"><a class="header" href="#mode-based-architecture">Mode-Based Architecture</a></h3>
<p>The system supports four operational modes:</p> <p>The system supports four operational modes:</p>
<ul> <ul>
@ -357,7 +360,7 @@
<li>Study <strong><a href="architecture/design-principles.html">Design Principles</a></strong></li> <li>Study <strong><a href="architecture/design-principles.html">Design Principles</a></strong></li>
<li>Read relevant <strong><a href="architecture/">ADRs</a></strong></li> <li>Read relevant <strong><a href="architecture/">ADRs</a></strong></li>
<li>Follow <strong><a href="development/README.html">Development Guide</a></strong></li> <li>Follow <strong><a href="development/README.html">Development Guide</a></strong></li>
<li>Reference <strong>KCL Quick Reference</strong></li> <li>Reference <strong>Nickel Quick Reference</strong></li>
</ol> </ol>
<h3 id="for-operators"><a class="header" href="#for-operators">For Operators</a></h3> <h3 id="for-operators"><a class="header" href="#for-operators">For Operators</a></h3>
<ol> <ol>
@ -378,7 +381,7 @@
<h3 id="-infrastructure-automation"><a class="header" href="#-infrastructure-automation">✅ Infrastructure Automation</a></h3> <h3 id="-infrastructure-automation"><a class="header" href="#-infrastructure-automation">✅ Infrastructure Automation</a></h3>
<ul> <ul>
<li>Multi-cloud support (AWS, UpCloud, Local)</li> <li>Multi-cloud support (AWS, UpCloud, Local)</li>
<li>Declarative configuration with KCL</li> <li>Declarative configuration with Nickel</li>
<li>Automated dependency resolution</li> <li>Automated dependency resolution</li>
<li>Batch operations with rollback</li> <li>Batch operations with rollback</li>
</ul> </ul>
@ -458,7 +461,7 @@
<h2 id="technology-stack"><a class="header" href="#technology-stack">Technology Stack</a></h2> <h2 id="technology-stack"><a class="header" href="#technology-stack">Technology Stack</a></h2>
<div class="table-wrapper"><table><thead><tr><th>Component</th><th>Technology</th><th>Purpose</th></tr></thead><tbody> <div class="table-wrapper"><table><thead><tr><th>Component</th><th>Technology</th><th>Purpose</th></tr></thead><tbody>
<tr><td><strong>Core CLI</strong></td><td>Nushell 0.107.1</td><td>Shell and scripting</td></tr> <tr><td><strong>Core CLI</strong></td><td>Nushell 0.107.1</td><td>Shell and scripting</td></tr>
<tr><td><strong>Configuration</strong></td><td>KCL 0.11.2</td><td>Type-safe IaC</td></tr> <tr><td><strong>Configuration</strong></td><td>Nickel 1.0.0+</td><td>Type-safe IaC</td></tr>
<tr><td><strong>Orchestrator</strong></td><td>Rust</td><td>High-performance coordination</td></tr> <tr><td><strong>Orchestrator</strong></td><td>Rust</td><td>High-performance coordination</td></tr>
<tr><td><strong>Templates</strong></td><td>Jinja2 (nu_plugin_tera)</td><td>Code generation</td></tr> <tr><td><strong>Templates</strong></td><td>Jinja2 (nu_plugin_tera)</td><td>Code generation</td></tr>
<tr><td><strong>Secrets</strong></td><td>SOPS 3.10.2 + Age 1.2.1</td><td>Encryption</td></tr> <tr><td><strong>Secrets</strong></td><td>SOPS 3.10.2 + Age 1.2.1</td><td>Encryption</td></tr>

View File

@ -1,227 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="ayu sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Backup Recovery - Provisioning Platform Documentation</title>
<!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">
<link rel="icon" href="../favicon.svg">
<link rel="shortcut icon" href="../favicon.png">
<link rel="stylesheet" href="../css/variables.css">
<link rel="stylesheet" href="../css/general.css">
<link rel="stylesheet" href="../css/chrome.css">
<link rel="stylesheet" href="../css/print.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="../FontAwesome/css/font-awesome.css">
<link rel="stylesheet" href="../fonts/fonts.css">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" id="highlight-css" href="../highlight.css">
<link rel="stylesheet" id="tomorrow-night-css" href="../tomorrow-night.css">
<link rel="stylesheet" id="ayu-highlight-css" href="../ayu-highlight.css">
<!-- Custom theme stylesheets -->
<!-- Provide site root and default themes to javascript -->
<script>
const path_to_root = "../";
const default_light_theme = "ayu";
const default_dark_theme = "navy";
</script>
<!-- Start loading toc.js asap -->
<script src="../toc.js"></script>
</head>
<body>
<div id="mdbook-help-container">
<div id="mdbook-help-popup">
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
<div>
<p>Press <kbd></kbd> or <kbd></kbd> to navigate between chapters</p>
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
<p>Press <kbd>?</kbd> to show this help</p>
<p>Press <kbd>Esc</kbd> to hide this help</p>
</div>
</div>
</div>
<div id="body-container">
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script>
try {
let theme = localStorage.getItem('mdbook-theme');
let sidebar = localStorage.getItem('mdbook-sidebar');
if (theme.startsWith('"') && theme.endsWith('"')) {
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
}
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
}
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script>
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
let theme;
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = default_theme; }
const html = document.documentElement;
html.classList.remove('ayu')
html.classList.add(theme);
html.classList.add("js");
</script>
<input type="checkbox" id="sidebar-toggle-anchor" class="hidden">
<!-- Hide / unhide sidebar before it is displayed -->
<script>
let sidebar = null;
const sidebar_toggle = document.getElementById("sidebar-toggle-anchor");
if (document.body.clientWidth >= 1080) {
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
sidebar = sidebar || 'visible';
} else {
sidebar = 'hidden';
}
sidebar_toggle.checked = sidebar === 'visible';
html.classList.remove('sidebar-visible');
html.classList.add("sidebar-" + sidebar);
</script>
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
<!-- populated by js -->
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
<noscript>
<iframe class="sidebar-iframe-outer" src="../toc.html"></iframe>
</noscript>
<div id="sidebar-resize-handle" class="sidebar-resize-handle">
<div class="sidebar-resize-indicator"></div>
</div>
</nav>
<div id="page-wrapper" class="page-wrapper">
<div class="page">
<div id="menu-bar-hover-placeholder"></div>
<div id="menu-bar" class="menu-bar sticky">
<div class="left-buttons">
<label id="sidebar-toggle" class="icon-button" for="sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
<i class="fa fa-bars"></i>
</label>
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
<i class="fa fa-paint-brush"></i>
</button>
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="default_theme">Auto</button></li>
<li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
<li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
</ul>
<button id="search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="searchbar">
<i class="fa fa-search"></i>
</button>
</div>
<h1 class="menu-title">Provisioning Platform Documentation</h1>
<div class="right-buttons">
<a href="../print.html" title="Print this book" aria-label="Print this book">
<i id="print-button" class="fa fa-print"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform" title="Git repository" aria-label="Git repository">
<i id="git-repository-button" class="fa fa-github"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform/edit/main/provisioning/docs/src/operations/backup-recovery.md" title="Suggest an edit" aria-label="Suggest an edit">
<i id="git-edit-button" class="fa fa-edit"></i>
</a>
</div>
</div>
<div id="search-wrapper" class="hidden">
<form id="searchbar-outer" class="searchbar-outer">
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
</form>
<div id="searchresults-outer" class="searchresults-outer hidden">
<div id="searchresults-header" class="searchresults-header"></div>
<ul id="searchresults">
</ul>
</div>
</div>
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script>
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
});
</script>
<div id="content" class="content">
<main>
<h1 id="backup-and-recovery"><a class="header" href="#backup-and-recovery">Backup and Recovery</a></h1>
</main>
<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../operations/coredns-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../operations/deployment.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
<div style="clear: both"></div>
</nav>
</div>
</div>
<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../operations/coredns-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../operations/deployment.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>
</div>
<script>
window.playground_copyable = true;
</script>
<script src="../elasticlunr.min.js"></script>
<script src="../mark.min.js"></script>
<script src="../searcher.js"></script>
<script src="../clipboard.min.js"></script>
<script src="../highlight.js"></script>
<script src="../book.js"></script>
<!-- Custom JS scripts -->
</div>
</body>
</html>

View File

@ -1,227 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="ayu sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Deployment - Provisioning Platform Documentation</title>
<!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">
<link rel="icon" href="../favicon.svg">
<link rel="shortcut icon" href="../favicon.png">
<link rel="stylesheet" href="../css/variables.css">
<link rel="stylesheet" href="../css/general.css">
<link rel="stylesheet" href="../css/chrome.css">
<link rel="stylesheet" href="../css/print.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="../FontAwesome/css/font-awesome.css">
<link rel="stylesheet" href="../fonts/fonts.css">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" id="highlight-css" href="../highlight.css">
<link rel="stylesheet" id="tomorrow-night-css" href="../tomorrow-night.css">
<link rel="stylesheet" id="ayu-highlight-css" href="../ayu-highlight.css">
<!-- Custom theme stylesheets -->
<!-- Provide site root and default themes to javascript -->
<script>
const path_to_root = "../";
const default_light_theme = "ayu";
const default_dark_theme = "navy";
</script>
<!-- Start loading toc.js asap -->
<script src="../toc.js"></script>
</head>
<body>
<div id="mdbook-help-container">
<div id="mdbook-help-popup">
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
<div>
<p>Press <kbd></kbd> or <kbd></kbd> to navigate between chapters</p>
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
<p>Press <kbd>?</kbd> to show this help</p>
<p>Press <kbd>Esc</kbd> to hide this help</p>
</div>
</div>
</div>
<div id="body-container">
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script>
try {
let theme = localStorage.getItem('mdbook-theme');
let sidebar = localStorage.getItem('mdbook-sidebar');
if (theme.startsWith('"') && theme.endsWith('"')) {
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
}
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
}
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script>
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
let theme;
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = default_theme; }
const html = document.documentElement;
html.classList.remove('ayu')
html.classList.add(theme);
html.classList.add("js");
</script>
<input type="checkbox" id="sidebar-toggle-anchor" class="hidden">
<!-- Hide / unhide sidebar before it is displayed -->
<script>
let sidebar = null;
const sidebar_toggle = document.getElementById("sidebar-toggle-anchor");
if (document.body.clientWidth >= 1080) {
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
sidebar = sidebar || 'visible';
} else {
sidebar = 'hidden';
}
sidebar_toggle.checked = sidebar === 'visible';
html.classList.remove('sidebar-visible');
html.classList.add("sidebar-" + sidebar);
</script>
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
<!-- populated by js -->
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
<noscript>
<iframe class="sidebar-iframe-outer" src="../toc.html"></iframe>
</noscript>
<div id="sidebar-resize-handle" class="sidebar-resize-handle">
<div class="sidebar-resize-indicator"></div>
</div>
</nav>
<div id="page-wrapper" class="page-wrapper">
<div class="page">
<div id="menu-bar-hover-placeholder"></div>
<div id="menu-bar" class="menu-bar sticky">
<div class="left-buttons">
<label id="sidebar-toggle" class="icon-button" for="sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
<i class="fa fa-bars"></i>
</label>
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
<i class="fa fa-paint-brush"></i>
</button>
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="default_theme">Auto</button></li>
<li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
<li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
</ul>
<button id="search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="searchbar">
<i class="fa fa-search"></i>
</button>
</div>
<h1 class="menu-title">Provisioning Platform Documentation</h1>
<div class="right-buttons">
<a href="../print.html" title="Print this book" aria-label="Print this book">
<i id="print-button" class="fa fa-print"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform" title="Git repository" aria-label="Git repository">
<i id="git-repository-button" class="fa fa-github"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform/edit/main/provisioning/docs/src/operations/deployment.md" title="Suggest an edit" aria-label="Suggest an edit">
<i id="git-edit-button" class="fa fa-edit"></i>
</a>
</div>
</div>
<div id="search-wrapper" class="hidden">
<form id="searchbar-outer" class="searchbar-outer">
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
</form>
<div id="searchresults-outer" class="searchresults-outer hidden">
<div id="searchresults-header" class="searchresults-header"></div>
<ul id="searchresults">
</ul>
</div>
</div>
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script>
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
});
</script>
<div id="content" class="content">
<main>
<h1 id="deployment-guide"><a class="header" href="#deployment-guide">Deployment Guide</a></h1>
</main>
<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../operations/backup-recovery.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../operations/monitoring.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
<div style="clear: both"></div>
</nav>
</div>
</div>
<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../operations/backup-recovery.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../operations/monitoring.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>
</div>
<script>
window.playground_copyable = true;
</script>
<script src="../elasticlunr.min.js"></script>
<script src="../mark.min.js"></script>
<script src="../searcher.js"></script>
<script src="../clipboard.min.js"></script>
<script src="../highlight.js"></script>
<script src="../book.js"></script>
<!-- Custom JS scripts -->
</div>
</body>
</html>

View File

@ -1,227 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="ayu sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Monitoring - Provisioning Platform Documentation</title>
<!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">
<link rel="icon" href="../favicon.svg">
<link rel="shortcut icon" href="../favicon.png">
<link rel="stylesheet" href="../css/variables.css">
<link rel="stylesheet" href="../css/general.css">
<link rel="stylesheet" href="../css/chrome.css">
<link rel="stylesheet" href="../css/print.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="../FontAwesome/css/font-awesome.css">
<link rel="stylesheet" href="../fonts/fonts.css">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" id="highlight-css" href="../highlight.css">
<link rel="stylesheet" id="tomorrow-night-css" href="../tomorrow-night.css">
<link rel="stylesheet" id="ayu-highlight-css" href="../ayu-highlight.css">
<!-- Custom theme stylesheets -->
<!-- Provide site root and default themes to javascript -->
<script>
const path_to_root = "../";
const default_light_theme = "ayu";
const default_dark_theme = "navy";
</script>
<!-- Start loading toc.js asap -->
<script src="../toc.js"></script>
</head>
<body>
<div id="mdbook-help-container">
<div id="mdbook-help-popup">
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
<div>
<p>Press <kbd></kbd> or <kbd></kbd> to navigate between chapters</p>
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
<p>Press <kbd>?</kbd> to show this help</p>
<p>Press <kbd>Esc</kbd> to hide this help</p>
</div>
</div>
</div>
<div id="body-container">
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script>
try {
let theme = localStorage.getItem('mdbook-theme');
let sidebar = localStorage.getItem('mdbook-sidebar');
if (theme.startsWith('"') && theme.endsWith('"')) {
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
}
if (sidebar.startsWith('"') && sidebar.endsWith('"')) {
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
}
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script>
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
let theme;
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = default_theme; }
const html = document.documentElement;
html.classList.remove('ayu')
html.classList.add(theme);
html.classList.add("js");
</script>
<input type="checkbox" id="sidebar-toggle-anchor" class="hidden">
<!-- Hide / unhide sidebar before it is displayed -->
<script>
let sidebar = null;
const sidebar_toggle = document.getElementById("sidebar-toggle-anchor");
if (document.body.clientWidth >= 1080) {
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
sidebar = sidebar || 'visible';
} else {
sidebar = 'hidden';
}
sidebar_toggle.checked = sidebar === 'visible';
html.classList.remove('sidebar-visible');
html.classList.add("sidebar-" + sidebar);
</script>
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
<!-- populated by js -->
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
<noscript>
<iframe class="sidebar-iframe-outer" src="../toc.html"></iframe>
</noscript>
<div id="sidebar-resize-handle" class="sidebar-resize-handle">
<div class="sidebar-resize-indicator"></div>
</div>
</nav>
<div id="page-wrapper" class="page-wrapper">
<div class="page">
<div id="menu-bar-hover-placeholder"></div>
<div id="menu-bar" class="menu-bar sticky">
<div class="left-buttons">
<label id="sidebar-toggle" class="icon-button" for="sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
<i class="fa fa-bars"></i>
</label>
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
<i class="fa fa-paint-brush"></i>
</button>
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="default_theme">Auto</button></li>
<li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
<li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
</ul>
<button id="search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="searchbar">
<i class="fa fa-search"></i>
</button>
</div>
<h1 class="menu-title">Provisioning Platform Documentation</h1>
<div class="right-buttons">
<a href="../print.html" title="Print this book" aria-label="Print this book">
<i id="print-button" class="fa fa-print"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform" title="Git repository" aria-label="Git repository">
<i id="git-repository-button" class="fa fa-github"></i>
</a>
<a href="https://github.com/provisioning/provisioning-platform/edit/main/provisioning/docs/src/operations/monitoring.md" title="Suggest an edit" aria-label="Suggest an edit">
<i id="git-edit-button" class="fa fa-edit"></i>
</a>
</div>
</div>
<div id="search-wrapper" class="hidden">
<form id="searchbar-outer" class="searchbar-outer">
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
</form>
<div id="searchresults-outer" class="searchresults-outer hidden">
<div id="searchresults-header" class="searchresults-header"></div>
<ul id="searchresults">
</ul>
</div>
</div>
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script>
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebar === 'visible');
document.getElementById('sidebar').setAttribute('aria-hidden', sidebar !== 'visible');
Array.from(document.querySelectorAll('#sidebar a')).forEach(function(link) {
link.setAttribute('tabIndex', sidebar === 'visible' ? 0 : -1);
});
</script>
<div id="content" class="content">
<main>
<h1 id="monitoring-guide"><a class="header" href="#monitoring-guide">Monitoring Guide</a></h1>
</main>
<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../operations/deployment.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../operations/production-readiness-checklist.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
<div style="clear: both"></div>
</nav>
</div>
</div>
<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../operations/deployment.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../operations/production-readiness-checklist.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>
</div>
<script>
window.playground_copyable = true;
</script>
<script src="../elasticlunr.min.js"></script>
<script src="../mark.min.js"></script>
<script src="../searcher.js"></script>
<script src="../clipboard.min.js"></script>
<script src="../highlight.js"></script>
<script src="../book.js"></script>
<!-- Custom JS scripts -->
</div>
</body>
</html>

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">

View File

@ -8,7 +8,7 @@
<!-- Custom HTML head --> <!-- Custom HTML head -->
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"> <meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">

View File

@ -86,7 +86,7 @@ Declarative Infrastructure as Code (IaC) platform providing:
**Solution**: Unified abstraction layer with provider-agnostic interfaces. Write configuration once, deploy anywhere. **Solution**: Unified abstraction layer with provider-agnostic interfaces. Write configuration once, deploy anywhere.
```kcl ```
# Same configuration works on UpCloud, AWS, or local infrastructure # Same configuration works on UpCloud, AWS, or local infrastructure
server: Server { server: Server {
name = "web-01" name = "web-01"
@ -101,7 +101,7 @@ server: Server {
**Solution**: Automatic dependency resolution with topological sorting and health checks. **Solution**: Automatic dependency resolution with topological sorting and health checks.
```kcl ```
# Provisioning resolves: containerd → etcd → kubernetes → cilium # Provisioning resolves: containerd → etcd → kubernetes → cilium
taskservs = ["cilium"] # Automatically installs all dependencies taskservs = ["cilium"] # Automatically installs all dependencies
``` ```
@ -112,7 +112,7 @@ taskservs = ["cilium"] # Automatically installs all dependencies
**Solution**: Hierarchical configuration system with 476+ config accessors replacing 200+ ENV variables. **Solution**: Hierarchical configuration system with 476+ config accessors replacing 200+ ENV variables.
```plaintext ```
Defaults → User → Project → Infrastructure → Environment → Runtime Defaults → User → Project → Infrastructure → Environment → Runtime
``` ```
@ -120,7 +120,7 @@ Defaults → User → Project → Infrastructure → Environment → Runtime
**Problem**: Brittle shell scripts that don't handle failures, don't support rollback, hard to maintain. **Problem**: Brittle shell scripts that don't handle failures, don't support rollback, hard to maintain.
**Solution**: Declarative KCL configurations with validation, type safety, and automatic rollback. **Solution**: Declarative Nickel configurations with validation, type safety, and automatic rollback.
#### 5. **Lack of Visibility** #### 5. **Lack of Visibility**
@ -197,7 +197,7 @@ Clusters handle:
Isolated environments for different projects or deployment stages. Isolated environments for different projects or deployment stages.
```plaintext ```
workspace_librecloud/ # Production workspace workspace_librecloud/ # Production workspace
├── infra/ # Infrastructure definitions ├── infra/ # Infrastructure definitions
├── config/ # Workspace configuration ├── config/ # Workspace configuration
@ -211,7 +211,7 @@ workspace_dev/ # Development workspace
Switch between workspaces with single command: Switch between workspaces with single command:
```bash ```
provisioning workspace switch librecloud provisioning workspace switch librecloud
``` ```
@ -240,7 +240,7 @@ Coordinated sequences of operations with dependency management.
### System Components ### System Components
```plaintext ```
┌─────────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────────┐
│ User Interface Layer │ │ User Interface Layer │
│ • CLI (provisioning command) │ │ • CLI (provisioning command) │
@ -282,7 +282,7 @@ Coordinated sequences of operations with dependency management.
### Directory Structure ### Directory Structure
```plaintext ```
project-provisioning/ project-provisioning/
├── provisioning/ # Core provisioning system ├── provisioning/ # Core provisioning system
│ ├── core/ # Core engine and libraries │ ├── core/ # Core engine and libraries
@ -305,7 +305,7 @@ project-provisioning/
│ │ ├── oci-registry/ # OCI registry for extensions │ │ ├── oci-registry/ # OCI registry for extensions
│ │ └── installer/ # Platform installer (TUI + CLI) │ │ └── installer/ # Platform installer (TUI + CLI)
│ │ │ │
│ ├── kcl/ # KCL configuration schemas │ ├── schemas/ # Nickel configuration schemas
│ ├── config/ # Configuration files │ ├── config/ # Configuration files
│ ├── templates/ # Template files │ ├── templates/ # Template files
│ └── tools/ # Build and distribution tools │ └── tools/ # Build and distribution tools
@ -394,14 +394,14 @@ Hierarchical, config-driven architecture.
- **476+ config accessors** replacing 200+ ENV variables - **476+ config accessors** replacing 200+ ENV variables
- **Hierarchical loading**: defaults → user → project → infra → env → runtime - **Hierarchical loading**: defaults → user → project → infra → env → runtime
- **Variable interpolation**: `{{paths.base}}`, `{{env.HOME}}`, `{{now.date}}` - **Variable interpolation**: `{{paths.base}}`, `{{env.HOME}}`, `{{now.date}}`
- **Multi-format support**: TOML, YAML, KCL - **Multi-format support**: TOML, YAML, Nickel
### 3. **Batch Workflow System** (v3.1.0) ### 3. **Batch Workflow System** (v3.1.0)
Provider-agnostic batch operations with 85-90% token efficiency. Provider-agnostic batch operations with 85-90% token efficiency.
- **Multi-cloud support**: Mixed UpCloud + AWS + local in single workflow - **Multi-cloud support**: Mixed UpCloud + AWS + local in single workflow
- **KCL schema integration**: Type-safe workflow definitions - **Nickel schema integration**: Type-safe workflow definitions
- **Dependency resolution**: Topological sorting with soft/hard dependencies - **Dependency resolution**: Topological sorting with soft/hard dependencies
- **State management**: Checkpoint-based recovery with rollback - **State management**: Checkpoint-based recovery with rollback
- **Real-time monitoring**: Live progress tracking - **Real-time monitoring**: Live progress tracking
@ -471,7 +471,7 @@ Comprehensive version tracking and updates.
| Technology | Version | Purpose | Why | | Technology | Version | Purpose | Why |
| ------------ | --------- | --------- | ----- | | ------------ | --------- | --------- | ----- |
| **Nushell** | 0.107.1+ | Primary shell and scripting language | Data pipelines, cross-platform, modern parsers | | **Nushell** | 0.107.1+ | Primary shell and scripting language | Data pipelines, cross-platform, modern parsers |
| **KCL** | 0.11.3+ | Configuration language | Type safety, schema validation, immutability, constraint checking | | **Nickel** | 1.0.0+ | Configuration language | Type safety, schema validation, immutability, constraint checking |
| **Rust** | Latest | Platform services (orchestrator, control-center, installer) | Performance, memory safety, concurrency, reliability | | **Rust** | Latest | Platform services (orchestrator, control-center, installer) | Performance, memory safety, concurrency, reliability |
| **Tera** | Latest | Template engine | Jinja2-like syntax, configuration file rendering, variable interpolation, filters and functions | | **Tera** | Latest | Template engine | Jinja2-like syntax, configuration file rendering, variable interpolation, filters and functions |
@ -505,7 +505,6 @@ Comprehensive version tracking and updates.
| ------ | --------- | | ------ | --------- |
| **K9s** | Kubernetes management interface | | **K9s** | Kubernetes management interface |
| **nu_plugin_tera** | Nushell plugin for Tera template rendering | | **nu_plugin_tera** | Nushell plugin for Tera template rendering |
| **nu_plugin_kcl** | Nushell plugin for KCL integration (CLI required, plugin optional) |
| **glow** | Markdown rendering for interactive guides | | **glow** | Markdown rendering for interactive guides |
| **bat** | Syntax highlighting for file viewing and guides | | **bat** | Syntax highlighting for file viewing and guides |
@ -515,8 +514,8 @@ Comprehensive version tracking and updates.
### Data Flow ### Data Flow
```plaintext ```
1. User defines infrastructure in KCL 1. User defines infrastructure in Nickel
2. CLI loads configuration (hierarchical) 2. CLI loads configuration (hierarchical)
@ -541,7 +540,7 @@ Comprehensive version tracking and updates.
**Step 1**: Define infrastructure in Nickel **Step 1**: Define infrastructure in Nickel
```nickel ```
# infra/my-cluster.ncl # infra/my-cluster.ncl
let config = { let config = {
infra = { infra = {
@ -562,13 +561,13 @@ config
**Step 2**: Submit to Provisioning **Step 2**: Submit to Provisioning
```bash ```
provisioning server create --infra my-cluster provisioning server create --infra my-cluster
``` ```
**Step 3**: Provisioning executes workflow **Step 3**: Provisioning executes workflow
```plaintext ```
1. Create workflow: "deploy-my-cluster" 1. Create workflow: "deploy-my-cluster"
2. Resolve dependencies: 2. Resolve dependencies:
- containerd (required by kubernetes) - containerd (required by kubernetes)
@ -593,7 +592,7 @@ provisioning server create --infra my-cluster
**Step 4**: Verify deployment **Step 4**: Verify deployment
```bash ```
provisioning cluster status my-cluster provisioning cluster status my-cluster
``` ```
@ -601,7 +600,7 @@ provisioning cluster status my-cluster
Configuration values are resolved through a hierarchy: Configuration values are resolved through a hierarchy:
```plaintext ```
1. System Defaults (provisioning/config/config.defaults.toml) 1. System Defaults (provisioning/config/config.defaults.toml)
↓ (overridden by) ↓ (overridden by)
2. User Preferences (~/.config/provisioning/user_config.yaml) 2. User Preferences (~/.config/provisioning/user_config.yaml)
@ -617,7 +616,7 @@ Configuration values are resolved through a hierarchy:
**Example**: **Example**:
```toml ```
# System default # System default
[servers] [servers]
default_plan = "small" default_plan = "small"
@ -642,7 +641,7 @@ provisioning server create --plan xlarge # Overrides everything
Deploy Kubernetes clusters across different cloud providers with identical configuration. Deploy Kubernetes clusters across different cloud providers with identical configuration.
```bash ```
# UpCloud cluster # UpCloud cluster
provisioning cluster create k8s-prod --provider upcloud provisioning cluster create k8s-prod --provider upcloud
@ -654,7 +653,7 @@ provisioning cluster create k8s-prod --provider aws
Manage multiple environments with workspace switching. Manage multiple environments with workspace switching.
```bash ```
# Development # Development
provisioning workspace switch dev provisioning workspace switch dev
provisioning cluster create app-stack provisioning cluster create app-stack
@ -672,7 +671,7 @@ provisioning cluster create app-stack
Test infrastructure changes before deploying to production. Test infrastructure changes before deploying to production.
```bash ```
# Test Kubernetes upgrade locally # Test Kubernetes upgrade locally
provisioning test topology load kubernetes_3node | \ provisioning test topology load kubernetes_3node | \
test env cluster kubernetes --version 1.29.0 test env cluster kubernetes --version 1.29.0
@ -688,7 +687,7 @@ provisioning test env cleanup <env-id>
Deploy to multiple regions in parallel. Deploy to multiple regions in parallel.
```nickel ```
# workflows/multi-region.ncl # workflows/multi-region.ncl
let batch_workflow = { let batch_workflow = {
operations = [ operations = [
@ -716,7 +715,7 @@ let batch_workflow = {
batch_workflow batch_workflow
``` ```
```bash ```
provisioning batch submit workflows/multi-region.ncl provisioning batch submit workflows/multi-region.ncl
provisioning batch monitor <workflow-id> provisioning batch monitor <workflow-id>
``` ```
@ -725,7 +724,7 @@ provisioning batch monitor <workflow-id>
Recreate infrastructure from configuration. Recreate infrastructure from configuration.
```bash ```
# Infrastructure destroyed # Infrastructure destroyed
provisioning workspace switch prod provisioning workspace switch prod
@ -739,7 +738,7 @@ provisioning cluster create --infra backup-restore --wait
Automated testing and deployment pipelines. Automated testing and deployment pipelines.
```yaml ```
# .gitlab-ci.yml # .gitlab-ci.yml
test-infrastructure: test-infrastructure:
script: script:

View File

@ -11,7 +11,7 @@
**Status**: ✅ Primary documentation source (145 files consolidated) **Status**: ✅ Primary documentation source (145 files consolidated)
Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell, Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell,
KCL, and Rust. Nickel, and Rust.
> **Note**: Architecture Decision Records (ADRs) and design documentation are in `docs/` > **Note**: Architecture Decision Records (ADRs) and design documentation are in `docs/`
> directory. This location contains user-facing, operational, and product documentation. > directory. This location contains user-facing, operational, and product documentation.
@ -117,7 +117,7 @@ KCL, and Rust.
## Documentation Structure ## Documentation Structure
```plaintext ```
provisioning/docs/src/ provisioning/docs/src/
├── README.md (this file) # Documentation hub ├── README.md (this file) # Documentation hub
├── getting-started/ # Getting started guides ├── getting-started/ # Getting started guides
@ -214,7 +214,7 @@ Extensions and packages distributed as OCI artifacts, enabling:
2. Study **[Design Principles](architecture/design-principles.md)** 2. Study **[Design Principles](architecture/design-principles.md)**
3. Read relevant **[ADRs](architecture/)** 3. Read relevant **[ADRs](architecture/)**
4. Follow **[Development Guide](development/README.md)** 4. Follow **[Development Guide](development/README.md)**
5. Reference **KCL Quick Reference** 5. Reference **Nickel Quick Reference**
### For Operators ### For Operators
@ -237,7 +237,7 @@ Extensions and packages distributed as OCI artifacts, enabling:
### ✅ Infrastructure Automation ### ✅ Infrastructure Automation
- Multi-cloud support (AWS, UpCloud, Local) - Multi-cloud support (AWS, UpCloud, Local)
- Declarative configuration with KCL - Declarative configuration with Nickel
- Automated dependency resolution - Automated dependency resolution
- Batch operations with rollback - Batch operations with rollback
@ -322,7 +322,7 @@ Extensions and packages distributed as OCI artifacts, enabling:
| Component | Technology | Purpose | | Component | Technology | Purpose |
| ----------- | ------------ | --------- | | ----------- | ------------ | --------- |
| **Core CLI** | Nushell 0.107.1 | Shell and scripting | | **Core CLI** | Nushell 0.107.1 | Shell and scripting |
| **Configuration** | KCL 0.11.2 | Type-safe IaC | | **Configuration** | Nickel 1.0.0+ | Type-safe IaC |
| **Orchestrator** | Rust | High-performance coordination | | **Orchestrator** | Rust | High-performance coordination |
| **Templates** | Jinja2 (nu_plugin_tera) | Code generation | | **Templates** | Jinja2 (nu_plugin_tera) | Code generation |
| **Secrets** | SOPS 3.10.2 + Age 1.2.1 | Encryption | | **Secrets** | SOPS 3.10.2 + Age 1.2.1 | Encryption |

385
docs/src/README.md.bak2 Normal file
View File

@ -0,0 +1,385 @@
<p align="center">
<img src="resources/provisioning_logo.svg" alt="Provisioning Logo" width="300"/>
</p>
<p align="center">
<img src="resources/logo-text.svg" alt="Provisioning" width="500"/>
</p>
# Provisioning Platform Documentation
**Last Updated**: 2025-01-02 (Phase 3.A Cleanup Complete)
**Status**: ✅ Primary documentation source (145 files consolidated)
Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell,
Nickel, and Rust.
> **Note**: Architecture Decision Records (ADRs) and design documentation are in `docs/`
> directory. This location contains user-facing, operational, and product documentation.
---
## Quick Navigation
### 🚀 Getting Started
| Document | Description | Audience |
| ---------- | ------------- | ---------- |
| **[Installation Guide](getting-started/installation-guide.md)** | Install and configure the system | New Users |
| **[Getting Started](getting-started/getting-started.md)** | First steps and basic concepts | New Users |
| **[Quick Reference](getting-started/quickstart-cheatsheet.md)** | Command cheat sheet | All Users |
| **[From Scratch Guide](guides/from-scratch.md)** | Complete deployment walkthrough | New Users |
### 📚 User Guides
| Document | Description |
| ---------- | ------------- |
| **[CLI Reference](infrastructure/cli-reference.md)** | Complete command reference |
| **[Workspace Management](infrastructure/workspace-setup.md)** | Workspace creation and management |
| **[Workspace Switching](infrastructure/workspace-switching-guide.md)** | Switch between workspaces |
| **[Infrastructure Management](infrastructure/infrastructure-management.md)** | Server, taskserv, cluster operations |
| **[Service Management](operations/service-management-guide.md)** | Platform service lifecycle management |
| **[OCI Registry](integration/oci-registry-guide.md)** | OCI artifact management |
| **[Gitea Integration](integration/gitea-integration-guide.md)** | Git workflow and collaboration |
| **[CoreDNS Guide](operations/coredns-guide.md)** | DNS management |
| **[Test Environments](testing/test-environment-usage.md)** | Containerized testing |
| **[Extension Development](development/extension-development.md)** | Create custom extensions |
### 🏗️ Architecture
| Document | Description |
| ---------- | ------------- |
| **[System Overview](architecture/system-overview.md)** | High-level architecture |
| **[Multi-Repo Architecture](architecture/multi-repo-architecture.md)** | Repository structure and OCI distribution |
| **[Design Principles](architecture/design-principles.md)** | Architectural philosophy |
| **[Integration Patterns](architecture/integration-patterns.md)** | System integration patterns |
| **[Orchestrator Model](architecture/orchestrator-integration-model.md)** | Hybrid orchestration architecture |
### 📋 Architecture Decision Records (ADRs)
| ADR | Title | Status |
| ----- | ------- | -------- |
| **[ADR-001](architecture/adr/adr-001-project-structure.md)** | Project Structure Decision | Accepted |
| **[ADR-002](architecture/adr/adr-002-distribution-strategy.md)** | Distribution Strategy | Accepted |
| **[ADR-003](architecture/adr/adr-003-workspace-isolation.md)** | Workspace Isolation | Accepted |
| **[ADR-004](architecture/adr/adr-004-hybrid-architecture.md)** | Hybrid Architecture | Accepted |
| **[ADR-005](architecture/adr/adr-005-extension-framework.md)** | Extension Framework | Accepted |
| **[ADR-006](architecture/adr/adr-006-provisioning-cli-refactoring.md)** | CLI Refactoring | Accepted |
### 🔌 API Documentation
| Document | Description |
| ---------- | ------------- |
| **[REST API](api-reference/rest-api.md)** | HTTP API endpoints |
| **[WebSocket API](api-reference/websocket.md)** | Real-time event streams |
| **[Extensions API](development/extensions.md)** | Extension integration APIs |
| **[SDKs](api-reference/sdks.md)** | Client libraries |
| **[Integration Examples](api-reference/integration-examples.md)** | API usage examples |
### 🛠️ Development
| Document | Description |
| ---------- | ------------- |
| **[Development README](development/README.md)** | Developer overview |
| **[Implementation Guide](development/implementation-guide.md)** | Implementation details |
| **[Provider Development](development/quick-provider-guide.md)** | Create cloud providers |
| **[Taskserv Development](development/taskserv-developer-guide.md)** | Create task services |
| **[Extension Framework](development/extensions.md)** | Extension system |
| **[Command Handlers](development/command-handler-guide.md)** | CLI command development |
### 🐛 Troubleshooting
| Document | Description |
| ---------- | ------------- |
| **[Troubleshooting Guide](troubleshooting/troubleshooting-guide.md)** | Common issues and solutions |
### 📖 How-To Guides
| Document | Description |
| ---------- | ------------- |
| **[From Scratch](guides/from-scratch.md)** | Complete deployment from zero |
| **[Update Infrastructure](guides/update-infrastructure.md)** | Safe update procedures |
| **[Customize Infrastructure](guides/customize-infrastructure.md)** | Layer and template customization |
### 🔐 Configuration
| Document | Description |
| ---------- | ------------- |
| **[Workspace Config Architecture](configuration/workspace-config-architecture.md)** | Configuration architecture |
### 📦 Quick References
| Document | Description |
| ---------- | ------------- |
| **[Quickstart Cheatsheet](getting-started/quickstart-cheatsheet.md)** | Command shortcuts |
| **[OCI Quick Reference](quick-reference/oci.md)** | OCI operations |
---
## Documentation Structure
```plaintext
provisioning/docs/src/
├── README.md (this file) # Documentation hub
├── getting-started/ # Getting started guides
│ ├── installation-guide.md
│ ├── getting-started.md
│ └── quickstart-cheatsheet.md
├── architecture/ # System architecture
│ ├── adr/ # Architecture Decision Records
│ ├── design-principles.md
│ ├── integration-patterns.md
│ ├── system-overview.md
│ └── ... (and 10+ more architecture docs)
├── infrastructure/ # Infrastructure guides
│ ├── cli-reference.md
│ ├── workspace-setup.md
│ ├── workspace-switching-guide.md
│ └── infrastructure-management.md
├── api-reference/ # API documentation
│ ├── rest-api.md
│ ├── websocket.md
│ ├── integration-examples.md
│ └── sdks.md
├── development/ # Developer guides
│ ├── README.md
│ ├── implementation-guide.md
│ ├── quick-provider-guide.md
│ ├── taskserv-developer-guide.md
│ └── ... (15+ more developer docs)
├── guides/ # How-to guides
│ ├── from-scratch.md
│ ├── update-infrastructure.md
│ └── customize-infrastructure.md
├── operations/ # Operations guides
│ ├── service-management-guide.md
│ ├── coredns-guide.md
│ └── ... (more operations docs)
├── security/ # Security docs
├── integration/ # Integration guides
├── testing/ # Testing docs
├── configuration/ # Configuration docs
├── troubleshooting/ # Troubleshooting guides
└── quick-reference/ # Quick references
```
---
## Key Concepts
### Infrastructure as Code (IaC)
The provisioning platform uses **declarative configuration** to manage infrastructure. Instead of manually creating resources, you define what you
want in Nickel configuration files, and the system makes it happen.
### Mode-Based Architecture
The system supports four operational modes:
- **Solo**: Single developer local development
- **Multi-user**: Team collaboration with shared services
- **CI/CD**: Automated pipeline execution
- **Enterprise**: Production deployment with strict compliance
### Extension System
Extensibility through:
- **Providers**: Cloud platform integrations (AWS, UpCloud, Local)
- **Task Services**: Infrastructure components (Kubernetes, databases, etc.)
- **Clusters**: Complete deployment configurations
### OCI-Native Distribution
Extensions and packages distributed as OCI artifacts, enabling:
- Industry-standard packaging
- Efficient caching and bandwidth
- Version pinning and rollback
- Air-gapped deployments
---
## Documentation by Role
### For New Users
1. Start with **[Installation Guide](getting-started/installation-guide.md)**
2. Read **[Getting Started](getting-started/getting-started.md)**
3. Follow **[From Scratch Guide](guides/from-scratch.md)**
4. Reference **[Quickstart Cheatsheet](getting-started/quickstart-cheatsheet.md)**
### For Developers
1. Review **[System Overview](architecture/system-overview.md)**
2. Study **[Design Principles](architecture/design-principles.md)**
3. Read relevant **[ADRs](architecture/)**
4. Follow **[Development Guide](development/README.md)**
5. Reference **Nickel Quick Reference**
### For Operators
1. Understand **[Mode System](infrastructure/mode-system-guide.md)**
2. Learn **[Service Management](operations/service-management-guide.md)**
3. Review **[Infrastructure Management](infrastructure/infrastructure-management.md)**
4. Study **[OCI Registry](integration/oci-registry-guide.md)**
### For Architects
1. Read **[System Overview](architecture/system-overview.md)**
2. Study all **[ADRs](architecture/)**
3. Review **[Integration Patterns](architecture/integration-patterns.md)**
4. Understand **[Multi-Repo Architecture](architecture/multi-repo-architecture.md)**
---
## System Capabilities
### ✅ Infrastructure Automation
- Multi-cloud support (AWS, UpCloud, Local)
- Declarative configuration with Nickel
- Automated dependency resolution
- Batch operations with rollback
### ✅ Workflow Orchestration
- Hybrid Rust/Nushell orchestration
- Checkpoint-based recovery
- Parallel execution with limits
- Real-time monitoring
### ✅ Test Environments
- Containerized testing
- Multi-node cluster simulation
- Topology templates
- Automated cleanup
### ✅ Mode-Based Operation
- Solo: Local development
- Multi-user: Team collaboration
- CI/CD: Automated pipelines
- Enterprise: Production deployment
### ✅ Extension Management
- OCI-native distribution
- Automatic dependency resolution
- Version management
- Local and remote sources
---
## Key Achievements
### 🚀 Batch Workflow System (v3.1.0)
- Provider-agnostic batch operations
- Mixed provider support (UpCloud + AWS + local)
- Dependency resolution with soft/hard dependencies
- Real-time monitoring and rollback
### 🏗️ Hybrid Orchestrator (v3.0.0)
- Solves Nushell deep call stack limitations
- Preserves all business logic
- REST API for external integration
- Checkpoint-based state management
### ⚙️ Configuration System (v2.0.0)
- Migrated from ENV to config-driven
- Hierarchical configuration loading
- Variable interpolation
- True IaC without hardcoded fallbacks
### 🎯 Modular CLI (v3.2.0)
- 84% reduction in main file size
- Domain-driven handlers
- 80+ shortcuts
- Bi-directional help system
### 🧪 Test Environment Service (v3.4.0)
- Automated containerized testing
- Multi-node cluster topologies
- CI/CD integration ready
- Template-based configurations
### 🔄 Workspace Switching (v2.0.5)
- Centralized workspace management
- Single-command workspace switching
- Active workspace tracking
- User preference system
---
## Technology Stack
| Component | Technology | Purpose |
| ----------- | ------------ | --------- |
| **Core CLI** | Nushell 0.107.1 | Shell and scripting |
| **Configuration** | Nickel 1.0.0+ | Type-safe IaC |
| **Orchestrator** | Rust | High-performance coordination |
| **Templates** | Jinja2 (nu_plugin_tera) | Code generation |
| **Secrets** | SOPS 3.10.2 + Age 1.2.1 | Encryption |
| **Distribution** | OCI (skopeo/crane/oras) | Artifact management |
---
## Support
### Getting Help
- **Documentation**: You're reading it!
- **Quick Reference**: Run `provisioning sc` or `provisioning guide quickstart`
- **Help System**: Run `provisioning help` or `provisioning <command> help`
- **Interactive Shell**: Run `provisioning nu` for Nushell REPL
### Reporting Issues
- Check **[Troubleshooting Guide](troubleshooting/troubleshooting-guide.md)**
- Review **[FAQ](troubleshooting/troubleshooting-guide.md)**
- Enable debug mode: `provisioning --debug <command>`
- Check logs: `provisioning platform logs <service>`
---
## Contributing
This project welcomes contributions! See **[Development Guide](development/README.md)** for:
- Development setup
- Code style guidelines
- Testing requirements
- Pull request process
---
## License
[Add license information]
---
## Version History
| Version | Date | Major Changes |
| --------- | ------ | --------------- |
| **3.5.0** | 2025-10-06 | Mode system, OCI registry, comprehensive documentation |
| **3.4.0** | 2025-10-06 | Test environment service |
| **3.3.0** | 2025-09-30 | Interactive guides system |
| **3.2.0** | 2025-09-30 | Modular CLI refactoring |
| **3.1.0** | 2025-09-25 | Batch workflow system |
| **3.0.0** | 2025-09-25 | Hybrid orchestrator architecture |
| **2.0.5** | 2025-10-02 | Workspace switching system |
| **2.0.0** | 2025-09-23 | Configuration system migration |
---
**Maintained By**: Provisioning Team
**Last Review**: 2025-10-06
**Next Review**: 2026-01-06

View File

@ -21,6 +21,26 @@
--- ---
## AI Integration
- [Overview](ai/README.md)
- [Architecture](ai/architecture.md)
- [RAG System](ai/rag-system.md)
- [MCP Integration](ai/mcp-integration.md)
- [Configuration Guide](ai/configuration.md)
- [Security Policies](ai/security-policies.md)
- [Troubleshooting with AI](ai/troubleshooting-with-ai.md)
- [Cost Management](ai/cost-management.md)
### Planned Features (Q2 2025)
- [Natural Language Configuration](ai/natural-language-config.md)
- [Configuration Generation](ai/config-generation.md)
- [AI-Assisted Forms](ai/ai-assisted-forms.md)
- [AI Agents](ai/ai-agents.md)
---
## Architecture & Design ## Architecture & Design
- [System Overview](architecture/system-overview.md) - [System Overview](architecture/system-overview.md)
@ -51,8 +71,8 @@
- [ADR-007: KMS Simplification](architecture/adr/adr-007-kms-simplification.md) - [ADR-007: KMS Simplification](architecture/adr/adr-007-kms-simplification.md)
- [ADR-008: Cedar Authorization](architecture/adr/adr-008-cedar-authorization.md) - [ADR-008: Cedar Authorization](architecture/adr/adr-008-cedar-authorization.md)
- [ADR-009: Security System Complete](architecture/adr/adr-009-security-system-complete.md) - [ADR-009: Security System Complete](architecture/adr/adr-009-security-system-complete.md)
- [ADR-010: Configuration Format Strategy](architecture/adr/ADR-010-configuration-format-strategy.md) - [ADR-010: Configuration Format Strategy](architecture/adr/adr-010-configuration-format-strategy.md)
- [ADR-011: Nickel Migration](architecture/adr/ADR-011-nickel-migration.md) - [ADR-011: Nickel Migration](architecture/adr/adr-011-nickel-migration.md)
- [ADR-012: Nushell Nickel Plugin CLI Wrapper](architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.md) - [ADR-012: Nushell Nickel Plugin CLI Wrapper](architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.md)
- [ADR-013: Typdialog Web UI Backend Integration](architecture/adr/adr-013-typdialog-integration.md) - [ADR-013: Typdialog Web UI Backend Integration](architecture/adr/adr-013-typdialog-integration.md)
- [ADR-014: SecretumVault Integration](architecture/adr/adr-014-secretumvault-integration.md) - [ADR-014: SecretumVault Integration](architecture/adr/adr-014-secretumvault-integration.md)
@ -60,21 +80,12 @@
--- ---
## AI Integration ## Roadmap & Future Features
- [Overview](ai/README.md) - [Overview](roadmap/README.md)
- [Architecture](ai/architecture.md) - [AI Integration (Planned)](roadmap/ai-integration.md)
- [Natural Language Configuration](ai/natural-language-config.md) - [Native Plugins (Partial)](roadmap/native-plugins.md)
- [AI-Assisted Forms](ai/ai-assisted-forms.md) - [Nickel Workflows (Planned)](roadmap/nickel-workflows.md)
- [AI Agents](ai/ai-agents.md)
- [Configuration Generation](ai/config-generation.md)
- [RAG System](ai/rag-system.md)
- [MCP Integration](ai/mcp-integration.md)
- [Security Policies](ai/security-policies.md)
- [Troubleshooting with AI](ai/troubleshooting-with-ai.md)
- [API Reference](ai/api-reference.md)
- [Configuration](ai/configuration.md)
- [Cost Management](ai/cost-management.md)
--- ---
@ -93,33 +104,39 @@
## Development ## Development
- [Extension Development](development/extension-development.md)
- [Infrastructure-Specific Extensions](development/infrastructure-specific-extensions.md) - [Infrastructure-Specific Extensions](development/infrastructure-specific-extensions.md)
- [Quick Provider Guide](development/quick-provider-guide.md)
- [Command Handler Guide](development/command-handler-guide.md) - [Command Handler Guide](development/command-handler-guide.md)
- [Configuration](development/configuration.md)
- [Workflow](development/workflow.md) - [Workflow](development/workflow.md)
- [Integration](development/integration.md) - [Integration](development/integration.md)
- [Build System](development/build-system.md) - [Build System](development/build-system.md)
- [Extensions](development/extensions.md)
- [Distribution Process](development/distribution-process.md) - [Distribution Process](development/distribution-process.md)
- [Implementation Guide](development/implementation-guide.md) - [Implementation Guide](development/implementation-guide.md)
- [TaskServ Developer Guide](development/taskserv-developer-guide.md)
- [TaskServ Quick Guide](development/taskserv-quick-guide.md)
- [Project Structure](development/project-structure.md) - [Project Structure](development/project-structure.md)
- [Provider Agnostic Architecture](development/provider-agnostic-architecture.md)
- [Ctrl-C Implementation Notes](development/ctrl-c-implementation-notes.md) - [Ctrl-C Implementation Notes](development/ctrl-c-implementation-notes.md)
- [Auth Metadata Guide](development/auth-metadata-guide.md) - [Auth Metadata Guide](development/auth-metadata-guide.md)
- [Migration Guide](development/migration-guide.md)
- [KMS Simplification](development/kms-simplification.md) - [KMS Simplification](development/kms-simplification.md)
- [Migration Example](development/migration-example.md)
- [Glossary](development/glossary.md) - [Glossary](development/glossary.md)
- [Provider Distribution Guide](development/provider-distribution-guide.md)
- [TaskServ Categorization](development/taskserv-categorization.md)
- [Extension Registry](development/extension-registry.md)
- [MCP Server](development/mcp-server.md) - [MCP Server](development/mcp-server.md)
- [TypeDialog Platform Config Guide](development/typedialog-platform-config-guide.md) - [TypeDialog Platform Config Guide](development/typedialog-platform-config-guide.md)
- [Provider Comparison Matrix](development/provider-comparison.md)
### Extensions
- [Overview](development/extensions/README.md)
- [Extension Development](development/extensions/extension-development.md)
- [Extension Registry](development/extensions/extension-registry.md)
### Providers
- [Quick Provider Guide](development/providers/quick-provider-guide.md)
- [Provider Agnostic Architecture](development/providers/provider-agnostic-architecture.md)
- [Provider Development Guide](development/providers/provider-development-guide.md)
- [Provider Distribution Guide](development/providers/provider-distribution-guide.md)
- [Provider Comparison Matrix](development/providers/provider-comparison.md)
### TaskServs
- [TaskServ Quick Guide](development/taskservs/taskserv-quick-guide.md)
- [TaskServ Categorization](development/taskservs/taskserv-categorization.md)
--- ---
@ -128,11 +145,7 @@
- [Platform Deployment Guide](operations/deployment-guide.md) - [Platform Deployment Guide](operations/deployment-guide.md)
- [Service Management Guide](operations/service-management-guide.md) - [Service Management Guide](operations/service-management-guide.md)
- [Monitoring & Alerting Setup](operations/monitoring-alerting-setup.md) - [Monitoring & Alerting Setup](operations/monitoring-alerting-setup.md)
- [Service Management Quick Reference](operations/service-management-quickref.md)
- [CoreDNS Guide](operations/coredns-guide.md) - [CoreDNS Guide](operations/coredns-guide.md)
- [Backup Recovery](operations/backup-recovery.md)
- [Deployment](operations/deployment.md)
- [Monitoring](operations/monitoring.md)
- [Production Readiness Checklist](operations/production-readiness-checklist.md) - [Production Readiness Checklist](operations/production-readiness-checklist.md)
- [Break Glass Training Guide](operations/break-glass-training-guide.md) - [Break Glass Training Guide](operations/break-glass-training-guide.md)
- [Cedar Policies Production Guide](operations/cedar-policies-production-guide.md) - [Cedar Policies Production Guide](operations/cedar-policies-production-guide.md)
@ -154,20 +167,23 @@
- [Batch Workflow Multi-Provider Examples](infrastructure/batch-workflow-multi-provider.md) - [Batch Workflow Multi-Provider Examples](infrastructure/batch-workflow-multi-provider.md)
- [CLI Architecture](infrastructure/cli-architecture.md) - [CLI Architecture](infrastructure/cli-architecture.md)
- [Configuration System](infrastructure/configuration-system.md) - [Configuration System](infrastructure/configuration-system.md)
- [Workspace Setup](infrastructure/workspace-setup.md)
- [Workspace Switching Guide](infrastructure/workspace-switching-guide.md)
- [Workspace Switching System](infrastructure/workspace-switching-system.md)
- [CLI Reference](infrastructure/cli-reference.md) - [CLI Reference](infrastructure/cli-reference.md)
- [Workspace Config Architecture](infrastructure/workspace-config-architecture.md)
- [Dynamic Secrets Guide](infrastructure/dynamic-secrets-guide.md) - [Dynamic Secrets Guide](infrastructure/dynamic-secrets-guide.md)
- [Mode System Guide](infrastructure/mode-system-guide.md) - [Mode System Guide](infrastructure/mode-system-guide.md)
- [Workspace Guide](infrastructure/workspace-guide.md)
- [Workspace Enforcement Guide](infrastructure/workspace-enforcement-guide.md)
- [Workspace Infra Reference](infrastructure/workspace-infra-reference.md)
- [Workspace Config Commands](infrastructure/workspace-config-commands.md)
- [Config Rendering Guide](infrastructure/config-rendering-guide.md) - [Config Rendering Guide](infrastructure/config-rendering-guide.md)
- [Configuration](infrastructure/configuration.md) - [Configuration](infrastructure/configuration.md)
### Workspaces
- [Workspace Setup](infrastructure/workspaces/workspace-setup.md)
- [Workspace Guide](infrastructure/workspaces/workspace-guide.md)
- [Workspace Switching Guide](infrastructure/workspaces/workspace-switching-guide.md)
- [Workspace Switching System](infrastructure/workspaces/workspace-switching-system.md)
- [Workspace Config Architecture](infrastructure/workspaces/workspace-config-architecture.md)
- [Workspace Config Commands](infrastructure/workspaces/workspace-config-commands.md)
- [Workspace Enforcement Guide](infrastructure/workspaces/workspace-enforcement-guide.md)
- [Workspace Infra Reference](infrastructure/workspaces/workspace-infra-reference.md)
--- ---
## Security ## Security
@ -183,8 +199,6 @@
- [NuShell Plugins System](security/nushell-plugins-system.md) - [NuShell Plugins System](security/nushell-plugins-system.md)
- [Plugin Usage Guide](security/plugin-usage-guide.md) - [Plugin Usage Guide](security/plugin-usage-guide.md)
- [Secrets Management Guide](security/secrets-management-guide.md) - [Secrets Management Guide](security/secrets-management-guide.md)
- [Auth Quick Reference](security/auth-quick-reference.md)
- [Config Encryption Quick Reference](security/config-encryption-quickref.md)
- [KMS Service](security/kms-service.md) - [KMS Service](security/kms-service.md)
--- ---
@ -203,7 +217,6 @@
## Testing ## Testing
- [Test Environment Guide](testing/test-environment-guide.md) - [Test Environment Guide](testing/test-environment-guide.md)
- [Test Environment Usage](testing/test-environment-usage.md)
- [Test Environment System](testing/test-environment-system.md) - [Test Environment System](testing/test-environment-system.md)
- [TaskServ Validation Guide](testing/taskserv-validation-guide.md) - [TaskServ Validation Guide](testing/taskserv-validation-guide.md)
@ -224,7 +237,6 @@
- [Extension Development Quickstart](guides/extension-development-quickstart.md) - [Extension Development Quickstart](guides/extension-development-quickstart.md)
- [Guide System](guides/guide-system.md) - [Guide System](guides/guide-system.md)
- [Workspace Generation Quick Reference](guides/workspace-generation-quick-reference.md) - [Workspace Generation Quick Reference](guides/workspace-generation-quick-reference.md)
- [Workspace Documentation Migration](guides/workspace-documentation-migration.md)
### Multi-Provider Deployment Guides ### Multi-Provider Deployment Guides
@ -255,4 +267,3 @@
## Configuration ## Configuration
- [Config Validation](configuration/config-validation.md) - [Config Validation](configuration/config-validation.md)
- [Workspace Config Architecture](configuration/workspace-config-architecture.md)

View File

@ -1,6 +1,7 @@
# AI Integration - Intelligent Infrastructure Provisioning # AI Integration - Intelligent Infrastructure Provisioning
The provisioning platform integrates AI capabilities to provide intelligent assistance for infrastructure configuration, deployment, and troubleshooting. The provisioning platform integrates AI capabilities to provide intelligent assistance for infrastructure configuration, deployment, and
troubleshooting.
This section documents the AI system architecture, features, and usage patterns. This section documents the AI system architecture, features, and usage patterns.
## Overview ## Overview
@ -19,7 +20,7 @@ The AI integration consists of multiple components working together to provide i
### Natural Language Configuration ### Natural Language Configuration
Generate infrastructure configurations from plain English descriptions: Generate infrastructure configurations from plain English descriptions:
```bash ```
provisioning ai generate "Create a production PostgreSQL cluster with encryption and daily backups" provisioning ai generate "Create a production PostgreSQL cluster with encryption and daily backups"
``` ```
@ -30,7 +31,7 @@ Real-time suggestions and explanations as you fill out configuration forms via t
### Intelligent Troubleshooting ### Intelligent Troubleshooting
AI analyzes deployment failures and suggests fixes: AI analyzes deployment failures and suggests fixes:
```bash ```
provisioning ai troubleshoot deployment-12345 provisioning ai troubleshoot deployment-12345
``` ```
@ -38,13 +39,13 @@ provisioning ai troubleshoot deployment-12345
Configuration Optimization Configuration Optimization
AI reviews configurations and suggests performance and security improvements: AI reviews configurations and suggests performance and security improvements:
```bash ```
provisioning ai optimize workspaces/prod/config.ncl provisioning ai optimize workspaces/prod/config.ncl
``` ```
### Autonomous Agents ### Autonomous Agents
AI agents execute multi-step workflows with minimal human intervention: AI agents execute multi-step workflows with minimal human intervention:
```bash ```
provisioning ai agent --goal "Set up complete dev environment for Python app" provisioning ai agent --goal "Set up complete dev environment for Python app"
``` ```
@ -67,7 +68,7 @@ provisioning ai agent --goal "Set up complete dev environment for Python app"
### Enable AI Features ### Enable AI Features
```bash ```
# Edit provisioning config # Edit provisioning config
vim provisioning/config/ai.toml vim provisioning/config/ai.toml
@ -85,7 +86,7 @@ troubleshooting = true
### Generate Configuration from Natural Language ### Generate Configuration from Natural Language
```bash ```
# Simple generation # Simple generation
provisioning ai generate "PostgreSQL database with encryption" provisioning ai generate "PostgreSQL database with encryption"
@ -98,7 +99,7 @@ provisioning ai generate \
### Use AI-Assisted Forms ### Use AI-Assisted Forms
```bash ```
# Open typdialog web UI with AI assistance # Open typdialog web UI with AI assistance
provisioning workspace init --interactive --ai-assist provisioning workspace init --interactive --ai-assist
@ -109,7 +110,7 @@ provisioning workspace init --interactive --ai-assist
### Troubleshoot with AI ### Troubleshoot with AI
```bash ```
# Analyze failed deployment # Analyze failed deployment
provisioning ai troubleshoot deployment-12345 provisioning ai troubleshoot deployment-12345
@ -133,11 +134,11 @@ See [Security Policies](security-policies.md) for complete details.
## Supported LLM Providers ## Supported LLM Providers
| Provider | Models | Best For | | | Provider | Models | Best For | |
| ---------- | -------- | ---------- | | | ---------- | -------- | ---------- | |
| **Anthropic** | Claude Sonnet 4, Claude Opus 4 | Complex configs, long context | | | **Anthropic** | Claude Sonnet 4, Claude Opus 4 | Complex configs, long context | |
| **OpenAI** | GPT-4 Turbo, GPT-4 | Fast suggestions, tool calling | | | **OpenAI** | GPT-4 Turbo, GPT-4 | Fast suggestions, tool calling | |
| **Local** | Llama 3, Mistral | Air-gapped, privacy-critical | | | **Local** | Llama 3, Mistral | Air-gapped, privacy-critical | |
## Cost Considerations ## Cost Considerations

View File

@ -1 +1,532 @@
# AI Agents # Autonomous AI Agents (typdialog-ag)
**Status**: 🔴 Planned (Q2 2025 target)
Autonomous AI Agents is a planned feature that enables AI agents to execute multi-step
infrastructure provisioning workflows with minimal human intervention. Agents make
decisions, adapt to changing conditions, and execute complex tasks while maintaining
security and requiring human approval for critical operations.
## Feature Overview
### What It Does
Enable AI agents to manage complex provisioning workflows:
```
User Goal:
"Set up a complete development environment with:
- PostgreSQL database
- Redis cache
- Kubernetes cluster
- Monitoring stack
- Logging infrastructure"
AI Agent executes:
1. Analyzes requirements and constraints
2. Plans multi-step deployment sequence
3. Creates configurations for all components
4. Validates configurations against policies
5. Requests human approval for critical decisions
6. Executes deployment in correct order
7. Monitors for failures and adapts
8. Reports completion and recommendations
```
## Agent Capabilities
### Multi-Step Workflow Execution
Agents coordinate complex, multi-component deployments:
```
Goal: "Deploy production Kubernetes cluster with managed databases"
Agent Plan:
Phase 1: Infrastructure
├─ Create VPC and networking
├─ Set up security groups
└─ Configure IAM roles
Phase 2: Kubernetes
├─ Create EKS cluster
├─ Configure network plugins
├─ Set up autoscaling
└─ Install cluster add-ons
Phase 3: Managed Services
├─ Provision RDS PostgreSQL
├─ Configure backups
└─ Set up replicas
Phase 4: Observability
├─ Deploy Prometheus
├─ Deploy Grafana
├─ Configure log collection
└─ Set up alerting
Phase 5: Validation
├─ Run smoke tests
├─ Verify connectivity
└─ Check compliance
```
### Adaptive Decision Making
Agents adapt to conditions and make intelligent decisions:
```
Scenario: Database provisioning fails due to resource quota
Standard approach (human):
1. Detect failure
2. Investigate issue
3. Decide on fix (reduce size, change region, etc.)
4. Update config
5. Retry
Agent approach:
1. Detect failure
2. Analyze error: "Quota exceeded for db.r6g.xlarge"
3. Check available options:
- Try smaller instance: db.r6g.large (may be insufficient)
- Try different region: different cost, latency
- Request quota increase (requires human approval)
4. Ask human: "Quota exceeded. Suggest: use db.r6g.large instead
(slightly reduced performance). Approve? [yes/no/try-other]"
5. Execute based on approval
6. Continue workflow
```
### Dependency Management
Agents understand resource dependencies:
```
Knowledge graph of dependencies:
VPC ──→ Subnets ──→ EC2 Instances
├─────────→ Security Groups
└────→ NAT Gateway ──→ Route Tables
RDS ──→ DB Subnet Group ──→ VPC
├─────────→ Security Group
└────→ Parameter Group
Agent ensures:
- VPC exists before creating subnets
- Subnets exist before creating EC2
- Security groups reference correct VPC
- Deployment order respects all dependencies
- Rollback order is reverse of creation
```
## Architecture
### Agent Design Pattern
```
┌────────────────────────────────────────────────────────┐
│ Agent Supervisor (Orchestrator) │
│ - Accepts user goal │
│ - Plans workflow │
│ - Coordinates specialist agents │
│ - Requests human approvals │
│ - Monitors overall progress │
└────────────────────────────────────────────────────────┘
↑ ↑ ↑
│ │ │
↓ ↓ ↓
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
│ Database │ │ Kubernetes │ │ Monitoring │
│ Specialist │ │ Specialist │ │ Specialist │
│ │ │ │ │ │
│ Tasks: │ │ Tasks: │ │ Tasks: │
│ - Create DB │ │ - Create K8s │ │ - Deploy │
│ - Configure │ │ - Configure │ │ Prometheus │
│ - Validate │ │ - Validate │ │ - Deploy │
│ - Report │ │ - Report │ │ Grafana │
└──────────────┘ └──────────────┘ └──────────────┘
```
### Agent Workflow
```
Start: User Goal
┌─────────────────────────────────────────┐
│ Goal Analysis & Planning │
│ - Parse user intent │
│ - Identify resources needed │
│ - Plan dependency graph │
│ - Generate task list │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ Resource Generation │
│ - Generate configs for each resource │
│ - Validate against schemas │
│ - Check compliance policies │
│ - Identify potential issues │
└──────────────┬──────────────────────────┘
Human Review Point?
├─ No issues: Continue
└─ Issues found: Request approval/modification
┌─────────────────────────────────────────┐
│ Execution Plan Verification │
│ - Check all configs are valid │
│ - Verify dependencies are resolvable │
│ - Estimate costs and timeline │
│ - Identify risks │
└──────────────┬──────────────────────────┘
Execute Workflow?
├─ User approves: Start execution
└─ User modifies: Return to planning
┌─────────────────────────────────────────┐
│ Phase-by-Phase Execution │
│ - Execute one logical phase │
│ - Monitor for errors │
│ - Report progress │
│ - Ask for decisions if needed │
└──────────────┬──────────────────────────┘
All Phases Complete?
├─ No: Continue to next phase
└─ Yes: Final validation
┌─────────────────────────────────────────┐
│ Final Validation & Reporting │
│ - Smoke tests │
│ - Connectivity tests │
│ - Compliance verification │
│ - Performance checks │
│ - Generate final report │
└──────────────┬──────────────────────────┘
Success: Deployment Complete
```
## Planned Agent Types
### 1. Database Specialist Agent
```
Responsibilities:
- Create and configure databases
- Set up replication and backups
- Configure encryption and security
- Monitor database health
- Handle database-specific issues
Examples:
- Provision PostgreSQL cluster with replication
- Set up MySQL with read replicas
- Configure MongoDB sharding
- Create backup pipelines
```
### 2. Kubernetes Specialist Agent
```
Responsibilities:
- Create and configure Kubernetes clusters
- Configure networking and ingress
- Set up autoscaling policies
- Deploy cluster add-ons
- Manage workload placement
Examples:
- Create EKS/GKE/AKS cluster
- Configure Istio service mesh
- Deploy Prometheus + Grafana
- Configure auto-scaling policies
```
### 3. Infrastructure Agent
```
Responsibilities:
- Create networking infrastructure
- Configure security and firewalls
- Set up load balancers
- Configure DNS and CDN
- Manage identity and access
Examples:
- Create VPC with subnets
- Configure security groups
- Set up application load balancer
- Configure Route53 DNS
```
### 4. Monitoring Agent
```
Responsibilities:
- Deploy monitoring stack
- Configure alerting
- Set up logging infrastructure
- Create dashboards
- Configure notification channels
Examples:
- Deploy Prometheus + Grafana
- Set up CloudWatch dashboards
- Configure log aggregation
- Set up PagerDuty integration
```
### 5. Compliance Agent
```
Responsibilities:
- Check security policies
- Verify compliance requirements
- Audit configurations
- Generate compliance reports
- Recommend security improvements
Examples:
- Check PCI-DSS compliance
- Verify encryption settings
- Audit access controls
- Generate compliance report
```
## Usage Examples
### Example 1: Development Environment Setup
```
$ provisioning ai agent --goal "Set up dev environment for Python web app"
Agent Plan Generated:
┌─────────────────────────────────────────┐
│ Environment: Development │
│ Components: PostgreSQL + Redis + Monitoring
│ │
│ Phase 1: Database (1-2 min) │
│ - PostgreSQL 15 │
│ - 10 GB storage │
│ - Dev security settings │
│ │
│ Phase 2: Cache (1 min) │
│ - Redis Cluster Mode disabled │
│ - Single node │
│ - 2 GB memory │
│ │
│ Phase 3: Monitoring (1-2 min) │
│ - Prometheus (metrics) │
│ - Grafana (dashboards) │
│ - Log aggregation │
│ │
│ Estimated time: 5-10 minutes │
│ Estimated cost: $15/month │
│ │
│ [Approve] [Modify] [Cancel] │
└─────────────────────────────────────────┘
Agent: Approve to proceed with setup.
User: Approve
[Agent execution starts]
Creating PostgreSQL... [████████░░] 80%
Creating Redis... [░░░░░░░░░░] 0%
[Waiting for PostgreSQL creation...]
PostgreSQL created successfully!
Connection string: postgresql://dev:pwd@db.internal:5432/app
Creating Redis... [████████░░] 80%
[Waiting for Redis creation...]
Redis created successfully!
Connection string: redis://cache.internal:6379
Deploying monitoring... [████████░░] 80%
[Waiting for Grafana startup...]
All services deployed successfully!
Grafana dashboards: [http://grafana.internal:3000](http://grafana.internal:3000)
```
### Example 2: Production Kubernetes Deployment
```
$ provisioning ai agent --interactive \
--goal "Deploy production Kubernetes cluster with managed databases"
Agent Analysis:
- Cluster size: 3-10 nodes (auto-scaling)
- Databases: RDS PostgreSQL + ElastiCache Redis
- Monitoring: Full observability stack
- Security: TLS, encryption, VPC isolation
Agent suggests modifications:
1. Enable cross-AZ deployment for HA
2. Add backup retention: 30 days
3. Add network policies for security
4. Enable cluster autoscaling
Approve all? [yes/review]
User: Review
Agent points out:
- Network policies may affect performance
- Cross-AZ increases costs by ~20%
- Backup retention meets compliance
User: Approve with modifications
- Network policies: use audit mode first
- Keep cross-AZ
- Keep backups
[Agent creates configs with modifications]
Configs generated:
✓ infrastructure/vpc.ncl
✓ infrastructure/kubernetes.ncl
✓ databases/postgres.ncl
✓ databases/redis.ncl
✓ monitoring/prometheus.ncl
✓ monitoring/grafana.ncl
Estimated deployment time: 15-20 minutes
Estimated cost: $2,500/month
[Start deployment?] [Review configs]
User: Review configs
[User reviews and approves]
[Agent executes deployment in phases]
```
## Safety and Control
### Human-in-the-Loop Checkpoints
Agents stop and ask humans for approval at critical points:
```
Automatic Approval (Agent decides):
- Create configuration
- Validate configuration
- Check dependencies
- Generate execution plan
Human Approval Required:
- First-time resource creation
- Cost changes > 10%
- Security policy changes
- Cross-region deployment
- Data deletion operations
- Major version upgrades
```
### Decision Logging
All decisions logged for audit trail:
```
Agent Decision Log:
| 2025-01-13 10:00:00 | Generate database config |
| 2025-01-13 10:00:05 | Config validation: PASS |
| 2025-01-13 10:00:07 | Requesting human approval: "Create new PostgreSQL instance" |
| 2025-01-13 10:00:45 | Human approval: APPROVED |
| 2025-01-13 10:00:47 | Cost estimate: $100/month - within budget |
| 2025-01-13 10:01:00 | Creating infrastructure... |
| 2025-01-13 10:02:15 | Database created successfully |
| 2025-01-13 10:02:16 | Running health checks... |
| 2025-01-13 10:02:45 | Health check: PASSED |
```
### Rollback Capability
Agents can rollback on failure:
```
Scenario: Database creation succeeds, but Kubernetes creation fails
Agent behavior:
1. Detect failure in Kubernetes phase
2. Try recovery (retry, different configuration)
3. Recovery fails
4. Ask human: "Kubernetes creation failed. Rollback database creation? [yes/no]"
5. If yes: Delete database, clean up, report failure
6. If no: Keep database, manual cleanup needed
Full rollback capability if entire workflow fails before human approval.
```
## Configuration
### Agent Settings
```
# In provisioning/config/ai.toml
[ai.agents]
enabled = true
# Agent decision-making
auto_approve_threshold = 0.95 # Approve if confidence > 95%
require_approval_for = [
"first_resource_creation",
"cost_change_above_percent",
"security_policy_change",
"data_deletion",
]
cost_change_threshold_percent = 10
# Execution control
max_parallel_phases = 2
phase_timeout_minutes = 30
execution_log_retention_days = 90
# Safety
dry_run_mode = false # Always perform dry run first
require_final_approval = true
rollback_on_failure = true
# Learning
track_agent_decisions = true
track_success_rate = true
improve_from_feedback = true
```
## Success Criteria (Q2 2025)
- ✅ Agents complete 5 standard workflows without human intervention
- ✅ Cost estimation accuracy within 5%
- ✅ Execution time matches or beats manual setup by 30%
- ✅ Success rate > 95% for tested scenarios
- ✅ Zero unapproved critical decisions
- ✅ Full decision audit trail for all operations
- ✅ Rollback capability tested and verified
- ✅ User satisfaction > 8/10 in testing
- ✅ Documentation complete with examples
- ✅ Integration with form assistance and NLC working
## Related Documentation
- [Architecture](architecture.md) - AI system overview
- [Natural Language Config](natural-language-config.md) - Config generation
- [AI-Assisted Forms](ai-assisted-forms.md) - Interactive forms
- [Configuration](configuration.md) - Setup guide
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Status**: 🔴 Planned
**Target Release**: Q2 2025
**Last Updated**: 2025-01-13
**Component**: typdialog-ag
**Architecture**: Complete
**Implementation**: In Design Phase

View File

@ -1 +1,438 @@
# AI-Assisted Forms # AI-Assisted Forms (typdialog-ai)
**Status**: 🔴 Planned (Q2 2025 target)
AI-Assisted Forms is a planned feature that integrates intelligent suggestions, context-aware assistance, and natural language understanding into the
typdialog web UI. This enables users to configure infrastructure through interactive forms with real-time AI guidance.
## Feature Overview
### What It Does
Enhance configuration forms with AI-powered assistance:
```
User typing in form field: "storage"
AI analyzes context:
- Current form (database configuration)
- Field type (storage capacity)
- Similar past configurations
- Best practices for this workload
Suggestions appear:
✓ "100 GB (standard production size)"
✓ "50 GB (development environment)"
✓ "500 GB (large-scale analytics)"
```
### Primary Use Cases
1. **Guided Configuration**: Step-by-step assistance filling complex forms
2. **Error Explanation**: AI explains validation failures in plain English
3. **Smart Autocomplete**: Suggestions based on context, not just keywords
4. **Learning**: New users learn patterns from AI explanations
5. **Efficiency**: Experienced users get quick suggestions
## Architecture
### User Interface Integration
```
┌────────────────────────────────────────┐
│ Typdialog Web UI (React/TypeScript) │
│ │
│ ┌──────────────────────────────────┐ │
│ │ Form Fields │ │
│ │ │ │
│ │ Database Engine: [postgresql ▼] │ │
│ │ Storage (GB): [100 GB ↓ ?] │ │
│ │ AI suggestions │ │
│ │ Encryption: [✓ enabled ] │ │
│ │ "Required for │ │
│ │ production" │ │
│ │ │ │
│ │ [← Back] [Next →] │ │
│ └──────────────────────────────────┘ │
│ ↓ │
│ AI Assistance Panel │
│ (suggestions & explanations) │
└────────────────────────────────────────┘
↓ ↑
User Input AI Service
(port 8083)
```
### Suggestion Pipeline
```
User Event (typing, focusing field, validation error)
┌─────────────────────────────────────┐
│ Context Extraction │
│ - Current field and value │
│ - Form schema and constraints │
│ - Other filled fields │
│ - User role and workspace │
└─────────────────────┬───────────────┘
┌─────────────────────────────────────┐
│ RAG Retrieval │
│ - Find similar configs │
│ - Get examples for field type │
│ - Retrieve relevant documentation │
│ - Find validation rules │
└─────────────────────┬───────────────┘
┌─────────────────────────────────────┐
│ Suggestion Generation │
│ - AI generates suggestions │
│ - Rank by relevance │
│ - Format for display │
│ - Generate explanation │
└─────────────────────┬───────────────┘
┌─────────────────────────────────────┐
│ Response Formatting │
│ - Debounce (don't update too fast) │
│ - Cache identical results │
│ - Stream if long response │
│ - Display to user │
└─────────────────────────────────────┘
```
## Planned Features
### 1. Smart Field Suggestions
Intelligent suggestions based on context:
```
Scenario: User filling database configuration form
1. Engine selection
User types: "post"
Suggestion: "postgresql" (99% match)
Explanation: "PostgreSQL is the most popular open-source relational database"
2. Storage size
User has selected: "postgresql", "production", "web-application"
Suggestions appear:
• "100 GB" (standard production web app database)
• "500 GB" (if expected growth > 1000 connections)
• "1 TB" (high-traffic SaaS platform)
Explanation: "For typical web applications with 1000s of concurrent users, 100 GB is recommended"
3. Backup frequency
User has selected: "production", "critical-data"
Suggestions appear:
• "Daily" (standard for critical databases)
• "Hourly" (for data warehouses with frequent updates)
Explanation: "Critical production data requires daily or more frequent backups"
```
### 2. Validation Error Explanation
Human-readable error messages with fixes:
```
User enters: "storage = -100"
Current behavior:
✗ Error: Expected positive integer
Planned AI behavior:
✗ Storage must be positive (1-65535 GB)
Why: Negative storage doesn't make sense.
Storage capacity must be at least 1 GB.
Fix suggestions:
• Use 100 GB (typical production size)
• Use 50 GB (development environment)
• Use your required size in GB
```
### 3. Field-to-Field Context Awareness
Suggestions change based on other fields:
```
Scenario: Multi-step configuration form
Step 1: Select environment
User: "production"
→ Form shows constraints: (min storage 50GB, encryption required, backup required)
Step 2: Select database engine
User: "postgresql"
→ Suggestions adapted:
- PostgreSQL 15 recommended for production
- Point-in-time recovery available
- Replication options highlighted
Step 3: Storage size
→ Suggestions show:
- Minimum 50 GB for production
- Examples from similar production configs
- Cost estimate updates in real-time
Step 4: Encryption
→ Suggestion appears: "Recommended: AES-256"
→ Explanation: "Required for production environments"
```
### 4. Inline Documentation
Quick access to relevant docs:
```
Field: "Backup Retention Days"
Suggestion popup:
┌─────────────────────────────────┐
│ Suggested value: 30 │
│ │
│ Why: 30 days is the industry │
│ standard for compliance (PCI-DSS)│
│ │
│ Learn more: │
│ → Backup best practices guide │
│ → Your compliance requirements │
│ → Cost vs retention trade-offs │
└─────────────────────────────────┘
```
### 5. Multi-Field Suggestions
Suggest multiple related fields together:
```
User selects: environment = "production"
AI suggests completing:
┌─────────────────────────────────┐
│ Complete Production Setup │
│ │
│ Based on production environment │
│ we recommend: │
│ │
│ Encryption: enabled │ ← Auto-fill
│ Backups: daily │ ← Auto-fill
│ Monitoring: enabled │ ← Auto-fill
│ High availability: enabled │ ← Auto-fill
│ Retention: 30 days │ ← Auto-fill
│ │
│ [Accept All] [Review] [Skip] │
└─────────────────────────────────┘
```
## Implementation Components
### Frontend (typdialog-ai JavaScript/TypeScript)
```
// React component for field with AI assistance
interface AIFieldProps {
fieldName: string;
fieldType: string;
currentValue: string;
formContext: Record<string, any>;
schema: FieldSchema;
}
function AIAssistedField({fieldName, formContext, schema}: AIFieldProps) {
const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
const [explanation, setExplanation] = useState<string>("");
// Debounced suggestion generation
useEffect(() => {
const timer = setTimeout(async () => {
const suggestions = await ai.suggestFieldValue({
field: fieldName,
context: formContext,
schema: schema,
});
setSuggestions(suggestions);
setExplanation(suggestions[0]?.explanation || "");
}, 300); // Debounce 300ms
return () => clearTimeout(timer);
}, [formContext[fieldName]]);
return (
<div className="ai-field">
<input
value={formContext[fieldName]}
onChange={(e) => handleChange(e.target.value)}
/>
{suggestions.length > 0 && (
<div className="ai-suggestions">
{suggestions.map((s) => (
<button key={s.value} onClick={() => accept(s.value)}>
{s.label}
</button>
))}
{explanation && (
<p className="ai-explanation">{explanation}</p>
)}
</div>
)}
</div>
);
}
```
### Backend Service Integration
```
// In AI Service: field suggestion endpoint
async fn suggest_field_value(
req: SuggestFieldRequest,
) -> Result<Vec<Suggestion>> {
// Build context for the suggestion
let context = build_field_context(&req.form_context, &req.field_name)?;
// Retrieve relevant examples from RAG
let examples = rag.search_by_field(&req.field_name, &context)?;
// Generate suggestions via LLM
let suggestions = llm.generate_suggestions(
&req.field_name,
&req.field_type,
&context,
&examples,
).await?;
// Rank and format suggestions
let ranked = rank_suggestions(suggestions, &context);
Ok(ranked)
}
```
## Configuration
### Form Assistant Settings
```
# In provisioning/config/ai.toml
[ai.forms]
enabled = true
# Suggestion delivery
suggestions_enabled = true
suggestions_debounce_ms = 300
max_suggestions_per_field = 3
# Error explanations
error_explanations_enabled = true
explain_validation_errors = true
suggest_fixes = true
# Field context awareness
field_context_enabled = true
cross_field_suggestions = true
# Inline documentation
inline_docs_enabled = true
docs_link_type = "modal" # or "sidebar", "tooltip"
# Performance
cache_suggestions = true
cache_ttl_seconds = 3600
# Learning
track_accepted_suggestions = true
track_rejected_suggestions = true
```
## User Experience Flow
### Scenario: New User Configuring PostgreSQL
```
1. User opens typdialog form
- Form title: "Create Database"
- First field: "Database Engine"
- AI shows: "PostgreSQL recommended for relational data"
2. User types "post"
- Autocomplete shows: "postgresql"
- AI explains: "PostgreSQL is the most stable open-source database"
3. User selects "postgresql"
- Form progresses
- Next field: "Version"
- AI suggests: "PostgreSQL 15 (latest stable)"
- Explanation: "Version 15 is current stable, recommended for new deployments"
4. User selects version 15
- Next field: "Environment"
- User selects "production"
- AI note appears: "Production environment requires encryption and backups"
5. Next field: "Storage (GB)"
- Form shows: Minimum 50 GB (production requirement)
- AI suggestions:
• 100 GB (standard production)
• 250 GB (high-traffic site)
- User accepts: 100 GB
6. Validation error on next field
- Old behavior: "Invalid backup_days value"
- New behavior:
"Backup retention must be 1-35 days. Recommended: 30 days.
30-day retention meets compliance requirements for production systems."
7. User completes form
- Summary shows all AI-assisted decisions
- Generate button creates configuration
```
## Integration with Natural Language Generation
NLC and form assistance share the same backend:
```
Natural Language Generation AI-Assisted Forms
↓ ↓
"Create a PostgreSQL db" Select field values
↓ ↓
Intent Extraction Context Extraction
↓ ↓
RAG Search RAG Search (same results)
↓ ↓
LLM Generation LLM Suggestions
↓ ↓
Config Output Form Field Population
```
## Success Criteria (Q2 2025)
- ✅ Suggestions appear within 300ms of user action
- ✅ 80% suggestion acceptance rate in user testing
- ✅ Error explanations clearly explain issues and fixes
- ✅ Cross-field context awareness works for 5+ database scenarios
- ✅ Form completion time reduced by 40% with AI
- ✅ User satisfaction > 8/10 in testing
- ✅ No false suggestions (all suggestions are valid)
- ✅ Offline mode works with cached suggestions
## Related Documentation
- [Architecture](architecture.md) - AI system overview
- [Natural Language Config](natural-language-config.md) - Related generation feature
- [RAG System](rag-system.md) - Suggestion retrieval
- [Configuration](configuration.md) - Setup guide
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Status**: 🔴 Planned
**Target Release**: Q2 2025
**Last Updated**: 2025-01-13
**Component**: typdialog-ai
**Architecture**: Complete
**Implementation**: In Design Phase

View File

@ -1 +0,0 @@
# API Reference

View File

@ -1 +1,194 @@
# AI Integration Architecture
## Overview
The provisioning platform's AI system provides intelligent capabilities for configuration generation, troubleshooting, and automation. The
architecture consists of multiple layers designed for reliability, security, and performance.
## Core Components - Production-Ready
### 1. AI Service (`provisioning/platform/ai-service`)
**Status**: ✅ Production-Ready (2,500+ lines Rust code)
The core AI service provides:
- Multi-provider LLM support (Anthropic Claude, OpenAI GPT-4, local models)
- Streaming response support for real-time feedback
- Request caching with LRU and semantic similarity
- Rate limiting and cost control
- Comprehensive error handling
- HTTP REST API on port 8083
**Supported Models**:
- Claude Sonnet 4, Claude Opus 4 (Anthropic)
- GPT-4 Turbo, GPT-4 (OpenAI)
- Llama 3, Mistral (local/on-premise)
### 2. RAG System (Retrieval-Augmented Generation)
**Status**: ✅ Production-Ready (22/22 tests passing)
The RAG system enables AI to access and reason over platform documentation:
- Vector embeddings via SurrealDB vector store
- Hybrid search: vector similarity + BM25 keyword search
- Document chunking (code and markdown aware)
- Relevance ranking and context selection
- Semantic caching for repeated queries
**Capabilities**:
```
provisioning ai query "How do I set up Kubernetes?"
provisioning ai template "Describe my infrastructure"
```
### 3. MCP Server (Model Context Protocol)
**Status**: ✅ Production-Ready
Provides Model Context Protocol integration:
- Standardized tool interface for LLMs
- Complex workflow composition
- Integration with external AI systems (Claude, other LLMs)
- Tool calling for provisioning operations
### 4. CLI Integration
**Status**: ✅ Production-Ready
Interactive commands:
```
provisioning ai template --prompt "Describe infrastructure"
provisioning ai query --prompt "Configuration question"
provisioning ai chat # Interactive mode
```
**Configuration**:
```
[ai]
enabled = true
provider = "anthropic" # or "openai" or "local"
model = "claude-sonnet-4"
[ai.cache]
enabled = true
semantic_similarity = true
ttl_seconds = 3600
[ai.limits]
max_tokens = 4096
temperature = 0.7
```
## Planned Components - Q2 2025
### Autonomous Agents (typdialog-ag)
**Status**: 🔴 Planned
Self-directed agents for complex tasks:
- Multi-step workflow execution
- Decision making and adaptation
- Monitoring and self-healing recommendations
### AI-Assisted Forms (typdialog-ai)
**Status**: 🔴 Planned
Real-time AI suggestions in configuration forms:
- Context-aware field recommendations
- Validation error explanations
- Auto-completion for infrastructure patterns
### Advanced Features
- Fine-tuning capabilities for custom models
- Autonomous workflow execution with human approval
- Cedar authorization policies for AI actions
- Custom knowledge bases per workspace
## Architecture Diagram
```
┌─────────────────────────────────────────────────┐
│ User Interface │
│ ├── CLI (provisioning ai ...) │
│ ├── Web UI (typdialog) │
│ └── MCP Client (Claude, etc.) │
└──────────────┬──────────────────────────────────┘
┌──────────────────────────────────────────────────┐
│ AI Service (Port 8083) │
│ ├── Request Router │
│ ├── Cache Layer (LRU + Semantic) │
│ ├── Prompt Engineering │
│ └── Response Streaming │
└──────┬─────────────────┬─────────────────────────┘
↓ ↓
┌─────────────┐ ┌──────────────────┐
│ RAG System │ │ LLM Provider │
│ SurrealDB │ │ ├── Anthropic │
│ Vector DB │ │ ├── OpenAI │
│ + BM25 │ │ └── Local Model │
└─────────────┘ └──────────────────┘
↓ ↓
┌──────────────────────────────────────┐
│ Cached Responses + Real Responses │
│ Streamed to User │
└──────────────────────────────────────┘
```
## Performance Characteristics
| Metric | Value |
| ------ | ----- |
| Cold response (cache miss) | 2-5 seconds |
| Cached response | <500ms |
| Streaming start time | <1 second |
| AI service memory usage | ~200MB at rest |
| Cache size (configurable) | Up to 500MB |
| Vector DB (SurrealDB) | Included, auto-managed |
## Security Model
### Cedar Authorization
All AI operations controlled by Cedar policies:
- User role-based access control
- Operation-specific permissions
- Complete audit logging
### Secret Protection
- Secrets never sent to external LLMs
- PII/sensitive data sanitized before API calls
- Encryption at rest in local cache
- HSM support for key storage
### Local Model Support
Air-gapped deployments:
- On-premise LLM models (Llama 3, Mistral)
- Zero external API calls
- Full data privacy compliance
- Ideal for classified environments
## Configuration
See [Configuration Guide](configuration.md) for:
- LLM provider setup
- Cache configuration
- Cost limits and budgets
- Security policies
## Related Documentation
- [RAG System](rag-system.md) - Retrieval implementation details
- [Security Policies](security-policies.md) - Authorization and safety controls
- [Configuration Guide](configuration.md) - Setup instructions
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Last Updated**: 2025-01-13
**Status**: ✅ Production-Ready (core system)
**Test Coverage**: 22/22 tests passing

View File

@ -1 +1,64 @@
# Configuration Generation (typdialog-prov-gen)
**Status**: 🔴 Planned for Q2 2025
## Overview
The Configuration Generator (typdialog-prov-gen) will provide template-based Nickel configuration generation with AI-powered customization.
## Planned Features
### Template Selection
- Library of production-ready infrastructure templates
- AI recommends templates based on requirements
- Preview before generation
### Customization via Natural Language
```
provisioning ai config-gen \
--template "kubernetes-cluster" \
--customize "Add Prometheus monitoring, increase replicas to 5, use us-east-1"
```
### Multi-Provider Support
- AWS, Hetzner, UpCloud, local infrastructure
- Automatic provider-specific optimizations
- Cost estimation across providers
### Validation and Testing
- Type-checking via Nickel before deployment
- Dry-run execution for safety
- Test data fixtures for verification
## Architecture
```
Template Library
Template Selection (AI + User)
Customization Layer (NL → Nickel)
Validation (Type + Runtime)
Generated Configuration
```
## Integration Points
- typdialog web UI for template browsing
- CLI for batch generation
- AI service for customization suggestions
- Nickel for type-safe validation
## Related Documentation
- [Natural Language Configuration](natural-language-config.md) - NL to config generation
- [Architecture](architecture.md) - AI system overview
- [Configuration Guide](configuration.md) - Setup instructions
---
**Status**: 🔴 Planned
**Expected Release**: Q2 2025
**Priority**: High (enables non-technical users to generate configs)

View File

@ -1 +1,601 @@
# AI System Configuration Guide
**Status**: ✅ Production-Ready (Configuration system)
Complete setup guide for AI features in the provisioning platform. This guide covers LLM provider configuration, feature enablement, cache setup, cost
controls, and security settings.
## Quick Start
### Minimal Configuration
```
# provisioning/config/ai.toml
[ai]
enabled = true
provider = "anthropic" # or "openai" or "local"
model = "claude-sonnet-4"
api_key = "sk-ant-..." # Set via PROVISIONING_AI_API_KEY env var
[ai.cache]
enabled = true
[ai.limits]
max_tokens = 4096
temperature = 0.7
```
### Initialize Configuration
```
# Generate default configuration
provisioning config init ai
# Edit configuration
provisioning config edit ai
# Validate configuration
provisioning config validate ai
# Show current configuration
provisioning config show ai
```
## Provider Configuration
### Anthropic Claude
```
[ai]
enabled = true
provider = "anthropic"
model = "claude-sonnet-4" # or "claude-opus-4", "claude-haiku-4"
api_key = "${PROVISIONING_AI_API_KEY}"
api_base = "https://api.anthropic.com"
# Request parameters
[ai.request]
max_tokens = 4096
temperature = 0.7
top_p = 0.95
top_k = 40
# Supported models
# - claude-opus-4: Most capable, for complex reasoning ($15/MTok input, $45/MTok output)
# - claude-sonnet-4: Balanced (recommended), ($3/MTok input, $15/MTok output)
# - claude-haiku-4: Fast, for simple tasks ($0.80/MTok input, $4/MTok output)
```
### OpenAI GPT-4
```
[ai]
enabled = true
provider = "openai"
model = "gpt-4-turbo" # or "gpt-4", "gpt-4o"
api_key = "${OPENAI_API_KEY}"
api_base = "https://api.openai.com/v1"
[ai.request]
max_tokens = 4096
temperature = 0.7
top_p = 0.95
# Supported models
# - gpt-4: Most capable ($0.03/1K input, $0.06/1K output)
# - gpt-4-turbo: Better at code ($0.01/1K input, $0.03/1K output)
# - gpt-4o: Latest, multi-modal ($5/MTok input, $15/MTok output)
```
### Local Models
```
[ai]
enabled = true
provider = "local"
model = "llama2-70b" # or "mistral", "neural-chat"
api_base = "http://localhost:8000" # Local Ollama or LM Studio
# Local model support
# - Ollama: docker run -d -v ollama:/root/.ollama -p 11434:11434 ollama/ollama
# - LM Studio: GUI app with API
# - vLLM: High-throughput serving
# - llama.cpp: CPU inference
[ai.local]
gpu_enabled = true
gpu_memory_gb = 24
max_batch_size = 4
```
## Feature Configuration
### Enable Specific Features
```
[ai.features]
# Core features (production-ready)
rag_search = true # Retrieval-Augmented Generation
config_generation = true # Generate Nickel from natural language
mcp_server = true # Model Context Protocol server
troubleshooting = true # AI-assisted debugging
# Form assistance (planned Q2 2025)
form_assistance = false # AI suggestions in forms
form_explanations = false # AI explains validation errors
# Agents (planned Q2 2025)
autonomous_agents = false # AI agents for workflows
agent_learning = false # Agents learn from deployments
# Advanced features
fine_tuning = false # Fine-tune models for domain
knowledge_base = false # Custom knowledge base per workspace
```
## Cache Configuration
### Cache Strategy
```
[ai.cache]
enabled = true
cache_type = "memory" # or "redis", "disk"
ttl_seconds = 3600 # Cache entry lifetime
# Memory cache (recommended for single server)
[ai.cache.memory]
max_size_mb = 500
eviction_policy = "lru" # Least Recently Used
# Redis cache (recommended for distributed)
[ai.cache.redis]
url = "redis://localhost:6379"
db = 0
password = "${REDIS_PASSWORD}"
ttl_seconds = 3600
# Disk cache (recommended for persistent caching)
[ai.cache.disk]
path = "/var/cache/provisioning/ai"
max_size_mb = 5000
# Semantic caching (for RAG)
[ai.cache.semantic]
enabled = true
similarity_threshold = 0.95 # Cache hit if query similarity > 0.95
cache_embeddings = true # Cache embedding vectors
```
### Cache Metrics
```
# Monitor cache performance
provisioning admin cache stats ai
# Clear cache
provisioning admin cache clear ai
# Analyze cache efficiency
provisioning admin cache analyze ai --hours 24
```
## Rate Limiting and Cost Control
### Rate Limits
```
[ai.limits]
# Tokens per request
max_tokens = 4096
max_input_tokens = 8192
max_output_tokens = 4096
# Requests per minute/hour
rpm_limit = 60 # Requests per minute
rpm_burst = 100 # Allow bursts up to 100 RPM
# Daily cost limit
daily_cost_limit_usd = 100
warn_at_percent = 80 # Warn when at 80% of daily limit
stop_at_percent = 95 # Stop accepting requests at 95%
# Token usage tracking
track_token_usage = true
track_cost_per_request = true
```
### Cost Budgeting
```
[ai.budget]
enabled = true
monthly_limit_usd = 1000
# Budget alerts
alert_at_percent = [50, 75, 90]
alert_email = "ops@company.com"
alert_slack = "https://hooks.slack.com/services/..."
# Cost by provider
[ai.budget.providers]
anthropic_limit = 500
openai_limit = 300
local_limit = 0 # Free (run locally)
```
### Track Costs
```
# View cost metrics
provisioning admin costs show ai --period month
# Forecast cost
provisioning admin costs forecast ai --days 30
# Analyze cost by feature
provisioning admin costs analyze ai --by feature
# Export cost report
provisioning admin costs export ai --format csv --output costs.csv
```
## Security Configuration
### Authentication
```
[ai.auth]
# API key from environment variable
api_key = "${PROVISIONING_AI_API_KEY}"
# Or from secure store
api_key_vault = "secrets/ai-api-key"
# Token rotation
rotate_key_days = 90
rotation_alert_days = 7
# Request signing (for cloud providers)
sign_requests = true
signing_method = "hmac-sha256"
```
### Authorization (Cedar)
```
[ai.authorization]
enabled = true
policy_file = "provisioning/policies/ai-policies.cedar"
# Example policies:
# allow(principal, action, resource) when principal.role == "admin"
# allow(principal == ?principal, action == "ai_generate_config", resource)
# when principal.workspace == resource.workspace
```
### Data Protection
```
[ai.security]
# Sanitize data before sending to external LLM
sanitize_pii = true
sanitize_secrets = true
redact_patterns = [
"(?i)password\\s*[:=]\\s*[^\\s]+", # Passwords
"(?i)api[_-]?key\\s*[:=]\\s*[^\\s]+", # API keys
"(?i)secret\\s*[:=]\\s*[^\\s]+", # Secrets
]
# Encryption
encryption_enabled = true
encryption_algorithm = "aes-256-gcm"
key_derivation = "argon2id"
# Local-only mode (never send to external LLM)
local_only = false # Set true for air-gapped deployments
```
## RAG Configuration
### Vector Store Setup
```
[ai.rag]
enabled = true
# SurrealDB backend
[ai.rag.database]
url = "surreal://localhost:8000"
username = "root"
password = "${SURREALDB_PASSWORD}"
namespace = "provisioning"
database = "ai_rag"
# Embedding model
[ai.rag.embedding]
provider = "openai" # or "anthropic", "local"
model = "text-embedding-3-small"
batch_size = 100
cache_embeddings = true
# Search configuration
[ai.rag.search]
hybrid_enabled = true
vector_weight = 0.7 # Weight for vector search
keyword_weight = 0.3 # Weight for BM25 search
top_k = 5 # Number of results to return
rerank_enabled = false # Use cross-encoder to rerank results
# Chunking strategy
[ai.rag.chunking]
markdown_chunk_size = 1024
markdown_overlap = 256
code_chunk_size = 512
code_overlap = 128
```
### Index Management
```
# Create indexes
provisioning ai index create rag
# Rebuild indexes
provisioning ai index rebuild rag
# Show index status
provisioning ai index status rag
# Remove old indexes
provisioning ai index cleanup rag --older-than 30days
```
## MCP Server Configuration
### MCP Server Setup
```
[ai.mcp]
enabled = true
port = 3000
host = "127.0.0.1" # Change to 0.0.0.0 for network access
# Tool registry
[ai.mcp.tools]
generate_config = true
validate_config = true
search_docs = true
troubleshoot_deployment = true
get_schema = true
check_compliance = true
# Rate limiting for tool calls
rpm_limit = 30
burst_limit = 50
# Tool request timeout
timeout_seconds = 30
```
### MCP Client Configuration
```
~/.claude/claude_desktop_config.json:
{
"mcpServers": {
"provisioning": {
"command": "provisioning-mcp-server",
"args": ["--config", "/etc/provisioning/ai.toml"],
"env": {
"PROVISIONING_API_KEY": "sk-ant-...",
"RUST_LOG": "info"
}
}
}
}
```
## Logging and Observability
### Logging Configuration
```
[ai.logging]
level = "info" # or "debug", "warn", "error"
format = "json" # or "text"
output = "stdout" # or "file"
# Log file
[ai.logging.file]
path = "/var/log/provisioning/ai.log"
max_size_mb = 100
max_backups = 10
retention_days = 30
# Log filters
[ai.logging.filters]
log_requests = true
log_responses = false # Don't log full responses (verbose)
log_token_usage = true
log_costs = true
```
### Metrics and Monitoring
```
# View AI service metrics
provisioning admin metrics show ai
# Prometheus metrics endpoint
curl http://localhost:8083/metrics
# Key metrics:
# - ai_requests_total: Total requests by provider/model
# - ai_request_duration_seconds: Request latency
# - ai_token_usage_total: Token consumption by provider
# - ai_cost_total: Cumulative cost by provider
# - ai_cache_hits: Cache hit rate
# - ai_errors_total: Errors by type
```
## Health Checks
### Configuration Validation
```
# Validate configuration syntax
provisioning config validate ai
# Test provider connectivity
provisioning ai test provider anthropic
# Test RAG system
provisioning ai test rag
# Test MCP server
provisioning ai test mcp
# Full health check
provisioning ai health-check
```
## Environment Variables
### Common Settings
```
# Provider configuration
export PROVISIONING_AI_PROVIDER="anthropic"
export PROVISIONING_AI_MODEL="claude-sonnet-4"
export PROVISIONING_AI_API_KEY="sk-ant-..."
# Feature flags
export PROVISIONING_AI_ENABLED="true"
export PROVISIONING_AI_CACHE_ENABLED="true"
export PROVISIONING_AI_RAG_ENABLED="true"
# Cost control
export PROVISIONING_AI_DAILY_LIMIT_USD="100"
export PROVISIONING_AI_RPM_LIMIT="60"
# Security
export PROVISIONING_AI_SANITIZE_PII="true"
export PROVISIONING_AI_LOCAL_ONLY="false"
# Logging
export RUST_LOG="provisioning::ai=info"
```
## Troubleshooting Configuration
### Common Issues
**Issue**: API key not recognized
```
# Check environment variable is set
echo $PROVISIONING_AI_API_KEY
# Test connectivity
provisioning ai test provider anthropic
# Verify key format (should start with sk-ant- or sk-)
provisioning config show ai | grep api_key
```
**Issue**: Cache not working
```
# Check cache status
provisioning admin cache stats ai
# Clear cache and restart
provisioning admin cache clear ai
provisioning service restart ai-service
# Enable cache debugging
RUST_LOG=provisioning::cache=debug provisioning-ai-service
```
**Issue**: RAG search not finding results
```
# Rebuild RAG indexes
provisioning ai index rebuild rag
# Test search
provisioning ai query "test query"
# Check index status
provisioning ai index status rag
```
## Upgrading Configuration
### Backward Compatibility
New AI versions automatically migrate old configurations:
```
# Check configuration version
provisioning config version ai
# Migrate configuration to latest version
provisioning config migrate ai --auto
# Backup before migration
provisioning config backup ai
```
## Production Deployment
### Recommended Production Settings
```
[ai]
enabled = true
provider = "anthropic"
model = "claude-sonnet-4"
api_key = "${PROVISIONING_AI_API_KEY}"
[ai.features]
rag_search = true
config_generation = true
mcp_server = true
troubleshooting = true
[ai.cache]
enabled = true
cache_type = "redis"
ttl_seconds = 3600
[ai.limits]
rpm_limit = 60
daily_cost_limit_usd = 1000
max_tokens = 4096
[ai.security]
sanitize_pii = true
sanitize_secrets = true
encryption_enabled = true
[ai.logging]
level = "warn" # Less verbose in production
format = "json"
output = "file"
[ai.rag.database]
url = "surreal://surrealdb-cluster:8000"
```
## Related Documentation
- [Architecture](architecture.md) - System overview
- [RAG System](rag-system.md) - Vector database setup
- [MCP Integration](mcp-integration.md) - MCP configuration
- [Security Policies](security-policies.md) - Authorization policies
- [Cost Management](cost-management.md) - Budget tracking
---
**Last Updated**: 2025-01-13
**Status**: ✅ Production-Ready
**Versions Supported**: v1.0+

View File

@ -1 +1,497 @@
# AI Cost Management and Optimization
**Status**: ✅ Production-Ready (cost tracking, budgets, caching benefits)
Comprehensive guide to managing LLM API costs, optimizing usage through caching and rate limiting, and tracking spending. The provisioning platform
includes built-in cost controls to prevent runaway spending while maximizing value.
## Cost Overview
### API Provider Pricing
| Provider | Model | Input | Output | Notes |
| -------- | ----- | ----- | ------ | ----- |
| **Anthropic** | Claude Sonnet 4 | $3 | $15 | $0.003 input / $0.015 output |
| | Claude Opus 4 | $15 | $45 | Higher accuracy, longer context |
| | Claude Haiku 4 | $0.80 | $4 | Fast, for simple queries |
| **OpenAI** | GPT-4 Turbo | $0.01 | $0.03 | Per 1K tokens |
| | GPT-4 | $0.03 | $0.06 | Legacy, avoid |
| | GPT-4o | $5 | $15 | Per MTok |
| **Local** | Llama 2, Mistral | Free | Free | Hardware cost only |
### Cost Examples
```
Scenario 1: Generate simple database configuration
- Input: 500 tokens (description + schema)
- Output: 200 tokens (generated config)
- Cost: (500 × $3 + 200 × $15) / 1,000,000 = $0.0045
- With caching (hit rate 50%): $0.0023
Scenario 2: Deep troubleshooting analysis
- Input: 5000 tokens (logs + context)
- Output: 2000 tokens (analysis + recommendations)
- Cost: (5000 × $3 + 2000 × $15) / 1,000,000 = $0.045
- With caching (hit rate 70%): $0.0135
Scenario 3: Monthly usage (typical organization)
- ~1000 config generations @ $0.005 = $5
- ~500 troubleshooting calls @ $0.045 = $22.50
- ~2000 form assists @ $0.002 = $4
- ~200 agent executions @ $0.10 = $20
- **Total: ~$50-100/month for small org**
- **Total: ~$500-1000/month for large org**
```
## Cost Control Mechanisms
### Request Caching
Caching is the primary cost reduction strategy, cutting costs by 50-80%:
```
Without Caching:
User 1: "Generate PostgreSQL config" → API call → $0.005
User 2: "Generate PostgreSQL config" → API call → $0.005
Total: $0.010 (2 identical requests)
With LRU Cache:
User 1: "Generate PostgreSQL config" → API call → $0.005
User 2: "Generate PostgreSQL config" → Cache hit → $0.00001
Total: $0.00501 (500x cost reduction for identical)
With Semantic Cache:
User 1: "Generate PostgreSQL database config" → API call → $0.005
User 2: "Create a PostgreSQL database" → Semantic hit → $0.00001
(Slightly different wording, but same intent)
Total: $0.00501 (near 500x reduction for similar)
```
### Cache Configuration
```
[ai.cache]
enabled = true
cache_type = "redis" # Distributed cache across instances
ttl_seconds = 3600 # 1-hour cache lifetime
# Cache size limits
max_size_mb = 500
eviction_policy = "lru" # Least Recently Used
# Semantic caching - cache similar queries
[ai.cache.semantic]
enabled = true
similarity_threshold = 0.95 # Cache if 95%+ similar to previous query
cache_embeddings = true # Cache embedding vectors themselves
# Cache metrics
[ai.cache.metrics]
track_hit_rate = true
track_space_usage = true
alert_on_low_hit_rate = true
```
### Rate Limiting
Prevent usage spikes from unexpected costs:
```
[ai.limits]
# Per-request limits
max_tokens = 4096
max_input_tokens = 8192
max_output_tokens = 4096
# Throughput limits
rpm_limit = 60 # 60 requests per minute
rpm_burst = 100 # Allow burst to 100
daily_request_limit = 5000 # Max 5000 requests/day
# Cost limits
daily_cost_limit_usd = 100 # Stop at $100/day
monthly_cost_limit_usd = 2000 # Stop at $2000/month
# Budget alerts
warn_at_percent = 80 # Warn when at 80% of daily budget
stop_at_percent = 95 # Stop when at 95% of budget
```
### Workspace-Level Budgets
```
[ai.workspace_budgets]
# Per-workspace cost limits
dev.daily_limit_usd = 10
staging.daily_limit_usd = 50
prod.daily_limit_usd = 100
# Can override globally for specific workspaces
teams.team-a.monthly_limit = 500
teams.team-b.monthly_limit = 300
```
## Cost Tracking
### Track Spending
```
# View current month spending
provisioning admin costs show ai
# Forecast monthly spend
provisioning admin costs forecast ai --days-remaining 15
# Analyze by feature
provisioning admin costs analyze ai --by feature
# Analyze by user
provisioning admin costs analyze ai --by user
# Export for billing
provisioning admin costs export ai --format csv --output costs.csv
```
### Cost Breakdown
```
Month: January 2025
Total Spending: $285.42
By Feature:
Config Generation: $150.00 (52%) [300 requests × avg $0.50]
Troubleshooting: $95.00 (33%) [80 requests × avg $1.19]
Form Assistance: $30.00 (11%) [5000 requests × avg $0.006]
Agents: $10.42 (4%) [20 runs × avg $0.52]
By Provider:
Anthropic (Claude): $200.00 (70%)
OpenAI (GPT-4): $85.42 (30%)
Local: $0 (0%)
By User:
alice@company.com: $50.00 (18%)
bob@company.com: $45.00 (16%)
...
other (20 users): $190.42 (67%)
By Workspace:
production: $150.00 (53%)
staging: $85.00 (30%)
development: $50.42 (18%)
Cache Performance:
Requests: 50,000
Cache hits: 35,000 (70%)
Cache misses: 15,000 (30%)
Cost savings from cache: ~$175 (38% reduction)
```
## Optimization Strategies
### Strategy 1: Increase Cache Hit Rate
```
# Longer TTL = more cache hits
[ai.cache]
ttl_seconds = 7200 # 2 hours instead of 1 hour
# Semantic caching helps with slight variations
[ai.cache.semantic]
enabled = true
similarity_threshold = 0.90 # Lower threshold = more hits
# Result: Increase hit rate from 65% → 80%
# Cost reduction: 15% → 23%
```
### Strategy 2: Use Local Models
```
[ai]
provider = "local"
model = "mistral-7b" # Free, runs on GPU
# Cost: Hardware ($5-20/month) instead of API calls
# Savings: 50-100 config generations/month × $0.005 = $0.25-0.50
# Hardware amortized cost: <$0.50/month on existing GPU
# Tradeoff: Slightly lower quality, 2x slower
```
### Strategy 3: Use Haiku for Simple Tasks
```
Task Complexity vs Model:
Simple (form assist): Claude Haiku 4 ($0.80/$4)
Medium (config gen): Claude Sonnet 4 ($3/$15)
Complex (agents): Claude Opus 4 ($15/$45)
Example optimization:
Before: All tasks use Sonnet 4
- 5000 form assists/month: 5000 × $0.006 = $30
After: Route by complexity
- 5000 form assists → Haiku: 5000 × $0.001 = $5 (83% savings)
- 200 config gen → Sonnet: 200 × $0.005 = $1
- 10 agent runs → Opus: 10 × $0.10 = $1
```
### Strategy 4: Batch Operations
```
# Instead of individual requests, batch similar operations:
# Before: 100 configs, 100 separate API calls
provisioning ai generate "PostgreSQL config" --output db1.ncl
provisioning ai generate "PostgreSQL config" --output db2.ncl
# ... 100 calls = $0.50
# After: Batch similar requests
provisioning ai batch --input configs-list.yaml
# Groups similar requests, reuses cache
# ... 3-5 API calls = $0.02 (90% savings)
```
### Strategy 5: Smart Feature Enablement
```
[ai.features]
# Enable high-ROI features
config_generation = true # High value, moderate cost
troubleshooting = true # High value, higher cost
rag_search = true # Low cost, high value
# Disable low-ROI features if cost-constrained
form_assistance = false # Low value, non-zero cost (if budget tight)
agents = false # Complex, requires multiple calls
```
## Budget Management Workflow
### 1. Set Budget
```
# Set monthly budget
provisioning config set ai.budget.monthly_limit_usd 500
# Set daily limit
provisioning config set ai.limits.daily_cost_limit_usd 50
# Set workspace limits
provisioning config set ai.workspace_budgets.prod.monthly_limit 300
provisioning config set ai.workspace_budgets.dev.monthly_limit 100
```
### 2. Monitor Spending
```
# Daily check
provisioning admin costs show ai
# Weekly analysis
provisioning admin costs analyze ai --period week
# Monthly review
provisioning admin costs analyze ai --period month
```
### 3. Adjust If Needed
```
# If overspending:
# - Increase cache TTL
# - Enable local models for simple tasks
# - Reduce form assistance (high volume, low cost but adds up)
# - Route complex tasks to Haiku instead of Opus
# If underspending:
# - Enable new features (agents, form assistance)
# - Increase rate limits
# - Lower cache hit requirements (broader semantic matching)
```
### 4. Forecast and Plan
```
# Current monthly run rate
provisioning admin costs forecast ai
# If trending over budget, recommend actions:
# - Reduce daily limit
# - Switch to local model for 50% of tasks
# - Increase batch processing
# If trending under budget:
# - Enable agents for automation workflows
# - Enable form assistance across all workspaces
```
## Cost Allocation
### Chargeback Models
**Per-Workspace Model**:
```
Development workspace: $50/month
Staging workspace: $100/month
Production workspace: $300/month
------
Total: $450/month
```
**Per-User Model**:
```
Each user charged based on their usage
Encourages efficiency
Difficult to track/allocate
```
**Shared Pool Model**:
```
All teams share $1000/month budget
Budget splits by consumption rate
Encourages optimization
Most flexible
```
## Cost Reporting
### Generate Reports
```
# Monthly cost report
provisioning admin costs report ai \
--format pdf \
--period month \
--output cost-report-2025-01.pdf
# Detailed analysis for finance
provisioning admin costs report ai \
--format xlsx \
--include-forecasts \
--include-optimization-suggestions
# Executive summary
provisioning admin costs report ai \
--format markdown \
--summary-only
```
## Cost-Benefit Analysis
### ROI Examples
```
Scenario 1: Developer Time Savings
Problem: Manual config creation takes 2 hours
Solution: AI config generation, 10 minutes (12x faster)
Time saved: 1.83 hours/config
Hourly rate: $100
Value: $183/config
AI cost: $0.005/config
ROI: 36,600x (far exceeds cost)
Scenario 2: Troubleshooting Efficiency
Problem: Manual debugging takes 4 hours
Solution: AI troubleshooting analysis, 2 minutes
Time saved: 3.97 hours
Value: $397/incident
AI cost: $0.045/incident
ROI: 8,822x
Scenario 3: Reduction in Failed Deployments
Before: 5% of 1000 deployments fail (50 failures)
Failure cost: $500 each (lost time, data cleanup)
Total: $25,000/month
After: With AI analysis, 2% fail (20 failures)
Total: $10,000/month
Savings: $15,000/month
AI cost: $200/month
Net savings: $14,800/month
ROI: 74:1
```
## Advanced Cost Optimization
### Hybrid Strategy (Recommended)
```
✓ Local models for:
- Form assistance (high volume, low complexity)
- Simple validation checks
- Document retrieval (RAG)
Cost: Hardware only (~$500 setup)
✓ Cloud API for:
- Complex generation (requires latest model capability)
- Troubleshooting (needs high accuracy)
- Agents (complex reasoning)
Cost: $50-200/month per organization
Result:
- 70% of requests → Local (free after hardware amortization)
- 30% of requests → Cloud ($50/month)
- 80% overall cost reduction vs cloud-only
```
## Monitoring and Alerts
### Cost Anomaly Detection
```
# Enable anomaly detection
provisioning config set ai.monitoring.anomaly_detection true
# Set thresholds
provisioning config set ai.monitoring.cost_spike_percent 150
# Alert if daily cost is 150% of average
# System alerts:
# - Daily cost exceeded by 10x normal
# - New expensive operation (agent run)
# - Cache hit rate dropped below 40%
# - Rate limit nearly exhausted
```
### Alert Configuration
```
[ai.monitoring.alerts]
enabled = true
spike_threshold_percent = 150
check_interval_minutes = 5
[ai.monitoring.alerts.channels]
email = "ops@company.com"
slack = "https://hooks.slack.com/..."
pagerduty = "integration-key"
# Alert thresholds
[ai.monitoring.alerts.thresholds]
daily_budget_warning_percent = 80
daily_budget_critical_percent = 95
monthly_budget_warning_percent = 70
```
## Related Documentation
- [Architecture](architecture.md) - AI system overview
- [Configuration](configuration.md) - Cost control settings
- [Security Policies](security-policies.md) - Cost-aware policies
- [RAG System](rag-system.md) - Caching details
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Last Updated**: 2025-01-13
**Status**: ✅ Production-Ready
**Average Savings**: 50-80% through caching
**Typical Cost**: $50-500/month per organization
**ROI**: 100:1 to 10,000:1 depending on use case

View File

@ -1 +1,594 @@
# Model Context Protocol (MCP) Integration
**Status**: ✅ Production-Ready (MCP 0.6.0+, integrated with Claude, compatible with all LLMs)
The MCP server provides standardized Model Context Protocol integration, allowing external LLMs (Claude, GPT-4, local models) to access provisioning
platform capabilities as tools. This enables complex multi-step workflows, tool composition, and integration with existing LLM applications.
## Architecture Overview
The MCP integration follows the Model Context Protocol specification:
```
┌──────────────────────────────────────────────────────────────┐
│ External LLM (Claude, GPT-4, etc.) │
└────────────────────┬─────────────────────────────────────────┘
│ Tool Calls (JSON-RPC)
┌──────────────────────────────────────────────────────────────┐
│ MCP Server (provisioning/platform/crates/mcp-server) │
│ │
│ ┌───────────────────────────────────────────────────────┐ │
│ │ Tool Registry │ │
│ │ - generate_config(description, schema) │ │
│ │ - validate_config(config) │ │
│ │ - search_docs(query) │ │
│ │ - troubleshoot_deployment(logs) │ │
│ │ - get_schema(name) │ │
│ │ - check_compliance(config, policy) │ │
│ └───────────────────────────────────────────────────────┘ │
│ │ │
│ ▼ │
│ ┌───────────────────────────────────────────────────────┐ │
│ │ Implementation Layer │ │
│ │ - AI Service client (ai-service port 8083) │ │
│ │ - Validator client │ │
│ │ - RAG client (SurrealDB) │ │
│ │ - Schema loader │ │
│ └───────────────────────────────────────────────────────┘ │
└──────────────────────────────────────────────────────────────┘
```
## MCP Server Launch
The MCP server is started as a stdio-based service:
```
# Start MCP server (stdio transport)
provisioning-mcp-server --config /etc/provisioning/ai.toml
# With debug logging
RUST_LOG=debug provisioning-mcp-server --config /etc/provisioning/ai.toml
# In Claude Desktop configuration
~/.claude/claude_desktop_config.json:
{
"mcpServers": {
"provisioning": {
"command": "provisioning-mcp-server",
"args": ["--config", "/etc/provisioning/ai.toml"],
"env": {
"PROVISIONING_TOKEN": "your-auth-token"
}
}
}
}
```
## Available Tools
### 1. Config Generation
**Tool**: `generate_config`
Generate infrastructure configuration from natural language description.
```
{
"name": "generate_config",
"description": "Generate a Nickel infrastructure configuration from a natural language description",
"inputSchema": {
"type": "object",
"properties": {
"description": {
"type": "string",
"description": "Natural language description of desired infrastructure"
},
"schema": {
"type": "string",
"description": "Target schema name (e.g., 'database', 'kubernetes', 'network'). Optional."
},
"format": {
"type": "string",
"enum": ["nickel", "toml"],
"description": "Output format (default: nickel)"
}
},
"required": ["description"]
}
}
```
**Example Usage**:
```
# Via MCP client
mcp-client provisioning generate_config \
--description "Production PostgreSQL cluster with encryption and daily backups" \
--schema database
# Claude desktop prompt:
# @provisioning: Generate a production PostgreSQL setup with automated backups
```
**Response**:
```
{
database = {
engine = "postgresql",
version = "15.0",
instance = {
instance_class = "db.r6g.xlarge",
allocated_storage_gb = 100,
iops = 3000,
},
security = {
encryption_enabled = true,
encryption_key_id = "kms://prod-db-key",
tls_enabled = true,
tls_version = "1.3",
},
backup = {
enabled = true,
retention_days = 30,
preferred_window = "03:00-04:00",
copy_to_region = "us-west-2",
},
monitoring = {
enhanced_monitoring_enabled = true,
monitoring_interval_seconds = 60,
log_exports = ["postgresql"],
},
}
}
```
### 2. Config Validation
**Tool**: `validate_config`
Validate a Nickel configuration against schemas and policies.
```
{
"name": "validate_config",
"description": "Validate a Nickel configuration file",
"inputSchema": {
"type": "object",
"properties": {
"config": {
"type": "string",
"description": "Nickel configuration content or file path"
},
"schema": {
"type": "string",
"description": "Schema name to validate against (optional)"
},
"strict": {
"type": "boolean",
"description": "Enable strict validation (default: true)"
}
},
"required": ["config"]
}
}
```
**Example Usage**:
```
# Validate configuration
mcp-client provisioning validate_config \
--config "$(cat workspaces/prod/database.ncl)"
# With specific schema
mcp-client provisioning validate_config \
--config "workspaces/prod/kubernetes.ncl" \
--schema kubernetes
```
**Response**:
```
{
"valid": true,
"errors": [],
"warnings": [
"Consider enabling automated backups for production use"
],
"metadata": {
"schema": "kubernetes",
"version": "1.28",
"validated_at": "2025-01-13T10:45:30Z"
}
}
```
### 3. Documentation Search
**Tool**: `search_docs`
Search infrastructure documentation using RAG system.
```
{
"name": "search_docs",
"description": "Search provisioning documentation for information",
"inputSchema": {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query (natural language)"
},
"top_k": {
"type": "integer",
"description": "Number of results (default: 5)"
},
"doc_type": {
"type": "string",
"enum": ["guide", "schema", "example", "troubleshooting"],
"description": "Filter by document type (optional)"
}
},
"required": ["query"]
}
}
```
**Example Usage**:
```
# Search documentation
mcp-client provisioning search_docs \
--query "How do I configure PostgreSQL with replication?"
# Get examples
mcp-client provisioning search_docs \
--query "Kubernetes networking" \
--doc_type example \
--top_k 3
```
**Response**:
```
{
"results": [
{
"source": "provisioning/docs/src/guides/database-replication.md",
"excerpt": "PostgreSQL logical replication enables streaming of changes...",
"relevance": 0.94,
"section": "Setup Logical Replication"
},
{
"source": "provisioning/schemas/database.ncl",
"excerpt": "replication = { enabled = true, mode = \"logical\", ... }",
"relevance": 0.87,
"section": "Replication Configuration"
}
]
}
```
### 4. Deployment Troubleshooting
**Tool**: `troubleshoot_deployment`
Analyze deployment failures and suggest fixes.
```
{
"name": "troubleshoot_deployment",
"description": "Analyze deployment logs and suggest fixes",
"inputSchema": {
"type": "object",
"properties": {
"deployment_id": {
"type": "string",
"description": "Deployment ID (e.g., 'deploy-2025-01-13-001')"
},
"logs": {
"type": "string",
"description": "Deployment logs (optional, if deployment_id not provided)"
},
"error_analysis_depth": {
"type": "string",
"enum": ["shallow", "deep"],
"description": "Analysis depth (default: deep)"
}
}
}
}
```
**Example Usage**:
```
# Troubleshoot recent deployment
mcp-client provisioning troubleshoot_deployment \
--deployment_id "deploy-2025-01-13-001"
# With custom logs
mcp-client provisioning troubleshoot_deployment \
| --logs "$(journalctl -u provisioning --no-pager | tail -100)" |
```
**Response**:
```
{
"status": "failure",
"root_cause": "Database connection timeout during migration phase",
"analysis": {
"phase": "database_migration",
"error_type": "connectivity",
"confidence": 0.95
},
"suggestions": [
"Verify database security group allows inbound on port 5432",
"Check database instance status (may be rebooting)",
"Increase connection timeout in configuration"
],
"corrected_config": "...generated Nickel config with fixes...",
"similar_issues": [
"[https://docs/troubleshooting/database-connectivity.md"](https://docs/troubleshooting/database-connectivity.md")
]
}
```
### 5. Get Schema
**Tool**: `get_schema`
Retrieve schema definition with examples.
```
{
"name": "get_schema",
"description": "Get a provisioning schema definition",
"inputSchema": {
"type": "object",
"properties": {
"schema_name": {
"type": "string",
"description": "Schema name (e.g., 'database', 'kubernetes')"
},
"format": {
"type": "string",
"enum": ["schema", "example", "documentation"],
"description": "Response format (default: schema)"
}
},
"required": ["schema_name"]
}
}
```
**Example Usage**:
```
# Get schema definition
mcp-client provisioning get_schema --schema_name database
# Get example configuration
mcp-client provisioning get_schema \
--schema_name kubernetes \
--format example
```
### 6. Compliance Check
**Tool**: `check_compliance`
Verify configuration against compliance policies (Cedar).
```
{
"name": "check_compliance",
"description": "Check configuration against compliance policies",
"inputSchema": {
"type": "object",
"properties": {
"config": {
"type": "string",
"description": "Configuration to check"
},
"policy_set": {
"type": "string",
"description": "Policy set to check against (e.g., 'pci-dss', 'hipaa', 'sox')"
}
},
"required": ["config", "policy_set"]
}
}
```
**Example Usage**:
```
# Check against PCI-DSS
mcp-client provisioning check_compliance \
--config "$(cat workspaces/prod/database.ncl)" \
--policy_set pci-dss
```
## Integration Examples
### Claude Desktop (Most Common)
```
~/.claude/claude_desktop_config.json:
{
"mcpServers": {
"provisioning": {
"command": "provisioning-mcp-server",
"args": ["--config", "/etc/provisioning/ai.toml"],
"env": {
"PROVISIONING_API_KEY": "sk-...",
"PROVISIONING_BASE_URL": "[http://localhost:8083"](http://localhost:8083")
}
}
}
}
```
**Usage in Claude**:
```
User: I need a production Kubernetes cluster in AWS with automatic scaling
Claude can now use provisioning tools:
I'll help you create a production Kubernetes cluster. Let me:
1. Search the documentation for best practices
2. Generate a configuration template
3. Validate it against your policies
4. Provide the final configuration
```
### OpenAI Function Calling
```
import openai
tools = [
{
"type": "function",
"function": {
"name": "generate_config",
"description": "Generate infrastructure configuration",
"parameters": {
"type": "object",
"properties": {
"description": {
"type": "string",
"description": "Infrastructure description"
}
},
"required": ["description"]
}
}
}
]
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[{"role": "user", "content": "Create a PostgreSQL database"}],
tools=tools
)
```
### Local LLM Integration (Ollama)
```
# Start Ollama with provisioning MCP
OLLAMA_MCP_SERVERS=provisioning://localhost:3000 \
ollama serve
# Use with llama2 or mistral
curl http://localhost:11434/api/generate \
-d '{
"model": "mistral",
"prompt": "Create a Kubernetes cluster",
"tools": [{"type": "mcp", "server": "provisioning"}]
}'
```
## Error Handling
Tools return consistent error responses:
```
{
"error": {
"code": "VALIDATION_ERROR",
"message": "Configuration has 3 validation errors",
"details": [
{
"field": "database.version",
"message": "PostgreSQL version 9.6 is deprecated",
"severity": "error"
},
{
"field": "backup.retention_days",
"message": "Recommended minimum is 30 days for production",
"severity": "warning"
}
]
}
}
```
## Performance
| Operation | Latency | Notes |
| --------- | ------- | ----- |
| generate_config | 2-5s | Depends on LLM and config complexity |
| validate_config | 500-1000ms | Parallel schema validation |
| search_docs | 300-800ms | RAG hybrid search |
| troubleshoot | 3-8s | Depends on log size and analysis depth |
| get_schema | 100-300ms | Cached schema retrieval |
| check_compliance | 500-2000ms | Policy evaluation |
## Configuration
See [Configuration Guide](configuration.md) for MCP-specific settings:
- MCP server port and binding
- Tool registry customization
- Rate limiting for tool calls
- Access control (Cedar policies)
## Security
### Authentication
- Tools require valid provisioning API token
- Token scoped to user's workspace
- All tool calls authenticated and logged
### Authorization
- Cedar policies control which tools user can call
- Example: `allow(principal, action, resource)` when `role == "admin"`
- Detailed audit trail of all tool invocations
### Data Protection
- Secrets never passed through MCP
- Configuration sanitized before analysis
- PII removed from logs sent to external LLMs
## Monitoring and Debugging
```
# Monitor MCP server
provisioning admin mcp status
# View MCP tool calls
provisioning admin logs --filter "mcp_tools" --tail 100
# Debug tool response
RUST_LOG=provisioning::mcp=debug provisioning-mcp-server
```
## Related Documentation
- [Architecture](architecture.md) - AI system overview
- [RAG System](rag-system.md) - Documentation search
- [Configuration](configuration.md) - MCP setup
- [API Reference](api-reference.md) - Detailed API endpoints
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Last Updated**: 2025-01-13
**Status**: ✅ Production-Ready
**MCP Version**: 0.6.0+
**Supported LLMs**: Claude, GPT-4, Llama, Mistral, all MCP-compatible models

View File

@ -1 +1,469 @@
# Natural Language Configuration Generation
**Status**: 🔴 Planned (Q2 2025 target)
Natural Language Configuration (NLC) is a planned feature that enables users to describe infrastructure requirements in plain English and have the
system automatically generate validated Nickel configurations. This feature combines natural language understanding with schema-aware generation and
validation.
## Feature Overview
### What It Does
Transform infrastructure descriptions into production-ready Nickel configurations:
```
User Input:
"Create a production PostgreSQL cluster with 100GB storage,
daily backups, encryption enabled, and cross-region replication
to us-west-2"
System Output:
provisioning/schemas/database.ncl (validated, production-ready)
```
### Primary Use Cases
1. **Rapid Prototyping**: From description to working config in seconds
2. **Infrastructure Documentation**: Describe infrastructure as code
3. **Configuration Templates**: Generate reusable patterns
4. **Non-Expert Operations**: Enable junior developers to provision infrastructure
5. **Configuration Migration**: Describe existing infrastructure to generate Nickel
## Architecture
### Generation Pipeline
```
Input Description (Natural Language)
┌─────────────────────────────────────┐
│ Understanding & Analysis │
│ - Intent extraction │
│ - Entity recognition │
│ - Constraint identification │
│ - Best practice inference │
└─────────────────────┬───────────────┘
┌─────────────────────────────────────┐
│ RAG Context Retrieval │
│ - Find similar configs │
│ - Retrieve best practices │
│ - Get schema examples │
│ - Identify constraints │
└─────────────────────┬───────────────┘
┌─────────────────────────────────────┐
│ Schema-Aware Generation │
│ - Map entities to schema fields │
│ - Apply type constraints │
│ - Include required fields │
│ - Generate valid Nickel │
└─────────────────────┬───────────────┘
┌─────────────────────────────────────┐
│ Validation & Refinement │
│ - Type checking │
│ - Schema validation │
│ - Policy compliance │
│ - Security checks │
└─────────────────────┬───────────────┘
┌─────────────────────────────────────┐
│ Output & Explanation │
│ - Generated Nickel config │
│ - Decision rationale │
│ - Alternative suggestions │
│ - Warnings if any │
└─────────────────────────────────────┘
```
## Planned Implementation Details
### 1. Intent Extraction
Extract structured intent from natural language:
```
Input: "Create a production PostgreSQL cluster with encryption and backups"
Extracted Intent:
{
resource_type: "database",
engine: "postgresql",
environment: "production",
requirements: [
{constraint: "encryption", type: "boolean", value: true},
{constraint: "backups", type: "enabled", frequency: "daily"},
],
modifiers: ["production"],
}
```
### 2. Entity Mapping
Map natural language entities to schema fields:
```
Description Terms → Schema Fields:
"100GB storage" → database.instance.allocated_storage_gb = 100
"daily backups" → backup.enabled = true, backup.frequency = "daily"
"encryption" → security.encryption_enabled = true
"cross-region" → backup.copy_to_region = "us-west-2"
"PostgreSQL 15" → database.engine_version = "15.0"
```
### 3. Prompt Engineering
Sophisticated prompting for schema-aware generation:
```
System Prompt:
You are generating Nickel infrastructure configurations.
Generate ONLY valid Nickel syntax.
Follow these rules:
- Use record syntax: `field = value`
- Type annotations must be valid
- All required fields must be present
- Apply best practices for [ENVIRONMENT]
Schema Context:
[Database schema from provisioning/schemas/database.ncl]
Examples:
[3 relevant examples from RAG]
User Request:
[User natural language description]
Generate the complete Nickel configuration.
Start with: let { database = {
```
### 4. Iterative Refinement
Handle generation errors through iteration:
```
Attempt 1: Generate initial config
↓ Validate
✗ Error: field `version` type mismatch (string vs number)
↓ Re-prompt with error
Attempt 2: Fix with context from error
↓ Validate
✓ Success: Config is valid
```
## Command Interface
### CLI Usage
```
# Simple generation
provisioning ai generate "PostgreSQL database for production"
# With schema specification
provisioning ai generate \
--schema database \
"Create PostgreSQL 15 with encryption and daily backups"
# Interactive generation (refine output)
provisioning ai generate --interactive \
"Kubernetes cluster on AWS"
# Generate and validate
provisioning ai generate \
--validate \
"Production Redis cluster with sentinel"
# Generate and save directly
provisioning ai generate \
--schema database \
--output workspaces/prod/database.ncl \
"PostgreSQL production setup"
# Batch generation from file
provisioning ai generate --batch descriptions.yaml
```
### Interactive Refinement
```
$ provisioning ai generate --interactive
> Describe infrastructure: Create production PostgreSQL cluster
Generated configuration shown.
> Refine: Add cross-region backup to us-west-2
Configuration updated.
> Refine: Use larger instance class for performance
Configuration updated.
> Accept? [y/n]: y
Configuration saved to: workspaces/prod/database.ncl
```
## Example Generations
### Example 1: Simple Database
**Input**:
```
"PostgreSQL database with 50GB storage and encryption"
```
**Output**:
```
let {
database = {
engine = "postgresql",
version = "15.0",
instance = {
instance_class = "db.t3.medium",
allocated_storage_gb = 50,
iops = 1000,
},
security = {
encryption_enabled = true,
tls_enabled = true,
tls_version = "1.3",
},
backup = {
enabled = true,
retention_days = 7,
},
}
}
```
**Rationale**:
- PostgreSQL 15 is current stable version
- db.t3.medium suitable for 50GB with general workload
- Encryption enabled per requirement
- Automatic backups with 7-day retention (default)
### Example 2: Complex Kubernetes Setup
**Input**:
```
"Production Kubernetes cluster in AWS with 3 availability zones,
auto-scaling from 3 to 10 nodes, managed PostgreSQL, and monitoring"
```
**Output**:
```
let {
kubernetes = {
version = "1.28.0",
cluster = {
name = "prod-cluster",
region = "us-east-1",
availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"],
},
node_group = {
min_size = 3,
max_size = 10,
desired_size = 3,
instance_type = "t3.large",
auto_scaling = {
enabled = true,
target_cpu = 70,
scale_down_delay = 300,
},
},
managed_services = {
postgres = {
enabled = true,
engine = "postgresql",
version = "15.0",
storage_gb = 100,
},
},
monitoring = {
prometheus = {enabled = true},
grafana = {enabled = true},
cloudwatch_integration = true,
},
networking = {
vpc_cidr = "10.0.0.0/16",
enable_nat_gateway = true,
enable_dns_hostnames = true,
},
}
}
```
**Rationale**:
- 3 AZs for high availability
- t3.large balances cost and performance for general workload
- Auto-scaling target 70% CPU (best practice)
- Managed PostgreSQL reduces operational overhead
- Full observability with Prometheus + Grafana
## Configuration and Constraints
### Configurable Generation Parameters
```
# In provisioning/config/ai.toml
[ai.generation]
# Which schema to use by default
default_schema = "database"
# Whether to require explicit environment specification
require_environment = false
# Optimization targets
optimization_target = "balanced" # or "cost", "performance"
# Best practices to always apply
best_practices = [
"encryption",
"high_availability",
"monitoring",
"backup",
]
# Constraints that limit generation
[ai.generation.constraints]
min_storage_gb = 10
max_instances = 100
allowed_engines = ["postgresql", "mysql", "mongodb"]
# Validation before accepting generated config
[ai.generation.validation]
strict_mode = true
require_security_review = false
require_compliance_check = true
```
### Safety Guardrails
1. **Required Fields**: All schema required fields must be present
2. **Type Validation**: Generated values must match schema types
3. **Security Checks**: Encryption/backups enabled for production
4. **Cost Estimation**: Warn if projected cost exceeds threshold
5. **Resource Limits**: Enforce organizational constraints
6. **Policy Compliance**: Check against Cedar policies
## User Workflow
### Typical Usage Session
```
# 1. Describe infrastructure need
$ provisioning ai generate "I need a database for my web app"
# System generates basic config, suggests refinements
# Generated config shown with explanations
# 2. Refine if needed
$ provisioning ai generate --interactive
# 3. Review and validate
$ provisioning ai validate workspaces/dev/database.ncl
# 4. Deploy
$ provisioning workspace apply workspaces/dev
# 5. Monitor
$ provisioning workspace logs database
```
## Integration with Other Systems
### RAG Integration
NLC uses RAG to find similar configurations:
```
User: "Create Kubernetes cluster"
RAG searches for:
- Existing Kubernetes configs in workspaces
- Kubernetes documentation and examples
- Best practices from provisioning/docs/guides/kubernetes.md
Context fed to LLM for generation
```
### Form Assistance
NLC and form assistance share components:
- Intent extraction for pre-filling forms
- Constraint validation for form field values
- Explanation generation for validation errors
### CLI Integration
```
# Generate then preview
| provisioning ai generate "PostgreSQL prod" | \ |
provisioning config preview
# Generate and apply
provisioning ai generate \
--apply \
--environment prod \
"PostgreSQL cluster"
```
## Testing and Validation
### Test Cases (Planned)
1. **Simple Descriptions**: Single resource, few requirements
- "PostgreSQL database"
- "Redis cache"
2. **Complex Descriptions**: Multiple resources, constraints
- "Kubernetes with managed database and monitoring"
- "Multi-region deployment with failover"
3. **Edge Cases**:
- Conflicting requirements
- Ambiguous specifications
- Deprecated technologies
4. **Refinement Cycles**:
- Interactive generation with multiple refines
- Error recovery and re-prompting
- User feedback incorporation
## Success Criteria (Q2 2025)
- ✅ Generates valid Nickel for 90% of user descriptions
- ✅ Generated configs pass all schema validation
- ✅ Supports top 10 infrastructure patterns
- ✅ Interactive refinement works smoothly
- ✅ Error messages explain issues clearly
- ✅ User testing with non-experts succeeds
- ✅ Documentation complete with examples
- ✅ Integration with form assistance operational
## Related Documentation
- [Architecture](architecture.md) - AI system overview
- [AI-Assisted Forms](ai-assisted-forms.md) - Related form feature
- [RAG System](rag-system.md) - Context retrieval
- [Configuration](configuration.md) - Setup guide
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Status**: 🔴 Planned
**Target Release**: Q2 2025
**Last Updated**: 2025-01-13
**Architecture**: Complete
**Implementation**: In Design Phase

View File

@ -1 +1,450 @@
# Retrieval-Augmented Generation (RAG) System
**Status**: ✅ Production-Ready (SurrealDB 1.5.0+, 22/22 tests passing)
The RAG system enables the AI service to access, retrieve, and reason over infrastructure documentation, schemas, and past configurations. This allows
the AI to generate contextually accurate infrastructure configurations and provide intelligent troubleshooting advice grounded in actual platform
knowledge.
## Architecture Overview
The RAG system consists of:
1. **Document Store**: SurrealDB vector store with semantic indexing
2. **Hybrid Search**: Vector similarity + BM25 keyword search
3. **Chunk Management**: Intelligent document chunking for code and markdown
4. **Context Ranking**: Relevance scoring for retrieved documents
5. **Semantic Cache**: Deduplication of repeated queries
## Core Components
### 1. Vector Embeddings
The system uses embedding models to convert documents into vector representations:
```
┌─────────────────────┐
│ Document Source │
│ (Markdown, Code) │
└──────────┬──────────┘
┌──────────────────────────────────┐
│ Chunking & Tokenization │
│ - Code-aware splits │
│ - Markdown aware │
│ - Preserves context │
└──────────┬───────────────────────┘
┌──────────────────────────────────┐
│ Embedding Model │
│ (OpenAI Ada, Anthropic, Local) │
└──────────┬───────────────────────┘
┌──────────────────────────────────┐
│ Vector Storage (SurrealDB) │
│ - Vector index │
│ - Metadata indexed │
│ - BM25 index for keywords │
└──────────────────────────────────┘
```
### 2. SurrealDB Integration
SurrealDB serves as the vector database and knowledge store:
```
# Configuration in provisioning/schemas/ai.ncl
let {
rag = {
enabled = true,
db_url = "surreal://localhost:8000",
namespace = "provisioning",
database = "ai_rag",
# Collections for different document types
collections = {
documentation = {
chunking_strategy = "markdown",
chunk_size = 1024,
overlap = 256,
},
schemas = {
chunking_strategy = "code",
chunk_size = 512,
overlap = 128,
},
deployments = {
chunking_strategy = "json",
chunk_size = 2048,
overlap = 512,
},
},
# Embedding configuration
embedding = {
provider = "openai", # or "anthropic", "local"
model = "text-embedding-3-small",
cache_vectors = true,
},
# Search configuration
search = {
hybrid_enabled = true,
vector_weight = 0.7,
keyword_weight = 0.3,
top_k = 5, # Number of results to return
semantic_cache = true,
},
}
}
```
### 3. Document Chunking
Intelligent chunking preserves context while managing token limits:
#### Markdown Chunking Strategy
```
Input Document: provisioning/docs/src/guides/from-scratch.md
Chunks:
[1] Header + first section (up to 1024 tokens)
[2] Next logical section + overlap with [1]
[3] Code examples preserve as atomic units
[4] Continue with overlap...
Each chunk includes:
- Original section heading (for context)
- Content
- Source file and line numbers
- Metadata (doctype, category, version)
```
#### Code Chunking Strategy
```
Input Document: provisioning/schemas/main.ncl
Chunks:
[1] Top-level let binding + comments
[2] Function definition (atomic, preserves signature)
[3] Type definition (atomic, preserves interface)
[4] Implementation blocks with context overlap
Each chunk preserves:
- Type signatures
- Function signatures
- Import statements needed for context
- Comments and docstrings
```
## Hybrid Search
The system implements dual search strategy for optimal results:
### Vector Similarity Search
```
// Find semantically similar documents
async fn vector_search(query: &str, top_k: usize) -> Vec<Document> {
let embedding = embed(query).await?;
// L2 distance in SurrealDB
db.query("
SELECT *, vector::similarity::cosine(embedding, $embedding) AS score
FROM documents
WHERE embedding <~> $embedding
ORDER BY score DESC
LIMIT $top_k
")
.bind(("embedding", embedding))
.bind(("top_k", top_k))
.await
}
```
**Use case**: Semantic understanding of intent
- Query: "How to configure PostgreSQL"
- Finds: Documents about database configuration, examples, schemas
### BM25 Keyword Search
```
// Find documents with matching keywords
async fn keyword_search(query: &str, top_k: usize) -> Vec<Document> {
// BM25 full-text search in SurrealDB
db.query("
SELECT *, search::bm25(.) AS score
FROM documents
WHERE text @@ $query
ORDER BY score DESC
LIMIT $top_k
")
.bind(("query", query))
.bind(("top_k", top_k))
.await
}
```
**Use case**: Exact term matching
- Query: "SurrealDB configuration"
- Finds: Documents mentioning SurrealDB specifically
### Hybrid Results
```
async fn hybrid_search(
query: &str,
vector_weight: f32,
keyword_weight: f32,
top_k: usize,
) -> Vec<Document> {
let vector_results = vector_search(query, top_k * 2).await?;
let keyword_results = keyword_search(query, top_k * 2).await?;
let mut scored = HashMap::new();
// Score from vector search
for (i, doc) in vector_results.iter().enumerate() {
*scored.entry(doc.id).or_insert(0.0) +=
vector_weight * (1.0 - (i as f32 / top_k as f32));
}
// Score from keyword search
for (i, doc) in keyword_results.iter().enumerate() {
*scored.entry(doc.id).or_insert(0.0) +=
keyword_weight * (1.0 - (i as f32 / top_k as f32));
}
// Return top-k by combined score
let mut results: Vec<_> = scored.into_iter().collect();
| results.sort_by( | a, b | b.1.partial_cmp(&a.1).unwrap()); |
| Ok(results.into_iter().take(top_k).map( | (id, _) | ...).collect()) |
}
```
## Semantic Caching
Reduces API calls by caching embeddings of repeated queries:
```
struct SemanticCache {
queries: Arc<DashMap<Vec<f32>, CachedResult>>,
similarity_threshold: f32,
}
impl SemanticCache {
async fn get(&self, query: &str) -> Option<CachedResult> {
let embedding = embed(query).await?;
// Find cached query with similar embedding
// (cosine distance < threshold)
for entry in self.queries.iter() {
let distance = cosine_distance(&embedding, entry.key());
if distance < self.similarity_threshold {
return Some(entry.value().clone());
}
}
None
}
async fn insert(&self, query: &str, result: CachedResult) {
let embedding = embed(query).await?;
self.queries.insert(embedding, result);
}
}
```
**Benefits**:
- 50-80% reduction in embedding API calls
- Identical queries return in <10ms
- Similar queries reuse cached context
## Ingestion Workflow
### Document Indexing
```
# Index all documentation
provisioning ai index-docs provisioning/docs/src
# Index schemas
provisioning ai index-schemas provisioning/schemas
# Index past deployments
provisioning ai index-deployments workspaces/*/deployments
# Watch directory for changes (development mode)
provisioning ai watch docs provisioning/docs/src
```
### Programmatic Indexing
```
// In ai-service on startup
async fn initialize_rag() -> Result<()> {
let rag = RAGSystem::new(&config.rag).await?;
// Index documentation
let docs = load_markdown_docs("provisioning/docs/src")?;
for doc in docs {
rag.ingest_document(&doc).await?;
}
// Index schemas
let schemas = load_nickel_schemas("provisioning/schemas")?;
for schema in schemas {
rag.ingest_schema(&schema).await?;
}
Ok(())
}
```
## Usage Examples
### Query the RAG System
```
# Search for context-aware information
provisioning ai query "How do I configure PostgreSQL with encryption?"
# Get configuration template
provisioning ai template "Describe production Kubernetes on AWS"
# Interactive mode
provisioning ai chat
> What are the best practices for database backup?
```
### AI Service Integration
```
// AI service uses RAG to enhance generation
async fn generate_config(user_request: &str) -> Result<String> {
// Retrieve relevant context
let context = rag.search(user_request, top_k=5).await?;
// Build prompt with context
let prompt = build_prompt_with_context(user_request, &context);
// Generate configuration
let config = llm.generate(&prompt).await?;
// Validate against schemas
validate_nickel_config(&config)?;
Ok(config)
}
```
### Form Assistance Integration
```
// In typdialog-ai (JavaScript/TypeScript)
async function suggestFieldValue(fieldName, currentInput) {
// Query RAG for similar configurations
const context = await rag.search(
`Field: ${fieldName}, Input: ${currentInput}`,
{ topK: 3, semantic: true }
);
// Generate suggestion using context
const suggestion = await ai.suggest({
field: fieldName,
input: currentInput,
context: context,
});
return suggestion;
}
```
## Performance Characteristics
| Operation | Time | Cache Hit |
| ----------- | ------ | ----------- |
| Vector embedding | 200-500ms | N/A |
| Vector search (cold) | 300-800ms | N/A |
| Keyword search | 50-200ms | N/A |
| Hybrid search | 500-1200ms | <100ms cached |
| Semantic cache hit | 10-50ms | Always |
**Typical query flow**:
1. Embedding: 300ms
2. Vector search: 400ms
3. Keyword search: 100ms
4. Ranking: 50ms
5. **Total**: ~850ms (first call), <100ms (cached)
## Configuration
See [Configuration Guide](configuration.md) for detailed RAG setup:
- LLM provider for embeddings
- SurrealDB connection
- Chunking strategies
- Search weights and limits
- Cache settings and TTLs
## Limitations and Considerations
### Document Freshness
- RAG indexes static snapshots
- Changes to documentation require re-indexing
- Use watch mode during development
### Token Limits
- Large documents chunked to fit LLM context
- Some context may be lost in chunking
- Adjustable chunk size vs. context trade-off
### Embedding Quality
- Quality depends on embedding model
- Domain-specific models perform better
- Fine-tuning possible for specialized vocabularies
## Monitoring and Debugging
### Query Metrics
```
# View RAG search metrics
provisioning ai metrics show rag
# Analysis of search quality
provisioning ai eval-rag --sample-queries 100
```
### Debug Mode
```
# In provisioning/config/ai.toml
[ai.rag.debug]
enabled = true
log_embeddings = true # Log embedding vectors
log_search_scores = true # Log relevance scores
log_context_used = true # Log context retrieved
```
## Related Documentation
- [Architecture](architecture.md) - AI system overview
- [MCP Integration](mcp-integration.md) - RAG access via MCP
- [Configuration](configuration.md) - RAG setup guide
- [API Reference](api-reference.md) - RAG API endpoints
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Last Updated**: 2025-01-13
**Status**: ✅ Production-Ready
**Test Coverage**: 22/22 tests passing
**Database**: SurrealDB 1.5.0+

View File

@ -1 +1,535 @@
# Security Policies # AI Security Policies and Cedar Authorization
**Status**: ✅ Production-Ready (Cedar integration, policy enforcement)
Comprehensive documentation of security controls, authorization policies, and data protection mechanisms for the AI system. All AI operations are
controlled through Cedar policies and include strict secret isolation.
## Security Model Overview
### Defense in Depth
```
┌─────────────────────────────────────────┐
│ User Request to AI │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ Layer 1: Authentication │
│ - Verify user identity │
│ - Validate API token/credentials │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ Layer 2: Authorization (Cedar) │
│ - Check if user can access AI features │
│ - Verify workspace permissions │
│ - Check role-based access │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ Layer 3: Data Sanitization │
│ - Remove secrets from data │
│ - Redact PII │
│ - Filter sensitive information │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ Layer 4: Request Validation │
│ - Check request parameters │
│ - Verify resource constraints │
│ - Apply rate limits │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ Layer 5: External API Call │
│ - Only if all previous checks pass │
│ - Encrypted TLS connection │
│ - No secrets in request │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ Layer 6: Audit Logging │
│ - Log all AI operations │
│ - Capture user, time, action │
│ - Store in tamper-proof log │
└─────────────────────────────────────────┘
```
## Cedar Policies
### Policy Engine Setup
```
// File: provisioning/policies/ai-policies.cedar
// Core principle: Least privilege
// All actions denied by default unless explicitly allowed
// Admin users can access all AI features
permit(
principal == ?principal,
action == Action::"ai_generate_config",
resource == ?resource
)
when {
principal.role == "admin"
};
// Developers can use AI within their workspace
permit(
principal == ?principal,
action in [
Action::"ai_query",
Action::"ai_generate_config",
Action::"ai_troubleshoot"
],
resource == ?resource
)
when {
principal.role in ["developer", "senior_engineer"]
&& principal.workspace == resource.workspace
};
// Operators can access troubleshooting and queries
permit(
principal == ?principal,
action in [
Action::"ai_query",
Action::"ai_troubleshoot"
],
resource == ?resource
)
when {
principal.role in ["operator", "devops"]
};
// Form assistance enabled for all authenticated users
permit(
principal == ?principal,
action == Action::"ai_form_assistance",
resource == ?resource
)
when {
principal.authenticated == true
};
// Agents (when available) require explicit approval
permit(
principal == ?principal,
action == Action::"ai_agent_execute",
resource == ?resource
)
when {
principal.role == "automation_admin"
&& resource.requires_approval == true
};
// MCP tool access - restrictive by default
permit(
principal == ?principal,
action == Action::"mcp_tool_call",
resource == ?resource
)
when {
principal.role == "admin"
    || (principal.role == "developer" && resource.tool in ["generate_config", "validate_config"])
};
// Cost control policies
permit(
principal == ?principal,
action == Action::"ai_generate_config",
resource == ?resource
)
when {
// User must have remaining budget
principal.ai_budget_remaining_usd > resource.estimated_cost_usd
// Workspace must be under budget
&& resource.workspace.ai_budget_remaining_usd > resource.estimated_cost_usd
};
```
### Policy Best Practices
1. **Explicit Allow**: Only allow specific actions, deny by default
2. **Workspace Isolation**: Users can't access AI in other workspaces
3. **Role-Based**: Use consistent role definitions
4. **Cost-Aware**: Check budgets before operations
5. **Audit Trail**: Log all policy decisions
## Data Sanitization
### Automatic PII Removal
Before sending data to external LLMs, the system removes:
```
Patterns Removed:
├─ Passwords: password="...", pwd=..., etc.
├─ API Keys: api_key=..., api-key=..., etc.
├─ Tokens: token=..., bearer=..., etc.
├─ Email addresses: user@example.com (unless necessary for context)
├─ Phone numbers: +1-555-0123 patterns
├─ Credit cards: 4111-1111-1111-1111 patterns
├─ SSH keys: -----BEGIN RSA PRIVATE KEY-----...
└─ AWS/GCP/Azure: AKIA2..., AIza..., etc.
```
### Configuration
```
[ai.security]
sanitize_pii = true
sanitize_secrets = true
# Custom redaction patterns
redact_patterns = [
# Database passwords
"(?i)db[_-]?password\\s*[:=]\\s*'?[^'\\n]+'?",
# Generic secrets
"(?i)secret\\s*[:=]\\s*'?[^'\\n]+'?",
# API endpoints that shouldn't be logged
"https?://api[.-]secret\\..+",
]
# Exceptions (patterns NOT to redact)
preserve_patterns = [
# Preserve example.com domain for docs
"example\\.com",
# Preserve placeholder emails
"user@example\\.com",
]
```
### Example Sanitization
**Before**:
```
Error configuring database:
connection_string: postgresql://dbadmin:MySecurePassword123@prod-db.us-east-1.rds.amazonaws.com:5432/app
api_key: sk-ant-abc123def456
vault_token: hvs.CAESIyg7...
```
**After Sanitization**:
```
Error configuring database:
connection_string: postgresql://dbadmin:[REDACTED]@prod-db.us-east-1.rds.amazonaws.com:5432/app
api_key: [REDACTED]
vault_token: [REDACTED]
```
## Secret Isolation
### Never Access Secrets Directly
AI cannot directly access secrets. Instead:
```
User wants: "Configure PostgreSQL with encrypted backups"
AI generates: Configuration schema with placeholders
User inserts: Actual secret values (connection strings, passwords)
System encrypts: Secrets remain encrypted at rest
Deployment: Uses secrets from secure store (Vault, AWS Secrets Manager)
```
### Secret Protection Rules
1. **No Direct Access**: AI never reads from Vault/Secrets Manager
2. **Never in Logs**: Secrets never logged or stored in cache
3. **Sanitization**: All secrets redacted before sending to LLM
4. **Encryption**: Secrets encrypted at rest and in transit
5. **Audit Trail**: All access to secrets logged
6. **TTL**: Temporary secrets auto-expire
## Local Models Support
### Air-Gapped Deployments
For environments requiring zero external API calls:
```
# Deploy local Ollama with provisioning support
docker run -d \
--name provisioning-ai \
-p 11434:11434 \
-v ollama:/root/.ollama \
-e OLLAMA_HOST=0.0.0.0:11434 \
ollama/ollama
# Pull model
ollama pull mistral
ollama pull llama2-70b
# Configure provisioning to use local model
provisioning config edit ai
[ai]
provider = "local"
model = "mistral"
api_base = "http://localhost:11434"
```
### Benefits
- ✅ Zero external API calls
- ✅ Full data privacy (no LLM vendor access)
- ✅ Compliance with classified/regulated data
- ✅ No API key exposure
- ✅ Deterministic (same results each run)
### Performance Trade-offs
| Factor | Local | Cloud |
| -------- | ------- | ------- |
| Privacy | Excellent | Requires trust |
| Cost | Free (hardware) | Per token |
| Speed | 5-30s/response | 2-5s/response |
| Quality | Good (70B models) | Excellent (Opus) |
| Hardware | Requires GPU | None |
## HSM Integration
### Hardware Security Module Support
For highly sensitive environments:
```
[ai.security.hsm]
enabled = true
provider = "aws-cloudhsm" # or "thales", "yubihsm"
[ai.security.hsm.aws]
cluster_id = "cluster-123"
customer_ca_cert = "/etc/provisioning/certs/customerCA.crt"
server_cert = "/etc/provisioning/certs/server.crt"
server_key = "/etc/provisioning/certs/server.key"
```
## Encryption
### Data at Rest
```
[ai.security.encryption]
enabled = true
algorithm = "aes-256-gcm"
key_derivation = "argon2id"
# Key rotation
key_rotation_enabled = true
key_rotation_days = 90
rotation_alert_days = 7
# Encrypted storage
cache_encryption = true
log_encryption = true
```
### Data in Transit
```
All external LLM API calls:
├─ TLS 1.3 (minimum)
├─ Certificate pinning (optional)
├─ Mutual TLS (with cloud providers)
└─ No plaintext transmission
```
## Audit Logging
### What Gets Logged
```
{
"timestamp": "2025-01-13T10:30:45Z",
"event_type": "ai_action",
"action": "generate_config",
"principal": {
"user_id": "user-123",
"role": "developer",
"workspace": "prod"
},
"resource": {
"type": "database",
"name": "prod-postgres"
},
"authorization": {
"decision": "permit",
"policy": "ai-policies.cedar",
"reason": "developer role in workspace"
},
"cost": {
"tokens_used": 1250,
"estimated_cost_usd": 0.037
},
"sanitization": {
"items_redacted": 3,
"patterns_matched": ["db_password", "api_key", "token"]
},
"status": "success"
}
```
### Audit Trail Access
```
# View recent AI actions
provisioning audit log ai --tail 100
# Filter by user
provisioning audit log ai --user alice@company.com
# Filter by action
provisioning audit log ai --action generate_config
# Filter by time range
provisioning audit log ai --from "2025-01-01" --to "2025-01-13"
# Export for analysis
provisioning audit export ai --format csv --output audit.csv
# Full-text search
provisioning audit search ai "error in database configuration"
```
## Compliance Frameworks
### Built-in Compliance Checks
```
[ai.compliance]
frameworks = ["pci-dss", "hipaa", "sox", "gdpr"]
[ai.compliance.pci-dss]
enabled = true
# Requires encryption, audit logs, access controls
[ai.compliance.hipaa]
enabled = true
# Requires local models, encrypted storage, audit logs
[ai.compliance.gdpr]
enabled = true
# Requires data deletion, consent tracking, privacy by design
```
### Compliance Reports
```
# Generate compliance report
provisioning audit compliance-report \
--framework pci-dss \
--period month \
--output report.pdf
# Verify compliance
provisioning audit verify-compliance \
--framework hipaa \
--verbose
```
## Security Best Practices
### For Administrators
1. **Rotate API Keys**: Every 90 days minimum
2. **Monitor Budget**: Set up alerts at 80% and 90%
3. **Review Policies**: Quarterly policy audit
4. **Audit Logs**: Weekly review of AI operations
5. **Update Models**: Use latest stable models
6. **Test Recovery**: Monthly rollback drills
### For Developers
1. **Use Workspace Isolation**: Never share workspace access
2. **Don't Log Secrets**: Use sanitization, never bypass it
3. **Validate Outputs**: Always review AI-generated configs
4. **Report Issues**: Security issues to `security-ai@company.com`
5. **Stay Updated**: Follow security bulletins
### For Operators
1. **Monitor Costs**: Alert if exceeding 110% of budget
2. **Watch Errors**: Unusual error patterns may indicate attacks
3. **Check Audit Logs**: Unauthorized access attempts
4. **Test Policies**: Periodically verify Cedar policies work
5. **Backup Configs**: Secure backup of policy files
## Incident Response
### Compromised API Key
```
# 1. Immediately revoke key
provisioning admin revoke-key ai-api-key-123
# 2. Rotate key
provisioning admin rotate-key ai \
--notify ops-team@company.com
# 3. Audit usage since compromise
provisioning audit log ai \
--since "2025-01-13T09:00:00Z" \
--api-key-id ai-api-key-123
# 4. Review any generated configs from this period
# Configs generated while key was compromised may need review
```
### Unauthorized Access
```
# Review Cedar policy logs
provisioning audit log ai \
--decision deny \
--last-hour
# Check for pattern
provisioning audit search ai "authorization.*deny" \
--trend-analysis
# Update policies if needed
provisioning policy update ai-policies.cedar
```
## Security Checklist
### Pre-Production
- ✅ Cedar policies reviewed and tested
- ✅ API keys rotated and secured
- ✅ Data sanitization tested with real secrets
- ✅ Encryption enabled for cache
- ✅ Audit logging configured
- ✅ Cost limits set appropriately
- ✅ Local-only mode tested (if needed)
- ✅ HSM configured (if required)
### Ongoing
- ✅ Monthly policy review
- ✅ Weekly audit log review
- ✅ Quarterly key rotation
- ✅ Annual compliance assessment
- ✅ Continuous budget monitoring
- ✅ Error pattern analysis
## Related Documentation
- [Architecture](architecture.md) - System overview
- [Configuration](configuration.md) - Security settings
- [Cost Management](cost-management.md) - Budget controls
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Last Updated**: 2025-01-13
**Status**: ✅ Production-Ready
**Compliance**: PCI-DSS, HIPAA, SOX, GDPR
**Cedar Version**: 3.0+

View File

@ -1 +1,502 @@
# Troubleshooting with AI # AI-Assisted Troubleshooting and Debugging
**Status**: ✅ Production-Ready (AI troubleshooting analysis, log parsing)
The AI troubleshooting system provides intelligent debugging assistance for infrastructure failures. The system analyzes deployment logs, identifies
root causes, suggests fixes, and generates corrected configurations based on failure patterns.
## Feature Overview
### What It Does
Transform deployment failures into actionable insights:
```
Deployment Fails with Error
AI analyzes logs:
- Identifies failure phase (networking, database, k8s, etc.)
- Detects root cause (resource limits, configuration, timeout)
- Correlates with similar past failures
- Reviews deployment configuration
AI generates report:
- Root cause explanation in plain English
- Configuration issues identified
- Suggested fixes with rationale
- Alternative solutions
- Links to relevant documentation
Developer reviews and accepts:
- Understands what went wrong
- Knows how to fix it
- Can implement fix with confidence
```
## Troubleshooting Workflow
### Automatic Detection and Analysis
```
┌──────────────────────────────────────────┐
│ Deployment Monitoring │
│ - Watches deployment for failures │
│ - Captures logs in real-time │
│ - Detects failure events │
└──────────────┬───────────────────────────┘
┌──────────────────────────────────────────┐
│ Log Collection │
│ - Gather all relevant logs │
│ - Include stack traces │
│ - Capture metrics at failure time │
│ - Get resource usage data │
└──────────────┬───────────────────────────┘
┌──────────────────────────────────────────┐
│ Context Retrieval (RAG) │
│ - Find similar past failures │
│ - Retrieve troubleshooting guides │
│ - Get schema constraints │
│ - Find best practices │
└──────────────┬───────────────────────────┘
┌──────────────────────────────────────────┐
│ AI Analysis │
│ - Identify failure pattern │
│ - Determine root cause │
│ - Generate hypotheses │
│ - Score likely causes │
└──────────────┬───────────────────────────┘
┌──────────────────────────────────────────┐
│ Solution Generation │
│ - Create fixed configuration │
│ - Generate step-by-step fix guide │
│ - Suggest preventative measures │
│ - Provide alternative approaches │
└──────────────┬───────────────────────────┘
┌──────────────────────────────────────────┐
│ Report and Recommendations │
│ - Explain what went wrong │
│ - Show how to fix it │
│ - Provide corrected configuration │
│ - Link to prevention strategies │
└──────────────────────────────────────────┘
```
## Usage Examples
### Example 1: Database Connection Timeout
**Failure**:
```
Deployment: deploy-2025-01-13-001
Status: FAILED at phase database_migration
Error: connection timeout after 30s connecting to postgres://...
```
**Run Troubleshooting**:
```
$ provisioning ai troubleshoot deploy-2025-01-13-001
Analyzing deployment failure...
╔════════════════════════════════════════════════════════════════╗
║ Root Cause Analysis: Database Connection Timeout ║
╠════════════════════════════════════════════════════════════════╣
║ ║
║ Phase: database_migration (occurred during migration job) ║
║ Error: Timeout after 30 seconds connecting to database ║
║ ║
║ Most Likely Causes (confidence): ║
║ 1. Database security group blocks migration job (85%) ║
║ 2. Database instance not fully initialized yet (60%) ║
║ 3. Network connectivity issue (40%) ║
║ ║
║ Analysis: ║
║ - Database was created only 2 seconds before connection ║
║ - Migration job started immediately (no wait time) ║
║ - Security group: allows 5432 only from default SG ║
║ - Migration pod uses different security group ║
║ ║
╠════════════════════════════════════════════════════════════════╣
║ Recommended Fix ║
╠════════════════════════════════════════════════════════════════╣
║ ║
║ Issue: Migration security group not in database's inbound ║
║ ║
║ Solution: Add migration pod security group to DB inbound ║
║ ║
║ database.security_group.ingress = [ ║
║ { ║
║ from_port = 5432, ║
║ to_port = 5432, ║
║ source_security_group = "migration-pods-sg" ║
║ } ║
║ ] ║
║ ║
║ Alternative: Add 30-second wait after database creation ║
║ ║
║ deployment.phases.database.post_actions = [ ║
║ {action = "wait_for_database", timeout_seconds = 30} ║
║ ] ║
║ ║
╠════════════════════════════════════════════════════════════════╣
║ Prevention ║
╠════════════════════════════════════════════════════════════════╣
║ ║
║ To prevent this in future deployments: ║
║ ║
║ 1. Always verify security group rules before migration ║
║ 2. Add health check: `SELECT 1` before starting migration ║
║ 3. Increase initial timeout: database can be slow to start ║
║ 4. Use RDS wait condition instead of time-based wait ║
║ ║
║ See: docs/troubleshooting/database-connectivity.md ║
║ docs/guides/database-migrations.md ║
║ ║
╚════════════════════════════════════════════════════════════════╝
Generate corrected configuration? [yes/no]: yes
Configuration generated and saved to:
workspaces/prod/database.ncl.fixed
Changes made:
✓ Added migration security group to database inbound
✓ Added health check before migration
✓ Increased connection timeout to 60s
Ready to redeploy with corrected configuration? [yes/no]: yes
```
### Example 2: Kubernetes Deployment Error
**Failure**:
```
Deployment: deploy-2025-01-13-002
Status: FAILED at phase kubernetes_workload
Error: failed to create deployment app: Pod exceeded capacity
```
**Troubleshooting**:
```
$ provisioning ai troubleshoot deploy-2025-01-13-002 --detailed
╔════════════════════════════════════════════════════════════════╗
║ Root Cause: Pod Exceeded Node Capacity ║
╠════════════════════════════════════════════════════════════════╣
║ ║
║ Failure Analysis: ║
║ ║
║ Error: Pod requests 4CPU/8GB, but largest node has 2CPU/4GB ║
║ Cluster: 3 nodes, each t3.medium (2CPU/4GB) ║
║ Pod requirements: ║
║ - CPU: 4 (requested) + 2 (reserved system) = 6 needed ║
║ - Memory: 8Gi (requested) + 1Gi (system) = 9Gi needed ║
║ ║
║ Why this happened: ║
║ Pod spec updated to 4CPU/8GB but node group wasn't ║
║ Node group still has t3.medium (too small) ║
║ No autoscaling configured (won't scale up automatically) ║
║ ║
║ Solution Options: ║
║ 1. Reduce pod resource requests to 2CPU/4GB (simpler) ║
║ 2. Scale up node group to t3.large (2x cost, safer) ║
║ 3. Use both: t3.large nodes + reduce pod requests ║
║ ║
╠════════════════════════════════════════════════════════════════╣
║ Recommended: Option 2 (Scale up nodes) ║
╠════════════════════════════════════════════════════════════════╣
║ ║
║ Reason: Pod requests are reasonable for production app ║
║ Better to scale infrastructure than reduce resources ║
║ ║
║ Changes needed: ║
║ ║
║ kubernetes.node_group = { ║
║ instance_type = "t3.large" # was t3.medium ║
║ min_size = 3 ║
║ max_size = 10 ║
║ ║
║ auto_scaling = { ║
║ enabled = true ║
║ target_cpu_percent = 70 ║
║ } ║
║ } ║
║ ║
║ Cost Impact: ║
║ Current: 3 × t3.medium = ~$90/month ║
║ Proposed: 3 × t3.large = ~$180/month ║
║ With autoscaling, average: ~$150/month (some scale-down) ║
║ ║
╚════════════════════════════════════════════════════════════════╝
```
## CLI Commands
### Basic Troubleshooting
```
# Troubleshoot recent deployment
provisioning ai troubleshoot deploy-2025-01-13-001
# Get detailed analysis
provisioning ai troubleshoot deploy-2025-01-13-001 --detailed
# Analyze with specific focus
provisioning ai troubleshoot deploy-2025-01-13-001 --focus networking
# Get alternative solutions
provisioning ai troubleshoot deploy-2025-01-13-001 --alternatives
```
### Working with Logs
```
# Troubleshoot from custom logs
provisioning ai troubleshoot \
  --logs "$(journalctl -u provisioning --no-pager | tail -100)"
# Troubleshoot from file
provisioning ai troubleshoot --log-file /var/log/deployment.log
# Troubleshoot from cloud provider
provisioning ai troubleshoot \
--cloud-logs aws-deployment-123 \
--region us-east-1
```
### Generate Reports
```
# Generate detailed troubleshooting report
provisioning ai troubleshoot deploy-123 \
--report \
--output troubleshooting-report.md
# Generate with suggestions
provisioning ai troubleshoot deploy-123 \
--report \
--include-suggestions \
--output report-with-fixes.md
# Generate compliance report (PCI-DSS, HIPAA)
provisioning ai troubleshoot deploy-123 \
--report \
--compliance pci-dss \
--output compliance-report.pdf
```
## Analysis Depth
### Shallow Analysis (Fast)
```
provisioning ai troubleshoot deploy-123 --depth shallow
Analyzes:
- First error message
- Last few log lines
- Basic pattern matching
- Returns in 5-10 seconds
```
### Deep Analysis (Thorough)
```
provisioning ai troubleshoot deploy-123 --depth deep
Analyzes:
- Full log context
- Correlates multiple errors
- Checks resource metrics
- Compares to past failures
- Generates alternative hypotheses
- Returns in 30-60 seconds
```
## Integration with Monitoring
### Automatic Troubleshooting
```
# Enable auto-troubleshoot on failures
provisioning config set ai.troubleshooting.auto_analyze true
# Deployments that fail automatically get analyzed
# Reports available in provisioning dashboard
# Alerts sent to on-call engineer with analysis
```
### WebUI Integration
```
Deployment Dashboard
├─ deployment-123 [FAILED]
│ └─ AI Analysis
│ ├─ Root Cause: Database timeout
│ ├─ Suggested Fix: ✓ View
│ ├─ Corrected Config: ✓ Download
│ └─ Alternative Solutions: 3 options
```
## Learning from Failures
### Pattern Recognition
The system learns common failure patterns:
```
Collected Patterns:
├─ Database Timeouts (25% of failures)
│ └─ Usually: Security group, connection pool, slow startup
├─ Kubernetes Pod Failures (20%)
│ └─ Usually: Insufficient resources, bad config
├─ Network Connectivity (15%)
│ └─ Usually: Security groups, routing, DNS
└─ Other (40%)
└─ Various causes, each analyzed individually
```
### Improvement Tracking
```
# See patterns in your deployments
provisioning ai analytics failures --period month
Month Summary:
Total deployments: 50
Failed: 5 (10% failure rate)
Common causes:
1. Security group rules (3 failures, 60%)
2. Resource limits (1 failure, 20%)
3. Configuration error (1 failure, 20%)
Improvement opportunities:
- Pre-check security groups before deployment
- Add health checks for resource sizing
- Add configuration validation
```
## Configuration
### Troubleshooting Settings
```
[ai.troubleshooting]
enabled = true
# Analysis depth
default_depth = "deep" # or "shallow" for speed
max_analysis_time_seconds = 30
# Features
auto_analyze_failed_deployments = true
generate_corrected_config = true
suggest_prevention = true
# Learning
track_failure_patterns = true
learn_from_similar_failures = true
improve_suggestions_over_time = true
# Reporting
auto_send_report = false # Email report to user
report_format = "markdown" # or "json", "pdf"
include_alternatives = true
# Cost impact analysis
estimate_fix_cost = true
estimate_alternative_costs = true
```
### Failure Detection
```
[ai.troubleshooting.detection]
# Monitor logs for these patterns
watch_patterns = [
"error",
"timeout",
"failed",
"unable to",
"refused",
"denied",
"exceeded",
"quota",
]
# Minimum log lines before analyzing
min_log_lines = 10
# Time window for log collection
log_window_seconds = 300
```
## Best Practices
### For Effective Troubleshooting
1. **Keep Detailed Logs**: Enable verbose logging in deployments
2. **Include Context**: Share full logs, not just error snippet
3. **Check Suggestions**: Review AI suggestions even if obvious
4. **Learn Patterns**: Track recurring failures and address root cause
5. **Update Configs**: Use corrected configs from AI, validate them
### For Prevention
1. **Use Health Checks**: Add database/service health checks
2. **Test Before Deploy**: Use dry-run to catch issues early
3. **Monitor Metrics**: Watch CPU/memory before failures occur
4. **Review Policies**: Ensure security groups are correct
5. **Document Changes**: When updating configs, note the change
## Limitations
### What AI Can Troubleshoot
✅ Configuration errors
✅ Resource limit problems
✅ Networking/security group issues
✅ Database connectivity problems
✅ Deployment ordering issues
✅ Common application errors
✅ Performance problems
### What Requires Human Review
⚠️ Data corruption scenarios
⚠️ Multi-failure cascades
⚠️ Unclear error messages
⚠️ Custom application code failures
⚠️ Third-party service issues
⚠️ Physical infrastructure failures
## Examples and Guides
### Common Issues - Quick Links
- [Database Connectivity](../troubleshooting/database-connectivity.md)
- [Kubernetes Pod Failures](../troubleshooting/kubernetes-pods.md)
- [Network Configuration](../troubleshooting/networking.md)
- [Performance Issues](../troubleshooting/performance.md)
- [Resource Limits](../troubleshooting/resource-limits.md)
## Related Documentation
- [Architecture](architecture.md) - AI system overview
- [RAG System](rag-system.md) - Context retrieval for troubleshooting
- [Configuration](configuration.md) - Setup guide
- [Security Policies](security-policies.md) - Safe log handling
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
---
**Last Updated**: 2025-01-13
**Status**: ✅ Production-Ready
**Success Rate**: 85-95% accuracy in root cause identification
**Supported**: All deployment types (infrastructure, Kubernetes, database)

View File

@ -12,7 +12,7 @@ API reference for programmatic access to the Provisioning Platform.
## Quick Start ## Quick Start
```bash ```
# Check API health # Check API health
curl http://localhost:9090/health curl http://localhost:9090/health

View File

@ -16,7 +16,7 @@ All extensions follow a standardized structure and API for seamless integration.
### Standard Directory Layout ### Standard Directory Layout
```plaintext ```
extension-name/ extension-name/
├── manifest.toml # Extension metadata ├── manifest.toml # Extension metadata
├── schemas/ # Nickel configuration files ├── schemas/ # Nickel configuration files
@ -71,7 +71,7 @@ All providers must implement the following interface:
Create `schemas/settings.ncl`: Create `schemas/settings.ncl`:
```nickel ```
# Provider settings schema # Provider settings schema
{ {
ProviderSettings = { ProviderSettings = {
@ -146,7 +146,7 @@ schema ServerConfig {
Create `nulib/mod.nu`: Create `nulib/mod.nu`:
```nushell ```
use std log use std log
# Provider name and version # Provider name and version
@ -231,7 +231,7 @@ export def "test-connection" [config: record] -> record {
Create `nulib/create.nu`: Create `nulib/create.nu`:
```nushell ```
use std log use std log
use utils.nu * use utils.nu *
@ -368,7 +368,7 @@ def wait-for-server-ready [server_id: string] -> string {
Add provider metadata in `metadata.toml`: Add provider metadata in `metadata.toml`:
```toml ```
[extension] [extension]
name = "my-provider" name = "my-provider"
type = "provider" type = "provider"
@ -429,7 +429,7 @@ Task services must implement:
Create `schemas/version.ncl`: Create `schemas/version.ncl`:
```nickel ```
# Task service version configuration # Task service version configuration
{ {
taskserv_version = { taskserv_version = {
@ -483,7 +483,7 @@ Create `schemas/version.ncl`:
Create `nulib/mod.nu`: Create `nulib/mod.nu`:
```nushell ```
use std log use std log
use ../../../lib_provisioning * use ../../../lib_provisioning *
@ -697,7 +697,7 @@ Clusters orchestrate multiple components:
Create `schemas/cluster.ncl`: Create `schemas/cluster.ncl`:
```nickel ```
# Cluster configuration schema # Cluster configuration schema
{ {
ClusterConfig = { ClusterConfig = {
@ -812,7 +812,7 @@ Create `schemas/cluster.ncl`:
Create `nulib/mod.nu`: Create `nulib/mod.nu`:
```nushell ```
use std log use std log
use ../../../lib_provisioning * use ../../../lib_provisioning *
@ -1065,7 +1065,7 @@ Extensions should include comprehensive tests:
Create `tests/unit_tests.nu`: Create `tests/unit_tests.nu`:
```nushell ```
use std testing use std testing
export def test_provider_config_validation [] { export def test_provider_config_validation [] {
@ -1096,7 +1096,7 @@ export def test_server_creation_check_mode [] {
Create `tests/integration_tests.nu`: Create `tests/integration_tests.nu`:
```nushell ```
use std testing use std testing
export def test_full_server_lifecycle [] { export def test_full_server_lifecycle [] {
@ -1127,7 +1127,7 @@ export def test_full_server_lifecycle [] {
### Running Tests ### Running Tests
```bash ```
# Run unit tests # Run unit tests
nu tests/unit_tests.nu nu tests/unit_tests.nu
@ -1151,7 +1151,7 @@ Each extension must include:
### API Documentation Template ### API Documentation Template
```markdown ```
# Extension Name API # Extension Name API
## Overview ## Overview

View File

@ -18,7 +18,7 @@ Provisioning offers multiple integration points:
#### Full-Featured Python Client #### Full-Featured Python Client
```python ```
import asyncio import asyncio
import json import json
import logging import logging
@ -416,7 +416,7 @@ if __name__ == "__main__":
#### Complete JavaScript/TypeScript Client #### Complete JavaScript/TypeScript Client
```typescript ```
import axios, { AxiosInstance, AxiosResponse } from 'axios'; import axios, { AxiosInstance, AxiosResponse } from 'axios';
import WebSocket from 'ws'; import WebSocket from 'ws';
import { EventEmitter } from 'events'; import { EventEmitter } from 'events';
@ -925,7 +925,7 @@ export { ProvisioningClient, Task, BatchConfig };
### Comprehensive Error Handling ### Comprehensive Error Handling
```python ```
class ProvisioningErrorHandler: class ProvisioningErrorHandler:
"""Centralized error handling for provisioning operations""" """Centralized error handling for provisioning operations"""
@ -1028,7 +1028,7 @@ async def robust_workflow_execution():
### Circuit Breaker Pattern ### Circuit Breaker Pattern
```typescript ```
class CircuitBreaker { class CircuitBreaker {
private failures = 0; private failures = 0;
private nextAttempt = Date.now(); private nextAttempt = Date.now();
@ -1104,7 +1104,7 @@ class ResilientProvisioningClient {
### Connection Pooling and Caching ### Connection Pooling and Caching
```python ```
import asyncio import asyncio
import aiohttp import aiohttp
from cachetools import TTLCache from cachetools import TTLCache
@ -1222,7 +1222,7 @@ async def high_performance_workflow():
### WebSocket Connection Pooling ### WebSocket Connection Pooling
```javascript ```
class WebSocketPool { class WebSocketPool {
constructor(maxConnections = 5) { constructor(maxConnections = 5) {
this.maxConnections = maxConnections; this.maxConnections = maxConnections;
@ -1290,13 +1290,13 @@ The Python SDK provides a comprehensive interface for provisioning:
#### Installation #### Installation
```bash ```
pip install provisioning-client pip install provisioning-client
``` ```
#### Quick Start #### Quick Start
```python ```
from provisioning_client import ProvisioningClient from provisioning_client import ProvisioningClient
# Initialize client # Initialize client
@ -1319,7 +1319,7 @@ print(f"Workflow completed: {task.status}")
#### Advanced Usage #### Advanced Usage
```python ```
# Use with async context manager # Use with async context manager
async with ProvisioningClient() as client: async with ProvisioningClient() as client:
# Batch operations # Batch operations
@ -1340,13 +1340,13 @@ async with ProvisioningClient() as client:
#### Installation #### Installation
```bash ```
npm install @provisioning/client npm install @provisioning/client
``` ```
#### Usage #### Usage
```typescript ```
import { ProvisioningClient } from '@provisioning/client'; import { ProvisioningClient } from '@provisioning/client';
const client = new ProvisioningClient({ const client = new ProvisioningClient({
@ -1373,7 +1373,7 @@ await client.connectWebSocket();
### Workflow Orchestration Pipeline ### Workflow Orchestration Pipeline
```python ```
class WorkflowPipeline: class WorkflowPipeline:
"""Orchestrate complex multi-step workflows""" """Orchestrate complex multi-step workflows"""
@ -1462,7 +1462,7 @@ async def complex_deployment():
### Event-Driven Architecture ### Event-Driven Architecture
```javascript ```
class EventDrivenWorkflowManager { class EventDrivenWorkflowManager {
constructor(client) { constructor(client) {
this.client = client; this.client = client;

View File

@ -69,7 +69,7 @@ The provisioning platform provides a comprehensive Nushell library with reusable
## Usage Example ## Usage Example
```nushell ```
# Load provisioning library # Load provisioning library
use provisioning/core/nulib/lib_provisioning * use provisioning/core/nulib/lib_provisioning *

View File

@ -17,7 +17,7 @@ The path resolution system provides a hierarchical and configurable mechanism fo
The system follows a specific hierarchy for loading configuration files: The system follows a specific hierarchy for loading configuration files:
```plaintext ```
1. System defaults (config.defaults.toml) 1. System defaults (config.defaults.toml)
2. User configuration (config.user.toml) 2. User configuration (config.user.toml)
3. Project configuration (config.project.toml) 3. Project configuration (config.project.toml)
@ -30,7 +30,7 @@ The system follows a specific hierarchy for loading configuration files:
The system searches for configuration files in these locations: The system searches for configuration files in these locations:
```bash ```
# Default search paths (in order) # Default search paths (in order)
/usr/local/provisioning/config.defaults.toml /usr/local/provisioning/config.defaults.toml
$HOME/.config/provisioning/config.user.toml $HOME/.config/provisioning/config.user.toml
@ -59,7 +59,7 @@ Resolves configuration file paths using the search hierarchy.
**Example:** **Example:**
```nushell ```
use path-resolution.nu * use path-resolution.nu *
let config_path = (resolve-config-path "config.user.toml" []) let config_path = (resolve-config-path "config.user.toml" [])
# Returns: "/home/user/.config/provisioning/config.user.toml" # Returns: "/home/user/.config/provisioning/config.user.toml"
@ -76,7 +76,7 @@ Discovers extension paths (providers, taskservs, clusters).
**Returns:** **Returns:**
```nushell ```
{ {
base_path: "/usr/local/provisioning/providers/upcloud", base_path: "/usr/local/provisioning/providers/upcloud",
schemas_path: "/usr/local/provisioning/providers/upcloud/schemas", schemas_path: "/usr/local/provisioning/providers/upcloud/schemas",
@ -92,7 +92,7 @@ Gets current workspace path configuration.
**Returns:** **Returns:**
```nushell ```
{ {
base: "/usr/local/provisioning", base: "/usr/local/provisioning",
current_infra: "/workspace/infra/production", current_infra: "/workspace/infra/production",
@ -130,7 +130,7 @@ Interpolates variables in path templates.
**Example:** **Example:**
```nushell ```
let template = "{{paths.base}}/infra/{{env.USER}}/{{git.branch}}" let template = "{{paths.base}}/infra/{{env.USER}}/{{git.branch}}"
let result = (interpolate-path $template { let result = (interpolate-path $template {
paths: { base: "/usr/local/provisioning" }, paths: { base: "/usr/local/provisioning" },
@ -150,7 +150,7 @@ Discovers all available providers.
**Returns:** **Returns:**
```nushell ```
[ [
{ {
name: "upcloud", name: "upcloud",
@ -185,7 +185,7 @@ Gets provider-specific configuration and paths.
**Returns:** **Returns:**
```nushell ```
{ {
name: "upcloud", name: "upcloud",
base_path: "/usr/local/provisioning/providers/upcloud", base_path: "/usr/local/provisioning/providers/upcloud",
@ -214,7 +214,7 @@ Discovers all available task services.
**Returns:** **Returns:**
```nushell ```
[ [
{ {
name: "kubernetes", name: "kubernetes",
@ -245,7 +245,7 @@ Gets task service configuration and version information.
**Returns:** **Returns:**
```nushell ```
{ {
name: "kubernetes", name: "kubernetes",
path: "/usr/local/provisioning/taskservs/kubernetes", path: "/usr/local/provisioning/taskservs/kubernetes",
@ -272,7 +272,7 @@ Discovers all available cluster configurations.
**Returns:** **Returns:**
```nushell ```
[ [
{ {
name: "buildkit", name: "buildkit",
@ -312,7 +312,7 @@ Gets environment-specific configuration.
**Returns:** **Returns:**
```nushell ```
{ {
name: "production", name: "production",
paths: { paths: {
@ -359,7 +359,7 @@ Discovers available workspaces and infrastructure directories.
**Returns:** **Returns:**
```nushell ```
[ [
{ {
name: "production", name: "production",
@ -405,7 +405,7 @@ Analyzes project structure and identifies components.
**Returns:** **Returns:**
```nushell ```
{ {
root: "/workspace/project", root: "/workspace/project",
type: "provisioning_workspace", type: "provisioning_workspace",
@ -458,7 +458,7 @@ Gets path resolution cache statistics.
**Returns:** **Returns:**
```nushell ```
{ {
enabled: true, enabled: true,
size: 150, size: 150,
@ -485,7 +485,7 @@ Normalizes paths for cross-platform compatibility.
**Example:** **Example:**
```nushell ```
# On Windows # On Windows
normalize-path "path/to/file" # Returns: "path\to\file" normalize-path "path/to/file" # Returns: "path\to\file"
@ -519,7 +519,7 @@ Validates all paths in configuration.
**Returns:** **Returns:**
```nushell ```
{ {
valid: true, valid: true,
errors: [], errors: [],
@ -541,7 +541,7 @@ Validates extension directory structure.
**Returns:** **Returns:**
```nushell ```
{ {
valid: true, valid: true,
required_files: [ required_files: [
@ -561,7 +561,7 @@ Validates extension directory structure.
The path resolution API is exposed via Nushell commands: The path resolution API is exposed via Nushell commands:
```bash ```
# Show current path configuration # Show current path configuration
provisioning show paths provisioning show paths
@ -584,7 +584,7 @@ provisioning workspace set /path/to/infra
### Python Integration ### Python Integration
```python ```
import subprocess import subprocess
import json import json
@ -612,7 +612,7 @@ providers = resolver.discover_providers()
### JavaScript/Node.js Integration ### JavaScript/Node.js Integration
```javascript ```
const { exec } = require('child_process'); const { exec } = require('child_process');
const util = require('util'); const util = require('util');
const execAsync = util.promisify(exec); const execAsync = util.promisify(exec);
@ -697,7 +697,7 @@ The system provides graceful fallbacks:
Monitor path resolution performance: Monitor path resolution performance:
```nushell ```
# Get resolution statistics # Get resolution statistics
provisioning debug path-stats provisioning debug path-stats

View File

@ -18,7 +18,7 @@ All providers must implement the following interface:
### Required Functions ### Required Functions
```nushell ```
# Provider initialization # Provider initialization
export def init [] -> record { ... } export def init [] -> record { ... }
@ -37,7 +37,7 @@ export def get-pricing [plan: string] -> record { ... }
Each provider requires configuration in Nickel format: Each provider requires configuration in Nickel format:
```nickel ```
# Example: UpCloud provider configuration # Example: UpCloud provider configuration
{ {
provider = { provider = {
@ -57,7 +57,7 @@ Each provider requires configuration in Nickel format:
### 1. Directory Structure ### 1. Directory Structure
```plaintext ```
provisioning/extensions/providers/my-provider/ provisioning/extensions/providers/my-provider/
├── nulib/ ├── nulib/
│ └── my_provider.nu # Provider implementation │ └── my_provider.nu # Provider implementation
@ -69,7 +69,7 @@ provisioning/extensions/providers/my-provider/
### 2. Implementation Template ### 2. Implementation Template
```nushell ```
# my_provider.nu # my_provider.nu
export def init [] { export def init [] {
{ {
@ -94,7 +94,7 @@ export def list-servers [] {
### 3. Nickel Schema ### 3. Nickel Schema
```nickel ```
# main.ncl # main.ncl
{ {
MyProvider = { MyProvider = {
@ -118,7 +118,7 @@ Providers are automatically discovered from:
- `provisioning/extensions/providers/*/nu/*.nu` - `provisioning/extensions/providers/*/nu/*.nu`
- User workspace: `workspace/extensions/providers/*/nu/*.nu` - User workspace: `workspace/extensions/providers/*/nu/*.nu`
```bash ```
# Discover available providers # Discover available providers
provisioning module discover providers provisioning module discover providers
@ -130,7 +130,7 @@ provisioning module load providers workspace my-provider
### Create Servers ### Create Servers
```nushell ```
use my_provider.nu * use my_provider.nu *
let plan = { let plan = {
@ -144,13 +144,13 @@ create-servers $plan
### List Servers ### List Servers
```nushell ```
list-servers | where status == "running" | select hostname ip_address list-servers | where status == "running" | select hostname ip_address
``` ```
### Get Pricing ### Get Pricing
```nushell ```
get-pricing "small" | to yaml get-pricing "small" | to yaml
``` ```
@ -158,7 +158,7 @@ get-pricing "small" | to yaml
Use the test environment system to test providers: Use the test environment system to test providers:
```bash ```
# Test provider without real resources # Test provider without real resources
provisioning test env single my-provider --check provisioning test env single my-provider --check
``` ```

View File

@ -20,13 +20,13 @@ Provisioning exposes two main REST APIs:
All API endpoints (except health checks) require JWT authentication via the Authorization header: All API endpoints (except health checks) require JWT authentication via the Authorization header:
```http ```
Authorization: Bearer <jwt_token> Authorization: Bearer <jwt_token>
``` ```
### Getting Access Token ### Getting Access Token
```http ```
POST /auth/login POST /auth/login
Content-Type: application/json Content-Type: application/json
@ -47,7 +47,7 @@ Check orchestrator health status.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "Orchestrator is healthy" "data": "Orchestrator is healthy"
@ -68,7 +68,7 @@ List all workflow tasks.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": [ "data": [
@ -99,7 +99,7 @@ Get specific task status and details.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -126,7 +126,7 @@ Submit server creation workflow.
**Request Body:** **Request Body:**
```json ```
{ {
"infra": "production", "infra": "production",
"settings": "config.ncl", "settings": "config.ncl",
@ -137,7 +137,7 @@ Submit server creation workflow.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "uuid-task-id" "data": "uuid-task-id"
@ -150,7 +150,7 @@ Submit task service workflow.
**Request Body:** **Request Body:**
```json ```
{ {
"operation": "create", "operation": "create",
"taskserv": "kubernetes", "taskserv": "kubernetes",
@ -163,7 +163,7 @@ Submit task service workflow.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "uuid-task-id" "data": "uuid-task-id"
@ -176,7 +176,7 @@ Submit cluster workflow.
**Request Body:** **Request Body:**
```json ```
{ {
"operation": "create", "operation": "create",
"cluster_type": "buildkit", "cluster_type": "buildkit",
@ -189,7 +189,7 @@ Submit cluster workflow.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "uuid-task-id" "data": "uuid-task-id"
@ -204,7 +204,7 @@ Execute batch workflow operation.
**Request Body:** **Request Body:**
```json ```
{ {
"name": "multi_cloud_deployment", "name": "multi_cloud_deployment",
"version": "1.0.0", "version": "1.0.0",
@ -235,7 +235,7 @@ Execute batch workflow operation.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -263,7 +263,7 @@ List all batch operations.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": [ "data": [
@ -288,7 +288,7 @@ Get batch operation status.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -317,7 +317,7 @@ Cancel running batch operation.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "Operation cancelled" "data": "Operation cancelled"
@ -336,7 +336,7 @@ Get real-time workflow progress.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -360,7 +360,7 @@ Get workflow state snapshots.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": [ "data": [
@ -380,7 +380,7 @@ Get system-wide metrics.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -403,7 +403,7 @@ Get system health status.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -424,7 +424,7 @@ Get state manager statistics.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -444,7 +444,7 @@ Create new checkpoint.
**Request Body:** **Request Body:**
```json ```
{ {
"name": "before_major_update", "name": "before_major_update",
"description": "Checkpoint before deploying v2.0.0" "description": "Checkpoint before deploying v2.0.0"
@ -453,7 +453,7 @@ Create new checkpoint.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "checkpoint-uuid" "data": "checkpoint-uuid"
@ -466,7 +466,7 @@ List all checkpoints.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": [ "data": [
@ -491,7 +491,7 @@ Get specific checkpoint details.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -511,7 +511,7 @@ Execute rollback operation.
**Request Body:** **Request Body:**
```json ```
{ {
"checkpoint_id": "checkpoint-uuid" "checkpoint_id": "checkpoint-uuid"
} }
@ -519,7 +519,7 @@ Execute rollback operation.
Or for partial rollback: Or for partial rollback:
```json ```
{ {
"operation_ids": ["op-1", "op-2", "op-3"] "operation_ids": ["op-1", "op-2", "op-3"]
} }
@ -527,7 +527,7 @@ Or for partial rollback:
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -550,7 +550,7 @@ Restore system state from checkpoint.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "State restored from checkpoint checkpoint-uuid" "data": "State restored from checkpoint checkpoint-uuid"
@ -563,7 +563,7 @@ Get rollback system statistics.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -585,7 +585,7 @@ Authenticate user and get JWT token.
**Request Body:** **Request Body:**
```json ```
{ {
"username": "admin", "username": "admin",
"password": "secure_password", "password": "secure_password",
@ -595,7 +595,7 @@ Authenticate user and get JWT token.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -617,7 +617,7 @@ Refresh JWT token.
**Request Body:** **Request Body:**
```json ```
{ {
"token": "current-jwt-token" "token": "current-jwt-token"
} }
@ -625,7 +625,7 @@ Refresh JWT token.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -641,7 +641,7 @@ Logout and invalidate token.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "Successfully logged out" "data": "Successfully logged out"
@ -661,7 +661,7 @@ List all users.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": [ "data": [
@ -684,7 +684,7 @@ Create new user.
**Request Body:** **Request Body:**
```json ```
{ {
"username": "newuser", "username": "newuser",
"email": "newuser@example.com", "email": "newuser@example.com",
@ -696,7 +696,7 @@ Create new user.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -719,7 +719,7 @@ Update existing user.
**Request Body:** **Request Body:**
```json ```
{ {
"email": "updated@example.com", "email": "updated@example.com",
"roles": ["admin", "operator"], "roles": ["admin", "operator"],
@ -729,7 +729,7 @@ Update existing user.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "User updated successfully" "data": "User updated successfully"
@ -746,7 +746,7 @@ Delete user.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "User deleted successfully" "data": "User deleted successfully"
@ -761,7 +761,7 @@ List all policies.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": [ "data": [
@ -783,7 +783,7 @@ Create new policy.
**Request Body:** **Request Body:**
```json ```
{ {
"name": "new_policy", "name": "new_policy",
"version": "1.0.0", "version": "1.0.0",
@ -800,7 +800,7 @@ Create new policy.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": { "data": {
@ -821,7 +821,7 @@ Update policy.
**Request Body:** **Request Body:**
```json ```
{ {
"name": "updated_policy", "name": "updated_policy",
"rules": [...] "rules": [...]
@ -830,7 +830,7 @@ Update policy.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": "Policy updated successfully" "data": "Policy updated successfully"
@ -855,7 +855,7 @@ Get audit logs.
**Response:** **Response:**
```json ```
{ {
"success": true, "success": true,
"data": [ "data": [
@ -876,7 +876,7 @@ Get audit logs.
All endpoints may return error responses in this format: All endpoints may return error responses in this format:
```json ```
{ {
"success": false, "success": false,
"error": "Detailed error message" "error": "Detailed error message"
@ -904,7 +904,7 @@ API endpoints are rate-limited:
Rate limit headers are included in responses: Rate limit headers are included in responses:
```http ```
X-RateLimit-Limit: 100 X-RateLimit-Limit: 100
X-RateLimit-Remaining: 95 X-RateLimit-Remaining: 95
X-RateLimit-Reset: 1632150000 X-RateLimit-Reset: 1632150000
@ -918,7 +918,7 @@ Prometheus-compatible metrics endpoint.
**Response:** **Response:**
```plaintext ```
# HELP orchestrator_tasks_total Total number of tasks # HELP orchestrator_tasks_total Total number of tasks
# TYPE orchestrator_tasks_total counter # TYPE orchestrator_tasks_total counter
orchestrator_tasks_total{status="completed"} 150 orchestrator_tasks_total{status="completed"} 150
@ -937,7 +937,7 @@ Real-time event streaming via WebSocket connection.
**Connection:** **Connection:**
```javascript ```
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token'); const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token');
ws.onmessage = function(event) { ws.onmessage = function(event) {
@ -948,7 +948,7 @@ ws.onmessage = function(event) {
**Event Format:** **Event Format:**
```json ```
{ {
"event_type": "TaskStatusChanged", "event_type": "TaskStatusChanged",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -967,7 +967,7 @@ ws.onmessage = function(event) {
### Python SDK Example ### Python SDK Example
```python ```
import requests import requests
class ProvisioningClient: class ProvisioningClient:
@ -1007,7 +1007,7 @@ print(f"Task ID: {result['data']}")
### JavaScript/Node.js SDK Example ### JavaScript/Node.js SDK Example
```javascript ```
const axios = require('axios'); const axios = require('axios');
class ProvisioningClient { class ProvisioningClient {
@ -1051,7 +1051,7 @@ The system supports webhooks for external integrations:
Configure webhooks in the system configuration: Configure webhooks in the system configuration:
```toml ```
[webhooks] [webhooks]
enabled = true enabled = true
endpoints = [ endpoints = [
@ -1065,7 +1065,7 @@ endpoints = [
### Webhook Payload ### Webhook Payload
```json ```
{ {
"event": "task.completed", "event": "task.completed",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -1087,7 +1087,7 @@ For endpoints that return lists, use pagination parameters:
Pagination metadata is included in response headers: Pagination metadata is included in response headers:
```http ```
X-Total-Count: 1500 X-Total-Count: 1500
X-Limit: 50 X-Limit: 50
X-Offset: 100 X-Offset: 100
@ -1098,7 +1098,7 @@ Link: </api/endpoint?offset=150&limit=50>; rel="next"
The API uses header-based versioning: The API uses header-based versioning:
```http ```
Accept: application/vnd.provisioning.v1+json Accept: application/vnd.provisioning.v1+json
``` ```
@ -1108,7 +1108,7 @@ Current version: v1
Use the included test suite to validate API functionality: Use the included test suite to validate API functionality:
```bash ```
# Run API integration tests # Run API integration tests
cd src/orchestrator cd src/orchestrator
cargo test --test api_tests cargo test --test api_tests

View File

@ -23,7 +23,7 @@ Provisioning provides SDKs in multiple languages to facilitate integration:
### Installation ### Installation
```bash ```
# Install from PyPI # Install from PyPI
pip install provisioning-client pip install provisioning-client
@ -33,7 +33,7 @@ pip install git+https://github.com/provisioning-systems/python-client.git
### Quick Start ### Quick Start
```python ```
from provisioning_client import ProvisioningClient from provisioning_client import ProvisioningClient
import asyncio import asyncio
@ -79,7 +79,7 @@ if __name__ == "__main__":
#### WebSocket Integration #### WebSocket Integration
```python ```
async def monitor_workflows(): async def monitor_workflows():
client = ProvisioningClient() client = ProvisioningClient()
await client.authenticate() await client.authenticate()
@ -103,7 +103,7 @@ async def monitor_workflows():
#### Batch Operations #### Batch Operations
```python ```
async def execute_batch_deployment(): async def execute_batch_deployment():
client = ProvisioningClient() client = ProvisioningClient()
await client.authenticate() await client.authenticate()
@ -158,7 +158,7 @@ async def execute_batch_deployment():
#### Error Handling with Retries #### Error Handling with Retries
```python ```
from provisioning_client.exceptions import ( from provisioning_client.exceptions import (
ProvisioningAPIError, ProvisioningAPIError,
AuthenticationError, AuthenticationError,
@ -209,7 +209,7 @@ async def robust_workflow():
#### ProvisioningClient Class #### ProvisioningClient Class
```python ```
class ProvisioningClient: class ProvisioningClient:
def __init__(self, def __init__(self,
base_url: str = "http://localhost:9090", base_url: str = "http://localhost:9090",
@ -258,7 +258,7 @@ class ProvisioningClient:
### Installation ### Installation
```bash ```
# npm # npm
npm install @provisioning/client npm install @provisioning/client
@ -271,7 +271,7 @@ pnpm add @provisioning/client
### Quick Start ### Quick Start
```typescript ```
import { ProvisioningClient } from '@provisioning/client'; import { ProvisioningClient } from '@provisioning/client';
async function main() { async function main() {
@ -308,7 +308,7 @@ main();
### React Integration ### React Integration
```tsx ```
import React, { useState, useEffect } from 'react'; import React, { useState, useEffect } from 'react';
import { ProvisioningClient } from '@provisioning/client'; import { ProvisioningClient } from '@provisioning/client';
@ -434,7 +434,7 @@ export default WorkflowDashboard;
### Node.js CLI Tool ### Node.js CLI Tool
```typescript ```
#!/usr/bin/env node #!/usr/bin/env node
import { Command } from 'commander'; import { Command } from 'commander';
@ -590,7 +590,7 @@ program.parse();
### API Reference ### API Reference
```typescript ```
interface ProvisioningClientOptions { interface ProvisioningClientOptions {
baseUrl?: string; baseUrl?: string;
authUrl?: string; authUrl?: string;
@ -644,13 +644,13 @@ class ProvisioningClient extends EventEmitter {
### Installation ### Installation
```bash ```
go get github.com/provisioning-systems/go-client go get github.com/provisioning-systems/go-client
``` ```
### Quick Start ### Quick Start
```go ```
package main package main
import ( import (
@ -711,7 +711,7 @@ func main() {
### WebSocket Integration ### WebSocket Integration
```go ```
package main package main
import ( import (
@ -777,7 +777,7 @@ func main() {
### HTTP Client with Retry Logic ### HTTP Client with Retry Logic
```go ```
package main package main
import ( import (
@ -868,7 +868,7 @@ func main() {
Add to your `Cargo.toml`: Add to your `Cargo.toml`:
```toml ```
[dependencies] [dependencies]
provisioning-rs = "2.0.0" provisioning-rs = "2.0.0"
tokio = { version = "1.0", features = ["full"] } tokio = { version = "1.0", features = ["full"] }
@ -876,7 +876,7 @@ tokio = { version = "1.0", features = ["full"] }
### Quick Start ### Quick Start
```rust ```
use provisioning_rs::{ProvisioningClient, Config, CreateServerRequest}; use provisioning_rs::{ProvisioningClient, Config, CreateServerRequest};
use tokio; use tokio;
@ -932,7 +932,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
### WebSocket Integration ### WebSocket Integration
```rust ```
use provisioning_rs::{ProvisioningClient, Config, WebSocketEvent}; use provisioning_rs::{ProvisioningClient, Config, WebSocketEvent};
use futures_util::StreamExt; use futures_util::StreamExt;
use tokio; use tokio;
@ -988,7 +988,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
### Batch Operations ### Batch Operations
```rust ```
use provisioning_rs::{BatchOperationRequest, BatchOperation}; use provisioning_rs::{BatchOperationRequest, BatchOperation};
#[tokio::main] #[tokio::main]

View File

@ -30,7 +30,7 @@ The main WebSocket endpoint for real-time events and monitoring.
**Example Connection:** **Example Connection:**
```javascript ```
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token&events=task,batch,system'); const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token&events=task,batch,system');
``` ```
@ -64,7 +64,7 @@ Live log streaming endpoint.
All WebSocket connections require authentication via JWT token: All WebSocket connections require authentication via JWT token:
```javascript ```
// Include token in connection URL // Include token in connection URL
const ws = new WebSocket('ws://localhost:9090/ws?token=' + jwtToken); const ws = new WebSocket('ws://localhost:9090/ws?token=' + jwtToken);
@ -93,7 +93,7 @@ ws.onopen = function() {
Fired when a workflow task status changes. Fired when a workflow task status changes.
```json ```
{ {
"event_type": "TaskStatusChanged", "event_type": "TaskStatusChanged",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -116,7 +116,7 @@ Fired when a workflow task status changes.
Fired when batch operation status changes. Fired when batch operation status changes.
```json ```
{ {
"event_type": "BatchOperationUpdate", "event_type": "BatchOperationUpdate",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -150,7 +150,7 @@ Fired when batch operation status changes.
Fired when system health status changes. Fired when system health status changes.
```json ```
{ {
"event_type": "SystemHealthUpdate", "event_type": "SystemHealthUpdate",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -185,7 +185,7 @@ Fired when system health status changes.
Fired when workflow progress changes. Fired when workflow progress changes.
```json ```
{ {
"event_type": "WorkflowProgressUpdate", "event_type": "WorkflowProgressUpdate",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -215,7 +215,7 @@ Fired when workflow progress changes.
Real-time log streaming. Real-time log streaming.
```json ```
{ {
"event_type": "LogEntry", "event_type": "LogEntry",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -241,7 +241,7 @@ Real-time log streaming.
Real-time metrics streaming. Real-time metrics streaming.
```json ```
{ {
"event_type": "MetricUpdate", "event_type": "MetricUpdate",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -266,7 +266,7 @@ Real-time metrics streaming.
Applications can define custom event types: Applications can define custom event types:
```json ```
{ {
"event_type": "CustomApplicationEvent", "event_type": "CustomApplicationEvent",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -283,7 +283,7 @@ Applications can define custom event types:
### Connection Management ### Connection Management
```javascript ```
class ProvisioningWebSocket { class ProvisioningWebSocket {
constructor(baseUrl, token, options = {}) { constructor(baseUrl, token, options = {}) {
this.baseUrl = baseUrl; this.baseUrl = baseUrl;
@ -430,7 +430,7 @@ ws.subscribe(['TaskStatusChanged', 'WorkflowProgressUpdate']);
### Real-Time Dashboard Example ### Real-Time Dashboard Example
```javascript ```
class ProvisioningDashboard { class ProvisioningDashboard {
constructor(wsUrl, token) { constructor(wsUrl, token) {
this.ws = new ProvisioningWebSocket(wsUrl, token); this.ws = new ProvisioningWebSocket(wsUrl, token);
@ -542,7 +542,7 @@ const dashboard = new ProvisioningDashboard('ws://localhost:9090', jwtToken);
The orchestrator implements WebSocket support using Axum and Tokio: The orchestrator implements WebSocket support using Axum and Tokio:
```rust ```
use axum::{ use axum::{
extract::{ws::WebSocket, ws::WebSocketUpgrade, Query, State}, extract::{ws::WebSocket, ws::WebSocketUpgrade, Query, State},
response::Response, response::Response,
@ -702,7 +702,7 @@ fn has_event_permission(claims: &Claims, event_type: &str) -> bool {
### Client-Side Filtering ### Client-Side Filtering
```javascript ```
// Subscribe to specific event types // Subscribe to specific event types
ws.subscribe(['TaskStatusChanged', 'WorkflowProgressUpdate']); ws.subscribe(['TaskStatusChanged', 'WorkflowProgressUpdate']);
@ -741,7 +741,7 @@ Events can be filtered on the server side based on:
### Connection Errors ### Connection Errors
```javascript ```
ws.on('error', (error) => { ws.on('error', (error) => {
console.error('WebSocket error:', error); console.error('WebSocket error:', error);
@ -780,7 +780,7 @@ ws.on('disconnected', (event) => {
### Heartbeat and Keep-Alive ### Heartbeat and Keep-Alive
```javascript ```
class ProvisioningWebSocket { class ProvisioningWebSocket {
constructor(baseUrl, token, options = {}) { constructor(baseUrl, token, options = {}) {
// ... existing code ... // ... existing code ...
@ -835,7 +835,7 @@ class ProvisioningWebSocket {
To improve performance, the server can batch multiple events into single WebSocket messages: To improve performance, the server can batch multiple events into single WebSocket messages:
```json ```
{ {
"type": "batch", "type": "batch",
"timestamp": "2025-09-26T10:00:00Z", "timestamp": "2025-09-26T10:00:00Z",
@ -856,7 +856,7 @@ To improve performance, the server can batch multiple events into single WebSock
Enable message compression for large events: Enable message compression for large events:
```javascript ```
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt&compression=true'); const ws = new WebSocket('ws://localhost:9090/ws?token=jwt&compression=true');
``` ```

View File

@ -28,7 +28,7 @@ The system needed a clear, maintainable structure that supports:
Adopt a **domain-driven hybrid structure** organized around functional boundaries: Adopt a **domain-driven hybrid structure** organized around functional boundaries:
```plaintext ```
src/ src/
├── core/ # Core system and CLI entry point ├── core/ # Core system and CLI entry point
├── platform/ # High-performance coordination layer (Rust orchestrator) ├── platform/ # High-performance coordination layer (Rust orchestrator)

View File

@ -49,7 +49,7 @@ Implement a **layered distribution strategy** with clear separation between deve
### Distribution Structure ### Distribution Structure
```plaintext ```
# User Distribution # User Distribution
/usr/local/bin/ /usr/local/bin/
├── provisioning # Main CLI entry point ├── provisioning # Main CLI entry point
@ -153,7 +153,7 @@ Use environment variables to control what gets installed.
### Configuration Hierarchy ### Configuration Hierarchy
```plaintext ```
System Defaults (lowest precedence) System Defaults (lowest precedence)
└── User Configuration └── User Configuration
└── Project Configuration └── Project Configuration

View File

@ -33,7 +33,7 @@ Implement **isolated user workspaces** with clear boundaries and hierarchical co
### Workspace Structure ### Workspace Structure
```plaintext ```
~/workspace/provisioning/ # User workspace root ~/workspace/provisioning/ # User workspace root
├── config/ ├── config/
│ ├── user.toml # User preferences and overrides │ ├── user.toml # User preferences and overrides
@ -141,7 +141,7 @@ Store all user configuration in database.
### Workspace Initialization ### Workspace Initialization
```bash ```
# Automatic workspace creation on first run # Automatic workspace creation on first run
provisioning workspace init provisioning workspace init
@ -163,7 +163,7 @@ provisioning workspace validate
### Backup and Migration ### Backup and Migration
```bash ```
# Backup entire workspace # Backup entire workspace
provisioning workspace backup --output ~/backup/provisioning-workspace.tar.gz provisioning workspace backup --output ~/backup/provisioning-workspace.tar.gz

View File

@ -54,7 +54,7 @@ Implement a **Hybrid Rust/Nushell Architecture** with clear separation of concer
#### Rust → Nushell Communication #### Rust → Nushell Communication
```rust ```
// Rust orchestrator invokes Nushell scripts via process execution // Rust orchestrator invokes Nushell scripts via process execution
let result = Command::new("nu") let result = Command::new("nu")
.arg("-c") .arg("-c")
@ -64,7 +64,7 @@ let result = Command::new("nu")
#### Nushell → Rust Communication #### Nushell → Rust Communication
```bash ```
# Nushell submits workflows to Rust orchestrator via HTTP API # Nushell submits workflows to Rust orchestrator via HTTP API
http post "http://localhost:9090/workflows/servers/create" { http post "http://localhost:9090/workflows/servers/create" {
name: "server-name", name: "server-name",

View File

@ -45,7 +45,7 @@ Implement a **registry-based extension framework** with structured discovery and
### Extension Structure ### Extension Structure
```plaintext ```
extensions/ extensions/
├── providers/ # Provider extensions ├── providers/ # Provider extensions
│ └── custom-cloud/ │ └── custom-cloud/
@ -75,7 +75,7 @@ extensions/
### Extension Manifest (extension.toml) ### Extension Manifest (extension.toml)
```toml ```
[extension] [extension]
name = "custom-provider" name = "custom-provider"
version = "1.0.0" version = "1.0.0"
@ -186,7 +186,7 @@ Traditional plugin architecture with dynamic loading.
### Extension Loading Lifecycle ### Extension Loading Lifecycle
```bash ```
# Extension discovery and validation # Extension discovery and validation
provisioning extension discover provisioning extension discover
provisioning extension validate --extension custom-provider provisioning extension validate --extension custom-provider
@ -208,7 +208,7 @@ provisioning extension update custom-provider
Extensions integrate with hierarchical configuration system: Extensions integrate with hierarchical configuration system:
```toml ```
# System configuration includes extension settings # System configuration includes extension settings
[custom_provider] [custom_provider]
api_endpoint = "https://api.custom-cloud.com" api_endpoint = "https://api.custom-cloud.com"
@ -238,7 +238,7 @@ timeout = 30
### Provider Extension Pattern ### Provider Extension Pattern
```nushell ```
# extensions/providers/custom-cloud/nulib/provider.nu # extensions/providers/custom-cloud/nulib/provider.nu
export def list-servers [] -> table { export def list-servers [] -> table {
http get $"($config.custom_provider.api_endpoint)/servers" http get $"($config.custom_provider.api_endpoint)/servers"
@ -260,7 +260,7 @@ export def create-server [name: string, config: record] -> record {
### Task Service Extension Pattern ### Task Service Extension Pattern
```nushell ```
# extensions/taskservs/custom-service/nulib/service.nu # extensions/taskservs/custom-service/nulib/service.nu
export def install [server: string] -> nothing { export def install [server: string] -> nothing {
let manifest_data = open ./manifests/deployment.yaml let manifest_data = open ./manifests/deployment.yaml

View File

@ -40,7 +40,7 @@ monolithic structure created multiple critical problems:
We refactored the monolithic CLI into a **modular, domain-driven architecture** with the following structure: We refactored the monolithic CLI into a **modular, domain-driven architecture** with the following structure:
```plaintext ```
provisioning/core/nulib/ provisioning/core/nulib/
├── provisioning (211 lines) ⬅️ 84% reduction ├── provisioning (211 lines) ⬅️ 84% reduction
├── main_provisioning/ ├── main_provisioning/
@ -63,7 +63,7 @@ provisioning/core/nulib/
Single source of truth for all flag parsing and argument building: Single source of truth for all flag parsing and argument building:
```nushell ```
export def parse_common_flags [flags: record]: nothing -> record export def parse_common_flags [flags: record]: nothing -> record
export def build_module_args [flags: record, extra: string = ""]: nothing -> string export def build_module_args [flags: record, extra: string = ""]: nothing -> string
export def set_debug_env [flags: record] export def set_debug_env [flags: record]
@ -81,7 +81,7 @@ export def get_debug_flag [flags: record]: nothing -> string
Central routing with 80+ command mappings: Central routing with 80+ command mappings:
```nushell ```
export def get_command_registry []: nothing -> record # 80+ shortcuts export def get_command_registry []: nothing -> record # 80+ shortcuts
export def dispatch_command [args: list, flags: record] # Main router export def dispatch_command [args: list, flags: record] # Main router
``` ```
@ -148,7 +148,7 @@ Eliminated repetition:
All handlers depend on abstractions (flag records, not concrete flags): All handlers depend on abstractions (flag records, not concrete flags):
```nushell ```
# Handler signature # Handler signature
export def handle_infrastructure_command [ export def handle_infrastructure_command [
command: string command: string
@ -182,7 +182,7 @@ export def handle_infrastructure_command [
Users can now access help in multiple ways: Users can now access help in multiple ways:
```bash ```
# All these work equivalently: # All these work equivalently:
provisioning help workspace provisioning help workspace
provisioning workspace help # ⬅️ NEW: Bi-directional provisioning workspace help # ⬅️ NEW: Bi-directional
@ -192,7 +192,7 @@ provisioning help ws # ⬅️ NEW: Shortcut in help
**Implementation:** **Implementation:**
```nushell ```
# Intercept "command help" → "help command" # Intercept "command help" → "help command"
let first_op = if ($ops_list | length) > 0 { ($ops_list | get 0) } else { "" } let first_op = if ($ops_list | length) > 0 { ($ops_list | get 0) } else { "" }
if $first_op in ["help" "h"] { if $first_op in ["help" "h"] {
@ -242,7 +242,7 @@ Comprehensive test suite created (`tests/test_provisioning_refactor.nu`):
### Test Results ### Test Results
```plaintext ```
📋 Testing main help... ✅ 📋 Testing main help... ✅
📋 Testing category help... ✅ 📋 Testing category help... ✅
🔄 Testing bi-directional help... ✅ 🔄 Testing bi-directional help... ✅
@ -319,7 +319,7 @@ Comprehensive test suite created (`tests/test_provisioning_refactor.nu`):
### Before: Repetitive Flag Handling ### Before: Repetitive Flag Handling
```nushell ```
"server" => { "server" => {
let use_check = if $check { "--check "} else { "" } let use_check = if $check { "--check "} else { "" }
let use_yes = if $yes { "--yes" } else { "" } let use_yes = if $yes { "--yes" } else { "" }
@ -335,7 +335,7 @@ Comprehensive test suite created (`tests/test_provisioning_refactor.nu`):
### After: Clean, Reusable ### After: Clean, Reusable
```nushell ```
def handle_server [ops: string, flags: record] { def handle_server [ops: string, flags: record] {
let args = build_module_args $flags $ops let args = build_module_args $flags $ops
run_module $args "server" --exec run_module $args "server" --exec

View File

@ -128,7 +128,7 @@ Remove support for:
### For Development ### For Development
```bash ```
# 1. Install Age # 1. Install Age
brew install age # or apt install age brew install age # or apt install age
@ -142,7 +142,7 @@ age-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisionin
### For Production ### For Production
```bash ```
# 1. Set up Cosmian KMS (cloud or self-hosted) # 1. Set up Cosmian KMS (cloud or self-hosted)
# 2. Create master key in Cosmian # 2. Create master key in Cosmian
# 3. Migrate secrets from Vault/AWS to Cosmian # 3. Migrate secrets from Vault/AWS to Cosmian

View File

@ -117,7 +117,7 @@ Use Casbin authorization library.
#### Architecture #### Architecture
```plaintext ```
┌─────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────┐
│ Orchestrator │ │ Orchestrator │
├─────────────────────────────────────────────────────────┤ ├─────────────────────────────────────────────────────────┤
@ -143,7 +143,7 @@ Use Casbin authorization library.
#### Policy Organization #### Policy Organization
```plaintext ```
provisioning/config/cedar-policies/ provisioning/config/cedar-policies/
├── schema.cedar # Entity and action definitions ├── schema.cedar # Entity and action definitions
├── production.cedar # Production environment policies ├── production.cedar # Production environment policies
@ -154,7 +154,7 @@ provisioning/config/cedar-policies/
#### Rust Implementation #### Rust Implementation
```plaintext ```
provisioning/platform/orchestrator/src/security/ provisioning/platform/orchestrator/src/security/
├── cedar.rs # Cedar engine integration (450 lines) ├── cedar.rs # Cedar engine integration (450 lines)
├── policy_loader.rs # Policy loading with hot reload (320 lines) ├── policy_loader.rs # Policy loading with hot reload (320 lines)
@ -190,7 +190,7 @@ provisioning/platform/orchestrator/src/security/
#### Context Variables #### Context Variables
```rust ```
AuthorizationContext { AuthorizationContext {
mfa_verified: bool, // MFA verification status mfa_verified: bool, // MFA verification status
ip_address: String, // Client IP address ip_address: String, // Client IP address
@ -204,7 +204,7 @@ AuthorizationContext {
#### Example Policy #### Example Policy
```cedar ```
// Production deployments require MFA verification // Production deployments require MFA verification
@id("prod-deploy-mfa") @id("prod-deploy-mfa")
@description("All production deployments must have MFA verification") @description("All production deployments must have MFA verification")

View File

@ -249,7 +249,7 @@ Implement a complete security architecture using 12 specialized components organ
### End-to-End Request Flow ### End-to-End Request Flow
```plaintext ```
1. User Request 1. User Request
2. Rate Limiting (100 req/min per IP) 2. Rate Limiting (100 req/min per IP)
@ -271,7 +271,7 @@ Implement a complete security architecture using 12 specialized components organ
### Emergency Access Flow ### Emergency Access Flow
```plaintext ```
1. Emergency Request (reason + justification) 1. Emergency Request (reason + justification)
2. Multi-Party Approval (2+ approvers, different teams) 2. Multi-Party Approval (2+ approvers, different teams)
@ -382,7 +382,7 @@ Implement a complete security architecture using 12 specialized components organ
### Development ### Development
```bash ```
# Start all services # Start all services
cd provisioning/platform/kms-service && cargo run & cd provisioning/platform/kms-service && cargo run &
cd provisioning/platform/orchestrator && cargo run & cd provisioning/platform/orchestrator && cargo run &
@ -391,7 +391,7 @@ cd provisioning/platform/control-center && cargo run &
### Production ### Production
```bash ```
# Kubernetes deployment # Kubernetes deployment
kubectl apply -f k8s/security-stack.yaml kubectl apply -f k8s/security-stack.yaml
@ -410,7 +410,7 @@ systemctl start provisioning-control-center
### Environment Variables ### Environment Variables
```bash ```
# JWT # JWT
export JWT_ISSUER="control-center" export JWT_ISSUER="control-center"
export JWT_AUDIENCE="orchestrator,cli" export JWT_AUDIENCE="orchestrator,cli"
@ -433,7 +433,7 @@ export MFA_WEBAUTHN_RP_ID="provisioning.example.com"
### Config Files ### Config Files
```toml ```
# provisioning/config/security.toml # provisioning/config/security.toml
[jwt] [jwt]
issuer = "control-center" issuer = "control-center"
@ -470,7 +470,7 @@ pii_anonymization = true
### Run All Tests ### Run All Tests
```bash ```
# Control Center (JWT, MFA) # Control Center (JWT, MFA)
cd provisioning/platform/control-center cd provisioning/platform/control-center
cargo test cargo test
@ -489,7 +489,7 @@ nu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu
### Integration Tests ### Integration Tests
```bash ```
# Full security flow # Full security flow
cd provisioning/platform/orchestrator cd provisioning/platform/orchestrator
cargo test --test security_integration_tests cargo test --test security_integration_tests

View File

@ -65,7 +65,7 @@ Define and document the three-format approach through:
**Move template files to proper directory structure and correct extensions**: **Move template files to proper directory structure and correct extensions**:
```plaintext ```
Previous (KCL): Previous (KCL):
provisioning/kcl/templates/*.k (had Nushell/Jinja2 code, not KCL) provisioning/kcl/templates/*.k (had Nushell/Jinja2 code, not KCL)
@ -326,7 +326,7 @@ Current (Nickel):
Currently, 15/16 files in `provisioning/kcl/templates/` have `.k` extension but contain Nushell/Jinja2 code, not KCL: Currently, 15/16 files in `provisioning/kcl/templates/` have `.k` extension but contain Nushell/Jinja2 code, not KCL:
```plaintext ```
provisioning/kcl/templates/ provisioning/kcl/templates/
├── server.ncl # Actually Nushell/Jinja2 template ├── server.ncl # Actually Nushell/Jinja2 template
├── taskserv.ncl # Actually Nushell/Jinja2 template ├── taskserv.ncl # Actually Nushell/Jinja2 template
@ -343,7 +343,7 @@ This causes:
Reorganize into type-specific directories: Reorganize into type-specific directories:
```plaintext ```
provisioning/templates/ provisioning/templates/
├── nushell/ # Nushell code generation (*.nu.j2) ├── nushell/ # Nushell code generation (*.nu.j2)
│ ├── server.nu.j2 │ ├── server.nu.j2

View File

@ -112,7 +112,7 @@ The provisioning system required:
**Example - UpCloud Provider**: **Example - UpCloud Provider**:
```nickel ```
# upcloud/nickel/main.ncl (migrated from upcloud/kcl/) # upcloud/nickel/main.ncl (migrated from upcloud/kcl/)
let contracts = import "./contracts.ncl" in let contracts = import "./contracts.ncl" in
let defaults = import "./defaults.ncl" in let defaults = import "./defaults.ncl" in
@ -171,7 +171,7 @@ let defaults = import "./defaults.ncl" in
**File 1: Contracts** (`batch_contracts.ncl`): **File 1: Contracts** (`batch_contracts.ncl`):
```nickel ```
{ {
BatchScheduler = { BatchScheduler = {
strategy | String, strategy | String,
@ -184,7 +184,7 @@ let defaults = import "./defaults.ncl" in
**File 2: Defaults** (`batch_defaults.ncl`): **File 2: Defaults** (`batch_defaults.ncl`):
```nickel ```
{ {
scheduler = { scheduler = {
strategy = "dependency_first", strategy = "dependency_first",
@ -197,7 +197,7 @@ let defaults = import "./defaults.ncl" in
**File 3: Main** (`batch.ncl`): **File 3: Main** (`batch.ncl`):
```nickel ```
let contracts = import "./batch_contracts.ncl" in let contracts = import "./batch_contracts.ncl" in
let defaults = import "./batch_defaults.ncl" in let defaults = import "./batch_defaults.ncl" in
@ -218,7 +218,7 @@ let defaults = import "./batch_defaults.ncl" in
### Domain-Organized Architecture ### Domain-Organized Architecture
```plaintext ```
provisioning/schemas/ provisioning/schemas/
├── lib/ # Storage, TaskServDef, ClusterDef ├── lib/ # Storage, TaskServDef, ClusterDef
├── config/ # Settings, defaults, workspace_config ├── config/ # Settings, defaults, workspace_config
@ -233,7 +233,7 @@ provisioning/schemas/
**Import pattern**: **Import pattern**:
```nickel ```
let provisioning = import "./main.ncl" in let provisioning = import "./main.ncl" in
provisioning.lib # For Storage, TaskServDef provisioning.lib # For Storage, TaskServDef
provisioning.config.settings # For Settings, Defaults provisioning.config.settings # For Settings, Defaults
@ -254,7 +254,7 @@ provisioning.operations.workflows
- No snapshot overhead - No snapshot overhead
- Usage: Local development, testing, experimentation - Usage: Local development, testing, experimentation
```bash ```
# workspace_librecloud/nickel/main.ncl # workspace_librecloud/nickel/main.ncl
import "../../provisioning/schemas/main.ncl" import "../../provisioning/schemas/main.ncl"
import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl" import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
@ -264,13 +264,13 @@ import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
Create immutable snapshots for reproducible deployments: Create immutable snapshots for reproducible deployments:
```bash ```
provisioning workspace freeze --version "2025-12-15-prod-v1" --env production provisioning workspace freeze --version "2025-12-15-prod-v1" --env production
``` ```
**Frozen structure** (`.frozen/{version}/`): **Frozen structure** (`.frozen/{version}/`):
```plaintext ```
├── provisioning/schemas/ # Snapshot of central schemas ├── provisioning/schemas/ # Snapshot of central schemas
├── extensions/ # Snapshot of all extensions ├── extensions/ # Snapshot of all extensions
└── workspace/ # Snapshot of workspace configs └── workspace/ # Snapshot of workspace configs
@ -285,7 +285,7 @@ provisioning workspace freeze --version "2025-12-15-prod-v1" --env production
**Deploy from frozen snapshot**: **Deploy from frozen snapshot**:
```bash ```
provisioning deploy --frozen "2025-12-15-prod-v1" --infra wuji provisioning deploy --frozen "2025-12-15-prod-v1" --infra wuji
``` ```
@ -308,7 +308,7 @@ provisioning deploy --frozen "2025-12-15-prod-v1" --infra wuji
**Key Feature**: Nickel schemas → Type-safe UIs → Nickel output **Key Feature**: Nickel schemas → Type-safe UIs → Nickel output
```bash ```
# Nickel schema → Interactive form # Nickel schema → Interactive form
typedialog form --schema server.ncl --output json typedialog form --schema server.ncl --output json

View File

@ -19,7 +19,7 @@ The architectural decision was whether the plugin should:
Nickel configurations in provisioning use the **module system**: Nickel configurations in provisioning use the **module system**:
```nickel ```
# config/database.ncl # config/database.ncl
import "lib/defaults" as defaults import "lib/defaults" as defaults
import "lib/validation" as valid import "lib/validation" as valid
@ -47,7 +47,7 @@ Implement the `nu_plugin_nickel` plugin as a **CLI wrapper** that invokes the ex
### Architecture Diagram ### Architecture Diagram
```plaintext ```
┌─────────────────────────────┐ ┌─────────────────────────────┐
│ Nushell Script │ │ Nushell Script │
│ │ │ │
@ -288,7 +288,7 @@ This makes direct usage risky. The CLI is the documented, proven interface.
The plugin uses the **correct Nickel command syntax**: The plugin uses the **correct Nickel command syntax**:
```rust ```
// Correct: // Correct:
cmd.arg("export").arg(file).arg("--format").arg(format); cmd.arg("export").arg(file).arg("--format").arg(format);
// Results in: "nickel export /file --format json" // Results in: "nickel export /file --format json"
@ -323,7 +323,7 @@ Plugin correctly processes JSON output:
This enables Nushell cell path access: This enables Nushell cell path access:
```nushell ```
nickel-export json /config.ncl | .database.host # ✅ Works nickel-export json /config.ncl | .database.host # ✅ Works
``` ```
@ -343,7 +343,7 @@ nickel-export json /config.ncl | .database.host # ✅ Works
**Manual Verification**: **Manual Verification**:
```bash ```
# Test module imports # Test module imports
nickel-export json /workspace/config.ncl nickel-export json /workspace/config.ncl

View File

@ -78,7 +78,7 @@ integration with the provisioning orchestrator.
### Architecture Diagram ### Architecture Diagram
```text ```
┌─────────────────────────────────────────┐ ┌─────────────────────────────────────────┐
│ Nushell Script │ │ Nushell Script │
│ │ │ │
@ -167,7 +167,7 @@ integration with the provisioning orchestrator.
Nushell's `input` command is limited: Nushell's `input` command is limited:
```nushell ```
# Current: No validation, no security # Current: No validation, no security
let password = input "Password: " # ❌ Shows in terminal let password = input "Password: " # ❌ Shows in terminal
let region = input "AWS Region: " # ❌ No autocomplete/validation let region = input "AWS Region: " # ❌ No autocomplete/validation
@ -184,7 +184,7 @@ let region = input "AWS Region: " # ❌ No autocomplete/validation
Nickel is declarative and cannot prompt users: Nickel is declarative and cannot prompt users:
```nickel ```
# Nickel defines what the config looks like, NOT how to get it # Nickel defines what the config looks like, NOT how to get it
{ {
database = { database = {
@ -243,7 +243,7 @@ Nickel is declarative and cannot prompt users:
### Mitigation Strategies ### Mitigation Strategies
**Non-Interactive Mode**: **Non-Interactive Mode**:
```rust ```
// Support both interactive and non-interactive // Support both interactive and non-interactive
if terminal::is_interactive() { if terminal::is_interactive() {
// Show TUI dialog // Show TUI dialog
@ -255,7 +255,7 @@ if terminal::is_interactive() {
``` ```
**Testing**: **Testing**:
```rust ```
// Unit tests: Test form validation logic (no TUI) // Unit tests: Test form validation logic (no TUI)
#[test] #[test]
fn test_validate_workspace_name() { fn test_validate_workspace_name() {
@ -267,7 +267,7 @@ fn test_validate_workspace_name() {
``` ```
**Scriptability**: **Scriptability**:
```bash ```
# Batch mode: Provide config via file # Batch mode: Provide config via file
provisioning workspace init --config workspace.toml provisioning workspace init --config workspace.toml
@ -316,7 +316,7 @@ provisioning workspace init --interactive
### Form Definition Pattern ### Form Definition Pattern
```rust ```
use typdialog::Form; use typdialog::Form;
pub fn workspace_initialization_form() -> Result<WorkspaceConfig> { pub fn workspace_initialization_form() -> Result<WorkspaceConfig> {
@ -353,7 +353,7 @@ pub fn workspace_initialization_form() -> Result<WorkspaceConfig> {
### Integration with Nickel ### Integration with Nickel
```rust ```
// 1. Get validated input from TUI dialog // 1. Get validated input from TUI dialog
let config = workspace_initialization_form()?; let config = workspace_initialization_form()?;
@ -370,7 +370,7 @@ fs::write("workspace/config.toml", config_toml)?;
### CLI Command Structure ### CLI Command Structure
```rust ```
// provisioning/core/cli/src/commands/workspace.rs // provisioning/core/cli/src/commands/workspace.rs
#[derive(Parser)] #[derive(Parser)]
@ -404,7 +404,7 @@ pub fn handle_workspace_init(args: InitArgs) -> Result<()> {
### Validation Rules ### Validation Rules
```rust ```
pub fn validate_workspace_name(name: &str) -> Result<(), String> { pub fn validate_workspace_name(name: &str) -> Result<(), String> {
// Alphanumeric, hyphens, 3-32 chars // Alphanumeric, hyphens, 3-32 chars
let re = Regex::new(r"^[a-z0-9-]{3,32}$").unwrap(); let re = Regex::new(r"^[a-z0-9-]{3,32}$").unwrap();
@ -425,7 +425,7 @@ pub fn validate_region(region: &str) -> Result<(), String> {
### Security: Password Handling ### Security: Password Handling
```rust ```
use zeroize::Zeroizing; use zeroize::Zeroizing;
pub fn get_secure_password() -> Result<Zeroizing<String>> { pub fn get_secure_password() -> Result<Zeroizing<String>> {
@ -447,7 +447,7 @@ pub fn get_secure_password() -> Result<Zeroizing<String>> {
## Testing Strategy ## Testing Strategy
**Unit Tests**: **Unit Tests**:
```rust ```
#[test] #[test]
fn test_workspace_name_validation() { fn test_workspace_name_validation() {
assert!(validate_workspace_name("my-workspace").is_ok()); assert!(validate_workspace_name("my-workspace").is_ok());
@ -457,7 +457,7 @@ fn test_workspace_name_validation() {
``` ```
**Integration Tests**: **Integration Tests**:
```rust ```
// Use non-interactive mode with config files // Use non-interactive mode with config files
#[test] #[test]
fn test_workspace_init_non_interactive() { fn test_workspace_init_non_interactive() {
@ -481,7 +481,7 @@ fn test_workspace_init_non_interactive() {
``` ```
**Manual Testing**: **Manual Testing**:
```bash ```
# Test interactive flow # Test interactive flow
cargo build --release cargo build --release
./target/release/provisioning workspace init --interactive ./target/release/provisioning workspace init --interactive
@ -495,7 +495,7 @@ cargo build --release
## Configuration Integration ## Configuration Integration
**CLI Flag**: **CLI Flag**:
```toml ```
# provisioning/config/config.defaults.toml # provisioning/config/config.defaults.toml
[ui] [ui]
interactive_mode = "auto" # "auto" | "always" | "never" interactive_mode = "auto" # "auto" | "always" | "never"
@ -503,7 +503,7 @@ dialog_theme = "default" # "default" | "minimal" | "colorful"
``` ```
**Environment Override**: **Environment Override**:
```bash ```
# Force non-interactive mode (for CI/CD) # Force non-interactive mode (for CI/CD)
export PROVISIONING_INTERACTIVE=false export PROVISIONING_INTERACTIVE=false
@ -523,7 +523,7 @@ export PROVISIONING_INTERACTIVE=true
- Validation rule patterns - Validation rule patterns
**Configuration Schema**: **Configuration Schema**:
```nickel ```
# provisioning/schemas/workspace.ncl # provisioning/schemas/workspace.ncl
{ {
WorkspaceConfig = { WorkspaceConfig = {

View File

@ -93,7 +93,7 @@ Integrate **SecretumVault** as the centralized secrets management system for the
### Architecture Diagram ### Architecture Diagram
```text ```
┌─────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────┐
│ Provisioning CLI / Orchestrator / Services │ │ Provisioning CLI / Orchestrator / Services │
│ │ │ │
@ -273,7 +273,7 @@ SOPS is excellent for **static secrets in git**, but inadequate for:
### Mitigation Strategies ### Mitigation Strategies
**High Availability**: **High Availability**:
```bash ```
# Deploy SecretumVault cluster (3 nodes) # Deploy SecretumVault cluster (3 nodes)
provisioning deploy secretum-vault --ha --replicas 3 provisioning deploy secretum-vault --ha --replicas 3
@ -282,7 +282,7 @@ provisioning deploy secretum-vault --ha --replicas 3
``` ```
**Migration from SOPS**: **Migration from SOPS**:
```bash ```
# Phase 1: Import existing SOPS secrets into SecretumVault # Phase 1: Import existing SOPS secrets into SecretumVault
provisioning secrets migrate --from-sops config/secrets.yaml provisioning secrets migrate --from-sops config/secrets.yaml
@ -291,7 +291,7 @@ provisioning secrets migrate --from-sops config/secrets.yaml
``` ```
**Fallback Strategy**: **Fallback Strategy**:
```rust ```
// Graceful degradation if vault unavailable // Graceful degradation if vault unavailable
let secret = match vault_client.get_secret("database/password").await { let secret = match vault_client.get_secret("database/password").await {
Ok(s) => s, Ok(s) => s,
@ -305,7 +305,7 @@ let secret = match vault_client.get_secret("database/password").await {
``` ```
**Operational Monitoring**: **Operational Monitoring**:
```toml ```
# prometheus metrics # prometheus metrics
secretum_vault_request_duration_seconds secretum_vault_request_duration_seconds
secretum_vault_secret_lease_expiry secretum_vault_secret_lease_expiry
@ -351,7 +351,7 @@ secretum_vault_raft_leader_changes
### SecretumVault Deployment ### SecretumVault Deployment
```bash ```
# Deploy via provisioning system # Deploy via provisioning system
provisioning deploy secretum-vault \ provisioning deploy secretum-vault \
--ha \ --ha \
@ -367,7 +367,7 @@ provisioning vault unseal --key-shares 5 --key-threshold 3
### Rust Client Library ### Rust Client Library
```rust ```
// provisioning/core/libs/secretum-client/src/lib.rs // provisioning/core/libs/secretum-client/src/lib.rs
use secretum_vault::{Client, SecretEngine, Auth}; use secretum_vault::{Client, SecretEngine, Auth};
@ -402,7 +402,7 @@ impl VaultClient {
### Nushell Integration ### Nushell Integration
```nushell ```
# Nushell commands via Rust CLI wrapper # Nushell commands via Rust CLI wrapper
provisioning secrets get database/prod/password provisioning secrets get database/prod/password
provisioning secrets set api/keys/stripe --value "sk_live_xyz" provisioning secrets set api/keys/stripe --value "sk_live_xyz"
@ -413,7 +413,7 @@ provisioning secrets list database/
### Nickel Configuration Integration ### Nickel Configuration Integration
```nickel ```
# provisioning/schemas/database.ncl # provisioning/schemas/database.ncl
{ {
database = { database = {
@ -429,7 +429,7 @@ provisioning secrets list database/
### Cedar Policy for Secret Access ### Cedar Policy for Secret Access
```cedar ```
// policy: developers can read dev secrets, not prod // policy: developers can read dev secrets, not prod
permit( permit(
principal in Group::"developers", principal in Group::"developers",
@ -455,7 +455,7 @@ permit(
### Dynamic Database Credentials ### Dynamic Database Credentials
```rust ```
// Application requests temporary DB credentials // Application requests temporary DB credentials
let creds = vault_client let creds = vault_client
.database() .database()
@ -472,7 +472,7 @@ println!("TTL: {}", creds.lease_duration); // 1h
### Secret Rotation Automation ### Secret Rotation Automation
```toml ```
# secretum-vault config # secretum-vault config
[[rotation_policies]] [[rotation_policies]]
path = "database/prod/password" path = "database/prod/password"
@ -487,7 +487,7 @@ max_age = "90d"
### Audit Log Format ### Audit Log Format
```json ```
{ {
"timestamp": "2025-01-08T12:34:56Z", "timestamp": "2025-01-08T12:34:56Z",
"type": "request", "type": "request",
@ -515,7 +515,7 @@ max_age = "90d"
## Testing Strategy ## Testing Strategy
**Unit Tests**: **Unit Tests**:
```rust ```
#[tokio::test] #[tokio::test]
async fn test_get_secret() { async fn test_get_secret() {
let vault = mock_vault_client(); let vault = mock_vault_client();
@ -533,7 +533,7 @@ async fn test_dynamic_credentials_generation() {
``` ```
**Integration Tests**: **Integration Tests**:
```bash ```
# Test vault deployment # Test vault deployment
provisioning deploy secretum-vault --test-mode provisioning deploy secretum-vault --test-mode
provisioning vault init provisioning vault init
@ -551,7 +551,7 @@ provisioning secrets rotate test/secret
``` ```
**Security Tests**: **Security Tests**:
```rust ```
#[tokio::test] #[tokio::test]
async fn test_unauthorized_access_denied() { async fn test_unauthorized_access_denied() {
let vault = vault_client_with_limited_token(); let vault = vault_client_with_limited_token();
@ -563,7 +563,7 @@ async fn test_unauthorized_access_denied() {
## Configuration Integration ## Configuration Integration
**Provisioning Config**: **Provisioning Config**:
```toml ```
# provisioning/config/config.defaults.toml # provisioning/config/config.defaults.toml
[secrets] [secrets]
provider = "secretum-vault" # "secretum-vault" | "sops" | "env" provider = "secretum-vault" # "secretum-vault" | "sops" | "env"
@ -583,7 +583,7 @@ max_size = "100MB"
``` ```
**Environment Variables**: **Environment Variables**:
```bash ```
export VAULT_ADDR="https://vault.example.com:8200" export VAULT_ADDR="https://vault.example.com:8200"
export VAULT_TOKEN="s.abc123def456..." export VAULT_TOKEN="s.abc123def456..."
export VAULT_NAMESPACE="provisioning" export VAULT_NAMESPACE="provisioning"

View File

@ -100,7 +100,7 @@ All AI components are **schema-aware**, **security-enforced**, and **human-super
### Architecture Diagram ### Architecture Diagram
```text ```
┌─────────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────────┐
│ User Interfaces │ │ User Interfaces │
│ │ │ │
@ -268,7 +268,7 @@ All AI components are **schema-aware**, **security-enforced**, and **human-super
Traditional AI code generation fails for infrastructure because: Traditional AI code generation fails for infrastructure because:
```text ```
Generic AI (like GitHub Copilot): Generic AI (like GitHub Copilot):
❌ Generates syntactically correct but semantically wrong configs ❌ Generates syntactically correct but semantically wrong configs
❌ Doesn't understand cloud provider constraints ❌ Doesn't understand cloud provider constraints
@ -278,7 +278,7 @@ Generic AI (like GitHub Copilot):
``` ```
**Schema-aware AI** (our approach): **Schema-aware AI** (our approach):
```nickel ```
# Nickel schema provides ground truth # Nickel schema provides ground truth
{ {
Database = { Database = {
@ -303,7 +303,7 @@ Generic AI (like GitHub Copilot):
LLMs alone have limitations: LLMs alone have limitations:
```text ```
Pure LLM: Pure LLM:
❌ Knowledge cutoff (no recent updates) ❌ Knowledge cutoff (no recent updates)
❌ Hallucinations (invents plausible-sounding configs) ❌ Hallucinations (invents plausible-sounding configs)
@ -312,7 +312,7 @@ Pure LLM:
``` ```
**RAG-enhanced LLM**: **RAG-enhanced LLM**:
```toml ```
Query: "How to configure Postgres with encryption?" Query: "How to configure Postgres with encryption?"
RAG retrieves: RAG retrieves:
@ -332,7 +332,7 @@ LLM generates answer WITH retrieved context:
AI-generated infrastructure configs require human approval: AI-generated infrastructure configs require human approval:
```rust ```
// All AI operations require approval // All AI operations require approval
pub async fn ai_generate_config(request: GenerateRequest) -> Result<Config> { pub async fn ai_generate_config(request: GenerateRequest) -> Result<Config> {
let ai_generated = ai_service.generate(request).await?; let ai_generated = ai_service.generate(request).await?;
@ -414,7 +414,7 @@ No single LLM provider is best for all tasks:
### Mitigation Strategies ### Mitigation Strategies
**Cost Control**: **Cost Control**:
```toml ```
[ai.rate_limiting] [ai.rate_limiting]
requests_per_minute = 60 requests_per_minute = 60
tokens_per_day = 1000000 tokens_per_day = 1000000
@ -427,7 +427,7 @@ ttl = "1h"
``` ```
**Latency Optimization**: **Latency Optimization**:
```rust ```
// Streaming responses for real-time feedback // Streaming responses for real-time feedback
pub async fn ai_generate_stream(request: GenerateRequest) -> impl Stream<Item = String> { pub async fn ai_generate_stream(request: GenerateRequest) -> impl Stream<Item = String> {
ai_service ai_service
@ -438,7 +438,7 @@ pub async fn ai_generate_stream(request: GenerateRequest) -> impl Stream<Item =
``` ```
**Privacy (Local Models)**: **Privacy (Local Models)**:
```toml ```
[ai] [ai]
provider = "local" provider = "local"
model_path = "/opt/provisioning/models/llama-3-70b" model_path = "/opt/provisioning/models/llama-3-70b"
@ -447,7 +447,7 @@ model_path = "/opt/provisioning/models/llama-3-70b"
``` ```
**Validation (Defense in Depth)**: **Validation (Defense in Depth)**:
```text ```
AI generates config AI generates config
Nickel schema validation (syntax, types, contracts) Nickel schema validation (syntax, types, contracts)
@ -460,7 +460,7 @@ Deployment
``` ```
**Observability**: **Observability**:
```toml ```
[ai.observability] [ai.observability]
trace_all_requests = true trace_all_requests = true
store_conversations = true store_conversations = true
@ -510,7 +510,7 @@ conversation_retention = "30d"
### AI Service API ### AI Service API
```rust ```
// platform/crates/ai-service/src/lib.rs // platform/crates/ai-service/src/lib.rs
#[async_trait] #[async_trait]
@ -609,7 +609,7 @@ impl AIService for AIServiceImpl {
### MCP Server Integration ### MCP Server Integration
```rust ```
// platform/crates/mcp-server/src/lib.rs // platform/crates/mcp-server/src/lib.rs
pub struct MCPClient { pub struct MCPClient {
@ -675,7 +675,7 @@ impl ToolRegistry {
### RAG System Implementation ### RAG System Implementation
```rust ```
// platform/crates/rag/src/lib.rs // platform/crates/rag/src/lib.rs
pub struct RAGService { pub struct RAGService {
@ -750,7 +750,7 @@ pub struct QdrantStore {
### typdialog-ai Integration ### typdialog-ai Integration
```rust ```
// typdialog-ai/src/form_assistant.rs // typdialog-ai/src/form_assistant.rs
pub struct FormAssistant { pub struct FormAssistant {
@ -813,7 +813,7 @@ impl FormAssistant {
### typdialog-ag Agents ### typdialog-ag Agents
```rust ```
// typdialog-ag/src/agent.rs // typdialog-ag/src/agent.rs
pub struct ProvisioningAgent { pub struct ProvisioningAgent {
@ -891,7 +891,7 @@ impl ProvisioningAgent {
### Cedar Policies for AI ### Cedar Policies for AI
```cedar ```
// AI cannot access secrets without explicit permission // AI cannot access secrets without explicit permission
forbid( forbid(
principal == Service::"ai-service", principal == Service::"ai-service",
@ -931,7 +931,7 @@ forbid(
## Testing Strategy ## Testing Strategy
**Unit Tests**: **Unit Tests**:
```rust ```
#[tokio::test] #[tokio::test]
async fn test_ai_config_generation_validates() { async fn test_ai_config_generation_validates() {
let ai_service = mock_ai_service(); let ai_service = mock_ai_service();
@ -960,7 +960,7 @@ async fn test_ai_cannot_access_secrets() {
``` ```
**Integration Tests**: **Integration Tests**:
```rust ```
#[tokio::test] #[tokio::test]
async fn test_end_to_end_ai_config_generation() { async fn test_end_to_end_ai_config_generation() {
// User provides natural language // User provides natural language
@ -991,7 +991,7 @@ async fn test_end_to_end_ai_config_generation() {
``` ```
**RAG Quality Tests**: **RAG Quality Tests**:
```rust ```
#[tokio::test] #[tokio::test]
async fn test_rag_retrieval_accuracy() { async fn test_rag_retrieval_accuracy() {
let rag = rag_service(); let rag = rag_service();
@ -1018,7 +1018,7 @@ async fn test_rag_retrieval_accuracy() {
## Security Considerations ## Security Considerations
**AI Access Control**: **AI Access Control**:
```bash ```
AI Service Permissions (enforced by Cedar): AI Service Permissions (enforced by Cedar):
✅ CAN: Read Nickel schemas ✅ CAN: Read Nickel schemas
✅ CAN: Generate configurations ✅ CAN: Generate configurations
@ -1031,7 +1031,7 @@ AI Service Permissions (enforced by Cedar):
``` ```
**Data Privacy**: **Data Privacy**:
```toml ```
[ai.privacy] [ai.privacy]
# Sanitize before sending to LLM # Sanitize before sending to LLM
sanitize_secrets = true sanitize_secrets = true
@ -1048,7 +1048,7 @@ sanitize_credentials = true
``` ```
**Audit Trail**: **Audit Trail**:
```rust ```
// Every AI operation logged // Every AI operation logged
pub struct AIAuditLog { pub struct AIAuditLog {
timestamp: DateTime<Utc>, timestamp: DateTime<Utc>,
@ -1066,7 +1066,7 @@ pub struct AIAuditLog {
**Estimated Costs** (per month, based on typical usage): **Estimated Costs** (per month, based on typical usage):
```text ```
Assumptions: Assumptions:
- 100 active users - 100 active users
- 10 AI config generations per user per day - 10 AI config generations per user per day

View File

@ -0,0 +1,160 @@
# ADR-016: Schema-Driven Accessor Generation Pattern
**Status**: Proposed
**Date**: 2026-01-13
**Author**: Architecture Team
**Supersedes**: Manual accessor maintenance in `lib_provisioning/config/accessor.nu`
## Context
The `lib_provisioning/config/accessor.nu` file contains 1567 lines across 187 accessor functions. Analysis reveals that 95% of these functions follow
an identical mechanical pattern:
```
export def get-{field-name} [--config: record] {
config-get "{path.to.field}" {default_value} --config $config
}
```
This represents significant technical debt:
1. **Manual Maintenance Burden**: Adding a new config field requires manually writing a new accessor function
2. **Schema Drift Risk**: No automated validation that accessor matches the actual Nickel schema
3. **Code Duplication**: Nearly identical functions across 187 definitions
4. **Testing Complexity**: Each accessor requires manual testing
## Problem Statement
**Current Architecture**:
- Nickel schemas define configuration structure (source of truth)
- Accessor functions manually mirror the schema structure
- No automated synchronization between schema and accessors
- High risk of accessor-schema mismatch
**Key Metrics**:
- 1567 lines of accessor code
- 187 repetitive functions
- ~95% code similarity
## Decision
Implement **Schema-Driven Accessor Generation**: automatically generate accessor functions from Nickel schema definitions.
### Architecture
```
Nickel Schema (contracts.ncl)
[Parse & Extract Schema Structure]
[Generate Nushell Functions]
accessor_generated.nu (800 lines)
[Validation & Integration]
CI/CD enforces: generated code is up to date with the current schema hash
```
### Generation Process
1. **Schema Parsing**: Extract field paths, types, and defaults from Nickel contracts
2. **Code Generation**: Create accessor functions with Nushell 0.109 compliance
3. **Validation**: Verify generated code against schema
4. **CI Integration**: Detect schema changes, validate generated code matches
### Compliance Requirements
**Nushell 0.109 Guidelines**:
- No `try-catch` blocks (use `do-complete` pattern)
- No `reduce --init` (use `reduce --fold`)
- No mutable variables (use immutable bindings)
- No type annotations on boolean flags
- Use `each` not `map`, `is-not-empty` not `length`
**Nickel Compliance**:
- Schema-first design (schema is source of truth)
- Type contracts enforce structure
- `| doc` before `| default` ordering
## Consequences
### Positive
- **Elimination of Manual Maintenance**: New config fields automatically get accessors
- **Zero Schema Drift**: Automatic validation ensures accessors match schema
- **Reduced Code Size**: 1567 lines → ~400 lines (manual core) + ~800 lines (generated)
- **Type Safety**: Generated code guarantees type correctness
- **Consistency**: All 187 functions use identical pattern
### Negative
- **Tool Complexity**: Generator must parse Nickel and emit valid Nushell
- **CI/CD Changes**: Build must validate schema hash
- **Initial Migration**: One-time effort to verify generated code matches manual versions
## Implementation Strategy
1. **Create Generator** (`tools/codegen/accessor_generator.nu`)
- Parse Nickel schema files
- Extract paths, types, defaults
- Generate valid Nushell code
- Emit with proper formatting
2. **Generate Accessors** (`lib_provisioning/config/accessor_generated.nu`)
- Run generator on `provisioning/schemas/config/settings/contracts.ncl`
- Output 187 accessor functions
- Verify compatibility with existing code
3. **Validation**
- Integration tests comparing manual vs generated output
- Signature validator ensuring generated functions match patterns
- CI check for schema hash validity
4. **Gradual Adoption**
- Keep manual accessors temporarily
- Feature flag to switch between manual and generated
- Gradual migration of dependent code
## Testing Strategy
1. **Unit Tests**
- Each generated accessor returns correct type
- Default values applied correctly
- Path resolution handles nested fields
2. **Integration Tests**
- Generated accessors produce identical output to manual versions
- Config loading pipeline works with generated accessors
- Fallback behavior preserved
3. **Regression Tests**
- All existing config access patterns work
- Performance within 5% of manual version
- No breaking changes to public API
## Related ADRs
- **ADR-010**: Configuration Format Strategy (TOML/YAML/Nickel)
- **ADR-011**: Nickel Migration (schema-first architecture)
## Open Questions
1. Should accessors be regenerated on every build or only on schema changes?
2. How do we handle conditional fields (if X then Y)?
3. What's the fallback strategy if generator fails?
## Timeline
- **Phase 1**: Generator implementation (foundation)
- **Phase 2**: Generate and validate accessor functions
- **Phase 3**: Integration tests and feature flags
- **Phase 4**: Full migration and manual code removal
## References
- Nickel Language: [https://nickel-lang.org/](https://nickel-lang.org/)
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
- Current Accessor Implementation: `provisioning/core/nulib/lib_provisioning/config/accessor.nu`
- Schema Source: `provisioning/schemas/config/settings/contracts.ncl`

View File

@ -0,0 +1,226 @@
# ADR-017: Plugin Wrapper Abstraction Framework
**Status**: Proposed
**Date**: 2026-01-13
**Author**: Architecture Team
**Supersedes**: Manual plugin wrapper implementations in `lib_provisioning/plugins/`
## Context
The provisioning system integrates with four critical plugins, each with its own wrapper layer:
1. **auth.nu** (1066 lines) - Authentication plugin wrapper
2. **orchestrator.nu** (~500 lines) - Orchestrator plugin wrapper
3. **secretumvault.nu** (~500 lines) - Secrets vault plugin wrapper
4. **kms.nu** (~500 lines) - Key management service plugin wrapper
Analysis reveals ~90% code duplication across these wrappers:
```
# Pattern repeated 4 times with minor variations:
export def plugin-available? [] {
# Check if plugin is installed
}
export def try-plugin-call [method args] {
# Try to call the plugin
# On failure, fallback to HTTP
}
export def http-fallback-call [endpoint method args] {
# HTTP endpoint fallback
}
```
## Problem Statement
**Current Architecture**:
- Each plugin has manual wrapper implementation
- ~3000 total lines across 4 files
- Boilerplate code repeated for each plugin method
- HTTP fallback logic duplicated
- Error handling inconsistent
- Testing each wrapper requires custom setup
**Key Metrics**:
- 3000 lines of plugin wrapper code
- 90% code similarity
- ~73% reduction opportunity (3000 lines → ~800 lines of proxy plus generated code)
## Decision
Implement **Plugin Wrapper Abstraction Framework**: replace manual plugin wrappers with a generic proxy framework + declarative YAML definitions.
### Architecture
```
Plugin Definition (YAML)
├─ plugin: auth
├─ methods:
│ ├─ login(username, password)
│ ├─ logout()
│ └─ status()
└─ http_endpoint: http://localhost:8001
Generic Plugin Proxy Framework
├─ availability() - Check if plugin installed
├─ call() - Try plugin, fallback to HTTP
├─ http_fallback() - HTTP call with retry
└─ error_handler() - Consistent error handling
Generated Wrappers
├─ auth_wrapper.nu (150 lines, autogenerated)
├─ orchestrator_wrapper.nu (150 lines)
├─ vault_wrapper.nu (150 lines)
└─ kms_wrapper.nu (150 lines)
```
### Mechanism
**Plugin Call Flow**:
1. **Check Availability**: Is plugin installed and running?
2. **Try Plugin Call**: Execute plugin method with timeout
3. **On Failure**: Fall back to HTTP endpoint
4. **Error Handling**: Unified error response format
5. **Retry Logic**: Configurable retry with exponential backoff
### Error Handling Pattern
**Nushell 0.109 Compliant** (do-complete pattern, no try-catch):
```
def call-plugin-with-fallback [method: string args: record] {
let plugin_result = (
do {
# Try plugin call
call-plugin $method $args
} | complete
)
if $plugin_result.exit_code != 0 {
# Fall back to HTTP
call-http-endpoint $method $args
} else {
$plugin_result.stdout | from json
}
}
```
## Consequences
### Positive
- **~73% Code Reduction**: 3000 lines → 200 (proxy) + 600 (generated)
- **Consistency**: All plugins use identical call pattern
- **Maintainability**: Single proxy implementation vs 4 wrapper files
- **Testability**: Mock proxy for testing, no plugin-specific setup needed
- **Extensibility**: New plugins require only YAML definition
### Negative
- **Abstraction Overhead**: Proxy layer adds indirection
- **YAML Schema**: Must maintain schema for plugin definitions
- **Migration Risk**: Replacing working code requires careful testing
## Implementation Strategy
1. **Create Generic Proxy** (`lib_provisioning/plugins/proxy.nu`)
- Plugin availability detection
- Call execution with error handling
- HTTP fallback mechanism
- Retry logic with backoff
2. **Define Plugin Schema** (`lib_provisioning/plugins/definitions/plugin.schema.yaml`)
- Plugin metadata (name, http_endpoint)
- Method definitions (parameters, return types)
- Fallback configuration (retry count, timeout)
3. **Plugin Definitions** (`lib_provisioning/plugins/definitions/`)
- `auth.yaml` - Authentication plugin
- `orchestrator.yaml` - Orchestrator plugin
- `secretumvault.yaml` - Secrets vault plugin
- `kms.yaml` - Key management service plugin
4. **Code Generator** (`tools/codegen/plugin_wrapper_generator.nu`)
- Parse plugin YAML definitions
- Generate wrapper functions
- Ensure Nushell 0.109 compliance
5. **Integration**
- Feature flag: `$env.PROVISIONING_USE_GENERATED_PLUGINS`
- Gradual migration from manual to generated wrappers
- Full compatibility with existing code
## Testing Strategy
1. **Unit Tests**
- Plugin availability detection
- Successful plugin calls
- HTTP fallback on plugin failure
- Error handling and retry logic
2. **Integration Tests**
- Real plugin calls with actual plugins
- Mock HTTP server for fallback testing
- Timeout handling
- Retry with backoff
3. **Contract Tests**
- Plugin method signatures match definitions
- Return values have expected structure
- Error responses consistent
## Plugin Definitions
### auth.yaml Example
```
plugin: auth
http_endpoint: http://localhost:8001
methods:
login:
params:
username: string
password: string
returns: {token: string}
logout:
params: {}
returns: {status: string}
status:
params: {}
returns: {authenticated: bool}
```
## Rollback Strategy
**Feature Flag Approach**:
```
# Use original manual wrappers
export PROVISIONING_USE_GENERATED_PLUGINS=false
# Use new generated proxy framework
export PROVISIONING_USE_GENERATED_PLUGINS=true
```
Allows parallel operation and gradual migration.
## Related ADRs
- **ADR-012**: Nushell/Nickel Plugin CLI Wrapper
- **ADR-013**: TypeDialog Integration (forms for plugin configuration)
## Open Questions
1. Should plugin definitions be YAML or Nickel?
2. How do we handle plugin discovery automatically?
3. What's the expected HTTP endpoint format for all plugins?
4. Should retry logic be configurable per plugin?
## References
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
- Do-Complete Pattern: Error handling without try-catch
- Plugin Framework: `provisioning/core/nulib/lib_provisioning/plugins/`

View File

@ -0,0 +1,281 @@
# ADR-018: Help System Fluent Integration & Data-Driven Architecture
**Status**: Proposed
**Date**: 2026-01-13
**Author**: Architecture Team
**Supersedes**: Hardcoded help strings in `main_provisioning/help_system.nu`
## Context
The current help system in `main_provisioning/help_system.nu` (1303 lines) consists almost entirely of hardcoded string concatenation with embedded
ANSI formatting codes:
```
def help-infrastructure [] {
print "╔════════════════════════════════════════════════════╗"
print "║ SERVER & INFRASTRUCTURE ║"
print "╚════════════════════════════════════════════════════╝"
}
```
**Current Problems**:
1. **No Internationalization**: Help text trapped in English-only code
2. **Hard to Maintain**: Updating text requires editing Nushell code
3. **Mixed Concerns**: Content (strings) mixed with presentation (ANSI codes)
4. **No Hot-Reload**: Changes require recompilation
5. **Difficult to Test**: String content buried in function definitions
## Problem Statement
**Metrics**:
- 1303 lines of code-embedded help text
- 17 help categories with 65 strings total
- All help functions manually maintained
- No separation of data from presentation
## Decision
Implement **Data-Driven Help with Mozilla Fluent Integration**:
1. Extract help content to Fluent files (`.ftl` format)
2. Support multilingual help (English base, Spanish translations)
3. Implement runtime language resolution via `LANG` environment variable
4. Reduce help_system.nu to wrapper functions only
### Architecture
```
Help Content (Fluent Files)
├─ en-US/help.ftl (65 strings - English base)
└─ es-ES/help.ftl (65 strings - Spanish translations)
Language Detection & Loading
├─ Check LANG environment variable
├─ Load appropriate Fluent file
└─ Implement fallback chain (es-ES → en-US)
Help System Wrapper
├─ help-main [] - Display main menu
├─ help-infrastructure [] - Infrastructure category
├─ help-orchestration [] - Orchestration category
└─ help-setup [] - Setup category
User Interface
├─ LANG=en_US provisioning help infrastructure
└─ LANG=es_ES provisioning help infrastructure
```
## Implementation
### 1. Fluent File Structure
**en-US/help.ftl**:
```
help-main-title = PROVISIONING SYSTEM
help-main-subtitle = Layered Infrastructure Automation
help-main-categories = COMMAND CATEGORIES
help-main-categories-hint = Use 'provisioning help <category>' for details
help-main-infrastructure-name = infrastructure
help-main-infrastructure-desc = Server, taskserv, cluster, VM, and infra management
help-main-orchestration-name = orchestration
help-main-orchestration-desc = Workflow, batch operations, and orchestrator control
help-infrastructure-title = SERVER & INFRASTRUCTURE
help-infra-server = Server Operations
help-infra-server-create = Create a new server
help-infra-server-list = List all servers
help-infra-server-status = Show server status
help-infra-taskserv = TaskServ Management
help-infra-taskserv-create = Deploy taskserv to server
help-infra-cluster = Cluster Management
help-infra-vm = Virtual Machine Operations
help-orchestration-title = ORCHESTRATION & WORKFLOWS
help-orch-control = Orchestrator Management
help-orch-start = Start orchestrator [--background]
help-orch-workflows = Single Task Workflows
help-orch-batch = Multi-Provider Batch Operations
```
**es-ES/help.ftl** (Spanish translations):
```
help-main-title = SISTEMA DE PROVISIÓN
help-main-subtitle = Automatización de Infraestructura por Capas
help-main-categories = CATEGORÍAS DE COMANDOS
help-main-categories-hint = Use 'provisioning help <categoría>' para más detalles
help-main-infrastructure-name = infraestructura
help-main-infrastructure-desc = Gestión de servidores, taskserv, clusters, VM e infraestructura
help-main-orchestration-name = orquestación
help-main-orchestration-desc = Flujos de trabajo, operaciones por lotes y control del orquestador
help-infrastructure-title = SERVIDOR E INFRAESTRUCTURA
help-infra-server = Operaciones de Servidor
help-infra-server-create = Crear un nuevo servidor
help-infra-server-list = Listar todos los servidores
help-infra-server-status = Mostrar estado del servidor
help-infra-taskserv = Gestión de TaskServ
help-infra-taskserv-create = Desplegar taskserv en servidor
help-infra-cluster = Gestión de Clusters
help-infra-vm = Operaciones de Máquinas Virtuales
help-orchestration-title = ORQUESTACIÓN Y FLUJOS DE TRABAJO
help-orch-control = Gestión del Orquestador
help-orch-start = Iniciar orquestador [--background]
help-orch-workflows = Flujos de Trabajo de Tarea Única
help-orch-batch = Operaciones por Lotes Multi-Proveedor
```
### 2. Fluent Loading in Nushell
```
def load-fluent-file [category: string] {
let lang = ($env.LANG? | default "en_US" | str replace "_" "-")
let fluent_path = $"provisioning/locales/($lang)/help.ftl"
# Parse Fluent file and extract strings for category
# Fallback to en-US if lang not available
}
```
### 3. Help System Wrapper
```
export def help-infrastructure [] {
let strings = (load-fluent-file "infrastructure")
# Apply formatting and render
print $"╔════════════════════════════════════════════════════╗"
print $"║ ($strings.title | str upcase) ║"
print $"╚════════════════════════════════════════════════════╝"
}
```
## Consequences
### Positive
- **Internationalization Ready**: Easy to add new languages (Portuguese, French, Japanese)
- **Data/Presentation Separation**: Content in Fluent, formatting in Nushell
- **Maintainability**: Edit Fluent files, not Nushell code
- **Hot-Reload Support**: Can update help text without recompilation
- **Testing**: Help content testable independently from rendering
- **Code Reduction**: 1303 lines → ~50 lines (wrapper) + ~700 lines (Fluent data)
### Negative
- **Tool Complexity**: Need Fluent parser and loader
- **Fallback Chain Management**: Must handle missing translations gracefully
- **Performance**: File I/O for loading translations (mitigated by caching)
## Integration Strategy
### Phase 1: Infrastructure & Extraction
- ✅ Create `provisioning/locales/` directory structure
- ✅ Create `i18n-config.toml` with locale configuration
- ✅ Extract strings to `en-US/help.ftl` (65 strings)
- ✅ Create Spanish translations `es-ES/help.ftl`
### Phase 2: Integration (This Task)
- [ ] Modify `help_system.nu` to load from Fluent
- [ ] Implement language detection (`$env.LANG`)
- [ ] Implement fallback chain logic
- [ ] Test with `LANG=en_US` and `LANG=es_ES`
### Phase 3: Validation & Documentation
- [ ] Comprehensive integration tests
- [ ] Performance benchmarks
- [ ] Documentation for adding new languages
- [ ] Examples in provisioning/docs/
## Language Resolution Flow
```
1. Check LANG environment variable
LANG=es_ES.UTF-8 → extract "es_ES" or "es-ES"
2. Check if locale file exists
provisioning/locales/es-ES/help.ftl exists? → YES
3. Load locale file
Parse and extract help strings
4. On missing key:
Check fallback chain in i18n-config.toml
es-ES → en-US
5. Render with formatting
Apply ANSI codes, boxes, alignment
```
## Testing Strategy
### Unit Tests
```
# Test language detection
LANG=en_US provisioning help infrastructure
# Expected: English output
LANG=es_ES provisioning help infrastructure
# Expected: Spanish output
LANG=fr_FR provisioning help infrastructure
# Expected: Fallback to English (fr-FR not available)
```
## File Structure
```
provisioning/
├── locales/
│ ├── i18n-config.toml # Locale metadata & fallback chains
│ ├── en-US/
│ │ └── help.ftl # 65 English help strings
│ └── es-ES/
│ └── help.ftl # 65 Spanish help strings
└── core/nulib/main_provisioning/
└── help_system.nu # ~50 lines (wrapper only)
```
## Configuration
**i18n-config.toml** defines:
```
[locales]
default = "en-US"
fallback = "en-US"
[locales.en-US]
name = "English (United States)"
[locales.es-ES]
name = "Spanish (Spain)"
[fallback_chains]
es-ES = ["en-US"]
```
## Related ADRs
- **ADR-010**: Configuration Format Strategy
- **ADR-011**: Nickel Migration
- **ADR-013**: TypeDialog Integration (forms also use Fluent)
## Open Questions
1. Should help strings support Fluent attributes for metadata?
2. Should we implement Fluent caching for performance?
3. How do we handle dynamic help (commands not in Fluent)?
4. Should help system auto-update when Fluent files change?
## References
- Mozilla Fluent: [https://projectfluent.org/](https://projectfluent.org/)
- Fluent Syntax: [https://projectfluent.org/fluent/guide/](https://projectfluent.org/fluent/guide/)
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
- Current Help Implementation: `provisioning/core/nulib/main_provisioning/help_system.nu`
- Fluent Files: `provisioning/locales/{en-US,es-ES}/help.ftl`

View File

@ -0,0 +1,263 @@
# ADR-019: Configuration Loader Modularization
**Status**: Proposed
**Date**: 2026-01-13
**Author**: Architecture Team
**Supersedes**: Monolithic loader in `lib_provisioning/config/loader.nu`
## Context
The `lib_provisioning/config/loader.nu` file (2199 lines) is a monolithic implementation mixing multiple unrelated concerns:
```
Current Structure (2199 lines):
├─ Cache lookup/storage (300 lines)
├─ Nickel evaluation (400 lines)
├─ TOML/YAML parsing (250 lines)
├─ Environment variable loading (200 lines)
├─ Configuration hierarchy merging (400 lines)
├─ Validation logic (250 lines)
├─ Error handling (200 lines)
└─ Helper utilities (150 lines)
```
**Problems**:
1. **Single Responsibility Violation**: One file handling 7 different concerns
2. **Testing Difficulty**: Can't test TOML parsing without cache setup
3. **Change Amplification**: Modifying one component affects entire file
4. **Code Reuse**: Hard to reuse individual loaders in other contexts
5. **Maintenance Burden**: 2199 lines of tightly coupled code
## Problem Statement
**Metrics**:
- 2199 lines in single file
- 7 distinct responsibilities mixed together
- Hard to test individual components
- Changes in one area risk breaking others
## Decision
Implement **Layered Loader Architecture**: decompose monolithic loader into specialized, testable modules with a thin orchestrator.
### Target Architecture
```
lib_provisioning/config/
├── loader.nu # ORCHESTRATOR (< 300 lines)
│ └─ Coordinates loading pipeline
├── loaders/ # SPECIALIZED LOADERS
│ ├── nickel_loader.nu # Nickel evaluation + cache (150 lines)
│ ├── toml_loader.nu # TOML parsing (80 lines)
│ ├── yaml_loader.nu # YAML parsing (80 lines)
│ ├── env_loader.nu # Environment variables (100 lines)
│ └── hierarchy.nu # Configuration merging (200 lines)
├── cache/ # EXISTING - already modular
│ ├── core.nu # Cache core
│ ├── nickel.nu # Nickel-specific caching
│ └── final.nu # Final config caching
└── validation/ # EXTRACTED
└── config_validator.nu # Validation rules (100 lines)
```
### Module Responsibilities
**loader.nu (Orchestrator)**:
- Define loading pipeline
- Coordinate loaders
- Handle high-level errors
- Return final config
**nickel_loader.nu**:
- Evaluate Nickel files
- Apply Nickel type contracts
- Cache Nickel evaluation results
- Handle schema validation
**toml_loader.nu**:
- Parse TOML configuration files
- Extract key-value pairs
- Validate TOML structure
- Return parsed records
**yaml_loader.nu**:
- Parse YAML configuration files
- Convert to Nushell records
- Handle YAML nesting
- Return normalized records
**env_loader.nu**:
- Load environment variables
- Filter by prefix (PROVISIONING_*)
- Override existing values
- Return environment records
**hierarchy.nu**:
- Merge multiple config sources
- Apply precedence rules
- Handle nested merging
- Return unified config
**config_validator.nu**:
- Validate against schema
- Check required fields
- Enforce type constraints
- Return validation results
## Consequences
### Positive
- **Separation of Concerns**: Each module has single responsibility
- **Testability**: Can unit test each loader independently
- **Reusability**: Loaders can be used in other contexts
- **Maintainability**: Changes isolated to specific module
- **Debugging**: Easier to isolate issues
- **Performance**: Can optimize individual loaders
### Negative
- **Increased Complexity**: More files to maintain
- **Integration Overhead**: Must coordinate between modules
- **Migration Effort**: Refactoring existing monolithic code
## Implementation Strategy
### Phase 1: Extract Specialized Loaders
Create each loader as independent module:
1. **toml_loader.nu**
```nushell
export def load-toml [path: string] {
let content = (open $path)
$content
}
```
2. **yaml_loader.nu**
```nushell
export def load-yaml [path: string] {
let content = (open --raw $path | from yaml)
$content
}
```
3. **env_loader.nu**
```nushell
export def load-environment [] {
$env
| transpose key value
| where key =~ '^PROVISIONING_'
}
```
4. **hierarchy.nu**
```nushell
export def merge-configs [base override] {
$base | merge $override
}
```
### Phase 2: Refactor Nickel Loader
Extract Nickel evaluation logic:
```
export def evaluate-nickel [file: string] {
let result = (
do {
^nickel export $file
} | complete
)
if $result.exit_code != 0 {
error make {msg: $result.stderr}
} else {
$result.stdout | from json
}
}
```
### Phase 3: Create Orchestrator
Implement thin loader.nu:
```
export def load-provisioning-config [] {
let env_config = (env-loader load-environment)
let toml_config = (toml-loader load-toml "config.toml")
let nickel_config = (nickel-loader evaluate-nickel "main.ncl")
let merged = (
{}
| hierarchy merge-configs $toml_config
| hierarchy merge-configs $nickel_config
| hierarchy merge-configs $env_config
)
let validated = (config-validator validate $merged)
$validated
}
```
### Phase 4: Testing
Create test for each module:
```
tests/config/
├── loaders/
│ ├── test_nickel_loader.nu
│ ├── test_toml_loader.nu
│ ├── test_yaml_loader.nu
│ ├── test_env_loader.nu
│ └── test_hierarchy.nu
└── test_orchestrator.nu
```
## Performance Considerations
**Baseline**: Current monolithic loader ~500ms
**Layered Architecture**:
- Individual loaders: ~50-100ms each
- Orchestration: ~50ms
- Total expected: ~400-500ms (at or below the current 500ms baseline)
**Optimization**:
- Cache Nickel evaluation (largest cost)
- Lazy load YAML (if rarely used)
- Environment variable filtering
## Backward Compatibility
**Public API Unchanged**:
```
# Current usage (unchanged)
let config = (load-provisioning-config)
```
**Internal Only**: Refactoring is internal to loader module, no breaking changes to consumers.
## Related ADRs
- **ADR-010**: Configuration Format Strategy
- **ADR-011**: Nickel Migration
- **ADR-016**: Schema-Driven Accessor Generation
## Open Questions
1. Should each loader have its own cache layer?
2. How do we handle circular dependencies between loaders?
3. Should validation run after each loader or only at end?
4. What's the rollback strategy if orchestration fails?
## References
- Current Implementation: `provisioning/core/nulib/lib_provisioning/config/loader.nu`
- Cache System: `provisioning/core/nulib/lib_provisioning/config/cache/`
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`

View File

@ -0,0 +1,313 @@
# ADR-020: Command Handler Domain Splitting
**Status**: Proposed
**Date**: 2026-01-13
**Author**: Architecture Team
**Supersedes**: Monolithic command handlers in `main_provisioning/commands/`
## Context
Two large monolithic command handler files mix disparate domains:
**commands/utilities.nu** (1112 lines):
- SSH operations (150 lines)
- SOPS secret editing (200 lines)
- Cache management (180 lines)
- Provider listing (100 lines)
- Plugin operations (150 lines)
- Shell information (80 lines)
- Guide system (120 lines)
- QR code generation (50 lines)
**commands/integrations.nu** (1184 lines):
- prov-ecosystem bridge (400 lines)
- provctl integration (350 lines)
- External API calls (434 lines)
**Problem Statement**:
1. **Mixed Concerns**: Each file handles 7-10 unrelated domains
2. **Navigation Difficulty**: Hard to find specific functionality
3. **Testing Complexity**: Can't test SSH without SOPS setup
4. **Reusability**: Command logic locked in monolithic files
5. **Maintenance Burden**: Changes in one domain affect entire file
## Decision
Implement **Domain-Based Command Modules**: split monolithic handlers into focused domain modules organized by responsibility.
### Target Architecture
```
main_provisioning/commands/
├── dispatcher.nu # Routes commands to domain handlers
├── utilities/ # Split by domain
│ ├── ssh.nu # SSH operations (150 lines)
│ ├── sops.nu # SOPS editing (200 lines)
│ ├── cache.nu # Cache management (180 lines)
│ ├── providers.nu # Provider listing (100 lines)
│ ├── plugins.nu # Plugin operations (150 lines)
│ ├── shell.nu # Shell information (80 lines)
│ ├── guides.nu # Guide system (120 lines)
│ └── qr.nu # QR code generation (50 lines)
└── integrations/ # Split by integration
├── prov_ecosystem.nu # Prov-ecosystem bridge (400 lines)
├── provctl.nu # Provctl integration (350 lines)
└── external_apis.nu # External API calls (434 lines)
```
### Module Organization
**utilities/ssh.nu**:
- SSH connection management
- Key management
- Remote command execution
- Connection pooling
**utilities/sops.nu**:
- SOPS secret file editing
- Encryption/decryption
- Key rotation
- Secret validation
**utilities/cache.nu**:
- Cache lookup
- Cache invalidation
- Cache statistics
- Cleanup operations
**utilities/providers.nu**:
- List available providers
- Provider capabilities
- Provider health check
- Provider registration
**utilities/plugins.nu**:
- Plugin discovery
- Plugin loading
- Plugin execution
- Plugin management
**utilities/shell.nu**:
- Nushell info
- Shell configuration
- Environment variables
- Shell capabilities
**utilities/guides.nu**:
- Guide listing
- Guide rendering
- Guide search
- Interactive guides
**utilities/qr.nu**:
- QR code generation
- QR code display
- Code formatting
- Error handling
**integrations/prov_ecosystem.nu**:
- Prov-ecosystem API calls
- Data synchronization
- Registry integration
- Extension discovery
**integrations/provctl.nu**:
- Provctl command bridge
- Orchestrator integration
- Workflow execution
- Status monitoring
**integrations/external_apis.nu**:
- Third-party API integration
- HTTP calls
- Data transformation
- Error handling
## Consequences
### Positive
- **Single Responsibility**: Each module handles one domain
- **Easier Navigation**: Find functionality by domain name
- **Testable**: Can test SSH independently from SOPS
- **Maintainable**: Changes isolated to domain module
- **Reusable**: Modules can be imported by other components
- **Scalable**: Easy to add new domains
### Negative
- **More Files**: 11 modules vs 2 monolithic files
- **Import Overhead**: More module imports needed
- **Coordination Complexity**: Dispatcher must route correctly
## Implementation Strategy
### Phase 1: Extract Utilities Domain
Create `utilities/` directory with 8 modules:
1. **utilities/ssh.nu** - Extract SSH operations
2. **utilities/sops.nu** - Extract SOPS operations
3. **utilities/cache.nu** - Extract cache operations
4. **utilities/providers.nu** - Extract provider operations
5. **utilities/plugins.nu** - Extract plugin operations
6. **utilities/shell.nu** - Extract shell operations
7. **utilities/guides.nu** - Extract guide operations
8. **utilities/qr.nu** - Extract QR operations
### Phase 2: Extract Integrations Domain
Create `integrations/` directory with 3 modules:
1. **integrations/prov_ecosystem.nu** - Extract prov-ecosystem
2. **integrations/provctl.nu** - Extract provctl
3. **integrations/external_apis.nu** - Extract external APIs
### Phase 3: Create Dispatcher
Implement `dispatcher.nu`:
```
export def provision-ssh [args] {
use ./utilities/ssh.nu *
handle-ssh-command $args
}
export def provision-sops [args] {
use ./utilities/sops.nu *
handle-sops-command $args
}
export def provision-cache [args] {
use ./utilities/cache.nu *
handle-cache-command $args
}
```
### Phase 4: Maintain Backward Compatibility
Keep public exports in original files for compatibility:
```
# commands/utilities.nu (compatibility layer)
use ./utilities/ssh.nu *
use ./utilities/sops.nu *
use ./utilities/cache.nu *
# Re-export all functions (unchanged public API)
export use ./utilities/ssh.nu
export use ./utilities/sops.nu
```
### Phase 5: Testing
Create test structure:
```
tests/commands/
├── utilities/
│ ├── test_ssh.nu
│ ├── test_sops.nu
│ ├── test_cache.nu
│ ├── test_providers.nu
│ ├── test_plugins.nu
│ ├── test_shell.nu
│ ├── test_guides.nu
│ └── test_qr.nu
└── integrations/
├── test_prov_ecosystem.nu
├── test_provctl.nu
└── test_external_apis.nu
```
## Module Interface Example
**utilities/ssh.nu**:
```
# Connect to remote host
export def ssh-connect [host: string --port: int = 22] {
# Implementation
}
# Execute remote command
export def ssh-exec [host: string command: string] {
# Implementation
}
# Close SSH connection
export def ssh-close [host: string] {
# Implementation
}
```
## File Structure
```
main_provisioning/commands/
├── dispatcher.nu # Route to domain handlers
├── utilities/
│ ├── mod.nu # Utilities module index
│ ├── ssh.nu # 150 lines
│ ├── sops.nu # 200 lines
│ ├── cache.nu # 180 lines
│ ├── providers.nu # 100 lines
│ ├── plugins.nu # 150 lines
│ ├── shell.nu # 80 lines
│ ├── guides.nu # 120 lines
│ └── qr.nu # 50 lines
├── integrations/
│ ├── mod.nu # Integrations module index
│ ├── prov_ecosystem.nu # 400 lines
│ ├── provctl.nu # 350 lines
│ └── external_apis.nu # 434 lines
└── README.md # Command routing guide
```
## CLI Interface (Unchanged)
Users see no change in CLI:
```
provisioning ssh host.example.com
provisioning sops edit config.yaml
provisioning cache clear
provisioning list providers
provisioning guide from-scratch
```
## Backward Compatibility Strategy
**Import Path Options**:
```
# Option 1: Import from domain module (new way)
use ./utilities/ssh.nu *
connect $host
# Option 2: Import from compatibility layer (old way)
use ./utilities.nu *
connect $host
```
Both paths work without breaking existing code.
## Related ADRs
- **ADR-006**: Provisioning CLI Refactoring
- **ADR-012**: Nushell/Nickel Plugin CLI Wrapper
## Open Questions
1. Should we create a module registry for discoverability?
2. Should domain modules be loadable as plugins?
3. How do we handle shared utilities between domains?
4. Should we implement hot-reloading for domain modules?
## References
- Current Implementation: `provisioning/core/nulib/main_provisioning/commands/`
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
- Module System: Nushell module documentation

View File

@ -43,7 +43,7 @@ The Provisioning Platform is a modern, cloud-native infrastructure automation sy
### Architecture at a Glance ### Architecture at a Glance
```plaintext ```
┌─────────────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────────────┐
│ Provisioning Platform │ │ Provisioning Platform │
├─────────────────────────────────────────────────────────────────────┤ ├─────────────────────────────────────────────────────────────────────┤
@ -93,7 +93,7 @@ The Provisioning Platform is a modern, cloud-native infrastructure automation sy
### High-Level Architecture ### High-Level Architecture
```plaintext ```
┌────────────────────────────────────────────────────────────────────────────┐ ┌────────────────────────────────────────────────────────────────────────────┐
│ PRESENTATION LAYER │ │ PRESENTATION LAYER │
├────────────────────────────────────────────────────────────────────────────┤ ├────────────────────────────────────────────────────────────────────────────┤
@ -191,7 +191,7 @@ The system is organized into three separate repositories:
#### **provisioning-core** #### **provisioning-core**
```plaintext ```
Core system functionality Core system functionality
├── CLI interface (Nushell entry point) ├── CLI interface (Nushell entry point)
├── Core libraries (lib_provisioning) ├── Core libraries (lib_provisioning)
@ -205,7 +205,7 @@ Core system functionality
#### **provisioning-extensions** #### **provisioning-extensions**
```plaintext ```
All provider, taskserv, cluster extensions All provider, taskserv, cluster extensions
├── providers/ ├── providers/
│ ├── aws/ │ ├── aws/
@ -229,7 +229,7 @@ All provider, taskserv, cluster extensions
#### **provisioning-platform** #### **provisioning-platform**
```plaintext ```
Platform services Platform services
├── orchestrator/ (Rust) ├── orchestrator/ (Rust)
├── control-center/ (Rust/Yew) ├── control-center/ (Rust/Yew)
@ -255,7 +255,7 @@ Platform services
**Architecture**: **Architecture**:
```plaintext ```
Main CLI (211 lines) Main CLI (211 lines)
Command Dispatcher (264 lines) Command Dispatcher (264 lines)
@ -281,7 +281,7 @@ Domain Handlers (7 modules)
**Hierarchical Loading**: **Hierarchical Loading**:
```plaintext ```
1. System defaults (config.defaults.toml) 1. System defaults (config.defaults.toml)
2. User config (~/.provisioning/config.user.toml) 2. User config (~/.provisioning/config.user.toml)
3. Workspace config (workspace/config/provisioning.yaml) 3. Workspace config (workspace/config/provisioning.yaml)
@ -303,7 +303,7 @@ Domain Handlers (7 modules)
**Architecture**: **Architecture**:
```rust ```
src/ src/
├── main.rs // Entry point ├── main.rs // Entry point
├── api/ ├── api/
@ -342,7 +342,7 @@ src/
**Workflow Types**: **Workflow Types**:
```plaintext ```
workflows/ workflows/
├── server_create.nu // Server provisioning ├── server_create.nu // Server provisioning
├── taskserv.nu // Task service management ├── taskserv.nu // Task service management
@ -371,7 +371,7 @@ workflows/
**Extension Structure**: **Extension Structure**:
```plaintext ```
extension-name/ extension-name/
├── schemas/ ├── schemas/
│ ├── main.ncl // Main schema │ ├── main.ncl // Main schema
@ -401,7 +401,7 @@ Each extension packaged as OCI artifact:
**Module System**: **Module System**:
```bash ```
# Discover available extensions # Discover available extensions
provisioning module discover taskservs provisioning module discover taskservs
@ -414,7 +414,7 @@ provisioning module list taskserv my-workspace
**Layer System** (Configuration Inheritance): **Layer System** (Configuration Inheritance):
```plaintext ```
Layer 1: Core (provisioning/extensions/{type}/{name}) Layer 1: Core (provisioning/extensions/{type}/{name})
Layer 2: Workspace (workspace/extensions/{type}/{name}) Layer 2: Workspace (workspace/extensions/{type}/{name})
@ -438,7 +438,7 @@ Layer 3: Infrastructure (workspace/infra/{infra}/extensions/{type}/{name})
**Example**: **Example**:
```nickel ```
let { TaskservDependencies } = import "provisioning/dependencies.ncl" in let { TaskservDependencies } = import "provisioning/dependencies.ncl" in
{ {
kubernetes = TaskservDependencies { kubernetes = TaskservDependencies {
@ -467,7 +467,7 @@ let { TaskservDependencies } = import "provisioning/dependencies.ncl" in
**Lifecycle Management**: **Lifecycle Management**:
```bash ```
# Start all auto-start services # Start all auto-start services
provisioning platform start provisioning platform start
@ -485,7 +485,7 @@ provisioning platform logs orchestrator --follow
**Architecture**: **Architecture**:
```plaintext ```
User Command (CLI) User Command (CLI)
Test Orchestrator (Rust) Test Orchestrator (Rust)
@ -520,7 +520,7 @@ The platform supports four operational modes that adapt the system from individu
### Mode Comparison ### Mode Comparison
```plaintext ```
┌───────────────────────────────────────────────────────────────────────┐ ┌───────────────────────────────────────────────────────────────────────┐
│ MODE ARCHITECTURE │ │ MODE ARCHITECTURE │
├───────────────┬───────────────┬───────────────┬───────────────────────┤ ├───────────────┬───────────────┬───────────────┬───────────────────────┤
@ -562,7 +562,7 @@ The platform supports four operational modes that adapt the system from individu
**Switching Modes**: **Switching Modes**:
```bash ```
# Check current mode # Check current mode
provisioning mode current provisioning mode current
@ -577,7 +577,7 @@ provisioning mode validate enterprise
#### Solo Mode #### Solo Mode
```bash ```
# 1. Default mode, no setup needed # 1. Default mode, no setup needed
provisioning workspace init provisioning workspace init
@ -590,7 +590,7 @@ provisioning server create
#### Multi-User Mode #### Multi-User Mode
```bash ```
# 1. Switch mode and authenticate # 1. Switch mode and authenticate
provisioning mode switch multi-user provisioning mode switch multi-user
provisioning auth login provisioning auth login
@ -609,7 +609,7 @@ provisioning workspace unlock my-infra
#### CI/CD Mode #### CI/CD Mode
```yaml ```
# GitLab CI # GitLab CI
deploy: deploy:
stage: deploy stage: deploy
@ -626,7 +626,7 @@ deploy:
#### Enterprise Mode #### Enterprise Mode
```bash ```
# 1. Switch to enterprise, verify K8s # 1. Switch to enterprise, verify K8s
provisioning mode switch enterprise provisioning mode switch enterprise
kubectl get pods -n provisioning-system kubectl get pods -n provisioning-system
@ -654,7 +654,7 @@ provisioning workspace unlock prod-deployment
### Service Communication ### Service Communication
```plaintext ```
┌──────────────────────────────────────────────────────────────────────┐ ┌──────────────────────────────────────────────────────────────────────┐
│ NETWORK LAYER │ │ NETWORK LAYER │
├──────────────────────────────────────────────────────────────────────┤ ├──────────────────────────────────────────────────────────────────────┤
@ -732,7 +732,7 @@ provisioning workspace unlock prod-deployment
### Data Storage ### Data Storage
```plaintext ```
┌────────────────────────────────────────────────────────────────┐ ┌────────────────────────────────────────────────────────────────┐
│ DATA LAYER │ │ DATA LAYER │
├────────────────────────────────────────────────────────────────┤ ├────────────────────────────────────────────────────────────────┤
@ -813,7 +813,7 @@ provisioning workspace unlock prod-deployment
**Configuration Loading**: **Configuration Loading**:
```plaintext ```
1. Load system defaults (config.defaults.toml) 1. Load system defaults (config.defaults.toml)
2. Merge user config (~/.provisioning/config.user.toml) 2. Merge user config (~/.provisioning/config.user.toml)
3. Load workspace config (workspace/config/provisioning.yaml) 3. Load workspace config (workspace/config/provisioning.yaml)
@ -824,7 +824,7 @@ provisioning workspace unlock prod-deployment
**State Persistence**: **State Persistence**:
```plaintext ```
Workflow execution Workflow execution
Create checkpoint (JSON) Create checkpoint (JSON)
@ -836,7 +836,7 @@ On failure, load checkpoint and resume
**OCI Artifact Flow**: **OCI Artifact Flow**:
```plaintext ```
1. Package extension (oci-package.nu) 1. Package extension (oci-package.nu)
2. Push to OCI registry (provisioning oci push) 2. Push to OCI registry (provisioning oci push)
3. Extension stored as OCI artifact 3. Extension stored as OCI artifact
@ -850,7 +850,7 @@ On failure, load checkpoint and resume
### Security Layers ### Security Layers
```plaintext ```
┌─────────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────────┐
│ SECURITY ARCHITECTURE │ │ SECURITY ARCHITECTURE │
├─────────────────────────────────────────────────────────────────┤ ├─────────────────────────────────────────────────────────────────┤
@ -921,7 +921,7 @@ On failure, load checkpoint and resume
**SOPS Integration**: **SOPS Integration**:
```bash ```
# Edit encrypted file # Edit encrypted file
provisioning sops workspace/secrets/keys.yaml.enc provisioning sops workspace/secrets/keys.yaml.enc
@ -931,7 +931,7 @@ provisioning sops workspace/secrets/keys.yaml.enc
**KMS Integration** (Enterprise): **KMS Integration** (Enterprise):
```yaml ```
# workspace/config/provisioning.yaml # workspace/config/provisioning.yaml
secrets: secrets:
provider: "kms" provider: "kms"
@ -945,7 +945,7 @@ secrets:
**CI/CD Mode** (Required): **CI/CD Mode** (Required):
```bash ```
# Sign OCI artifact # Sign OCI artifact
cosign sign oci://registry/kubernetes:1.28.0 cosign sign oci://registry/kubernetes:1.28.0
@ -955,7 +955,7 @@ cosign verify oci://registry/kubernetes:1.28.0
**Enterprise Mode** (Mandatory): **Enterprise Mode** (Mandatory):
```bash ```
# Pull with verification # Pull with verification
provisioning extension pull kubernetes --verify-signature provisioning extension pull kubernetes --verify-signature
@ -970,7 +970,7 @@ provisioning extension pull kubernetes --verify-signature
#### 1. **Binary Deployment** (Solo, Multi-user) #### 1. **Binary Deployment** (Solo, Multi-user)
```plaintext ```
User Machine User Machine
├── ~/.provisioning/bin/ ├── ~/.provisioning/bin/
│ ├── provisioning-orchestrator │ ├── provisioning-orchestrator
@ -986,7 +986,7 @@ User Machine
#### 2. **Docker Deployment** (Multi-user, CI/CD) #### 2. **Docker Deployment** (Multi-user, CI/CD)
```plaintext ```
Docker Daemon Docker Daemon
├── Container: provisioning-orchestrator ├── Container: provisioning-orchestrator
├── Container: provisioning-control-center ├── Container: provisioning-control-center
@ -1001,7 +1001,7 @@ Docker Daemon
#### 3. **Docker Compose Deployment** (Multi-user) #### 3. **Docker Compose Deployment** (Multi-user)
```yaml ```
# provisioning/platform/docker-compose.yaml # provisioning/platform/docker-compose.yaml
services: services:
orchestrator: orchestrator:
@ -1039,7 +1039,7 @@ services:
#### 4. **Kubernetes Deployment** (CI/CD, Enterprise) #### 4. **Kubernetes Deployment** (CI/CD, Enterprise)
```yaml ```
# Namespace: provisioning-system # Namespace: provisioning-system
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
@ -1085,7 +1085,7 @@ spec:
#### 5. **Remote Deployment** (All modes) #### 5. **Remote Deployment** (All modes)
```yaml ```
# Connect to remotely-running services # Connect to remotely-running services
services: services:
orchestrator: orchestrator:
@ -1108,7 +1108,7 @@ services:
#### 1. **Hybrid Language Integration** (Rust ↔ Nushell) #### 1. **Hybrid Language Integration** (Rust ↔ Nushell)
```plaintext ```
Rust Orchestrator Rust Orchestrator
↓ (HTTP API) ↓ (HTTP API)
Nushell CLI Nushell CLI
@ -1124,7 +1124,7 @@ File-based Task Queue
#### 2. **Provider Abstraction** #### 2. **Provider Abstraction**
```plaintext ```
Unified Provider Interface Unified Provider Interface
├── create_server(config) -> Server ├── create_server(config) -> Server
├── delete_server(id) -> bool ├── delete_server(id) -> bool
@ -1139,7 +1139,7 @@ Provider Implementations:
#### 3. **OCI Registry Integration** #### 3. **OCI Registry Integration**
```plaintext ```
Extension Development Extension Development
Package (oci-package.nu) Package (oci-package.nu)
@ -1157,7 +1157,7 @@ Load into Workspace
#### 4. **Gitea Integration** (Multi-user, Enterprise) #### 4. **Gitea Integration** (Multi-user, Enterprise)
```plaintext ```
Workspace Operations Workspace Operations
Check Lock Status (Gitea API) Check Lock Status (Gitea API)
@ -1179,7 +1179,7 @@ Release Lock (Delete lock file)
#### 5. **CoreDNS Integration** #### 5. **CoreDNS Integration**
```plaintext ```
Service Registration Service Registration
Update CoreDNS Corefile Update CoreDNS Corefile

View File

@ -86,7 +86,7 @@ Original comprehensive loader that handles:
## Module Dependency Graph ## Module Dependency Graph
```plaintext ```
Help/Status Commands Help/Status Commands
loader-lazy.nu loader-lazy.nu
@ -110,7 +110,7 @@ loader.nu (full configuration)
### Fast Path (Help Commands) ### Fast Path (Help Commands)
```nushell ```
# Uses minimal loader - 23ms # Uses minimal loader - 23ms
./provisioning help infrastructure ./provisioning help infrastructure
./provisioning workspace list ./provisioning workspace list
@ -119,7 +119,7 @@ loader.nu (full configuration)
### Medium Path (Status Operations) ### Medium Path (Status Operations)
```nushell ```
# Uses minimal loader with some full config - ~50ms # Uses minimal loader with some full config - ~50ms
./provisioning status ./provisioning status
./provisioning workspace active ./provisioning workspace active
@ -128,7 +128,7 @@ loader.nu (full configuration)
### Full Path (Infrastructure Operations) ### Full Path (Infrastructure Operations)
```nushell ```
# Uses full loader - ~150ms # Uses full loader - ~150ms
./provisioning server create --infra myinfra ./provisioning server create --infra myinfra
./provisioning taskserv create kubernetes ./provisioning taskserv create kubernetes
@ -139,7 +139,7 @@ loader.nu (full configuration)
### Lazy Loading Decision Logic ### Lazy Loading Decision Logic
```nushell ```
# In loader-lazy.nu # In loader-lazy.nu
let is_fast_command = ( let is_fast_command = (
$command == "help" or $command == "help" or
@ -160,7 +160,7 @@ if $is_fast_command {
The minimal loader returns a lightweight config record: The minimal loader returns a lightweight config record:
```nushell ```
{ {
workspace: { workspace: {
name: "librecloud" name: "librecloud"
@ -247,7 +247,7 @@ Only add if:
### Performance Testing ### Performance Testing
```bash ```
# Benchmark minimal loader # Benchmark minimal loader
time nu -n -c "use loader-minimal.nu *; get-active-workspace" time nu -n -c "use loader-minimal.nu *; get-active-workspace"

View File

@ -13,7 +13,7 @@ Control-Center uses **SurrealDB with kv-mem backend**, an embedded in-memory dat
### Database Configuration ### Database Configuration
```toml ```
[database] [database]
url = "memory" # In-memory backend url = "memory" # In-memory backend
namespace = "control_center" namespace = "control_center"
@ -24,7 +24,7 @@ database = "main"
**Production Alternative**: Switch to remote WebSocket connection for persistent storage: **Production Alternative**: Switch to remote WebSocket connection for persistent storage:
```toml ```
[database] [database]
url = "ws://localhost:8000" url = "ws://localhost:8000"
namespace = "control_center" namespace = "control_center"
@ -79,7 +79,7 @@ Control-Center also supports (via Cargo.toml dependencies):
Orchestrator uses simple file-based storage by default: Orchestrator uses simple file-based storage by default:
```toml ```
[orchestrator.storage] [orchestrator.storage]
type = "filesystem" # Default type = "filesystem" # Default
backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs" backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs"
@ -87,7 +87,7 @@ backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs"
**Resolved Path**: **Resolved Path**:
```plaintext ```
{{workspace.path}}/.orchestrator/data/queue.rkvs {{workspace.path}}/.orchestrator/data/queue.rkvs
``` ```
@ -95,7 +95,7 @@ backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs"
For production deployments, switch to SurrealDB: For production deployments, switch to SurrealDB:
```toml ```
[orchestrator.storage] [orchestrator.storage]
type = "surrealdb-server" # or surrealdb-embedded type = "surrealdb-server" # or surrealdb-embedded
@ -115,7 +115,7 @@ password = "secret"
All services load configuration in this order (priority: low → high): All services load configuration in this order (priority: low → high):
```plaintext ```
1. System Defaults provisioning/config/config.defaults.toml 1. System Defaults provisioning/config/config.defaults.toml
2. Service Defaults provisioning/platform/{service}/config.defaults.toml 2. Service Defaults provisioning/platform/{service}/config.defaults.toml
3. Workspace Config workspace/{name}/config/provisioning.yaml 3. Workspace Config workspace/{name}/config/provisioning.yaml
@ -128,7 +128,7 @@ All services load configuration in this order (priority: low → high):
Configs support dynamic variable interpolation: Configs support dynamic variable interpolation:
```toml ```
[paths] [paths]
base = "/Users/Akasha/project-provisioning/provisioning" base = "/Users/Akasha/project-provisioning/provisioning"
data_dir = "{{paths.base}}/data" # Resolves to: /Users/.../data data_dir = "{{paths.base}}/data" # Resolves to: /Users/.../data
@ -175,7 +175,7 @@ All services use workspace-aware paths:
**Orchestrator**: **Orchestrator**:
```toml ```
[orchestrator.paths] [orchestrator.paths]
base = "{{workspace.path}}/.orchestrator" base = "{{workspace.path}}/.orchestrator"
data_dir = "{{orchestrator.paths.base}}/data" data_dir = "{{orchestrator.paths.base}}/data"
@ -185,7 +185,7 @@ queue_dir = "{{orchestrator.paths.data_dir}}/queue"
**Control-Center**: **Control-Center**:
```toml ```
[paths] [paths]
base = "{{workspace.path}}/.control-center" base = "{{workspace.path}}/.control-center"
data_dir = "{{paths.base}}/data" data_dir = "{{paths.base}}/data"
@ -194,7 +194,7 @@ logs_dir = "{{paths.base}}/logs"
**Result** (workspace: `workspace-librecloud`): **Result** (workspace: `workspace-librecloud`):
```plaintext ```
workspace-librecloud/ workspace-librecloud/
├── .orchestrator/ ├── .orchestrator/
│ ├── data/ │ ├── data/
@ -214,7 +214,7 @@ Any config value can be overridden via environment variables:
### Control-Center ### Control-Center
```bash ```
# Override server port # Override server port
export CONTROL_CENTER_SERVER_PORT=8081 export CONTROL_CENTER_SERVER_PORT=8081
@ -227,7 +227,7 @@ export CONTROL_CENTER_JWT_ISSUER="my-issuer"
### Orchestrator ### Orchestrator
```bash ```
# Override orchestrator port # Override orchestrator port
export ORCHESTRATOR_SERVER_PORT=8080 export ORCHESTRATOR_SERVER_PORT=8080
@ -241,7 +241,7 @@ export ORCHESTRATOR_QUEUE_MAX_CONCURRENT_TASKS=10
### Naming Convention ### Naming Convention
```plaintext ```
{SERVICE}_{SECTION}_{KEY} = value {SERVICE}_{SECTION}_{KEY} = value
``` ```
@ -259,7 +259,7 @@ export ORCHESTRATOR_QUEUE_MAX_CONCURRENT_TASKS=10
**Container paths** (resolved inside container): **Container paths** (resolved inside container):
```toml ```
[paths] [paths]
base = "/app/provisioning" base = "/app/provisioning"
data_dir = "/data" # Mounted volume data_dir = "/data" # Mounted volume
@ -268,7 +268,7 @@ logs_dir = "/var/log/orchestrator" # Mounted volume
**Docker Compose volumes**: **Docker Compose volumes**:
```yaml ```
services: services:
orchestrator: orchestrator:
volumes: volumes:
@ -289,7 +289,7 @@ volumes:
**Host paths** (macOS/Linux): **Host paths** (macOS/Linux):
```toml ```
[paths] [paths]
base = "/Users/Akasha/project-provisioning/provisioning" base = "/Users/Akasha/project-provisioning/provisioning"
data_dir = "{{workspace.path}}/.orchestrator/data" data_dir = "{{workspace.path}}/.orchestrator/data"
@ -302,7 +302,7 @@ logs_dir = "{{workspace.path}}/.orchestrator/logs"
Check current configuration: Check current configuration:
```bash ```
# Show effective configuration # Show effective configuration
provisioning env provisioning env
@ -322,7 +322,7 @@ PROVISIONING_DEBUG=true ./orchestrator --show-config
**Cosmian KMS** uses its own database (when deployed): **Cosmian KMS** uses its own database (when deployed):
```bash ```
# KMS database location (Docker) # KMS database location (Docker)
/data/kms.db # SQLite database inside KMS container /data/kms.db # SQLite database inside KMS container
@ -332,7 +332,7 @@ PROVISIONING_DEBUG=true ./orchestrator --show-config
KMS also integrates with Control-Center's KMS hybrid backend (local + remote): KMS also integrates with Control-Center's KMS hybrid backend (local + remote):
```toml ```
[kms] [kms]
mode = "hybrid" # local, remote, or hybrid mode = "hybrid" # local, remote, or hybrid

View File

@ -32,7 +32,7 @@ without code changes. Hardcoded values defeat the purpose of IaC and create main
**Example**: **Example**:
```toml ```
# ✅ PAP Compliant - Configuration-driven # ✅ PAP Compliant - Configuration-driven
[providers.aws] [providers.aws]
regions = ["us-west-2", "us-east-1"] regions = ["us-west-2", "us-east-1"]
@ -62,7 +62,7 @@ configuration management and domain-specific operations.
**Language Responsibility Matrix**: **Language Responsibility Matrix**:
```plaintext ```
Rust Layer: Rust Layer:
├── Workflow orchestration and coordination ├── Workflow orchestration and coordination
├── REST API servers and HTTP endpoints ├── REST API servers and HTTP endpoints
@ -111,7 +111,7 @@ flexibility while maintaining predictability.
**Domain Organization**: **Domain Organization**:
```plaintext ```
├── core/ # Core system and library functions ├── core/ # Core system and library functions
├── platform/ # High-performance coordination layer ├── platform/ # High-performance coordination layer
├── provisioning/ # Main business logic with providers and services ├── provisioning/ # Main business logic with providers and services
@ -160,7 +160,7 @@ evolution.
**Recovery Strategies**: **Recovery Strategies**:
```plaintext ```
Operation Level: Operation Level:
├── Atomic operations with rollback ├── Atomic operations with rollback
├── Retry logic with exponential backoff ├── Retry logic with exponential backoff
@ -203,7 +203,7 @@ gains.
**Security Implementation**: **Security Implementation**:
```plaintext ```
Authentication & Authorization: Authentication & Authorization:
├── API authentication for external access ├── API authentication for external access
├── Role-based access control for operations ├── Role-based access control for operations
@ -234,7 +234,7 @@ the system.
**Testing Strategy**: **Testing Strategy**:
```plaintext ```
Unit Testing: Unit Testing:
├── Configuration validation tests ├── Configuration validation tests
├── Individual component tests ├── Individual component tests
@ -272,7 +272,7 @@ System Testing:
**Error Categories**: **Error Categories**:
```plaintext ```
Configuration Errors: Configuration Errors:
├── Invalid configuration syntax ├── Invalid configuration syntax
├── Missing required configuration ├── Missing required configuration
@ -300,7 +300,7 @@ System Errors:
**Observability Implementation**: **Observability Implementation**:
```plaintext ```
Logging: Logging:
├── Structured JSON logging ├── Structured JSON logging
├── Configurable log levels ├── Configurable log levels
@ -358,7 +358,7 @@ Monitoring:
**Debt Management Strategy**: **Debt Management Strategy**:
```plaintext ```
Assessment: Assessment:
├── Regular code quality reviews ├── Regular code quality reviews
├── Performance profiling and optimization ├── Performance profiling and optimization
@ -382,7 +382,7 @@ Improvement:
**Trade-off Categories**: **Trade-off Categories**:
```plaintext ```
Performance vs. Maintainability: Performance vs. Maintainability:
├── Rust coordination layer for performance ├── Rust coordination layer for performance
├── Nushell business logic for maintainability ├── Nushell business logic for maintainability

View File

@ -19,7 +19,7 @@ This document describes the **hybrid selective integration** of prov-ecosystem a
### Three-Layer Integration ### Three-Layer Integration
```plaintext ```
┌─────────────────────────────────────────────┐ ┌─────────────────────────────────────────────┐
│ Provisioning CLI (provisioning/core/cli/) │ │ Provisioning CLI (provisioning/core/cli/) │
│ ✅ 80+ command shortcuts │ │ ✅ 80+ command shortcuts │
@ -70,7 +70,7 @@ This document describes the **hybrid selective integration** of prov-ecosystem a
**Key Types**: **Key Types**:
```rust ```
pub enum ContainerRuntime { pub enum ContainerRuntime {
Docker, Docker,
Podman, Podman,
@ -85,7 +85,7 @@ pub struct ComposeAdapter { ... }
**Nushell Functions**: **Nushell Functions**:
```nushell ```
runtime-detect # Auto-detect available runtime runtime-detect # Auto-detect available runtime
runtime-exec # Execute command in detected runtime runtime-exec # Execute command in detected runtime
runtime-compose # Adapt docker-compose for runtime runtime-compose # Adapt docker-compose for runtime
@ -112,7 +112,7 @@ runtime-list # List all available runtimes
**Key Types**: **Key Types**:
```rust ```
pub struct SshConfig { ... } pub struct SshConfig { ... }
pub struct SshPool { ... } pub struct SshPool { ... }
pub enum DeploymentStrategy { pub enum DeploymentStrategy {
@ -124,7 +124,7 @@ pub enum DeploymentStrategy {
**Nushell Functions**: **Nushell Functions**:
```nushell ```
ssh-pool-connect # Create SSH pool connection ssh-pool-connect # Create SSH pool connection
ssh-pool-exec # Execute on SSH pool ssh-pool-exec # Execute on SSH pool
ssh-pool-status # Check pool status ssh-pool-status # Check pool status
@ -153,7 +153,7 @@ ssh-circuit-breaker-status # Check circuit breaker
**Key Types**: **Key Types**:
```rust ```
pub enum BackupBackend { pub enum BackupBackend {
Restic, Restic,
Borg, Borg,
@ -169,7 +169,7 @@ pub struct BackupManager { ... }
**Nushell Functions**: **Nushell Functions**:
```nushell ```
backup-create # Create backup job backup-create # Create backup job
backup-restore # Restore from snapshot backup-restore # Restore from snapshot
backup-list # List snapshots backup-list # List snapshots
@ -199,7 +199,7 @@ backup-status # Check backup status
**Key Types**: **Key Types**:
```rust ```
pub enum GitProvider { pub enum GitProvider {
GitHub, GitHub,
GitLab, GitLab,
@ -212,7 +212,7 @@ pub struct GitOpsOrchestrator { ... }
**Nushell Functions**: **Nushell Functions**:
```nushell ```
gitops-rules # Load rules from config gitops-rules # Load rules from config
gitops-watch # Watch for Git events gitops-watch # Watch for Git events
gitops-trigger # Manually trigger deployment gitops-trigger # Manually trigger deployment
@ -243,7 +243,7 @@ gitops-status # Get GitOps status
**Nushell Functions**: **Nushell Functions**:
```nushell ```
service-install # Install service service-install # Install service
service-start # Start service service-start # Start service
service-stop # Stop service service-stop # Stop service
@ -300,7 +300,7 @@ All implementations follow project standards:
## File Structure ## File Structure
```plaintext ```
provisioning/ provisioning/
├── platform/integrations/ ├── platform/integrations/
│ └── provisioning-bridge/ # Rust bridge crate │ └── provisioning-bridge/ # Rust bridge crate
@ -338,7 +338,7 @@ provisioning/
### Runtime Abstraction ### Runtime Abstraction
```nushell ```
# Auto-detect available runtime # Auto-detect available runtime
let runtime = (runtime-detect) let runtime = (runtime-detect)
@ -351,7 +351,7 @@ let compose_cmd = (runtime-compose "./docker-compose.yml")
### SSH Advanced ### SSH Advanced
```nushell ```
# Connect to SSH pool # Connect to SSH pool
let pool = (ssh-pool-connect "server01.example.com" "root" --port 22) let pool = (ssh-pool-connect "server01.example.com" "root" --port 22)
@ -364,7 +364,7 @@ ssh-circuit-breaker-status
### Backup System ### Backup System
```nushell ```
# Schedule regular backups # Schedule regular backups
backup-schedule "daily-app-backup" "0 2 * * *" \ backup-schedule "daily-app-backup" "0 2 * * *" \
--paths ["/opt/app" "/var/lib/app"] \ --paths ["/opt/app" "/var/lib/app"] \
@ -381,7 +381,7 @@ backup-restore "snapshot-001" --restore_path "."
### GitOps Events ### GitOps Events
```nushell ```
# Load GitOps rules # Load GitOps rules
let rules = (gitops-rules "./gitops-rules.yaml") let rules = (gitops-rules "./gitops-rules.yaml")
@ -394,7 +394,7 @@ gitops-trigger "deploy-app" --environment "prod"
### Service Management ### Service Management
```nushell ```
# Install service # Install service
service-install "my-app" "/usr/local/bin/my-app" \ service-install "my-app" "/usr/local/bin/my-app" \
--user "appuser" \ --user "appuser" \
@ -418,7 +418,7 @@ service-restart-policy "my-app" --policy "on-failure" --delay-secs 5
Existing `provisioning` CLI will gain new command tree: Existing `provisioning` CLI will gain new command tree:
```bash ```
provisioning runtime detect|exec|compose|info|list provisioning runtime detect|exec|compose|info|list
provisioning ssh pool connect|exec|status|strategies provisioning ssh pool connect|exec|status|strategies
provisioning backup create|restore|list|schedule|retention|status provisioning backup create|restore|list|schedule|retention|status
@ -430,7 +430,7 @@ provisioning service install|start|stop|restart|status|list|policy|detect-init
All integrations use Nickel schemas from `provisioning/schemas/integrations/`: All integrations use Nickel schemas from `provisioning/schemas/integrations/`:
```nickel ```
let { IntegrationConfig } = import "provisioning/integrations.ncl" in let { IntegrationConfig } = import "provisioning/integrations.ncl" in
{ {
runtime = { ... }, runtime = { ... },
@ -445,7 +445,7 @@ let { IntegrationConfig } = import "provisioning/integrations.ncl" in
Nushell plugins can be created for performance-critical operations: Nushell plugins can be created for performance-critical operations:
```bash ```
provisioning plugin list provisioning plugin list
# [installed] # [installed]
# nu_plugin_runtime # nu_plugin_runtime
@ -460,7 +460,7 @@ provisioning plugin list
### Rust Tests ### Rust Tests
```bash ```
cd provisioning/platform/integrations/provisioning-bridge cd provisioning/platform/integrations/provisioning-bridge
cargo test --all cargo test --all
cargo test -p provisioning-bridge --lib cargo test -p provisioning-bridge --lib
@ -469,7 +469,7 @@ cargo test -p provisioning-bridge --doc
### Nushell Tests ### Nushell Tests
```bash ```
nu provisioning/core/nulib/integrations/runtime.nu nu provisioning/core/nulib/integrations/runtime.nu
nu provisioning/core/nulib/integrations/ssh_advanced.nu nu provisioning/core/nulib/integrations/ssh_advanced.nu
``` ```

View File

@ -15,7 +15,7 @@ workflows, and enable extensible functionality. This document outlines the key i
**Implementation**: **Implementation**:
```rust ```
use tokio::process::Command; use tokio::process::Command;
use serde_json; use serde_json;
@ -35,7 +35,7 @@ pub async fn execute_nushell_workflow(
**Data Exchange Format**: **Data Exchange Format**:
```json ```
{ {
"status": "success" | "error" | "partial", "status": "success" | "error" | "partial",
"result": { "result": {
@ -54,7 +54,7 @@ pub async fn execute_nushell_workflow(
**Implementation**: **Implementation**:
```nushell ```
def submit-workflow [workflow: record] -> record { def submit-workflow [workflow: record] -> record {
let payload = $workflow | to json let payload = $workflow | to json
@ -68,7 +68,7 @@ def submit-workflow [workflow: record] -> record {
**API Contract**: **API Contract**:
```json ```
{ {
"workflow_id": "wf-456", "workflow_id": "wf-456",
"name": "multi_cloud_deployment", "name": "multi_cloud_deployment",
@ -86,7 +86,7 @@ def submit-workflow [workflow: record] -> record {
**Interface Definition**: **Interface Definition**:
```nushell ```
# Standard provider interface that all providers must implement # Standard provider interface that all providers must implement
export def list-servers [] -> table { export def list-servers [] -> table {
# Provider-specific implementation # Provider-specific implementation
@ -107,7 +107,7 @@ export def get-server [id: string] -> record {
**Configuration Integration**: **Configuration Integration**:
```toml ```
[providers.aws] [providers.aws]
region = "us-west-2" region = "us-west-2"
credentials_profile = "default" credentials_profile = "default"
@ -125,7 +125,7 @@ network_mode = "bridge"
#### Provider Discovery and Loading #### Provider Discovery and Loading
```nushell ```
def load-providers [] -> table { def load-providers [] -> table {
let provider_dirs = glob "providers/*/nulib" let provider_dirs = glob "providers/*/nulib"
@ -150,7 +150,7 @@ def load-providers [] -> table {
**Implementation**: **Implementation**:
```nushell ```
def resolve-configuration [context: record] -> record { def resolve-configuration [context: record] -> record {
let base_config = open config.defaults.toml let base_config = open config.defaults.toml
let user_config = if ("config.user.toml" | path exists) { let user_config = if ("config.user.toml" | path exists) {
@ -173,7 +173,7 @@ def resolve-configuration [context: record] -> record {
#### Variable Interpolation Pattern #### Variable Interpolation Pattern
```nushell ```
def interpolate-variables [config: record] -> record { def interpolate-variables [config: record] -> record {
let interpolations = { let interpolations = {
"{{paths.base}}": ($env.PWD), "{{paths.base}}": ($env.PWD),
@ -200,7 +200,7 @@ def interpolate-variables [config: record] -> record {
**Implementation (Rust)**: **Implementation (Rust)**:
```rust ```
use petgraph::{Graph, Direction}; use petgraph::{Graph, Direction};
use std::collections::HashMap; use std::collections::HashMap;
@ -229,7 +229,7 @@ impl DependencyResolver {
#### Parallel Execution Pattern #### Parallel Execution Pattern
```rust ```
use tokio::task::JoinSet; use tokio::task::JoinSet;
use futures::stream::{FuturesUnordered, StreamExt}; use futures::stream::{FuturesUnordered, StreamExt};
@ -265,7 +265,7 @@ pub async fn execute_parallel_batch(
**Implementation**: **Implementation**:
```rust ```
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
pub struct WorkflowCheckpoint { pub struct WorkflowCheckpoint {
pub workflow_id: String, pub workflow_id: String,
@ -309,7 +309,7 @@ impl CheckpointManager {
#### Rollback Pattern #### Rollback Pattern
```rust ```
pub struct RollbackManager { pub struct RollbackManager {
rollback_stack: Vec<RollbackAction>, rollback_stack: Vec<RollbackAction>,
} }
@ -349,7 +349,7 @@ impl RollbackManager {
**Event Definition**: **Event Definition**:
```rust ```
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub enum SystemEvent { pub enum SystemEvent {
WorkflowStarted { workflow_id: String, name: String }, WorkflowStarted { workflow_id: String, name: String },
@ -363,7 +363,7 @@ pub enum SystemEvent {
**Event Bus Implementation**: **Event Bus Implementation**:
```rust ```
use tokio::sync::broadcast; use tokio::sync::broadcast;
pub struct EventBus { pub struct EventBus {
@ -392,7 +392,7 @@ impl EventBus {
#### Extension Discovery and Loading #### Extension Discovery and Loading
```nushell ```
def discover-extensions [] -> table { def discover-extensions [] -> table {
let extension_dirs = glob "extensions/*/extension.toml" let extension_dirs = glob "extensions/*/extension.toml"
@ -417,7 +417,7 @@ def discover-extensions [] -> table {
#### Extension Interface Pattern #### Extension Interface Pattern
```nushell ```
# Standard extension interface # Standard extension interface
export def extension-info [] -> record { export def extension-info [] -> record {
{ {
@ -452,7 +452,7 @@ export def extension-deactivate [] -> nothing {
**Base API Structure**: **Base API Structure**:
```rust ```
use axum::{ use axum::{
extract::{Path, State}, extract::{Path, State},
response::Json, response::Json,
@ -473,7 +473,7 @@ pub fn create_api_router(state: AppState) -> Router {
**Standard Response Format**: **Standard Response Format**:
```json ```
{ {
"status": "success" | "error" | "pending", "status": "success" | "error" | "pending",
"data": { ... }, "data": { ... },
@ -494,7 +494,7 @@ pub fn create_api_router(state: AppState) -> Router {
### Structured Error Pattern ### Structured Error Pattern
```rust ```
#[derive(thiserror::Error, Debug)] #[derive(thiserror::Error, Debug)]
pub enum ProvisioningError { pub enum ProvisioningError {
#[error("Configuration error: {message}")] #[error("Configuration error: {message}")]
@ -513,7 +513,7 @@ pub enum ProvisioningError {
### Error Recovery Pattern ### Error Recovery Pattern
```nushell ```
def with-retry [operation: closure, max_attempts: int = 3] { def with-retry [operation: closure, max_attempts: int = 3] {
mut attempts = 0 mut attempts = 0
mut last_error = null mut last_error = null
@ -540,7 +540,7 @@ def with-retry [operation: closure, max_attempts: int = 3] {
### Caching Strategy Pattern ### Caching Strategy Pattern
```rust ```
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use std::collections::HashMap; use std::collections::HashMap;
@ -583,7 +583,7 @@ impl<T: Clone> Cache<T> {
### Streaming Pattern for Large Data ### Streaming Pattern for Large Data
```nushell ```
def process-large-dataset [source: string] -> nothing { def process-large-dataset [source: string] -> nothing {
# Stream processing instead of loading entire dataset # Stream processing instead of loading entire dataset
open $source open $source
@ -600,7 +600,7 @@ def process-large-dataset [source: string] -> nothing {
### Integration Test Pattern ### Integration Test Pattern
```rust ```
#[cfg(test)] #[cfg(test)]
mod integration_tests { mod integration_tests {
use super::*; use super::*;

View File

@ -24,7 +24,7 @@ distributed extension management through OCI registry integration.
**Purpose**: Core system functionality - CLI, libraries, base schemas **Purpose**: Core system functionality - CLI, libraries, base schemas
```plaintext ```
provisioning-core/ provisioning-core/
├── core/ ├── core/
│ ├── cli/ # Command-line interface │ ├── cli/ # Command-line interface
@ -82,7 +82,7 @@ provisioning-core/
**Purpose**: All provider, taskserv, and cluster extensions **Purpose**: All provider, taskserv, and cluster extensions
```plaintext ```
provisioning-extensions/ provisioning-extensions/
├── providers/ ├── providers/
│ ├── aws/ │ ├── aws/
@ -143,7 +143,7 @@ Each extension published separately as OCI artifact:
**Extension Manifest** (`manifest.yaml`): **Extension Manifest** (`manifest.yaml`):
```yaml ```
name: kubernetes name: kubernetes
type: taskserv type: taskserv
version: 1.28.0 version: 1.28.0
@ -183,7 +183,7 @@ min_provisioning_version: "3.0.0"
**Purpose**: Platform services (orchestrator, control-center, MCP server, API gateway) **Purpose**: Platform services (orchestrator, control-center, MCP server, API gateway)
```plaintext ```
provisioning-platform/ provisioning-platform/
├── orchestrator/ # Rust orchestrator service ├── orchestrator/ # Rust orchestrator service
│ ├── src/ │ ├── src/
@ -238,7 +238,7 @@ Standard Docker images in OCI registry:
### Registry Structure ### Registry Structure
```plaintext ```
OCI Registry (localhost:5000 or harbor.company.com) OCI Registry (localhost:5000 or harbor.company.com)
├── provisioning-core/ ├── provisioning-core/
│ ├── v3.5.0 # Core system artifact │ ├── v3.5.0 # Core system artifact
@ -263,7 +263,7 @@ OCI Registry (localhost:5000 or harbor.company.com)
Each extension packaged as OCI artifact: Each extension packaged as OCI artifact:
```plaintext ```
kubernetes-1.28.0.tar.gz kubernetes-1.28.0.tar.gz
├── schemas/ # Nickel schemas ├── schemas/ # Nickel schemas
│ ├── kubernetes.ncl │ ├── kubernetes.ncl
@ -291,7 +291,7 @@ kubernetes-1.28.0.tar.gz
**File**: `workspace/config/provisioning.yaml` **File**: `workspace/config/provisioning.yaml`
```yaml ```
# Core system dependency # Core system dependency
dependencies: dependencies:
core: core:
@ -363,7 +363,7 @@ The system resolves dependencies in this order:
### Dependency Resolution Commands ### Dependency Resolution Commands
```bash ```
# Resolve and install all dependencies # Resolve and install all dependencies
provisioning dep resolve provisioning dep resolve
@ -386,7 +386,7 @@ provisioning dep tree kubernetes
### CLI Commands ### CLI Commands
```bash ```
# Pull extension from OCI registry # Pull extension from OCI registry
provisioning oci pull kubernetes:1.28.0 provisioning oci pull kubernetes:1.28.0
@ -419,7 +419,7 @@ provisioning oci copy \
### OCI Configuration ### OCI Configuration
```bash ```
# Show OCI configuration # Show OCI configuration
provisioning oci config provisioning oci config
@ -442,7 +442,7 @@ provisioning oci config
### 1. Develop Extension ### 1. Develop Extension
```bash ```
# Create new extension from template # Create new extension from template
provisioning generate extension taskserv redis provisioning generate extension taskserv redis
@ -466,7 +466,7 @@ provisioning generate extension taskserv redis
### 2. Test Extension Locally ### 2. Test Extension Locally
```bash ```
# Load extension from local path # Load extension from local path
provisioning module load taskserv workspace_dev redis --source local provisioning module load taskserv workspace_dev redis --source local
@ -479,7 +479,7 @@ provisioning test extension redis
### 3. Package Extension ### 3. Package Extension
```bash ```
# Validate extension structure # Validate extension structure
provisioning oci package validate ./extensions/taskservs/redis provisioning oci package validate ./extensions/taskservs/redis
@ -491,7 +491,7 @@ provisioning oci package ./extensions/taskservs/redis
### 4. Publish Extension ### 4. Publish Extension
```bash ```
# Login to registry (one-time) # Login to registry (one-time)
provisioning oci login localhost:5000 provisioning oci login localhost:5000
@ -511,7 +511,7 @@ provisioning oci tags redis
### 5. Use Published Extension ### 5. Use Published Extension
```bash ```
# Add to workspace configuration # Add to workspace configuration
# workspace/config/provisioning.yaml: # workspace/config/provisioning.yaml:
# dependencies: # dependencies:
@ -534,7 +534,7 @@ provisioning dep resolve
**Using Zot (lightweight OCI registry)**: **Using Zot (lightweight OCI registry)**:
```bash ```
# Start local OCI registry # Start local OCI registry
provisioning oci-registry start provisioning oci-registry start
@ -555,7 +555,7 @@ provisioning oci-registry status
**Using Harbor**: **Using Harbor**:
```yaml ```
# workspace/config/provisioning.yaml # workspace/config/provisioning.yaml
dependencies: dependencies:
registry: registry:
@ -591,7 +591,7 @@ dependencies:
### Phase 2: Gradual Migration ### Phase 2: Gradual Migration
```bash ```
# Migrate extensions one by one # Migrate extensions one by one
for ext in (ls provisioning/extensions/taskservs); do for ext in (ls provisioning/extensions/taskservs); do
provisioning oci publish $ext.name provisioning oci publish $ext.name

View File

@ -79,7 +79,7 @@ dependency model.
**Contents:** **Contents:**
```plaintext ```
provisioning-core/ provisioning-core/
├── nulib/ # Nushell libraries ├── nulib/ # Nushell libraries
│ ├── lib_provisioning/ # Core library functions │ ├── lib_provisioning/ # Core library functions
@ -120,7 +120,7 @@ provisioning-core/
**Installation Path:** **Installation Path:**
```plaintext ```
/usr/local/ /usr/local/
├── bin/provisioning ├── bin/provisioning
├── lib/provisioning/ ├── lib/provisioning/
@ -135,7 +135,7 @@ provisioning-core/
**Contents:** **Contents:**
```plaintext ```
provisioning-platform/ provisioning-platform/
├── orchestrator/ # Rust orchestrator ├── orchestrator/ # Rust orchestrator
│ ├── src/ │ ├── src/
@ -180,7 +180,7 @@ provisioning-platform/
**Installation Path:** **Installation Path:**
```plaintext ```
/usr/local/ /usr/local/
├── bin/ ├── bin/
│ ├── provisioning-orchestrator │ ├── provisioning-orchestrator
@ -203,7 +203,7 @@ provisioning-platform/
**Contents:** **Contents:**
```plaintext ```
provisioning-extensions/ provisioning-extensions/
├── registry/ # Extension registry ├── registry/ # Extension registry
│ ├── index.json # Searchable index │ ├── index.json # Searchable index
@ -252,7 +252,7 @@ provisioning-extensions/
**Installation:** **Installation:**
```bash ```
# Install extension via core CLI # Install extension via core CLI
provisioning extension install mongodb provisioning extension install mongodb
provisioning extension install azure-provider provisioning extension install azure-provider
@ -261,7 +261,7 @@ provisioning extension install azure-provider
**Extension Structure:** **Extension Structure:**
Each extension is self-contained: Each extension is self-contained:
```plaintext ```
mongodb/ mongodb/
├── manifest.toml # Extension metadata ├── manifest.toml # Extension metadata
├── taskserv.nu # Implementation ├── taskserv.nu # Implementation
@ -279,7 +279,7 @@ mongodb/
**Contents:** **Contents:**
```plaintext ```
provisioning-workspace/ provisioning-workspace/
├── templates/ # Workspace templates ├── templates/ # Workspace templates
│ ├── minimal/ # Minimal starter │ ├── minimal/ # Minimal starter
@ -315,7 +315,7 @@ provisioning-workspace/
**Usage:** **Usage:**
```bash ```
# Create workspace from template # Create workspace from template
provisioning workspace init my-project --template kubernetes provisioning workspace init my-project --template kubernetes
@ -333,7 +333,7 @@ provisioning workspace init
**Contents:** **Contents:**
```plaintext ```
provisioning-distribution/ provisioning-distribution/
├── release-automation/ # Automated release workflows ├── release-automation/ # Automated release workflows
│ ├── build-all.nu # Build all packages │ ├── build-all.nu # Build all packages
@ -385,7 +385,7 @@ provisioning-distribution/
### Package-Based Dependencies (Not Submodules) ### Package-Based Dependencies (Not Submodules)
```plaintext ```
┌─────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────┐
│ provisioning-distribution │ │ provisioning-distribution │
│ (Release orchestration & registry) │ │ (Release orchestration & registry) │
@ -416,7 +416,7 @@ provisioning-distribution/
**Method:** Loose coupling via CLI + REST API **Method:** Loose coupling via CLI + REST API
```nushell ```
# Platform calls Core CLI (subprocess) # Platform calls Core CLI (subprocess)
def create-server [name: string] { def create-server [name: string] {
# Orchestrator executes Core CLI # Orchestrator executes Core CLI
@ -431,7 +431,7 @@ def submit-workflow [workflow: record] {
**Version Compatibility:** **Version Compatibility:**
```toml ```
# platform/Cargo.toml # platform/Cargo.toml
[package.metadata.provisioning] [package.metadata.provisioning]
core-version = "^3.0" # Compatible with core 3.x core-version = "^3.0" # Compatible with core 3.x
@ -441,7 +441,7 @@ core-version = "^3.0" # Compatible with core 3.x
**Method:** Plugin/module system **Method:** Plugin/module system
```nushell ```
# Extension manifest # Extension manifest
# extensions/mongodb/manifest.toml # extensions/mongodb/manifest.toml
[extension] [extension]
@ -465,7 +465,7 @@ provisioning extension install mongodb
**Method:** Git templates or package templates **Method:** Git templates or package templates
```bash ```
# Option 1: GitHub template repository # Option 1: GitHub template repository
gh repo create my-infra --template provisioning-workspace gh repo create my-infra --template provisioning-workspace
cd my-infra cd my-infra
@ -486,7 +486,7 @@ provisioning workspace create my-infra --template kubernetes
Each repository maintains independent semantic versioning: Each repository maintains independent semantic versioning:
```plaintext ```
provisioning-core: 3.2.1 provisioning-core: 3.2.1
provisioning-platform: 2.5.3 provisioning-platform: 2.5.3
provisioning-extensions: (per-extension versioning) provisioning-extensions: (per-extension versioning)
@ -497,7 +497,7 @@ provisioning-workspace: 1.4.0
**`provisioning-distribution/version-management/versions.toml`:** **`provisioning-distribution/version-management/versions.toml`:**
```toml ```
# Version compatibility matrix # Version compatibility matrix
[compatibility] [compatibility]
@ -536,7 +536,7 @@ workspace = "1.3.0"
**Coordinated releases** for major versions: **Coordinated releases** for major versions:
```bash ```
# Major release: All repos release together # Major release: All repos release together
provisioning-core: 3.0.0 provisioning-core: 3.0.0
provisioning-platform: 2.0.0 provisioning-platform: 2.0.0
@ -553,7 +553,7 @@ provisioning-platform: 2.1.0 (improves orchestrator, core stays 3.1.x)
### Working on Single Repository ### Working on Single Repository
```bash ```
# Developer working on core only # Developer working on core only
git clone https://github.com/yourorg/provisioning-core git clone https://github.com/yourorg/provisioning-core
cd provisioning-core cd provisioning-core
@ -574,7 +574,7 @@ just install-dev
### Working Across Repositories ### Working Across Repositories
```bash ```
# Scenario: Adding new feature requiring core + platform changes # Scenario: Adding new feature requiring core + platform changes
# 1. Clone both repositories # 1. Clone both repositories
@ -615,7 +615,7 @@ cargo test
### Testing Cross-Repo Integration ### Testing Cross-Repo Integration
```bash ```
# Integration tests in provisioning-distribution # Integration tests in provisioning-distribution
cd provisioning-distribution cd provisioning-distribution
@ -636,7 +636,7 @@ just test-bundle stable-3.3
Each repository releases independently: Each repository releases independently:
```bash ```
# Core release # Core release
cd provisioning-core cd provisioning-core
git tag v3.2.1 git tag v3.2.1
@ -656,7 +656,7 @@ git push --tags
Distribution repository creates tested bundles: Distribution repository creates tested bundles:
```bash ```
cd provisioning-distribution cd provisioning-distribution
# Create bundle # Create bundle
@ -679,7 +679,7 @@ just publish-bundle stable-3.2
#### Option 1: Bundle Installation (Recommended for Users) #### Option 1: Bundle Installation (Recommended for Users)
```bash ```
# Install stable bundle (easiest) # Install stable bundle (easiest)
curl -fsSL https://get.provisioning.io | sh curl -fsSL https://get.provisioning.io | sh
@ -691,7 +691,7 @@ curl -fsSL https://get.provisioning.io | sh
#### Option 2: Individual Component Installation #### Option 2: Individual Component Installation
```bash ```
# Install only core (minimal) # Install only core (minimal)
curl -fsSL https://get.provisioning.io/core | sh curl -fsSL https://get.provisioning.io/core | sh
@ -704,7 +704,7 @@ provisioning extension install mongodb
#### Option 3: Custom Combination #### Option 3: Custom Combination
```bash ```
# Install specific versions # Install specific versions
provisioning install core@3.1.0 provisioning install core@3.1.0
provisioning install platform@2.4.0 provisioning install platform@2.4.0
@ -760,7 +760,7 @@ provisioning install platform@2.4.0
**Core CI (`provisioning-core/.github/workflows/ci.yml`):** **Core CI (`provisioning-core/.github/workflows/ci.yml`):**
```yaml ```
name: Core CI name: Core CI
on: [push, pull_request] on: [push, pull_request]
@ -792,7 +792,7 @@ jobs:
**Platform CI (`provisioning-platform/.github/workflows/ci.yml`):** **Platform CI (`provisioning-platform/.github/workflows/ci.yml`):**
```yaml ```
name: Platform CI name: Platform CI
on: [push, pull_request] on: [push, pull_request]
@ -829,7 +829,7 @@ jobs:
**Distribution CI (`provisioning-distribution/.github/workflows/integration.yml`):** **Distribution CI (`provisioning-distribution/.github/workflows/integration.yml`):**
```yaml ```
name: Integration Tests name: Integration Tests
on: on:
@ -862,7 +862,7 @@ jobs:
### Monorepo Structure ### Monorepo Structure
```plaintext ```
provisioning/ (One repo, ~500 MB) provisioning/ (One repo, ~500 MB)
├── core/ (Nushell) ├── core/ (Nushell)
├── platform/ (Rust) ├── platform/ (Rust)
@ -873,7 +873,7 @@ provisioning/ (One repo, ~500 MB)
### Multi-Repo Structure ### Multi-Repo Structure
```plaintext ```
provisioning-core/ (Repo 1, ~50 MB) provisioning-core/ (Repo 1, ~50 MB)
├── nulib/ ├── nulib/
├── cli/ ├── cli/

View File

@ -10,7 +10,7 @@
### Prerequisites ### Prerequisites
```bash ```
# Install Nickel # Install Nickel
brew install nickel brew install nickel
# or from source: https://nickel-lang.org/getting-started/ # or from source: https://nickel-lang.org/getting-started/
@ -21,7 +21,7 @@ nickel --version # Should be 1.0+
### Directory Structure for Examples ### Directory Structure for Examples
```bash ```
mkdir -p ~/nickel-examples/{simple,complex,production} mkdir -p ~/nickel-examples/{simple,complex,production}
cd ~/nickel-examples cd ~/nickel-examples
``` ```
@ -32,7 +32,7 @@ cd ~/nickel-examples
### Step 1: Create Contract File ### Step 1: Create Contract File
```bash ```
cat > simple/server_contracts.ncl << 'EOF' cat > simple/server_contracts.ncl << 'EOF'
{ {
ServerConfig = { ServerConfig = {
@ -47,7 +47,7 @@ EOF
### Step 2: Create Defaults File ### Step 2: Create Defaults File
```bash ```
cat > simple/server_defaults.ncl << 'EOF' cat > simple/server_defaults.ncl << 'EOF'
{ {
web_server = { web_server = {
@ -76,7 +76,7 @@ EOF
### Step 3: Create Main Module with Hybrid Interface ### Step 3: Create Main Module with Hybrid Interface
```bash ```
cat > simple/server.ncl << 'EOF' cat > simple/server.ncl << 'EOF'
let contracts = import "./server_contracts.ncl" in let contracts = import "./server_contracts.ncl" in
let defaults = import "./server_defaults.ncl" in let defaults = import "./server_defaults.ncl" in
@ -110,7 +110,7 @@ EOF
### Test: Export and Validate JSON ### Test: Export and Validate JSON
```bash ```
cd simple/ cd simple/
# Export to JSON # Export to JSON
@ -133,7 +133,7 @@ nickel export server.ncl --format json | jq '.production_web_server.cpu_cores'
### Usage in Consumer Module ### Usage in Consumer Module
```bash ```
cat > simple/consumer.ncl << 'EOF' cat > simple/consumer.ncl << 'EOF'
let server = import "./server.ncl" in let server = import "./server.ncl" in
@ -162,14 +162,14 @@ nickel export consumer.ncl --format json | jq '.staging_web'
### Create Provider Structure ### Create Provider Structure
```bash ```
mkdir -p complex/upcloud/{contracts,defaults,main} mkdir -p complex/upcloud/{contracts,defaults,main}
cd complex/upcloud cd complex/upcloud
``` ```
### Provider Contracts ### Provider Contracts
```bash ```
cat > upcloud_contracts.ncl << 'EOF' cat > upcloud_contracts.ncl << 'EOF'
{ {
StorageBackup = { StorageBackup = {
@ -196,7 +196,7 @@ EOF
### Provider Defaults ### Provider Defaults
```bash ```
cat > upcloud_defaults.ncl << 'EOF' cat > upcloud_defaults.ncl << 'EOF'
{ {
backup = { backup = {
@ -223,7 +223,7 @@ EOF
### Provider Main Module ### Provider Main Module
```bash ```
cat > upcloud_main.ncl << 'EOF' cat > upcloud_main.ncl << 'EOF'
let contracts = import "./upcloud_contracts.ncl" in let contracts = import "./upcloud_contracts.ncl" in
let defaults = import "./upcloud_defaults.ncl" in let defaults = import "./upcloud_defaults.ncl" in
@ -281,7 +281,7 @@ EOF
### Test Provider Configuration ### Test Provider Configuration
```bash ```
# Export provider config # Export provider config
nickel export upcloud_main.ncl --format json | jq '.production_high_availability' nickel export upcloud_main.ncl --format json | jq '.production_high_availability'
@ -296,7 +296,7 @@ nickel export upcloud_main.ncl --format json | jq '.production_high_availability
### Consumer Using Provider ### Consumer Using Provider
```bash ```
cat > upcloud_consumer.ncl << 'EOF' cat > upcloud_consumer.ncl << 'EOF'
let upcloud = import "./upcloud_main.ncl" in let upcloud = import "./upcloud_main.ncl" in
@ -332,7 +332,7 @@ nickel export upcloud_consumer.ncl --format json | jq '.ha_stack | keys'
### Taskserv Contracts (from wuji) ### Taskserv Contracts (from wuji)
```bash ```
cat > production/taskserv_contracts.ncl << 'EOF' cat > production/taskserv_contracts.ncl << 'EOF'
{ {
Dependency = { Dependency = {
@ -352,7 +352,7 @@ EOF
### Taskserv Defaults ### Taskserv Defaults
```bash ```
cat > production/taskserv_defaults.ncl << 'EOF' cat > production/taskserv_defaults.ncl << 'EOF'
{ {
kubernetes = { kubernetes = {
@ -407,7 +407,7 @@ EOF
### Taskserv Main ### Taskserv Main
```bash ```
cat > production/taskserv.ncl << 'EOF' cat > production/taskserv.ncl << 'EOF'
let contracts = import "./taskserv_contracts.ncl" in let contracts = import "./taskserv_contracts.ncl" in
let defaults = import "./taskserv_defaults.ncl" in let defaults = import "./taskserv_defaults.ncl" in
@ -453,7 +453,7 @@ EOF
### Test Taskserv Setup ### Test Taskserv Setup
```bash ```
# Export stack # Export stack
nickel export taskserv.ncl --format json | jq '.wuji_k8s_stack | keys' nickel export taskserv.ncl --format json | jq '.wuji_k8s_stack | keys'
# Output: ["kubernetes", "cilium", "containerd", "etcd"] # Output: ["kubernetes", "cilium", "containerd", "etcd"]
@ -477,7 +477,7 @@ nickel export taskserv.ncl --format json | jq '.staging_stack | length'
### Base Infrastructure ### Base Infrastructure
```bash ```
cat > production/infrastructure.ncl << 'EOF' cat > production/infrastructure.ncl << 'EOF'
let servers = import "./server.ncl" in let servers = import "./server.ncl" in
let taskservs = import "./taskserv.ncl" in let taskservs = import "./taskserv.ncl" in
@ -520,7 +520,7 @@ nickel export infrastructure.ncl --format json | jq '.production.taskservs | key
### Extending Infrastructure (Nickel Advantage!) ### Extending Infrastructure (Nickel Advantage!)
```bash ```
cat > production/infrastructure_extended.ncl << 'EOF' cat > production/infrastructure_extended.ncl << 'EOF'
let infra = import "./infrastructure.ncl" in let infra = import "./infrastructure.ncl" in
@ -557,7 +557,7 @@ nickel export infrastructure_extended.ncl --format json | \
### Validation Functions ### Validation Functions
```bash ```
cat > production/validation.ncl << 'EOF' cat > production/validation.ncl << 'EOF'
let validate_server = fun server => let validate_server = fun server =>
if server.cpu_cores <= 0 then if server.cpu_cores <= 0 then
@ -586,7 +586,7 @@ EOF
### Using Validations ### Using Validations
```bash ```
cat > production/validated_config.ncl << 'EOF' cat > production/validated_config.ncl << 'EOF'
let server = import "./server.ncl" in let server = import "./server.ncl" in
let taskserv = import "./taskserv.ncl" in let taskserv = import "./taskserv.ncl" in
@ -632,7 +632,7 @@ nickel export validated_config.ncl --format json
### Run All Examples ### Run All Examples
```bash ```
#!/bin/bash #!/bin/bash
# test_all_examples.sh # test_all_examples.sh
@ -679,7 +679,7 @@ echo "=== All Tests Passed ✓ ==="
### Common Nickel Operations ### Common Nickel Operations
```bash ```
# Validate Nickel syntax # Validate Nickel syntax
nickel export config.ncl nickel export config.ncl
@ -711,7 +711,7 @@ nickel typecheck config.ncl
### Problem: "unexpected token" with multiple let ### Problem: "unexpected token" with multiple let
```nickel ```
# ❌ WRONG # ❌ WRONG
let A = {x = 1} let A = {x = 1}
let B = {y = 2} let B = {y = 2}
@ -725,7 +725,7 @@ let B = {y = 2} in
### Problem: Function serialization fails ### Problem: Function serialization fails
```nickel ```
# ❌ WRONG - function will fail to serialize # ❌ WRONG - function will fail to serialize
{ {
get_value = fun x => x + 1, get_value = fun x => x + 1,
@ -741,7 +741,7 @@ let B = {y = 2} in
### Problem: Null values cause export issues ### Problem: Null values cause export issues
```nickel ```
# ❌ WRONG # ❌ WRONG
{ optional_field = null } { optional_field = null }

View File

@ -8,7 +8,7 @@
## Quick Decision Tree ## Quick Decision Tree
```plaintext ```
Need to define infrastructure/schemas? Need to define infrastructure/schemas?
├─ New platform schemas → Use Nickel ✅ ├─ New platform schemas → Use Nickel ✅
├─ New provider extensions → Use Nickel ✅ ├─ New provider extensions → Use Nickel ✅
@ -26,7 +26,7 @@ Need to define infrastructure/schemas?
#### KCL Approach #### KCL Approach
```kcl ```
schema ServerDefaults: schema ServerDefaults:
name: str name: str
cpu_cores: int = 2 cpu_cores: int = 2
@ -51,7 +51,7 @@ server_defaults: ServerDefaults = {
**server_contracts.ncl**: **server_contracts.ncl**:
```nickel ```
{ {
ServerDefaults = { ServerDefaults = {
name | String, name | String,
@ -64,7 +64,7 @@ server_defaults: ServerDefaults = {
**server_defaults.ncl**: **server_defaults.ncl**:
```nickel ```
{ {
server = { server = {
name = "web-server", name = "web-server",
@ -77,7 +77,7 @@ server_defaults: ServerDefaults = {
**server.ncl**: **server.ncl**:
```nickel ```
let contracts = import "./server_contracts.ncl" in let contracts = import "./server_contracts.ncl" in
let defaults = import "./server_defaults.ncl" in let defaults = import "./server_defaults.ncl" in
@ -93,7 +93,7 @@ let defaults = import "./server_defaults.ncl" in
**Usage**: **Usage**:
```nickel ```
let server = import "./server.ncl" in let server = import "./server.ncl" in
# Simple override # Simple override
@ -117,7 +117,7 @@ my_custom = server.defaults.server & {
#### KCL (from `provisioning/extensions/providers/upcloud/nickel/` - legacy approach) #### KCL (from `provisioning/extensions/providers/upcloud/nickel/` - legacy approach)
```kcl ```
schema StorageBackup: schema StorageBackup:
backup_id: str backup_id: str
frequency: str frequency: str
@ -145,7 +145,7 @@ provision_upcloud: ProvisionUpcloud = {
**upcloud_contracts.ncl**: **upcloud_contracts.ncl**:
```nickel ```
{ {
StorageBackup = { StorageBackup = {
backup_id | String, backup_id | String,
@ -170,7 +170,7 @@ provision_upcloud: ProvisionUpcloud = {
**upcloud_defaults.ncl**: **upcloud_defaults.ncl**:
```nickel ```
{ {
storage_backup = { storage_backup = {
backup_id = "", backup_id = "",
@ -195,7 +195,7 @@ provision_upcloud: ProvisionUpcloud = {
**upcloud_main.ncl** (from actual codebase): **upcloud_main.ncl** (from actual codebase):
```nickel ```
let contracts = import "./upcloud_contracts.ncl" in let contracts = import "./upcloud_contracts.ncl" in
let defaults = import "./upcloud_defaults.ncl" in let defaults = import "./upcloud_defaults.ncl" in
@ -219,7 +219,7 @@ let defaults = import "./upcloud_defaults.ncl" in
**Usage Comparison**: **Usage Comparison**:
```nickel ```
# KCL way (KCL no lo permite bien) # KCL way (KCL no lo permite bien)
# Cannot easily extend without schema modification # Cannot easily extend without schema modification
@ -288,7 +288,7 @@ production_stack = upcloud.make_provision_upcloud {
**KCL (Legacy)**: **KCL (Legacy)**:
```kcl ```
schema ServerConfig: schema ServerConfig:
name: str name: str
zone: str = "us-nyc1" zone: str = "us-nyc1"
@ -300,7 +300,7 @@ web_server: ServerConfig = {
**Nickel (Recommended)**: **Nickel (Recommended)**:
```nickel ```
let defaults = import "./server_defaults.ncl" in let defaults = import "./server_defaults.ncl" in
web_server = defaults.make_server { name = "web-01" } web_server = defaults.make_server { name = "web-01" }
``` ```
@ -313,7 +313,7 @@ web_server = defaults.make_server { name = "web-01" }
**KCL** (from wuji infrastructure): **KCL** (from wuji infrastructure):
```kcl ```
schema TaskServDependency: schema TaskServDependency:
name: str name: str
wait_for_health: bool = false wait_for_health: bool = false
@ -343,7 +343,7 @@ taskserv_cilium: TaskServ = {
**Nickel** (from wuji/main.ncl): **Nickel** (from wuji/main.ncl):
```nickel ```
let ts_kubernetes = import "./taskservs/kubernetes.ncl" in let ts_kubernetes = import "./taskservs/kubernetes.ncl" in
let ts_cilium = import "./taskservs/cilium.ncl" in let ts_cilium = import "./taskservs/cilium.ncl" in
let ts_containerd = import "./taskservs/containerd.ncl" in let ts_containerd = import "./taskservs/containerd.ncl" in
@ -367,7 +367,7 @@ let ts_containerd = import "./taskservs/containerd.ncl" in
**KCL**: **KCL**:
```kcl ```
schema ServerConfig: schema ServerConfig:
name: str name: str
# Would need to modify schema! # Would need to modify schema!
@ -379,7 +379,7 @@ schema ServerConfig:
**Nickel**: **Nickel**:
```nickel ```
let server = import "./server.ncl" in let server = import "./server.ncl" in
# Add custom fields without modifying schema! # Add custom fields without modifying schema!
@ -402,7 +402,7 @@ my_server = server.defaults.server & {
**KCL Approach (Legacy)**: **KCL Approach (Legacy)**:
```kcl ```
schema ServerDefaults: schema ServerDefaults:
cpu: int = 2 cpu: int = 2
memory: int = 4 memory: int = 4
@ -423,7 +423,7 @@ server: Server = {
**Nickel Approach**: **Nickel Approach**:
```nickel ```
# defaults.ncl # defaults.ncl
server_defaults = { server_defaults = {
cpu = 2, cpu = 2,
@ -449,7 +449,7 @@ server = make_server {
**KCL Validation (Legacy)** (compile-time, inline): **KCL Validation (Legacy)** (compile-time, inline):
```kcl ```
schema Config: schema Config:
timeout: int = 5 timeout: int = 5
@ -465,7 +465,7 @@ schema Config:
**Nickel Validation** (runtime, contract-based): **Nickel Validation** (runtime, contract-based):
```nickel ```
# contracts.ncl - Pure type definitions # contracts.ncl - Pure type definitions
Config = { Config = {
timeout | Number, timeout | Number,
@ -495,7 +495,7 @@ my_config = validate_config { timeout = 10 }
**Before (KCL - Legacy)**: **Before (KCL - Legacy)**:
```kcl ```
schema Scheduler: schema Scheduler:
strategy: str = "fifo" strategy: str = "fifo"
workers: int = 4 workers: int = 4
@ -513,7 +513,7 @@ scheduler_config: Scheduler = {
`scheduler_contracts.ncl`: `scheduler_contracts.ncl`:
```nickel ```
{ {
Scheduler = { Scheduler = {
strategy | String, strategy | String,
@ -524,7 +524,7 @@ scheduler_config: Scheduler = {
`scheduler_defaults.ncl`: `scheduler_defaults.ncl`:
```nickel ```
{ {
scheduler = { scheduler = {
strategy = "fifo", strategy = "fifo",
@ -535,7 +535,7 @@ scheduler_config: Scheduler = {
`scheduler.ncl`: `scheduler.ncl`:
```nickel ```
let contracts = import "./scheduler_contracts.ncl" in let contracts = import "./scheduler_contracts.ncl" in
let defaults = import "./scheduler_defaults.ncl" in let defaults = import "./scheduler_defaults.ncl" in
@ -557,7 +557,7 @@ let defaults = import "./scheduler_defaults.ncl" in
**Before (KCL - Legacy)**: **Before (KCL - Legacy)**:
```kcl ```
schema Mode: schema Mode:
deployment_type: str = "solo" # "solo" | "multiuser" | "cicd" | "enterprise" deployment_type: str = "solo" # "solo" | "multiuser" | "cicd" | "enterprise"
@ -568,7 +568,7 @@ schema Mode:
**After (Nickel - Current)**: **After (Nickel - Current)**:
```nickel ```
# contracts.ncl # contracts.ncl
{ {
Mode = { Mode = {
@ -592,7 +592,7 @@ schema Mode:
**Before (KCL - Legacy)**: **Before (KCL - Legacy)**:
```kcl ```
schema ServerDefaults: schema ServerDefaults:
cpu: int = 2 cpu: int = 2
memory: int = 4 memory: int = 4
@ -609,7 +609,7 @@ web_server: Server = {
**After (Nickel - Current)**: **After (Nickel - Current)**:
```nickel ```
# defaults.ncl # defaults.ncl
{ {
server_defaults = { server_defaults = {
@ -643,7 +643,7 @@ let make_server = fun config =>
**Workflow**: **Workflow**:
```bash ```
# Edit workspace config # Edit workspace config
cd workspace_librecloud/nickel cd workspace_librecloud/nickel
vim wuji/main.ncl vim wuji/main.ncl
@ -658,7 +658,7 @@ nickel export wuji/main.ncl # Uses updated schemas
**Imports** (relative, central): **Imports** (relative, central):
```nickel ```
import "../../provisioning/schemas/main.ncl" import "../../provisioning/schemas/main.ncl"
import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl" import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
``` ```
@ -671,7 +671,7 @@ import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
**Workflow**: **Workflow**:
```bash ```
# 1. Create immutable snapshot # 1. Create immutable snapshot
provisioning workspace freeze \ provisioning workspace freeze \
--version "2025-12-15-prod-v1" \ --version "2025-12-15-prod-v1" \
@ -696,7 +696,7 @@ provisioning deploy \
**Frozen Imports** (rewritten to local): **Frozen Imports** (rewritten to local):
```nickel ```
# Original in workspace # Original in workspace
import "../../provisioning/schemas/main.ncl" import "../../provisioning/schemas/main.ncl"
@ -720,7 +720,7 @@ import "./provisioning/schemas/main.ncl"
**Problem**: **Problem**:
```nickel ```
# ❌ WRONG # ❌ WRONG
let A = { x = 1 } let A = { x = 1 }
let B = { y = 2 } let B = { y = 2 }
@ -731,7 +731,7 @@ Error: `unexpected token`
**Solution**: Use `let...in` chaining: **Solution**: Use `let...in` chaining:
```nickel ```
# ✅ CORRECT # ✅ CORRECT
let A = { x = 1 } in let A = { x = 1 } in
let B = { y = 2 } in let B = { y = 2 } in
@ -744,7 +744,7 @@ let B = { y = 2 } in
**Problem**: **Problem**:
```nickel ```
# ❌ WRONG # ❌ WRONG
let StorageVol = { let StorageVol = {
mount_path : String | null = null, mount_path : String | null = null,
@ -757,7 +757,7 @@ Error: `this can't be used as a contract`
**Solution**: Use untyped assignment: **Solution**: Use untyped assignment:
```nickel ```
# ✅ CORRECT # ✅ CORRECT
let StorageVol = { let StorageVol = {
mount_path = null, mount_path = null,
@ -770,7 +770,7 @@ let StorageVol = {
**Problem**: **Problem**:
```nickel ```
# ❌ WRONG # ❌ WRONG
{ {
get_value = fun x => x + 1, get_value = fun x => x + 1,
@ -782,7 +782,7 @@ Error: Functions can't be serialized
**Solution**: Mark helper functions `not_exported`: **Solution**: Mark helper functions `not_exported`:
```nickel ```
# ✅ CORRECT # ✅ CORRECT
{ {
get_value | not_exported = fun x => x + 1, get_value | not_exported = fun x => x + 1,
@ -796,7 +796,7 @@ Error: Functions can't be serialized
**Problem**: **Problem**:
```nickel ```
let defaults = import "./defaults.ncl" in let defaults = import "./defaults.ncl" in
defaults.scheduler_config # But file has "scheduler" defaults.scheduler_config # But file has "scheduler"
``` ```
@ -805,7 +805,7 @@ Error: `field not found`
**Solution**: Use exact field names: **Solution**: Use exact field names:
```nickel ```
let defaults = import "./defaults.ncl" in let defaults = import "./defaults.ncl" in
defaults.scheduler # Correct name from defaults.ncl defaults.scheduler # Correct name from defaults.ncl
``` ```
@ -818,7 +818,7 @@ defaults.scheduler # Correct name from defaults.ncl
**Solution**: Check for circular references or missing `not_exported`: **Solution**: Check for circular references or missing `not_exported`:
```nickel ```
# ❌ Slow - functions being serialized # ❌ Slow - functions being serialized
{ {
validate_config = fun x => x, validate_config = fun x => x,
@ -917,7 +917,7 @@ Type-safe prompts, forms, and schemas that **bidirectionally integrate with Nick
### Workflow: Nickel Schemas → Interactive UIs → Nickel Output ### Workflow: Nickel Schemas → Interactive UIs → Nickel Output
```bash ```
# 1. Define schema in Nickel # 1. Define schema in Nickel
cat > server.ncl << 'EOF' cat > server.ncl << 'EOF'
let contracts = import "./contracts.ncl" in let contracts = import "./contracts.ncl" in
@ -952,7 +952,7 @@ typedialog form --input form.toml --output nickel
### Example: Infrastructure Wizard ### Example: Infrastructure Wizard
```bash ```
# User runs # User runs
provisioning init --wizard provisioning init --wizard
@ -1014,7 +1014,7 @@ provisioning/schemas/config/workspace_config/main.ncl
**File**: `provisioning/schemas/main.ncl` (174 lines) **File**: `provisioning/schemas/main.ncl` (174 lines)
```nickel ```
# Domain-organized architecture # Domain-organized architecture
{ {
lib | doc "Core library types" lib | doc "Core library types"
@ -1054,7 +1054,7 @@ provisioning/schemas/config/workspace_config/main.ncl
**Usage**: **Usage**:
```nickel ```
let provisioning = import "./main.ncl" in let provisioning = import "./main.ncl" in
provisioning.lib.Storage provisioning.lib.Storage
@ -1069,7 +1069,7 @@ provisioning.operations.workflows
**File**: `provisioning/extensions/providers/upcloud/nickel/main.ncl` (38 lines) **File**: `provisioning/extensions/providers/upcloud/nickel/main.ncl` (38 lines)
```nickel ```
let contracts_lib = import "./contracts.ncl" in let contracts_lib = import "./contracts.ncl" in
let defaults_lib = import "./defaults.ncl" in let defaults_lib = import "./defaults.ncl" in
@ -1109,7 +1109,7 @@ let defaults_lib = import "./defaults.ncl" in
**File**: `workspace_librecloud/nickel/wuji/main.ncl` (53 lines) **File**: `workspace_librecloud/nickel/wuji/main.ncl` (53 lines)
```nickel ```
let settings_config = import "./settings.ncl" in let settings_config = import "./settings.ncl" in
let ts_cilium = import "./taskservs/cilium.ncl" in let ts_cilium = import "./taskservs/cilium.ncl" in
let ts_containerd = import "./taskservs/containerd.ncl" in let ts_containerd = import "./taskservs/containerd.ncl" in

View File

@ -15,7 +15,7 @@ verification, Cedar authorization, rate limiting, and audit logging) into a cohe
The middleware chain is applied in this specific order to ensure proper security: The middleware chain is applied in this specific order to ensure proper security:
```plaintext ```
┌─────────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────────┐
│ Incoming HTTP Request │ │ Incoming HTTP Request │
└────────────────────────┬────────────────────────────────────────┘ └────────────────────────┬────────────────────────────────────────┘
@ -90,7 +90,7 @@ The middleware chain is applied in this specific order to ensure proper security
**Example**: **Example**:
```rust ```
pub struct SecurityContext { pub struct SecurityContext {
pub user_id: String, pub user_id: String,
pub token: ValidatedToken, pub token: ValidatedToken,
@ -164,7 +164,7 @@ impl SecurityContext {
**Example**: **Example**:
```rust ```
fn requires_mfa(method: &str, path: &str) -> bool { fn requires_mfa(method: &str, path: &str) -> bool {
if path.contains("/production/") { return true; } if path.contains("/production/") { return true; }
if method == "DELETE" { return true; } if method == "DELETE" { return true; }
@ -190,7 +190,7 @@ fn requires_mfa(method: &str, path: &str) -> bool {
**Resource Mapping**: **Resource Mapping**:
```rust ```
/api/v1/servers/srv-123 → Resource::Server("srv-123") /api/v1/servers/srv-123 → Resource::Server("srv-123")
/api/v1/taskserv/kubernetes → Resource::TaskService("kubernetes") /api/v1/taskserv/kubernetes → Resource::TaskService("kubernetes")
/api/v1/cluster/prod → Resource::Cluster("prod") /api/v1/cluster/prod → Resource::Cluster("prod")
@ -199,7 +199,7 @@ fn requires_mfa(method: &str, path: &str) -> bool {
**Action Mapping**: **Action Mapping**:
```rust ```
GET → Action::Read GET → Action::Read
POST → Action::Create POST → Action::Create
PUT → Action::Update PUT → Action::Update
@ -223,7 +223,7 @@ DELETE → Action::Delete
**Configuration**: **Configuration**:
```rust ```
pub struct RateLimitConfig { pub struct RateLimitConfig {
pub max_requests: u32, // for example, 100 pub max_requests: u32, // for example, 100
pub window_duration: Duration, // for example, 60 seconds pub window_duration: Duration, // for example, 60 seconds
@ -236,7 +236,7 @@ pub struct RateLimitConfig {
**Statistics**: **Statistics**:
```rust ```
pub struct RateLimitStats { pub struct RateLimitStats {
pub total_ips: usize, // Number of tracked IPs pub total_ips: usize, // Number of tracked IPs
pub total_requests: u32, // Total requests made pub total_requests: u32, // Total requests made
@ -261,7 +261,7 @@ pub struct RateLimitStats {
**Usage Example**: **Usage Example**:
```rust ```
use provisioning_orchestrator::security_integration::{ use provisioning_orchestrator::security_integration::{
SecurityComponents, SecurityConfig SecurityComponents, SecurityConfig
}; };
@ -292,7 +292,7 @@ let secured_app = apply_security_middleware(app, &security);
### Updated AppState Structure ### Updated AppState Structure
```rust ```
pub struct AppState { pub struct AppState {
// Existing fields // Existing fields
pub task_storage: Arc<dyn TaskStorage>, pub task_storage: Arc<dyn TaskStorage>,
@ -317,7 +317,7 @@ pub struct AppState {
### Initialization in main.rs ### Initialization in main.rs
```rust ```
#[tokio::main] #[tokio::main]
async fn main() -> Result<()> { async fn main() -> Result<()> {
let args = Args::parse(); let args = Args::parse();
@ -398,7 +398,7 @@ async fn main() -> Result<()> {
### Step-by-Step Flow ### Step-by-Step Flow
```plaintext ```
1. CLIENT REQUEST 1. CLIENT REQUEST
├─ Headers: ├─ Headers:
│ ├─ Authorization: Bearer <jwt_token> │ ├─ Authorization: Bearer <jwt_token>
@ -485,7 +485,7 @@ async fn main() -> Result<()> {
### Environment Variables ### Environment Variables
```bash ```
# JWT Configuration # JWT Configuration
JWT_ISSUER=control-center JWT_ISSUER=control-center
JWT_AUDIENCE=orchestrator JWT_AUDIENCE=orchestrator
@ -513,7 +513,7 @@ AUDIT_RETENTION_DAYS=365
For development/testing, all security can be disabled: For development/testing, all security can be disabled:
```rust ```
// In main.rs // In main.rs
let security = if env::var("DEVELOPMENT_MODE").unwrap_or("false".to_string()) == "true" { let security = if env::var("DEVELOPMENT_MODE").unwrap_or("false".to_string()) == "true" {
SecurityComponents::disabled(audit_logger.clone()) SecurityComponents::disabled(audit_logger.clone())
@ -544,7 +544,7 @@ Location: `provisioning/platform/orchestrator/tests/security_integration_tests.r
**Run Tests**: **Run Tests**:
```bash ```
cd provisioning/platform/orchestrator cd provisioning/platform/orchestrator
cargo test security_integration_tests cargo test security_integration_tests
``` ```

View File

@ -54,18 +54,18 @@ http post <http://localhost:9090/workflows/servers/create> {
1. Orchestrator receives and queues: 1. Orchestrator receives and queues:
```rust ```
// Orchestrator receives HTTP request // Orchestrator receives HTTP request
async fn create_server_workflow(request) { async fn create_server_workflow(request) {
let task = Task::new(TaskType::ServerCreate, request); let task = Task::new(TaskType::ServerCreate, request);
task_queue.enqueue(task).await; // Queue for execution task_queue.enqueue(task).await; // Queue for execution
return workflow_id; // Return immediately return workflow_id; // Return immediately
} }
```text ```
2. Orchestrator executes via Nushell subprocess: 2. Orchestrator executes via Nushell subprocess:
```rust ```
// Orchestrator spawns Nushell to run business logic // Orchestrator spawns Nushell to run business logic
async fn execute_task(task: Task) { async fn execute_task(task: Task) {
let output = Command::new("nu") let output = Command::new("nu")
@ -76,11 +76,11 @@ async fn execute_task(task: Task) {
// Orchestrator manages: retry, checkpointing, monitoring // Orchestrator manages: retry, checkpointing, monitoring
} }
```text ```
3. Nushell executes the actual work: 3. Nushell executes the actual work:
```nu ```
# servers/create.nu # servers/create.nu
export def create-server [name: string] { export def create-server [name: string] {

View File

@ -18,7 +18,7 @@ functionality.
**Original Issue:** **Original Issue:**
```plaintext ```
Deep call stack in Nushell (template.nu:71) Deep call stack in Nushell (template.nu:71)
→ "Type not supported" errors → "Type not supported" errors
→ Cannot handle complex nested workflows → Cannot handle complex nested workflows
@ -35,7 +35,7 @@ Deep call stack in Nushell (template.nu:71)
### How It Works Today (Monorepo) ### How It Works Today (Monorepo)
```plaintext ```
┌─────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────┐
│ User │ │ User │
└───────────────────────────┬─────────────────────────────────┘ └───────────────────────────┬─────────────────────────────────┘
@ -80,7 +80,7 @@ Deep call stack in Nushell (template.nu:71)
#### Mode 1: Direct Mode (Simple Operations) #### Mode 1: Direct Mode (Simple Operations)
```bash ```
# No orchestrator needed # No orchestrator needed
provisioning server list provisioning server list
provisioning env provisioning env
@ -92,7 +92,7 @@ provisioning (CLI) → Nushell scripts → Result
#### Mode 2: Orchestrated Mode (Complex Operations) #### Mode 2: Orchestrated Mode (Complex Operations)
```bash ```
# Uses orchestrator for coordination # Uses orchestrator for coordination
provisioning server create --orchestrated provisioning server create --orchestrated
@ -104,7 +104,7 @@ provisioning CLI → Orchestrator API → Task Queue → Nushell executor
#### Mode 3: Workflow Mode (Batch Operations) #### Mode 3: Workflow Mode (Batch Operations)
```bash ```
# Complex workflows with dependencies # Complex workflows with dependencies
provisioning workflow submit server-cluster.ncl provisioning workflow submit server-cluster.ncl
@ -128,7 +128,7 @@ provisioning CLI → Orchestrator Workflow Engine → Dependency Graph
**Nushell CLI (`core/nulib/workflows/server_create.nu`):** **Nushell CLI (`core/nulib/workflows/server_create.nu`):**
```nushell ```
# Submit server creation workflow to orchestrator # Submit server creation workflow to orchestrator
export def server_create_workflow [ export def server_create_workflow [
infra_name: string infra_name: string
@ -153,7 +153,7 @@ export def server_create_workflow [
**Rust Orchestrator (`platform/orchestrator/src/api/workflows.rs`):** **Rust Orchestrator (`platform/orchestrator/src/api/workflows.rs`):**
```rust ```
// Receive workflow submission from Nushell CLI // Receive workflow submission from Nushell CLI
#[axum::debug_handler] #[axum::debug_handler]
async fn create_server_workflow( async fn create_server_workflow(
@ -183,7 +183,7 @@ async fn create_server_workflow(
**Flow:** **Flow:**
```plaintext ```
User → provisioning server create --orchestrated User → provisioning server create --orchestrated
Nushell CLI prepares task Nushell CLI prepares task
@ -201,7 +201,7 @@ User can monitor: provisioning workflow monitor <id>
**Orchestrator Task Executor (`platform/orchestrator/src/executor.rs`):** **Orchestrator Task Executor (`platform/orchestrator/src/executor.rs`):**
```rust ```
// Orchestrator spawns Nushell to execute business logic // Orchestrator spawns Nushell to execute business logic
pub async fn execute_task(task: Task) -> Result<TaskResult> { pub async fn execute_task(task: Task) -> Result<TaskResult> {
match task.task_type { match task.task_type {
@ -233,7 +233,7 @@ pub async fn execute_task(task: Task) -> Result<TaskResult> {
**Flow:** **Flow:**
```plaintext ```
Orchestrator task queue has pending task Orchestrator task queue has pending task
Executor picks up task Executor picks up task
@ -253,7 +253,7 @@ User monitors via: provisioning workflow status <id>
**Nushell Calls Orchestrator API:** **Nushell Calls Orchestrator API:**
```nushell ```
# Nushell script checks orchestrator status during execution # Nushell script checks orchestrator status during execution
export def check-orchestrator-health [] { export def check-orchestrator-health [] {
let response = (http get http://localhost:9090/health) let response = (http get http://localhost:9090/health)
@ -276,7 +276,7 @@ export def report-progress [task_id: string, progress: int] {
**Orchestrator Monitors Nushell Execution:** **Orchestrator Monitors Nushell Execution:**
```rust ```
// Orchestrator tracks Nushell subprocess // Orchestrator tracks Nushell subprocess
pub async fn execute_with_monitoring(task: Task) -> Result<TaskResult> { pub async fn execute_with_monitoring(task: Task) -> Result<TaskResult> {
let mut child = Command::new("nu") let mut child = Command::new("nu")
@ -332,7 +332,7 @@ pub async fn execute_with_monitoring(task: Task) -> Result<TaskResult> {
**Runtime Integration (Same as Monorepo):** **Runtime Integration (Same as Monorepo):**
```plaintext ```
User installs both packages: User installs both packages:
provisioning-core-3.2.1 → /usr/local/lib/provisioning/ provisioning-core-3.2.1 → /usr/local/lib/provisioning/
provisioning-platform-2.5.3 → /usr/local/bin/provisioning-orchestrator provisioning-platform-2.5.3 → /usr/local/bin/provisioning-orchestrator
@ -347,7 +347,7 @@ No code dependencies, just runtime coordination!
**Core Package (`provisioning-core`) config:** **Core Package (`provisioning-core`) config:**
```toml ```
# /usr/local/share/provisioning/config/config.defaults.toml # /usr/local/share/provisioning/config/config.defaults.toml
[orchestrator] [orchestrator]
@ -363,7 +363,7 @@ fallback_to_direct = true # Fall back if orchestrator down
**Platform Package (`provisioning-platform`) config:** **Platform Package (`provisioning-platform`) config:**
```toml ```
# /usr/local/share/provisioning/platform/config.toml # /usr/local/share/provisioning/platform/config.toml
[orchestrator] [orchestrator]
@ -382,7 +382,7 @@ task_timeout_seconds = 3600
**Compatibility Matrix (`provisioning-distribution/versions.toml`):** **Compatibility Matrix (`provisioning-distribution/versions.toml`):**
```toml ```
[compatibility.platform."2.5.3"] [compatibility.platform."2.5.3"]
core = "^3.2" # Platform 2.5.3 compatible with core 3.2.x core = "^3.2" # Platform 2.5.3 compatible with core 3.2.x
min-core = "3.2.0" min-core = "3.2.0"
@ -402,7 +402,7 @@ orchestrator-api = "v1"
**No Orchestrator Needed:** **No Orchestrator Needed:**
```bash ```
provisioning server list provisioning server list
# Flow: # Flow:
@ -414,7 +414,7 @@ CLI → servers/list.nu → Query state → Return results
**Using Orchestrator:** **Using Orchestrator:**
```bash ```
provisioning server create --orchestrated --infra wuji provisioning server create --orchestrated --infra wuji
# Detailed Flow: # Detailed Flow:
@ -466,7 +466,7 @@ provisioning server create --orchestrated --infra wuji
**Complex Workflow:** **Complex Workflow:**
```bash ```
provisioning batch submit multi-cloud-deployment.ncl provisioning batch submit multi-cloud-deployment.ncl
# Workflow contains: # Workflow contains:
@ -548,7 +548,7 @@ provisioning batch submit multi-cloud-deployment.ncl
1. **Reliable State Management** 1. **Reliable State Management**
```plaintext ```
Orchestrator maintains: Orchestrator maintains:
- Task queue (survives crashes) - Task queue (survives crashes)
- Workflow checkpoints (resume on failure) - Workflow checkpoints (resume on failure)
@ -558,7 +558,7 @@ provisioning batch submit multi-cloud-deployment.ncl
1. **Clean Separation** 1. **Clean Separation**
```plaintext ```
Orchestrator (Rust): Performance, concurrency, state Orchestrator (Rust): Performance, concurrency, state
Business Logic (Nushell): Providers, taskservs, workflows Business Logic (Nushell): Providers, taskservs, workflows
@ -594,7 +594,7 @@ provisioning batch submit multi-cloud-deployment.ncl
**User installs bundle:** **User installs bundle:**
```bash ```
curl -fsSL https://get.provisioning.io | sh curl -fsSL https://get.provisioning.io | sh
# Installs: # Installs:
@ -614,7 +614,7 @@ curl -fsSL https://get.provisioning.io | sh
**Core package expects orchestrator:** **Core package expects orchestrator:**
```nushell ```
# core/nulib/lib_provisioning/orchestrator/client.nu # core/nulib/lib_provisioning/orchestrator/client.nu
# Check if orchestrator is running # Check if orchestrator is running
@ -644,7 +644,7 @@ export def ensure-orchestrator [] {
**Platform package executes core scripts:** **Platform package executes core scripts:**
```rust ```
// platform/orchestrator/src/executor/nushell.rs // platform/orchestrator/src/executor/nushell.rs
pub struct NushellExecutor { pub struct NushellExecutor {
@ -689,7 +689,7 @@ impl NushellExecutor {
**`/usr/local/share/provisioning/config/config.defaults.toml`:** **`/usr/local/share/provisioning/config/config.defaults.toml`:**
```toml ```
[orchestrator] [orchestrator]
enabled = true enabled = true
endpoint = "http://localhost:9090" endpoint = "http://localhost:9090"
@ -722,7 +722,7 @@ force_direct = [
**`/usr/local/share/provisioning/platform/config.toml`:** **`/usr/local/share/provisioning/platform/config.toml`:**
```toml ```
[server] [server]
host = "127.0.0.1" host = "127.0.0.1"
port = 8080 port = 8080
@ -780,7 +780,7 @@ env_vars = { NU_LIB_DIRS = "/usr/local/lib/provisioning" }
The confusing example in the multi-repo doc was **oversimplified**. The real architecture is: The confusing example in the multi-repo doc was **oversimplified**. The real architecture is:
```plaintext ```
✅ Orchestrator IS USED and IS ESSENTIAL ✅ Orchestrator IS USED and IS ESSENTIAL
✅ Platform (Rust) coordinates Core (Nushell) execution ✅ Platform (Rust) coordinates Core (Nushell) execution
✅ Loose coupling via CLI + REST API (not code dependencies) ✅ Loose coupling via CLI + REST API (not code dependencies)

View File

@ -1,29 +1,30 @@
# KCL Package and Module Loader System # Nickel Package and Module Loader System
This document describes the new package-based architecture implemented for the provisioning system, replacing hardcoded extension paths with a This document describes the package-based architecture implemented for the provisioning system, replacing hardcoded extension paths with a
flexible module discovery and loading system. flexible module discovery and loading system using Nickel for type-safe configuration.
## Architecture Overview ## Architecture Overview
The new system consists of two main components: The system consists of two main components:
1. **Core KCL Package**: Distributable core provisioning schemas 1. **Core Nickel Package**: Distributable core provisioning schemas with type safety
2. **Module Loader System**: Dynamic discovery and loading of extensions 2. **Module Loader System**: Dynamic discovery and loading of extensions
### Benefits ### Benefits
- **Type-Safe Configuration**: Nickel ensures configuration validity at evaluation time
- **Clean Separation**: Core package is self-contained and distributable - **Clean Separation**: Core package is self-contained and distributable
- **Plug-and-Play Extensions**: Taskservs, providers, and clusters can be loaded dynamically - **Plug-and-Play Extensions**: Taskservs, providers, and clusters can be loaded dynamically
- **Version Management**: Core package and extensions can be versioned independently - **Version Management**: Core package and extensions can be versioned independently
- **Developer Friendly**: Easy workspace setup and module management - **Developer Friendly**: Easy workspace setup and module management with lazy evaluation
## Components ## Components
### 1. Core KCL Package (`/provisioning/kcl/`) ### 1. Core Nickel Package (`/provisioning/schemas/`)
Contains fundamental schemas for provisioning: Contains fundamental schemas for provisioning:
- `settings.ncl` - System settings and configuration - `main.ncl` - Primary provisioning configuration
- `server.ncl` - Server definitions and schemas - `server.ncl` - Server definitions and schemas
- `defaults.ncl` - Default configurations - `defaults.ncl` - Default configurations
- `lib.ncl` - Common library schemas - `lib.ncl` - Common library schemas
@ -33,13 +34,14 @@ Contains fundamental schemas for provisioning:
- No hardcoded extension paths - No hardcoded extension paths
- Self-contained and distributable - Self-contained and distributable
- Package-based imports only - Type-safe package-based imports
- Lazy evaluation of expensive computations
### 2. Module Discovery System ### 2. Module Discovery System
#### Discovery Commands #### Discovery Commands
```bash ```
# Discover available modules # Discover available modules
module-loader discover taskservs # List all taskservs module-loader discover taskservs # List all taskservs
module-loader discover providers --format yaml # List providers as YAML module-loader discover providers --format yaml # List providers as YAML
@ -56,7 +58,7 @@ module-loader discover clusters redis # Search for redis clusters
#### Loading Commands #### Loading Commands
```bash ```
# Load modules into workspace # Load modules into workspace
module-loader load taskservs . [kubernetes, cilium, containerd] module-loader load taskservs . [kubernetes, cilium, containerd]
module-loader load providers . [upcloud] module-loader load providers . [upcloud]
@ -79,7 +81,7 @@ module-loader init workspace/infra/production \
### New Workspace Layout ### New Workspace Layout
```plaintext ```
workspace/infra/my-project/ workspace/infra/my-project/
├── kcl.mod # Package dependencies ├── kcl.mod # Package dependencies
├── servers.ncl # Main server configuration ├── servers.ncl # Main server configuration
@ -108,7 +110,7 @@ workspace/infra/my-project/
#### Before (Old System) #### Before (Old System)
```kcl ```
# Hardcoded relative paths # Hardcoded relative paths
import ../../../kcl/server as server import ../../../kcl/server as server
import ../../../extensions/taskservs/kubernetes/kcl/kubernetes as k8s import ../../../extensions/taskservs/kubernetes/kcl/kubernetes as k8s
@ -116,7 +118,7 @@ import ../../../extensions/taskservs/kubernetes/kcl/kubernetes as k8s
#### After (New System) #### After (New System)
```kcl ```
# Package-based imports # Package-based imports
import provisioning.server as server import provisioning.server as server
@ -128,7 +130,7 @@ import .taskservs.kubernetes.kubernetes as k8s
### Building Core Package ### Building Core Package
```bash ```
# Build distributable package # Build distributable package
./provisioning/tools/kcl-packager.nu build --version 1.0.0 ./provisioning/tools/kcl-packager.nu build --version 1.0.0
@ -143,21 +145,21 @@ import .taskservs.nclubernetes.kubernetes as k8s
#### Method 1: Local Installation (Recommended for development) #### Method 1: Local Installation (Recommended for development)
```toml ```
[dependencies] [dependencies]
provisioning = { path = "~/.kcl/packages/provisioning", version = "0.0.1" } provisioning = { path = "~/.kcl/packages/provisioning", version = "0.0.1" }
``` ```
#### Method 2: Git Repository (For distributed teams) #### Method 2: Git Repository (For distributed teams)
```toml ```
[dependencies] [dependencies]
provisioning = { git = "https://github.com/your-org/provisioning-kcl", version = "v0.0.1" } provisioning = { git = "https://github.com/your-org/provisioning-kcl", version = "v0.0.1" }
``` ```
#### Method 3: KCL Registry (When available) #### Method 3: KCL Registry (When available)
```toml ```
[dependencies] [dependencies]
provisioning = { version = "0.0.1" } provisioning = { version = "0.0.1" }
``` ```
@ -166,7 +168,7 @@ provisioning = { version = "0.0.1" }
### 1. New Project Setup ### 1. New Project Setup
```bash ```
# Create workspace from template # Create workspace from template
cp -r provisioning/templates/workspaces/kubernetes ./my-k8s-cluster cp -r provisioning/templates/workspaces/kubernetes ./my-k8s-cluster
cd my-k8s-cluster cd my-k8s-cluster
@ -185,7 +187,7 @@ provisioning server create --infra . --check
### 2. Extension Development ### 2. Extension Development
```bash ```
# Create new taskserv # Create new taskserv
mkdir -p extensions/taskservs/my-service/kcl mkdir -p extensions/taskservs/my-service/kcl
cd extensions/taskservs/my-service/kcl cd extensions/taskservs/my-service/kcl
@ -200,7 +202,7 @@ module-loader discover taskservs # Should find your service
### 3. Workspace Migration ### 3. Workspace Migration
```bash ```
# Analyze existing workspace # Analyze existing workspace
workspace-migrate.nu workspace/infra/old-project dry-run workspace-migrate.nu workspace/infra/old-project dry-run
@ -213,7 +215,7 @@ module-loader validate workspace/infra/old-project
### 4. Multi-Environment Management ### 4. Multi-Environment Management
```bash ```
# Development environment # Development environment
cd workspace/infra/dev cd workspace/infra/dev
module-loader load taskservs . [redis, postgres] module-loader load taskservs . [redis, postgres]
@ -229,7 +231,7 @@ module-loader load providers . [upcloud, aws] # Multi-cloud
### Listing and Validation ### Listing and Validation
```bash ```
# List loaded modules # List loaded modules
module-loader list taskservs . module-loader list taskservs .
module-loader list providers . module-loader list providers .
@ -244,7 +246,7 @@ workspace-init.nu . info
### Unloading Modules ### Unloading Modules
```bash ```
# Remove specific modules # Remove specific modules
module-loader unload taskservs . redis module-loader unload taskservs . redis
module-loader unload providers . aws module-loader unload providers . aws
@ -254,7 +256,7 @@ module-loader unload providers . aws
### Module Information ### Module Information
```bash ```
# Get detailed module info # Get detailed module info
module-loader info taskservs kubernetes module-loader info taskservs kubernetes
module-loader info providers upcloud module-loader info providers upcloud
@ -265,7 +267,7 @@ module-loader info clusters buildkit
### Pipeline Example ### Pipeline Example
```bash ```
#!/usr/bin/env nu #!/usr/bin/env nu
# deploy-pipeline.nu # deploy-pipeline.nu
@ -290,13 +292,13 @@ provisioning server create --infra $env.WORKSPACE_PATH
#### Module Import Errors #### Module Import Errors
```plaintext ```
Error: module not found Error: module not found
``` ```
**Solution**: Verify modules are loaded and regenerate imports **Solution**: Verify modules are loaded and regenerate imports
```bash ```
module-loader list taskservs . module-loader list taskservs .
module-loader load taskservs . [kubernetes, cilium, containerd] module-loader load taskservs . [kubernetes, cilium, containerd]
``` ```
@ -309,14 +311,14 @@ module-loader load taskservs . [kubernetes, cilium, containerd]
**Solution**: Verify core package installation and kcl.mod configuration **Solution**: Verify core package installation and kcl.mod configuration
```bash ```
kcl-packager.nu install --version latest kcl-packager.nu install --version latest
kcl run --dry-run servers.ncl kcl run --dry-run servers.ncl
``` ```
### Debug Commands ### Debug Commands
```bash ```
# Show workspace structure # Show workspace structure
tree -a workspace/infra/my-project tree -a workspace/infra/my-project
@ -362,25 +364,25 @@ For existing workspaces, follow these steps:
### 1. Backup Current Workspace ### 1. Backup Current Workspace
```bash ```
cp -r workspace/infra/existing workspace/infra/existing-backup cp -r workspace/infra/existing workspace/infra/existing-backup
``` ```
### 2. Analyze Migration Requirements ### 2. Analyze Migration Requirements
```bash ```
workspace-migrate.nu workspace/infra/existing dry-run workspace-migrate.nu workspace/infra/existing dry-run
``` ```
### 3. Perform Migration ### 3. Perform Migration
```bash ```
workspace-migrate.nu workspace/infra/existing workspace-migrate.nu workspace/infra/existing
``` ```
### 4. Load Required Modules ### 4. Load Required Modules
```bash ```
cd workspace/infra/existing cd workspace/infra/existing
module-loader load taskservs . [kubernetes, cilium] module-loader load taskservs . [kubernetes, cilium]
module-loader load providers . [upcloud] module-loader load providers . [upcloud]
@ -388,14 +390,14 @@ module-loader load providers . [upcloud]
### 5. Test and Validate ### 5. Test and Validate
```bash ```
kcl run servers.ncl kcl run servers.ncl
module-loader validate . module-loader validate .
``` ```
### 6. Deploy ### 6. Deploy
```bash ```
provisioning server create --infra . --check provisioning server create --infra . --check
``` ```

View File

@ -70,7 +70,7 @@ workflow, and user-friendly distribution.
### 1. Monorepo Structure ### 1. Monorepo Structure
```plaintext ```
project-provisioning/ project-provisioning/
├── provisioning/ # CORE SYSTEM (distribution source) ├── provisioning/ # CORE SYSTEM (distribution source)
@ -246,7 +246,7 @@ project-provisioning/
**Installation:** **Installation:**
```bash ```
/usr/local/ /usr/local/
├── bin/ ├── bin/
│ └── provisioning │ └── provisioning
@ -275,7 +275,7 @@ project-provisioning/
**Installation:** **Installation:**
```bash ```
/usr/local/ /usr/local/
├── bin/ ├── bin/
│ ├── provisioning-orchestrator │ ├── provisioning-orchestrator
@ -297,7 +297,7 @@ project-provisioning/
**Installation:** **Installation:**
```bash ```
/usr/local/lib/provisioning/extensions/ /usr/local/lib/provisioning/extensions/
├── taskservs/ ├── taskservs/
├── clusters/ ├── clusters/
@ -317,7 +317,7 @@ project-provisioning/
**Installation:** **Installation:**
```bash ```
~/.config/nushell/plugins/ ~/.config/nushell/plugins/
``` ```
@ -325,7 +325,7 @@ project-provisioning/
#### System Installation (Root) #### System Installation (Root)
```bash ```
/usr/local/ /usr/local/
├── bin/ ├── bin/
│ ├── provisioning # Main CLI │ ├── provisioning # Main CLI
@ -351,7 +351,7 @@ project-provisioning/
#### User Configuration #### User Configuration
```bash ```
~/.provisioning/ ~/.provisioning/
├── config/ ├── config/
│ └── config.user.toml # User overrides │ └── config.user.toml # User overrides
@ -365,7 +365,7 @@ project-provisioning/
#### Project Workspace #### Project Workspace
```bash ```
./workspace/ ./workspace/
├── infra/ # Infrastructure definitions ├── infra/ # Infrastructure definitions
│ ├── my-cluster/ │ ├── my-cluster/
@ -384,7 +384,7 @@ project-provisioning/
### Configuration Hierarchy ### Configuration Hierarchy
```plaintext ```
Priority (highest to lowest): Priority (highest to lowest):
1. CLI flags --debug, --infra=my-cluster 1. CLI flags --debug, --infra=my-cluster
2. Runtime overrides PROVISIONING_DEBUG=true 2. Runtime overrides PROVISIONING_DEBUG=true
@ -401,7 +401,7 @@ Priority (highest to lowest):
**`provisioning/tools/build/`:** **`provisioning/tools/build/`:**
```plaintext ```
build/ build/
├── build-system.nu # Main build orchestrator ├── build-system.nu # Main build orchestrator
├── package-core.nu # Core packaging ├── package-core.nu # Core packaging
@ -417,7 +417,7 @@ build/
**`provisioning/tools/build/build-system.nu`:** **`provisioning/tools/build/build-system.nu`:**
```nushell ```
#!/usr/bin/env nu #!/usr/bin/env nu
# Build system for provisioning project # Build system for provisioning project
@ -595,7 +595,7 @@ export def "main status" [] {
**`Justfile`:** **`Justfile`:**
```makefile ```
# Provisioning Build System # Provisioning Build System
# Use 'just --list' to see all available commands # Use 'just --list' to see all available commands
@ -727,7 +727,7 @@ audit:
**`distribution/installers/install.nu`:** **`distribution/installers/install.nu`:**
```nushell ```
#!/usr/bin/env nu #!/usr/bin/env nu
# Provisioning installation script # Provisioning installation script
@ -984,7 +984,7 @@ export def "main upgrade" [
**`distribution/installers/install.sh`:** **`distribution/installers/install.sh`:**
```bash ```
#!/usr/bin/env bash #!/usr/bin/env bash
# Provisioning installation script (Bash version) # Provisioning installation script (Bash version)
# This script installs Nushell first, then runs the Nushell installer # This script installs Nushell first, then runs the Nushell installer
@ -1111,7 +1111,7 @@ main "$@"
**Commands:** **Commands:**
```bash ```
# Backup current state # Backup current state
cp -r /Users/Akasha/project-provisioning /Users/Akasha/project-provisioning.backup cp -r /Users/Akasha/project-provisioning /Users/Akasha/project-provisioning.backup
@ -1136,7 +1136,7 @@ fd workspace -t d > workspace-dirs.txt
**Commands:** **Commands:**
```bash ```
# Create distribution directory # Create distribution directory
mkdir -p distribution/{packages,installers,registry} mkdir -p distribution/{packages,installers,registry}
@ -1410,7 +1410,7 @@ rm -rf NO/ wrks/ presentations/
#### Option 1: Clean Migration #### Option 1: Clean Migration
```bash ```
# Backup current workspace # Backup current workspace
cp -r workspace workspace.backup cp -r workspace workspace.backup
@ -1423,7 +1423,7 @@ provisioning workspace migrate --from workspace.backup --to workspace/
#### Option 2: In-Place Migration #### Option 2: In-Place Migration
```bash ```
# Run migration script # Run migration script
provisioning migrate --check # Dry run provisioning migrate --check # Dry run
provisioning migrate # Execute migration provisioning migrate # Execute migration
@ -1431,7 +1431,7 @@ provisioning migrate # Execute migration
### For Developers ### For Developers
```bash ```
# Pull latest changes # Pull latest changes
git pull origin main git pull origin main

View File

@ -11,7 +11,7 @@ The system solves fundamental technical challenges through architectural innovat
### System Diagram ### System Diagram
```plaintext ```
┌─────────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────────┐
│ User Interface Layer │ │ User Interface Layer │
├─────────────────┬─────────────────┬─────────────────────────────┤ ├─────────────────┬─────────────────┬─────────────────────────────┤
@ -149,7 +149,7 @@ The system solves fundamental technical challenges through architectural innovat
**Nickel Workflow Definitions**: **Nickel Workflow Definitions**:
```nickel ```
{ {
batch_workflow = { batch_workflow = {
name = "multi_cloud_deployment", name = "multi_cloud_deployment",
@ -247,14 +247,14 @@ The system solves fundamental technical challenges through architectural innovat
### Configuration Resolution Flow ### Configuration Resolution Flow
```plaintext ```
1. Workspace Discovery → 2. Configuration Loading → 3. Hierarchy Merge → 1. Workspace Discovery → 2. Configuration Loading → 3. Hierarchy Merge →
4. Variable Interpolation → 5. Schema Validation → 6. Runtime Application 4. Variable Interpolation → 5. Schema Validation → 6. Runtime Application
``` ```
### Workflow Execution Flow ### Workflow Execution Flow
```plaintext ```
1. Workflow Submission → 2. Dependency Analysis → 3. Task Scheduling → 1. Workflow Submission → 2. Dependency Analysis → 3. Task Scheduling →
4. Parallel Execution → 5. State Tracking → 6. Result Aggregation → 4. Parallel Execution → 5. State Tracking → 6. Result Aggregation →
7. Error Handling → 8. Cleanup/Rollback 7. Error Handling → 8. Cleanup/Rollback
@ -262,7 +262,7 @@ The system solves fundamental technical challenges through architectural innovat
### Provider Integration Flow ### Provider Integration Flow
```plaintext ```
1. Provider Discovery → 2. Configuration Validation → 3. Authentication → 1. Provider Discovery → 2. Configuration Validation → 3. Authentication →
4. Resource Planning → 5. Operation Execution → 6. State Persistence → 4. Resource Planning → 5. Operation Execution → 6. State Persistence →
7. Result Reporting 7. Result Reporting

View File

@ -11,7 +11,7 @@
TypeDialog generates **type-safe interactive forms** from configuration schemas with **bidirectional Nickel integration**. TypeDialog generates **type-safe interactive forms** from configuration schemas with **bidirectional Nickel integration**.
```plaintext ```
Nickel Schema Nickel Schema
TypeDialog Form (Auto-generated) TypeDialog Form (Auto-generated)
@ -27,7 +27,7 @@ Nickel output config (Type-safe)
### Three Layers ### Three Layers
```plaintext ```
CLI/TUI/Web Layer CLI/TUI/Web Layer
TypeDialog Form Engine TypeDialog Form Engine
@ -39,7 +39,7 @@ Schema Contracts
### Data Flow ### Data Flow
```plaintext ```
Input (Nickel) Input (Nickel)
Form Definition (TOML) Form Definition (TOML)
@ -59,7 +59,7 @@ Output (JSON/YAML/TOML/Nickel)
### Installation ### Installation
```bash ```
# Clone TypeDialog # Clone TypeDialog
git clone https://github.com/jesusperezlorenzo/typedialog.git git clone https://github.com/jesusperezlorenzo/typedialog.git
cd typedialog cd typedialog
@ -73,7 +73,7 @@ cargo install --path ./crates/typedialog
### Verify Installation ### Verify Installation
```bash ```
typedialog --version typedialog --version
typedialog --help typedialog --help
``` ```
@ -84,7 +84,7 @@ typedialog --help
### Step 1: Define Nickel Schema ### Step 1: Define Nickel Schema
```nickel ```
# server_config.ncl # server_config.ncl
let contracts = import "./contracts.ncl" in let contracts = import "./contracts.ncl" in
let defaults = import "./defaults.ncl" in let defaults = import "./defaults.ncl" in
@ -101,7 +101,7 @@ let defaults = import "./defaults.ncl" in
### Step 2: Define TypeDialog Form (TOML) ### Step 2: Define TypeDialog Form (TOML)
```toml ```
# server_form.toml # server_form.toml
[form] [form]
title = "Server Configuration" title = "Server Configuration"
@ -155,13 +155,13 @@ help = "Select applicable tags"
### Step 3: Render Form (CLI) ### Step 3: Render Form (CLI)
```bash ```
typedialog form --config server_form.toml --backend cli typedialog form --config server_form.toml --backend cli
``` ```
**Output**: **Output**:
```plaintext ```
Server Configuration Server Configuration
Create a new server configuration Create a new server configuration
@ -179,14 +179,14 @@ Create a new server configuration
### Step 4: Validate Against Nickel Schema ### Step 4: Validate Against Nickel Schema
```bash ```
# Validation happens automatically # Validation happens automatically
# If input matches Nickel contract, proceeds to output # If input matches Nickel contract, proceeds to output
``` ```
### Step 5: Output to Nickel ### Step 5: Output to Nickel
```bash ```
typedialog form \ typedialog form \
--config server_form.toml \ --config server_form.toml \
--output nickel \ --output nickel \
@ -195,7 +195,7 @@ typedialog form \
**Output file** (`server_config_output.ncl`): **Output file** (`server_config_output.ncl`):
```nickel ```
{ {
server_name = "web-01", server_name = "web-01",
cpu_cores = 4, cpu_cores = 4,
@ -216,7 +216,7 @@ You want an interactive CLI wizard for infrastructure provisioning.
### Step 1: Define Nickel Schema for Infrastructure ### Step 1: Define Nickel Schema for Infrastructure
```nickel ```
# infrastructure_schema.ncl # infrastructure_schema.ncl
{ {
InfrastructureConfig = { InfrastructureConfig = {
@ -245,7 +245,7 @@ You want an interactive CLI wizard for infrastructure provisioning.
### Step 2: Create Comprehensive Form ### Step 2: Create Comprehensive Form
```toml ```
# infrastructure_wizard.toml # infrastructure_wizard.toml
[form] [form]
title = "Infrastructure Provisioning Wizard" title = "Infrastructure Provisioning Wizard"
@ -334,7 +334,7 @@ placeholder = "admin@company.com"
### Step 3: Run Interactive Wizard ### Step 3: Run Interactive Wizard
```bash ```
typedialog form \ typedialog form \
--config infrastructure_wizard.toml \ --config infrastructure_wizard.toml \
--backend tui \ --backend tui \
@ -343,7 +343,7 @@ typedialog form \
**Output** (`infrastructure_config.ncl`): **Output** (`infrastructure_config.ncl`):
```nickel ```
{ {
workspace_name = "production-eu", workspace_name = "production-eu",
deployment_mode = 'enterprise, deployment_mode = 'enterprise,
@ -358,7 +358,7 @@ typedialog form \
### Step 4: Use Output in Infrastructure ### Step 4: Use Output in Infrastructure
```nickel ```
# main_infrastructure.ncl # main_infrastructure.ncl
let config = import "./infrastructure_config.ncl" in let config = import "./infrastructure_config.ncl" in
let schemas = import "../../provisioning/schemas/main.ncl" in let schemas = import "../../provisioning/schemas/main.ncl" in
@ -398,7 +398,7 @@ let schemas = import "../../provisioning/schemas/main.ncl" in
### Form Definition (Advanced) ### Form Definition (Advanced)
```toml ```
# server_advanced_form.toml # server_advanced_form.toml
[form] [form]
title = "Server Configuration" title = "Server Configuration"
@ -532,7 +532,7 @@ options = ["production", "staging", "testing", "development"]
### Output Structure ### Output Structure
```nickel ```
{ {
# Basic # Basic
server_name = "web-prod-01", server_name = "web-prod-01",
@ -562,7 +562,7 @@ options = ["production", "staging", "testing", "development"]
### TypeDialog REST Endpoints ### TypeDialog REST Endpoints
```bash ```
# Start TypeDialog server # Start TypeDialog server
typedialog server --port 8080 typedialog server --port 8080
@ -574,7 +574,7 @@ curl -X POST http://localhost:8080/forms \
### Response Format ### Response Format
```json ```
{ {
"form_id": "srv_abc123", "form_id": "srv_abc123",
"status": "rendered", "status": "rendered",
@ -592,7 +592,7 @@ curl -X POST http://localhost:8080/forms \
### Submit Form ### Submit Form
```bash ```
curl -X POST http://localhost:8080/forms/srv_abc123/submit \ curl -X POST http://localhost:8080/forms/srv_abc123/submit \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
-d '{ -d '{
@ -607,7 +607,7 @@ curl -X POST http://localhost:8080/forms/srv_abc123/submit \
### Response ### Response
```json ```
{ {
"status": "success", "status": "success",
"validation": "passed", "validation": "passed",
@ -631,7 +631,7 @@ curl -X POST http://localhost:8080/forms/srv_abc123/submit \
TypeDialog validates user input against Nickel contracts: TypeDialog validates user input against Nickel contracts:
```nickel ```
# Nickel contract # Nickel contract
ServerConfig = { ServerConfig = {
cpu_cores | Number, # Must be number cpu_cores | Number, # Must be number
@ -645,7 +645,7 @@ ServerConfig = {
### Validation Rules in Form ### Validation Rules in Form
```toml ```
[[fields]] [[fields]]
name = "cpu_cores" name = "cpu_cores"
type = "number" type = "number"
@ -661,7 +661,7 @@ help = "Must be 1-32 cores"
### Use Case: Infrastructure Initialization ### Use Case: Infrastructure Initialization
```bash ```
# 1. User runs initialization # 1. User runs initialization
provisioning init --wizard provisioning init --wizard
@ -679,7 +679,7 @@ provisioning init --wizard
### Implementation in Nushell ### Implementation in Nushell
```nushell ```
# provisioning/core/nulib/provisioning_init.nu # provisioning/core/nulib/provisioning_init.nu
def provisioning_init_wizard [] { def provisioning_init_wizard [] {
@ -714,7 +714,7 @@ def provisioning_init_wizard [] {
Show/hide fields based on user selections: Show/hide fields based on user selections:
```toml ```
[[fields]] [[fields]]
name = "backup_retention" name = "backup_retention"
label = "Backup Retention (days)" label = "Backup Retention (days)"
@ -726,7 +726,7 @@ visible_if = "enable_backup == true" # Only shown if backup enabled
Set defaults based on other fields: Set defaults based on other fields:
```toml ```
[[fields]] [[fields]]
name = "deployment_mode" name = "deployment_mode"
type = "select" type = "select"
@ -741,7 +741,7 @@ default_from = "deployment_mode" # Can reference other fields
### Custom Validation ### Custom Validation
```toml ```
[[fields]] [[fields]]
name = "memory_gb" name = "memory_gb"
type = "number" type = "number"
@ -755,7 +755,7 @@ help = "Memory must be at least 2 GB per CPU core"
TypeDialog can output to multiple formats: TypeDialog can output to multiple formats:
```bash ```
# Output to Nickel (recommended for IaC) # Output to Nickel (recommended for IaC)
typedialog form --config form.toml --output nickel typedialog form --config form.toml --output nickel
@ -777,7 +777,7 @@ TypeDialog supports three rendering backends:
### 1. CLI (Command-line prompts) ### 1. CLI (Command-line prompts)
```bash ```
typedialog form --config form.toml --backend cli typedialog form --config form.toml --backend cli
``` ```
@ -786,7 +786,7 @@ typedialog form --config form.toml --backend cli
### 2. TUI (Terminal User Interface - Ratatui) ### 2. TUI (Terminal User Interface - Ratatui)
```bash ```
typedialog form --config form.toml --backend tui typedialog form --config form.toml --backend tui
``` ```
@ -795,7 +795,7 @@ typedialog form --config form.toml --backend tui
### 3. Web (HTTP Server - Axum) ### 3. Web (HTTP Server - Axum)
```bash ```
typedialog form --config form.toml --backend web --port 3000 typedialog form --config form.toml --backend web --port 3000
# Opens http://localhost:3000 # Opens http://localhost:3000
``` ```
@ -813,7 +813,7 @@ typedialog form --config form.toml --backend web --port 3000
**Solution**: Verify field definitions match Nickel schema: **Solution**: Verify field definitions match Nickel schema:
```toml ```
# Form field # Form field
[[fields]] [[fields]]
name = "cpu_cores" # Must match Nickel field name name = "cpu_cores" # Must match Nickel field name
@ -826,7 +826,7 @@ type = "number" # Must match Nickel type
**Solution**: Add help text and validation rules: **Solution**: Add help text and validation rules:
```toml ```
[[fields]] [[fields]]
name = "cpu_cores" name = "cpu_cores"
validation_pattern = "^[1-9][0-9]*$" validation_pattern = "^[1-9][0-9]*$"
@ -839,7 +839,7 @@ help = "Must be positive integer"
**Solution**: Ensure all required fields in form: **Solution**: Ensure all required fields in form:
```toml ```
[[fields]] [[fields]]
name = "required_field" name = "required_field"
required = true # User must provide value required = true # User must provide value
@ -851,7 +851,7 @@ required = true # User must provide value
### Step 1: Define Nickel Schema ### Step 1: Define Nickel Schema
```nickel ```
# workspace_schema.ncl # workspace_schema.ncl
{ {
workspace = { workspace = {
@ -866,7 +866,7 @@ required = true # User must provide value
### Step 2: Define Form ### Step 2: Define Form
```toml ```
# workspace_form.toml # workspace_form.toml
[[fields]] [[fields]]
name = "name" name = "name"
@ -895,14 +895,14 @@ required = true
### Step 3: User Interaction ### Step 3: User Interaction
```bash ```
$ typedialog form --config workspace_form.toml --backend tui $ typedialog form --config workspace_form.toml --backend tui
# User fills form interactively # User fills form interactively
``` ```
### Step 4: Output ### Step 4: Output
```nickel ```
{ {
workspace = { workspace = {
name = "production", name = "production",
@ -916,7 +916,7 @@ $ typedialog form --config workspace_form.toml --backend tui
### Step 5: Use in Provisioning ### Step 5: Use in Provisioning
```nickel ```
# main.ncl # main.ncl
let config = import "./workspace.ncl" in let config = import "./workspace.ncl" in
let schemas = import "provisioning/schemas/main.ncl" in let schemas = import "provisioning/schemas/main.ncl" in

View File

@ -10,7 +10,7 @@ The new configuration system includes comprehensive schema validation to catch e
Ensures all required fields are present: Ensures all required fields are present:
```toml ```
# Schema definition # Schema definition
[required] [required]
fields = ["name", "version", "enabled"] fields = ["name", "version", "enabled"]
@ -30,7 +30,7 @@ version = "1.0.0"
Validates field types: Validates field types:
```toml ```
# Schema # Schema
[fields.port] [fields.port]
type = "int" type = "int"
@ -54,7 +54,7 @@ port = "8080" # Error: Expected int, got string
Restricts values to predefined set: Restricts values to predefined set:
```toml ```
# Schema # Schema
[fields.environment] [fields.environment]
type = "string" type = "string"
@ -71,7 +71,7 @@ environment = "production" # Error: Must be one of: dev, staging, prod
Validates numeric ranges: Validates numeric ranges:
```toml ```
# Schema # Schema
[fields.port] [fields.port]
type = "int" type = "int"
@ -92,7 +92,7 @@ port = 70000 # Error: Must be <= 65535
Validates string patterns using regex: Validates string patterns using regex:
```toml ```
# Schema # Schema
[fields.email] [fields.email]
type = "string" type = "string"
@ -109,7 +109,7 @@ email = "not-an-email" # Error: Does not match pattern
Warns about deprecated configuration: Warns about deprecated configuration:
```toml ```
# Schema # Schema
[deprecated] [deprecated]
fields = ["old_field"] fields = ["old_field"]
@ -125,7 +125,7 @@ old_field = "value" # Warning: old_field is deprecated. Use new_field instead.
### Command Line ### Command Line
```bash ```
# Validate workspace config # Validate workspace config
provisioning workspace config validate provisioning workspace config validate
@ -141,7 +141,7 @@ provisioning workspace config validate --verbose
### Programmatic Usage ### Programmatic Usage
```nushell ```
use provisioning/core/nulib/lib_provisioning/config/schema_validator.nu * use provisioning/core/nulib/lib_provisioning/config/schema_validator.nu *
# Load config # Load config
@ -171,7 +171,7 @@ if ($result.warnings | length) > 0 {
### Pretty Print Results ### Pretty Print Results
```nushell ```
# Validate and print formatted results # Validate and print formatted results
let result = (validate-workspace-config $config) let result = (validate-workspace-config $config)
print-validation-results $result print-validation-results $result
@ -183,7 +183,7 @@ print-validation-results $result
File: `/Users/Akasha/project-provisioning/provisioning/config/workspace.schema.toml` File: `/Users/Akasha/project-provisioning/provisioning/config/workspace.schema.toml`
```toml ```
[required] [required]
fields = ["workspace", "paths"] fields = ["workspace", "paths"]
@ -222,7 +222,7 @@ enum = ["debug", "info", "warn", "error"]
File: `/Users/Akasha/project-provisioning/provisioning/extensions/providers/aws/config.schema.toml` File: `/Users/Akasha/project-provisioning/provisioning/extensions/providers/aws/config.schema.toml`
```toml ```
[required] [required]
fields = ["provider", "credentials"] fields = ["provider", "credentials"]
@ -279,7 +279,7 @@ old_region_field = "provider.region"
File: `/Users/Akasha/project-provisioning/provisioning/platform/orchestrator/config.schema.toml` File: `/Users/Akasha/project-provisioning/provisioning/platform/orchestrator/config.schema.toml`
```toml ```
[required] [required]
fields = ["service", "server"] fields = ["service", "server"]
@ -325,7 +325,7 @@ type = "string"
File: `/Users/Akasha/project-provisioning/provisioning/core/services/kms/config.schema.toml` File: `/Users/Akasha/project-provisioning/provisioning/core/services/kms/config.schema.toml`
```toml ```
[required] [required]
fields = ["kms", "encryption"] fields = ["kms", "encryption"]
@ -372,7 +372,7 @@ old_kms_type = "kms.provider"
### 1. Development ### 1. Development
```bash ```
# Create new config # Create new config
vim ~/workspaces/dev/config/provisioning.yaml vim ~/workspaces/dev/config/provisioning.yaml
@ -386,7 +386,7 @@ provisioning workspace config validate
### 2. CI/CD Pipeline ### 2. CI/CD Pipeline
```yaml ```
# GitLab CI # GitLab CI
validate-config: validate-config:
stage: validate stage: validate
@ -402,7 +402,7 @@ validate-config:
### 3. Pre-Deployment ### 3. Pre-Deployment
```bash ```
# Validate all configurations before deployment # Validate all configurations before deployment
provisioning workspace config validate --verbose provisioning workspace config validate --verbose
provisioning provider validate --all provisioning provider validate --all
@ -418,7 +418,7 @@ fi
### Clear Error Format ### Clear Error Format
```plaintext ```
❌ Validation failed ❌ Validation failed
Errors: Errors:
@ -445,7 +445,7 @@ Each error includes:
### Pattern 1: Hostname Validation ### Pattern 1: Hostname Validation
```toml ```
[fields.hostname] [fields.hostname]
type = "string" type = "string"
pattern = "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$" pattern = "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
@ -453,7 +453,7 @@ pattern = "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
### Pattern 2: Email Validation ### Pattern 2: Email Validation
```toml ```
[fields.email] [fields.email]
type = "string" type = "string"
pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$" pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
@ -461,7 +461,7 @@ pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
### Pattern 3: Semantic Version ### Pattern 3: Semantic Version
```toml ```
[fields.version] [fields.version]
type = "string" type = "string"
pattern = "^\\d+\\.\\d+\\.\\d+(-[a-zA-Z0-9]+)?$" pattern = "^\\d+\\.\\d+\\.\\d+(-[a-zA-Z0-9]+)?$"
@ -469,7 +469,7 @@ pattern = "^\\d+\\.\\d+\\.\\d+(-[a-zA-Z0-9]+)?$"
### Pattern 4: URL Validation ### Pattern 4: URL Validation
```toml ```
[fields.url] [fields.url]
type = "string" type = "string"
pattern = "^https?://[a-zA-Z0-9.-]+(:[0-9]+)?(/.*)?$" pattern = "^https?://[a-zA-Z0-9.-]+(:[0-9]+)?(/.*)?$"
@ -477,7 +477,7 @@ pattern = "^https?://[a-zA-Z0-9.-]+(:[0-9]+)?(/.*)?$"
### Pattern 5: IPv4 Address ### Pattern 5: IPv4 Address
```toml ```
[fields.ip_address] [fields.ip_address]
type = "string" type = "string"
pattern = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$" pattern = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"
@ -485,7 +485,7 @@ pattern = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"
### Pattern 6: AWS Resource ID ### Pattern 6: AWS Resource ID
```toml ```
[fields.instance_id] [fields.instance_id]
type = "string" type = "string"
pattern = "^i-[a-f0-9]{8,17}$" pattern = "^i-[a-f0-9]{8,17}$"
@ -503,14 +503,14 @@ pattern = "^vpc-[a-f0-9]{8,17}$"
### Unit Tests ### Unit Tests
```nushell ```
# Run validation test suite # Run validation test suite
nu provisioning/tests/config_validation_tests.nu nu provisioning/tests/config_validation_tests.nu
``` ```
### Integration Tests ### Integration Tests
```bash ```
# Test with real configs # Test with real configs
provisioning test validate --workspace dev provisioning test validate --workspace dev
provisioning test validate --workspace staging provisioning test validate --workspace staging
@ -519,7 +519,7 @@ provisioning test validate --workspace prod
### Custom Validation ### Custom Validation
```nushell ```
# Create custom validation function # Create custom validation function
def validate-custom-config [config: record] { def validate-custom-config [config: record] {
let result = (validate-workspace-config $config) let result = (validate-workspace-config $config)
@ -543,7 +543,7 @@ def validate-custom-config [config: record] {
### 1. Validate Early ### 1. Validate Early
```bash ```
# Validate during development # Validate during development
provisioning workspace config validate provisioning workspace config validate
@ -552,7 +552,7 @@ provisioning workspace config validate
### 2. Use Strict Schemas ### 2. Use Strict Schemas
```toml ```
# Be explicit about types and constraints # Be explicit about types and constraints
[fields.port] [fields.port]
type = "int" type = "int"
@ -564,7 +564,7 @@ max = 65535
### 3. Document Patterns ### 3. Document Patterns
```toml ```
# Include examples in schema # Include examples in schema
[fields.email] [fields.email]
type = "string" type = "string"
@ -574,7 +574,7 @@ pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
### 4. Handle Deprecation ### 4. Handle Deprecation
```toml ```
# Always provide replacement guidance # Always provide replacement guidance
[deprecated_replacements] [deprecated_replacements]
old_field = "new_field" # Clear migration path old_field = "new_field" # Clear migration path
@ -582,7 +582,7 @@ old_field = "new_field" # Clear migration path
### 5. Test Schemas ### 5. Test Schemas
```nushell ```
# Include test cases in comments # Include test cases in comments
# Valid: "admin@example.com" # Valid: "admin@example.com"
# Invalid: "not-an-email" # Invalid: "not-an-email"
@ -592,7 +592,7 @@ old_field = "new_field" # Clear migration path
### Schema File Not Found ### Schema File Not Found
```bash ```
# Error: Schema file not found: /path/to/schema.toml # Error: Schema file not found: /path/to/schema.toml
# Solution: Ensure schema exists # Solution: Ensure schema exists
@ -601,7 +601,7 @@ ls -la /Users/Akasha/project-provisioning/provisioning/config/*.schema.toml
### Pattern Not Matching ### Pattern Not Matching
```bash ```
# Error: Field hostname does not match pattern # Error: Field hostname does not match pattern
# Debug: Test pattern separately # Debug: Test pattern separately
@ -610,7 +610,7 @@ echo "my-hostname" | grep -E "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
### Type Mismatch ### Type Mismatch
```bash ```
# Error: Expected int, got string # Error: Expected int, got string
# Check config # Check config

View File

@ -1 +0,0 @@
# Workspace Config Architecture

Some files were not shown because too many files have changed in this diff Show More