chore: review docs
parent bc51cd4113
commit db7ba4969b
@@ -1,6 +1,6 @@
[book]
authors = ["Provisioning Platform Team"]
description = "Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust"
description = "Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust"
language = "en"
multilingual = false
src = "src"
@@ -54,7 +54,7 @@ smart-punctuation = true
use-boolean-and = true

[output.html.code.highlightjs]
additional-languages = ["nushell", "toml", "yaml", "bash", "rust", "kcl"]
additional-languages = ["nushell", "toml", "yaml", "bash", "rust", "nickel"]

[output.html.code]
hidelines = {}

@@ -9,7 +9,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -178,7 +178,8 @@
<strong>Decision Makers</strong>: Architecture Team</p>
<hr />
<h2 id="context"><a class="header" href="#context">Context</a></h2>
<p>The Provisioning platform required a comprehensive, enterprise-grade security system covering authentication, authorization, secrets management, MFA, compliance, and emergency access. The system needed to be production-ready, scalable, and compliant with GDPR, SOC2, and ISO 27001.</p>
<p>The Provisioning platform required a comprehensive, enterprise-grade security system covering authentication, authorization, secrets management, MFA,
compliance, and emergency access. The system needed to be production-ready, scalable, and compliant with GDPR, SOC2, and ISO 27001.</p>
<hr />
<h2 id="decision"><a class="header" href="#decision">Decision</a></h2>
<p>Implement a complete security architecture using 12 specialized components organized in 4 implementation groups.</p>
@@ -734,7 +735,7 @@ cargo test --test break_glass_integration_tests
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../../architecture/adr/ADR-010-configuration-format-strategy.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../../architecture/adr/adr-010-configuration-format-strategy.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>

@@ -748,7 +749,7 @@ cargo test --test break_glass_integration_tests
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../../architecture/adr/ADR-010-configuration-format-strategy.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../../architecture/adr/adr-010-configuration-format-strategy.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -174,7 +174,8 @@
<main>
<h1 id="integration-patterns"><a class="header" href="#integration-patterns">Integration Patterns</a></h1>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>Provisioning implements sophisticated integration patterns to coordinate between its hybrid Rust/Nushell architecture, manage multi-provider workflows, and enable extensible functionality. This document outlines the key integration patterns, their implementations, and best practices.</p>
<p>Provisioning implements sophisticated integration patterns to coordinate between its hybrid Rust/Nushell architecture, manage multi-provider
workflows, and enable extensible functionality. This document outlines the key integration patterns, their implementations, and best practices.</p>
<h2 id="core-integration-patterns"><a class="header" href="#core-integration-patterns">Core Integration Patterns</a></h2>
<h3 id="1-hybrid-language-integration"><a class="header" href="#1-hybrid-language-integration">1. Hybrid Language Integration</a></h3>
<h4 id="rust-to-nushell-communication-pattern"><a class="header" href="#rust-to-nushell-communication-pattern">Rust-to-Nushell Communication Pattern</a></h4>
@@ -680,7 +681,8 @@ mod integration_tests {
assert_eq!(result.unwrap().status, WorkflowStatus::Completed);
}
}</code></pre>
<p>These integration patterns provide the foundation for the system’s sophisticated multi-component architecture, enabling reliable, scalable, and maintainable infrastructure automation.</p>
<p>These integration patterns provide the foundation for the system’s sophisticated multi-component architecture, enabling reliable, scalable, and
maintainable infrastructure automation.</p>

</main>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -177,7 +177,9 @@
<strong>Status:</strong> Strategic Analysis
<strong>Related:</strong> <a href="repo-dist-analysis.html">Repository Distribution Analysis</a></p>
<h2 id="executive-summary"><a class="header" href="#executive-summary">Executive Summary</a></h2>
<p>This document analyzes a <strong>multi-repository strategy</strong> as an alternative to the monorepo approach. After careful consideration of the provisioning system’s architecture, a <strong>hybrid approach with 4 core repositories</strong> is recommended, avoiding submodules in favor of a cleaner package-based dependency model.</p>
<p>This document analyzes a <strong>multi-repository strategy</strong> as an alternative to the monorepo approach. After careful consideration of the provisioning
system’s architecture, a <strong>hybrid approach with 4 core repositories</strong> is recommended, avoiding submodules in favor of a cleaner package-based
dependency model.</p>
<hr />
<h2 id="repository-architecture-options"><a class="header" href="#repository-architecture-options">Repository Architecture Options</a></h2>
<h3 id="option-a-pure-monorepo-original-recommendation"><a class="header" href="#option-a-pure-monorepo-original-recommendation">Option A: Pure Monorepo (Original Recommendation)</a></h3>
@@ -1041,7 +1043,8 @@ provisioning-distribution/ (Repo 5, ~30 MB)
</ul>
<p><strong>Avoid:</strong> Submodules (complexity nightmare)</p>
<p><strong>Use:</strong> Package-based dependencies with version compatibility matrix</p>
<p>This architecture scales better for your project’s growth, supports a community extension ecosystem, and provides professional-grade separation of concerns while maintaining integration through a well-designed package system.</p>
<p>This architecture scales better for your project’s growth, supports a community extension ecosystem, and provides professional-grade separation of
concerns while maintaining integration through a well-designed package system.</p>
<hr />
<h2 id="next-steps"><a class="header" href="#next-steps">Next Steps</a></h2>
<ol>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -177,7 +177,8 @@
<strong>Date</strong>: 2025-10-08
<strong>Status</strong>: Implemented</p>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>Complete authentication and authorization flow integration for the Provisioning Orchestrator, connecting all security components (JWT validation, MFA verification, Cedar authorization, rate limiting, and audit logging) into a cohesive security middleware chain.</p>
<p>Complete authentication and authorization flow integration for the Provisioning Orchestrator, connecting all security components (JWT validation, MFA
verification, Cedar authorization, rate limiting, and audit logging) into a cohesive security middleware chain.</p>
<h2 id="architecture"><a class="header" href="#architecture">Architecture</a></h2>
<h3 id="security-middleware-chain"><a class="header" href="#security-middleware-chain">Security Middleware Chain</a></h3>
<p>The middleware chain is applied in this specific order to ensure proper security:</p>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -177,7 +177,9 @@
<strong>Status:</strong> Clarification Document
<strong>Related:</strong> <a href="multi-repo-strategy.html">Multi-Repo Strategy</a>, <a href="../user/hybrid-orchestrator.html">Hybrid Orchestrator v3.0</a></p>
<h2 id="executive-summary"><a class="header" href="#executive-summary">Executive Summary</a></h2>
<p>This document clarifies <strong>how the Rust orchestrator integrates with Nushell core</strong> in both monorepo and multi-repo architectures. The orchestrator is a <strong>critical performance layer</strong> that coordinates Nushell business logic execution, solving deep call stack limitations while preserving all existing functionality.</p>
<p>This document clarifies <strong>how the Rust orchestrator integrates with Nushell core</strong> in both monorepo and multi-repo architectures. The orchestrator is
a <strong>critical performance layer</strong> that coordinates Nushell business logic execution, solving deep call stack limitations while preserving all existing
functionality.</p>
<hr />
<h2 id="current-architecture-hybrid-orchestrator-v30"><a class="header" href="#current-architecture-hybrid-orchestrator-v30">Current Architecture (Hybrid Orchestrator v3.0)</a></h2>
<h3 id="the-problem-being-solved"><a class="header" href="#the-problem-being-solved">The Problem Being Solved</a></h3>
@@ -606,7 +608,7 @@ CLI → servers/list.nu → Query state → Return results
<ol>
<li>
<p><strong>Eliminates Deep Call Stack Issues</strong></p>
<pre><code>
<pre><code class="language-text">
Without Orchestrator:
template.nu → calls → cluster.nu → calls → taskserv.nu → calls → provider.nu
(Deep nesting causes "Type not supported" errors)
@@ -617,22 +619,20 @@ Orchestrator → spawns → Nushell subprocess (flat execution)

</code></pre>
</li>
</ol>
<pre><code>
2. **Performance Optimization**
<li>
<p><strong>Performance Optimization</strong></p>
<pre><code class="language-rust">// Orchestrator executes tasks in parallel
let tasks = vec![task1, task2, task3, task4, task5];

```rust
// Orchestrator executes tasks in parallel
let tasks = vec![task1, task2, task3, task4, task5];
let results = futures::future::join_all(
tasks.iter().map(|t| execute_task(t))
).await;

let results = futures::future::join_all(
tasks.iter().map(|t| execute_task(t))
).await;

// 5 Nushell subprocesses run concurrently
</code></pre>
<ol>
<li><strong>Reliable State Management</strong></li>
// 5 Nushell subprocesses run concurrently</code></pre>
</li>
<li>
<p><strong>Reliable State Management</strong></p>
</li>
</ol>
<pre><code class="language-plaintext"> Orchestrator maintains:
- Task queue (survives crashes)

@@ -1,221 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="ayu sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Workspace Config Architecture - Provisioning Platform Documentation</title>
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
[... standard mdBook page chrome (stylesheets, theme/sidebar scripts, menu bar, search) omitted ...]
<h1 id="workspace-config-architecture"><a class="header" href="#workspace-config-architecture">Workspace Config Architecture</a></h1>
<a rel="prev" href="../configuration/config-validation.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
</body>
</html>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -173,7 +173,8 @@
<div id="content" class="content">
<main>
<h1 id="build-system-documentation"><a class="header" href="#build-system-documentation">Build System Documentation</a></h1>
<p>This document provides comprehensive documentation for the provisioning project’s build system, including the complete Makefile reference with 40+ targets, build tools, compilation instructions, and troubleshooting.</p>
<p>This document provides comprehensive documentation for the provisioning project’s build system, including the complete Makefile reference with 40+
targets, build tools, compilation instructions, and troubleshooting.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol>
<li><a href="#overview">Overview</a></li>
@@ -1022,7 +1023,8 @@ make ci-test
# Full CI/CD pipeline
make ci-release
</code></pre>
<p>This build system provides a comprehensive, maintainable foundation for the provisioning project’s development lifecycle, from local development to production releases.</p>
<p>This build system provides a comprehensive, maintainable foundation for the provisioning project’s development lifecycle, from local development to
production releases.</p>

</main>

@@ -1032,7 +1034,7 @@ make ci-release
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../development/extensions.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../development/distribution-process.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>

@@ -1046,7 +1048,7 @@ make ci-release
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../development/extensions.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../development/distribution-process.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>

@@ -1,227 +0,0 @@
<!DOCTYPE HTML>
<html lang="en" class="ayu sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Configuration - Provisioning Platform Documentation</title>
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
[... standard mdBook page chrome (stylesheets, theme/sidebar scripts, menu bar, search) omitted ...]
<h1 id="configuration"><a class="header" href="#configuration">Configuration</a></h1>
<a rel="prev" href="../development/command-handler-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<a rel="next prefetch" href="../development/workflow.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
</body>
</html>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -173,7 +173,8 @@
<div id="content" class="content">
<main>
<h1 id="distribution-process-documentation"><a class="header" href="#distribution-process-documentation">Distribution Process Documentation</a></h1>
<p>This document provides comprehensive documentation for the provisioning project’s distribution process, covering release workflows, package generation, multi-platform distribution, and rollback procedures.</p>
<p>This document provides comprehensive documentation for the provisioning project’s distribution process, covering release workflows, package
generation, multi-platform distribution, and rollback procedures.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol>
<li><a href="#overview">Overview</a></li>
@@ -188,7 +189,8 @@
<li><a href="#troubleshooting">Troubleshooting</a></li>
</ol>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>The distribution system provides a comprehensive solution for creating, packaging, and distributing provisioning across multiple platforms with automated release management.</p>
<p>The distribution system provides a comprehensive solution for creating, packaging, and distributing provisioning across multiple platforms with
automated release management.</p>
<p><strong>Key Features</strong>:</p>
<ul>
<li><strong>Multi-Platform Support</strong>: Linux, macOS, Windows with multiple architectures</li>
@@ -988,13 +990,14 @@ make status
top
df -h
</code></pre>
<p>This distribution process provides a robust, automated pipeline for creating, validating, and distributing provisioning across multiple platforms while maintaining high quality and reliability standards.</p>
<p>This distribution process provides a robust, automated pipeline for creating, validating, and distributing provisioning across multiple platforms
while maintaining high quality and reliability standards.</p>

</main>

<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../development/extensions.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<a rel="prev" href="../development/build-system.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>

@@ -1008,7 +1011,7 @@ df -h
</div>

<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../development/extensions.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<a rel="prev" href="../development/build-system.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>

File diff suppressed because it is too large
@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -178,7 +178,8 @@
<strong>Priority:</strong> High
<strong>Related:</strong> <a href="../architecture/repo-dist-analysis.html">Architecture Analysis</a></p>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>This guide provides step-by-step instructions for implementing the repository restructuring and distribution system improvements. Each phase includes specific commands, validation steps, and rollback procedures.</p>
<p>This guide provides step-by-step instructions for implementing the repository restructuring and distribution system improvements. Each phase includes
specific commands, validation steps, and rollback procedures.</p>
<hr />
<h2 id="prerequisites"><a class="header" href="#prerequisites">Prerequisites</a></h2>
<h3 id="required-tools"><a class="header" href="#required-tools">Required Tools</a></h3>
@@ -974,7 +975,7 @@ Day 16: Release prepared</li>
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../development/taskserv-developer-guide.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../development/project-structure.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>

@@ -988,7 +989,7 @@ Day 16: Release prepared</li>
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../development/taskserv-developer-guide.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../development/project-structure.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -173,7 +173,8 @@
<div id="content" class="content">
<main>
<h1 id="integration-guide"><a class="header" href="#integration-guide">Integration Guide</a></h1>
<p>This document explains how the new project structure integrates with existing systems, API compatibility and versioning, database migration strategies, deployment considerations, and monitoring and observability.</p>
<p>This document explains how the new project structure integrates with existing systems, API compatibility and versioning, database migration
strategies, deployment considerations, and monitoring and observability.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol>
<li><a href="#overview">Overview</a></li>
@@ -187,7 +188,8 @@
<li><a href="#troubleshooting-integration-issues">Troubleshooting Integration Issues</a></li>
</ol>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>Provisioning has been designed with integration as a core principle, ensuring seamless compatibility between new development-focused components and existing production systems while providing clear migration pathways.</p>
<p>Provisioning has been designed with integration as a core principle, ensuring seamless compatibility between new development-focused components and
existing production systems while providing clear migration pathways.</p>
<p><strong>Integration Principles</strong>:</p>
<ul>
<li><strong>Backward Compatibility</strong>: All existing APIs and interfaces remain functional</li>
@@ -1244,7 +1246,8 @@ provisioning server create test-server 2xCPU-4 GB --debug-integration
}
}
</code></pre>
<p>This integration guide provides a comprehensive framework for seamlessly integrating new development components with existing production systems while maintaining reliability, compatibility, and clear migration pathways.</p>
<p>This integration guide provides a comprehensive framework for seamlessly integrating new development components with existing production systems while
maintaining reliability, compatibility, and clear migration pathways.</p>

</main>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -173,7 +173,8 @@
<div id="content" class="content">
<main>
<h1 id="project-structure-guide"><a class="header" href="#project-structure-guide">Project Structure Guide</a></h1>
<p>This document provides a comprehensive overview of the provisioning project’s structure after the major reorganization, explaining both the new development-focused organization and the preserved existing functionality.</p>
<p>This document provides a comprehensive overview of the provisioning project’s structure after the major reorganization, explaining both the new
development-focused organization and the preserved existing functionality.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol>
<li><a href="#overview">Overview</a></li>
@@ -501,17 +502,18 @@ nu workspace/extensions/providers/my-provider/nulib/provider.nu test
<li><strong>Documentation</strong>: Comprehensive documentation and examples</li>
<li><strong>Testing Framework</strong>: Built-in testing and validation tools</li>
</ul>
<p>This structure represents a significant evolution in the project’s organization while maintaining complete backward compatibility and providing powerful new development capabilities.</p>
<p>This structure represents a significant evolution in the project’s organization while maintaining complete backward compatibility and providing
powerful new development capabilities.</p>

</main>

<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../development/taskserv-quick-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<a rel="prev" href="../development/implementation-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../development/provider-agnostic-architecture.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../development/ctrl-c-implementation-notes.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>

@@ -521,11 +523,11 @@ nu workspace/extensions/providers/my-provider/nulib/provider.nu test
</div>

<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../development/taskserv-quick-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<a rel="prev" href="../development/implementation-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>

<a rel="next prefetch" href="../development/provider-agnostic-architecture.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<a rel="next prefetch" href="../development/ctrl-c-implementation-notes.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -173,7 +173,8 @@
<div id="content" class="content">
<main>
<h1 id="development-workflow-guide"><a class="header" href="#development-workflow-guide">Development Workflow Guide</a></h1>
<p>This document outlines the recommended development workflows, coding practices, testing strategies, and debugging techniques for the provisioning project.</p>
<p>This document outlines the recommended development workflows, coding practices, testing strategies, and debugging techniques for the provisioning
project.</p>
<h2 id="table-of-contents"><a class="header" href="#table-of-contents">Table of Contents</a></h2>
<ol>
<li><a href="#overview">Overview</a></li>
@@ -188,7 +189,8 @@
<li><a href="#best-practices">Best Practices</a></li>
</ol>
<h2 id="overview"><a class="header" href="#overview">Overview</a></h2>
<p>The provisioning project employs a multi-language, multi-component architecture requiring specific development workflows to maintain consistency, quality, and efficiency.</p>
<p>The provisioning project employs a multi-language, multi-component architecture requiring specific development workflows to maintain consistency,
quality, and efficiency.</p>
<p><strong>Key Technologies</strong>:</p>
<ul>
<li><strong>Nushell</strong>: Primary scripting and automation language</li>
@@ -1041,13 +1043,14 @@ def get-api-url [] {
}
}
</code></pre>
<p>This development workflow provides a comprehensive framework for efficient, quality-focused development while maintaining the project’s architectural principles and ensuring smooth collaboration across the team.</p>
<p>This development workflow provides a comprehensive framework for efficient, quality-focused development while maintaining the project’s architectural
principles and ensuring smooth collaboration across the team.</p>

</main>

<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../development/configuration.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<a rel="prev" href="../development/command-handler-guide.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>

@@ -1061,7 +1064,7 @@ def get-api-url [] {
</div>

<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../development/configuration.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<a rel="prev" href="../development/command-handler-guide.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -8,7 +8,7 @@

<!-- Custom HTML head -->

<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">

@@ -181,9 +181,11 @@
<h1 id="provisioning-platform-documentation"><a class="header" href="#provisioning-platform-documentation">Provisioning Platform Documentation</a></h1>
<p><strong>Last Updated</strong>: 2025-01-02 (Phase 3.A Cleanup Complete)
<strong>Status</strong>: ✅ Primary documentation source (145 files consolidated)</p>
<p>Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell, KCL, and Rust.</p>
<p>Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell,
Nickel, and Rust.</p>
<blockquote>
<p><strong>Note</strong>: Architecture Decision Records (ADRs) and high-level design documentation are in <code>docs/</code> directory. This location contains all user-facing, operational, and product documentation.</p>
<p><strong>Note</strong>: Architecture Decision Records (ADRs) and design documentation are in <code>docs/</code>
directory. This location contains user-facing, operational, and product documentation.</p>
</blockquote>
<hr />
<h2 id="quick-navigation"><a class="header" href="#quick-navigation">Quick Navigation</a></h2>
@@ -318,7 +320,8 @@
<hr />
<h2 id="key-concepts"><a class="header" href="#key-concepts">Key Concepts</a></h2>
<h3 id="infrastructure-as-code-iac"><a class="header" href="#infrastructure-as-code-iac">Infrastructure as Code (IaC)</a></h3>
<p>The provisioning platform uses <strong>declarative configuration</strong> to manage infrastructure. Instead of manually creating resources, you define what you want in Nickel configuration files, and the system makes it happen.</p>
<p>The provisioning platform uses <strong>declarative configuration</strong> to manage infrastructure. Instead of manually creating resources, you define what you
want in Nickel configuration files, and the system makes it happen.</p>
<h3 id="mode-based-architecture"><a class="header" href="#mode-based-architecture">Mode-Based Architecture</a></h3>
<p>The system supports four operational modes:</p>
<ul>
@@ -357,7 +360,7 @@
<li>Study <strong><a href="architecture/design-principles.html">Design Principles</a></strong></li>
<li>Read relevant <strong><a href="architecture/">ADRs</a></strong></li>
<li>Follow <strong><a href="development/README.html">Development Guide</a></strong></li>
<li>Reference <strong>KCL Quick Reference</strong></li>
<li>Reference <strong>Nickel Quick Reference</strong></li>
</ol>
<h3 id="for-operators"><a class="header" href="#for-operators">For Operators</a></h3>
<ol>
@@ -378,7 +381,7 @@
<h3 id="-infrastructure-automation"><a class="header" href="#-infrastructure-automation">✅ Infrastructure Automation</a></h3>
<ul>
<li>Multi-cloud support (AWS, UpCloud, Local)</li>
<li>Declarative configuration with KCL</li>
<li>Declarative configuration with Nickel</li>
<li>Automated dependency resolution</li>
<li>Batch operations with rollback</li>
</ul>
@@ -458,7 +461,7 @@
<h2 id="technology-stack"><a class="header" href="#technology-stack">Technology Stack</a></h2>
<div class="table-wrapper"><table><thead><tr><th>Component</th><th>Technology</th><th>Purpose</th></tr></thead><tbody>
<tr><td><strong>Core CLI</strong></td><td>Nushell 0.107.1</td><td>Shell and scripting</td></tr>
<tr><td><strong>Configuration</strong></td><td>KCL 0.11.2</td><td>Type-safe IaC</td></tr>
<tr><td><strong>Configuration</strong></td><td>Nickel 1.0.0+</td><td>Type-safe IaC</td></tr>
<tr><td><strong>Orchestrator</strong></td><td>Rust</td><td>High-performance coordination</td></tr>
<tr><td><strong>Templates</strong></td><td>Jinja2 (nu_plugin_tera)</td><td>Code generation</td></tr>
<tr><td><strong>Secrets</strong></td><td>SOPS 3.10.2 + Age 1.2.1</td><td>Encryption</td></tr>

@ -1,227 +0,0 @@
|
||||
(Deleted generated file: docs/book/operations/backup-recovery.html — standard mdBook theme scaffolding; the page's only content was the "Backup and Recovery" heading.)
@ -1,227 +0,0 @@
|
||||
(Deleted generated file: docs/book/operations/deployment.html — same mdBook scaffolding; the page's only content was the "Deployment Guide" heading.)
@ -1,227 +0,0 @@
|
||||
(Deleted generated file: docs/book/operations/monitoring.html — same mdBook scaffolding; the page's only content was the "Monitoring Guide" heading.)
21578
docs/book/print.html
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -8,7 +8,7 @@
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
|
||||
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
|
||||
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta name="theme-color" content="#ffffff">
|
||||
|
||||
|
||||
@ -8,7 +8,7 @@
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
|
||||
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
|
||||
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta name="theme-color" content="#ffffff">
|
||||
|
||||
|
||||
@ -8,7 +8,7 @@
|
||||
|
||||
<!-- Custom HTML head -->
|
||||
|
||||
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, KCL, and Rust">
|
||||
<meta name="description" content="Complete documentation for the Provisioning Platform - Infrastructure automation with Nushell, Nickel, and Rust">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<meta name="theme-color" content="#ffffff">
|
||||
|
||||
|
||||
@ -86,7 +86,7 @@ Declarative Infrastructure as Code (IaC) platform providing:
|
||||
|
||||
**Solution**: Unified abstraction layer with provider-agnostic interfaces. Write configuration once, deploy anywhere.
|
||||
|
||||
```kcl
|
||||
```
|
||||
# Same configuration works on UpCloud, AWS, or local infrastructure
|
||||
server: Server {
|
||||
name = "web-01"
|
||||
@ -101,7 +101,7 @@ server: Server {
|
||||
|
||||
**Solution**: Automatic dependency resolution with topological sorting and health checks.
|
||||
|
||||
```kcl
|
||||
```
|
||||
# Provisioning resolves: containerd → etcd → kubernetes → cilium
|
||||
taskservs = ["cilium"] # Automatically installs all dependencies
|
||||
```
|
||||
@ -112,7 +112,7 @@ taskservs = ["cilium"] # Automatically installs all dependencies
|
||||
|
||||
**Solution**: Hierarchical configuration system with 476+ config accessors replacing 200+ ENV variables.
|
||||
|
||||
```plaintext
|
||||
```plaintext
|
||||
Defaults → User → Project → Infrastructure → Environment → Runtime
|
||||
```
|
||||
|
||||
@ -120,7 +120,7 @@ Defaults → User → Project → Infrastructure → Environment → Runtime
|
||||
|
||||
**Problem**: Brittle shell scripts that don't handle failures, don't support rollback, hard to maintain.
|
||||
|
||||
**Solution**: Declarative KCL configurations with validation, type safety, and automatic rollback.
|
||||
**Solution**: Declarative Nickel configurations with validation, type safety, and automatic rollback.
|
||||
|
||||
#### 5. **Lack of Visibility**
|
||||
|
||||
@ -197,7 +197,7 @@ Clusters handle:
|
||||
|
||||
Isolated environments for different projects or deployment stages.
|
||||
|
||||
```plaintext
|
||||
```plaintext
|
||||
workspace_librecloud/ # Production workspace
|
||||
├── infra/ # Infrastructure definitions
|
||||
├── config/ # Workspace configuration
|
||||
@ -211,7 +211,7 @@ workspace_dev/ # Development workspace
|
||||
|
||||
Switch between workspaces with single command:
|
||||
|
||||
```bash
|
||||
```bash
|
||||
provisioning workspace switch librecloud
|
||||
```
|
||||
|
||||
@ -240,7 +240,7 @@ Coordinated sequences of operations with dependency management.
|
||||
|
||||
### System Components
|
||||
|
||||
```plaintext
|
||||
```plaintext
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ User Interface Layer │
|
||||
│ • CLI (provisioning command) │
|
||||
@ -282,7 +282,7 @@ Coordinated sequences of operations with dependency management.
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```plaintext
|
||||
```plaintext
|
||||
project-provisioning/
|
||||
├── provisioning/ # Core provisioning system
|
||||
│ ├── core/ # Core engine and libraries
|
||||
@ -305,7 +305,7 @@ project-provisioning/
|
||||
│ │ ├── oci-registry/ # OCI registry for extensions
|
||||
│ │ └── installer/ # Platform installer (TUI + CLI)
|
||||
│ │
|
||||
│ ├── kcl/ # KCL configuration schemas
|
||||
│ ├── schemas/ # Nickel configuration schemas
|
||||
│ ├── config/ # Configuration files
|
||||
│ ├── templates/ # Template files
|
||||
│ └── tools/ # Build and distribution tools
|
||||
@ -394,14 +394,14 @@ Hierarchical, config-driven architecture.
|
||||
- **476+ config accessors** replacing 200+ ENV variables
|
||||
- **Hierarchical loading**: defaults → user → project → infra → env → runtime
|
||||
- **Variable interpolation**: `{{paths.base}}`, `{{env.HOME}}`, `{{now.date}}`
|
||||
- **Multi-format support**: TOML, YAML, KCL
|
||||
- **Multi-format support**: TOML, YAML, Nickel
|
||||
|
||||
### 3. **Batch Workflow System** (v3.1.0)
|
||||
|
||||
Provider-agnostic batch operations with 85-90% token efficiency.
|
||||
|
||||
- **Multi-cloud support**: Mixed UpCloud + AWS + local in single workflow
|
||||
- **KCL schema integration**: Type-safe workflow definitions
|
||||
- **Nickel schema integration**: Type-safe workflow definitions
|
||||
- **Dependency resolution**: Topological sorting with soft/hard dependencies
|
||||
- **State management**: Checkpoint-based recovery with rollback
|
||||
- **Real-time monitoring**: Live progress tracking
|
||||
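A minimal sketch of what such a workflow definition can look like in Nickel (the field names here are illustrative assumptions, not the platform's actual schema; the multi-region example later in this overview shows a real definition):

```nickel
# workflows/web-tier.ncl -- illustrative sketch only
let batch_workflow = {
  name = "deploy-web-tier",
  operations = [
    # provision servers first
    { id = "servers", kind = "server_create", provider = "upcloud" },
    # then install Kubernetes; depends_on expresses a hard dependency on "servers"
    { id = "k8s", kind = "taskserv_install", taskserv = "kubernetes", depends_on = ["servers"] }
  ]
} in batch_workflow
```

The orchestrator sorts `operations` topologically from their declared dependencies before executing them.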
@ -471,7 +471,7 @@ Comprehensive version tracking and updates.
|
||||
| Technology | Version | Purpose | Why |
|
||||
| ------------ | --------- | --------- | ----- |
|
||||
| **Nushell** | 0.107.1+ | Primary shell and scripting language | Data pipelines, cross-platform, modern parsers |
|
||||
| **KCL** | 0.11.3+ | Configuration language | Type safety, schema validation, immutability, constraint checking |
|
||||
| **Nickel** | 1.0.0+ | Configuration language | Type safety, schema validation, immutability, constraint checking |
|
||||
| **Rust** | Latest | Platform services (orchestrator, control-center, installer) | Performance, memory safety, concurrency, reliability |
|
||||
| **Tera** | Latest | Template engine | Jinja2-like syntax, configuration file rendering, variable interpolation, filters and functions |
|
||||
|
||||
@ -505,7 +505,6 @@ Comprehensive version tracking and updates.
|
||||
| ------ | --------- |
|
||||
| **K9s** | Kubernetes management interface |
|
||||
| **nu_plugin_tera** | Nushell plugin for Tera template rendering |
|
||||
| **nu_plugin_kcl** | Nushell plugin for KCL integration (CLI required, plugin optional) |
|
||||
| **glow** | Markdown rendering for interactive guides |
|
||||
| **bat** | Syntax highlighting for file viewing and guides |
|
||||
|
||||
@ -515,8 +514,8 @@ Comprehensive version tracking and updates.
|
||||
|
||||
### Data Flow
|
||||
|
||||
```plaintext
|
||||
1. User defines infrastructure in KCL
|
||||
```plaintext
|
||||
1. User defines infrastructure in Nickel
|
||||
↓
|
||||
2. CLI loads configuration (hierarchical)
|
||||
↓
|
||||
@ -541,7 +540,7 @@ Comprehensive version tracking and updates.
|
||||
|
||||
**Step 1**: Define infrastructure in Nickel
|
||||
|
||||
```nickel
|
||||
```nickel
|
||||
# infra/my-cluster.ncl
|
||||
let config = {
|
||||
infra = {
|
||||
@ -562,13 +561,13 @@ config
|
||||
|
||||
**Step 2**: Submit to Provisioning
|
||||
|
||||
```bash
|
||||
```bash
|
||||
provisioning server create --infra my-cluster
|
||||
```
|
||||
|
||||
**Step 3**: Provisioning executes workflow
|
||||
|
||||
```plaintext
|
||||
```plaintext
|
||||
1. Create workflow: "deploy-my-cluster"
|
||||
2. Resolve dependencies:
|
||||
- containerd (required by kubernetes)
|
||||
@ -593,7 +592,7 @@ provisioning server create --infra my-cluster
|
||||
|
||||
**Step 4**: Verify deployment
|
||||
|
||||
```bash
|
||||
```bash
|
||||
provisioning cluster status my-cluster
|
||||
```
|
||||
|
||||
@ -601,7 +600,7 @@ provisioning cluster status my-cluster
|
||||
|
||||
Configuration values are resolved through a hierarchy:
|
||||
|
||||
```plaintext
|
||||
```plaintext
|
||||
1. System Defaults (provisioning/config/config.defaults.toml)
|
||||
↓ (overridden by)
|
||||
2. User Preferences (~/.config/provisioning/user_config.yaml)
|
||||
@ -617,7 +616,7 @@ Configuration values are resolved through a hierarchy:
|
||||
|
||||
**Example**:
|
||||
|
||||
```toml
|
||||
```toml
|
||||
# System default
|
||||
[servers]
|
||||
default_plan = "small"
|
||||
@ -642,7 +641,7 @@ provisioning server create --plan xlarge # Overrides everything
|
||||
|
||||
Deploy Kubernetes clusters across different cloud providers with identical configuration.
|
||||
|
||||
```bash
|
||||
```bash
|
||||
# UpCloud cluster
|
||||
provisioning cluster create k8s-prod --provider upcloud
|
||||
|
||||
@ -654,7 +653,7 @@ provisioning cluster create k8s-prod --provider aws
|
||||
|
||||
Manage multiple environments with workspace switching.
|
||||
|
||||
```bash
|
||||
```bash
|
||||
# Development
|
||||
provisioning workspace switch dev
|
||||
provisioning cluster create app-stack
|
||||
@ -672,7 +671,7 @@ provisioning cluster create app-stack
|
||||
|
||||
Test infrastructure changes before deploying to production.
|
||||
|
||||
```bash
|
||||
```bash
|
||||
# Test Kubernetes upgrade locally
|
||||
provisioning test topology load kubernetes_3node | \
|
||||
test env cluster kubernetes --version 1.29.0
|
||||
@ -688,7 +687,7 @@ provisioning test env cleanup <env-id>
|
||||
|
||||
Deploy to multiple regions in parallel.
|
||||
|
||||
```nickel
|
||||
```nickel
|
||||
# workflows/multi-region.ncl
|
||||
let batch_workflow = {
|
||||
operations = [
|
||||
@ -716,7 +715,7 @@ let batch_workflow = {
|
||||
batch_workflow
|
||||
```
|
||||
|
||||
```bash
|
||||
```bash
|
||||
provisioning batch submit workflows/multi-region.ncl
|
||||
provisioning batch monitor <workflow-id>
|
||||
```
|
||||
@ -725,7 +724,7 @@ provisioning batch monitor <workflow-id>
|
||||
|
||||
Recreate infrastructure from configuration.
|
||||
|
||||
```bash
|
||||
```bash
|
||||
# Infrastructure destroyed
|
||||
provisioning workspace switch prod
|
||||
|
||||
@ -739,7 +738,7 @@ provisioning cluster create --infra backup-restore --wait
|
||||
|
||||
Automated testing and deployment pipelines.
|
||||
|
||||
```yaml
|
||||
```yaml
|
||||
# .gitlab-ci.yml
|
||||
test-infrastructure:
|
||||
script:
|
||||
|
||||
@ -11,7 +11,7 @@
|
||||
**Status**: ✅ Primary documentation source (145 files consolidated)
|
||||
|
||||
Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell,
|
||||
KCL, and Rust.
|
||||
Nickel, and Rust.
|
||||
|
||||
> **Note**: Architecture Decision Records (ADRs) and design documentation are in `docs/`
|
||||
> directory. This location contains user-facing, operational, and product documentation.
|
||||
@ -117,7 +117,7 @@ KCL, and Rust.
|
||||
|
||||
## Documentation Structure
|
||||
|
||||
```plaintext
|
||||
```plaintext
|
||||
provisioning/docs/src/
|
||||
├── README.md (this file) # Documentation hub
|
||||
├── getting-started/ # Getting started guides
|
||||
@ -214,7 +214,7 @@ Extensions and packages distributed as OCI artifacts, enabling:
|
||||
2. Study **[Design Principles](architecture/design-principles.md)**
|
||||
3. Read relevant **[ADRs](architecture/)**
|
||||
4. Follow **[Development Guide](development/README.md)**
|
||||
5. Reference **KCL Quick Reference**
|
||||
5. Reference **Nickel Quick Reference**
|
||||
|
||||
### For Operators
|
||||
|
||||
@ -237,7 +237,7 @@ Extensions and packages distributed as OCI artifacts, enabling:
|
||||
### ✅ Infrastructure Automation
|
||||
|
||||
- Multi-cloud support (AWS, UpCloud, Local)
|
||||
- Declarative configuration with KCL
|
||||
- Declarative configuration with Nickel
|
||||
- Automated dependency resolution
|
||||
- Batch operations with rollback
|
||||
|
||||
@ -322,7 +322,7 @@ Extensions and packages distributed as OCI artifacts, enabling:
|
||||
| Component | Technology | Purpose |
|
||||
| ----------- | ------------ | --------- |
|
||||
| **Core CLI** | Nushell 0.107.1 | Shell and scripting |
|
||||
| **Configuration** | KCL 0.11.2 | Type-safe IaC |
|
||||
| **Configuration** | Nickel 1.0.0+ | Type-safe IaC |
|
||||
| **Orchestrator** | Rust | High-performance coordination |
|
||||
| **Templates** | Jinja2 (nu_plugin_tera) | Code generation |
|
||||
| **Secrets** | SOPS 3.10.2 + Age 1.2.1 | Encryption |
|
||||
|
||||
385
docs/src/README.md.bak2
Normal file
@ -0,0 +1,385 @@
|
||||
<p align="center">
|
||||
<img src="resources/provisioning_logo.svg" alt="Provisioning Logo" width="300"/>
|
||||
</p>
|
||||
<p align="center">
|
||||
<img src="resources/logo-text.svg" alt="Provisioning" width="500"/>
|
||||
</p>
|
||||
|
||||
# Provisioning Platform Documentation
|
||||
|
||||
**Last Updated**: 2025-01-02 (Phase 3.A Cleanup Complete)
|
||||
**Status**: ✅ Primary documentation source (145 files consolidated)
|
||||
|
||||
Welcome to the comprehensive documentation for the Provisioning Platform - a modern, cloud-native infrastructure automation system built with Nushell,
|
||||
Nickel, and Rust.
|
||||
|
||||
> **Note**: Architecture Decision Records (ADRs) and design documentation are in `docs/`
|
||||
> directory. This location contains user-facing, operational, and product documentation.
|
||||
|
||||
---
|
||||
|
||||
## Quick Navigation
|
||||
|
||||
### 🚀 Getting Started
|
||||
|
||||
| Document | Description | Audience |
|
||||
| ---------- | ------------- | ---------- |
|
||||
| **[Installation Guide](getting-started/installation-guide.md)** | Install and configure the system | New Users |
|
||||
| **[Getting Started](getting-started/getting-started.md)** | First steps and basic concepts | New Users |
|
||||
| **[Quick Reference](getting-started/quickstart-cheatsheet.md)** | Command cheat sheet | All Users |
|
||||
| **[From Scratch Guide](guides/from-scratch.md)** | Complete deployment walkthrough | New Users |
|
||||
|
||||
### 📚 User Guides
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[CLI Reference](infrastructure/cli-reference.md)** | Complete command reference |
|
||||
| **[Workspace Management](infrastructure/workspace-setup.md)** | Workspace creation and management |
|
||||
| **[Workspace Switching](infrastructure/workspace-switching-guide.md)** | Switch between workspaces |
|
||||
| **[Infrastructure Management](infrastructure/infrastructure-management.md)** | Server, taskserv, cluster operations |
|
||||
| **[Service Management](operations/service-management-guide.md)** | Platform service lifecycle management |
|
||||
| **[OCI Registry](integration/oci-registry-guide.md)** | OCI artifact management |
|
||||
| **[Gitea Integration](integration/gitea-integration-guide.md)** | Git workflow and collaboration |
|
||||
| **[CoreDNS Guide](operations/coredns-guide.md)** | DNS management |
|
||||
| **[Test Environments](testing/test-environment-usage.md)** | Containerized testing |
|
||||
| **[Extension Development](development/extension-development.md)** | Create custom extensions |
|
||||
|
||||
### 🏗️ Architecture
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[System Overview](architecture/system-overview.md)** | High-level architecture |
|
||||
| **[Multi-Repo Architecture](architecture/multi-repo-architecture.md)** | Repository structure and OCI distribution |
|
||||
| **[Design Principles](architecture/design-principles.md)** | Architectural philosophy |
|
||||
| **[Integration Patterns](architecture/integration-patterns.md)** | System integration patterns |
|
||||
| **[Orchestrator Model](architecture/orchestrator-integration-model.md)** | Hybrid orchestration architecture |
|
||||
|
||||
### 📋 Architecture Decision Records (ADRs)
|
||||
|
||||
| ADR | Title | Status |
|
||||
| ----- | ------- | -------- |
|
||||
| **[ADR-001](architecture/adr/adr-001-project-structure.md)** | Project Structure Decision | Accepted |
|
||||
| **[ADR-002](architecture/adr/adr-002-distribution-strategy.md)** | Distribution Strategy | Accepted |
|
||||
| **[ADR-003](architecture/adr/adr-003-workspace-isolation.md)** | Workspace Isolation | Accepted |
|
||||
| **[ADR-004](architecture/adr/adr-004-hybrid-architecture.md)** | Hybrid Architecture | Accepted |
|
||||
| **[ADR-005](architecture/adr/adr-005-extension-framework.md)** | Extension Framework | Accepted |
|
||||
| **[ADR-006](architecture/adr/adr-006-provisioning-cli-refactoring.md)** | CLI Refactoring | Accepted |
|
||||
|
||||
### 🔌 API Documentation
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[REST API](api-reference/rest-api.md)** | HTTP API endpoints |
|
||||
| **[WebSocket API](api-reference/websocket.md)** | Real-time event streams |
|
||||
| **[Extensions API](development/extensions.md)** | Extension integration APIs |
|
||||
| **[SDKs](api-reference/sdks.md)** | Client libraries |
|
||||
| **[Integration Examples](api-reference/integration-examples.md)** | API usage examples |
|
||||
|
||||
### 🛠️ Development
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[Development README](development/README.md)** | Developer overview |
|
||||
| **[Implementation Guide](development/implementation-guide.md)** | Implementation details |
|
||||
| **[Provider Development](development/quick-provider-guide.md)** | Create cloud providers |
|
||||
| **[Taskserv Development](development/taskserv-developer-guide.md)** | Create task services |
|
||||
| **[Extension Framework](development/extensions.md)** | Extension system |
|
||||
| **[Command Handlers](development/command-handler-guide.md)** | CLI command development |
|
||||
|
||||
### 🐛 Troubleshooting
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[Troubleshooting Guide](troubleshooting/troubleshooting-guide.md)** | Common issues and solutions |
|
||||
|
||||
### 📖 How-To Guides
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[From Scratch](guides/from-scratch.md)** | Complete deployment from zero |
|
||||
| **[Update Infrastructure](guides/update-infrastructure.md)** | Safe update procedures |
|
||||
| **[Customize Infrastructure](guides/customize-infrastructure.md)** | Layer and template customization |
|
||||
|
||||
### 🔐 Configuration
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[Workspace Config Architecture](configuration/workspace-config-architecture.md)** | Configuration architecture |
|
||||
|
||||
### 📦 Quick References
|
||||
|
||||
| Document | Description |
|
||||
| ---------- | ------------- |
|
||||
| **[Quickstart Cheatsheet](getting-started/quickstart-cheatsheet.md)** | Command shortcuts |
|
||||
| **[OCI Quick Reference](quick-reference/oci.md)** | OCI operations |
|
||||
|
||||
---
|
||||
|
||||
## Documentation Structure
|
||||
|
||||
```plaintext
|
||||
provisioning/docs/src/
|
||||
├── README.md (this file) # Documentation hub
|
||||
├── getting-started/ # Getting started guides
|
||||
│ ├── installation-guide.md
|
||||
│ ├── getting-started.md
|
||||
│ └── quickstart-cheatsheet.md
|
||||
├── architecture/ # System architecture
|
||||
│ ├── adr/ # Architecture Decision Records
|
||||
│ ├── design-principles.md
|
||||
│ ├── integration-patterns.md
|
||||
│ ├── system-overview.md
|
||||
│ └── ... (and 10+ more architecture docs)
|
||||
├── infrastructure/ # Infrastructure guides
|
||||
│ ├── cli-reference.md
|
||||
│ ├── workspace-setup.md
|
||||
│ ├── workspace-switching-guide.md
|
||||
│ └── infrastructure-management.md
|
||||
├── api-reference/ # API documentation
|
||||
│ ├── rest-api.md
|
||||
│ ├── websocket.md
|
||||
│ ├── integration-examples.md
|
||||
│ └── sdks.md
|
||||
├── development/ # Developer guides
|
||||
│ ├── README.md
|
||||
│ ├── implementation-guide.md
|
||||
│ ├── quick-provider-guide.md
|
||||
│ ├── taskserv-developer-guide.md
|
||||
│ └── ... (15+ more developer docs)
|
||||
├── guides/ # How-to guides
|
||||
│ ├── from-scratch.md
|
||||
│ ├── update-infrastructure.md
|
||||
│ └── customize-infrastructure.md
|
||||
├── operations/ # Operations guides
|
||||
│ ├── service-management-guide.md
|
||||
│ ├── coredns-guide.md
|
||||
│ └── ... (more operations docs)
|
||||
├── security/ # Security docs
|
||||
├── integration/ # Integration guides
|
||||
├── testing/ # Testing docs
|
||||
├── configuration/ # Configuration docs
|
||||
├── troubleshooting/ # Troubleshooting guides
|
||||
└── quick-reference/ # Quick references
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### Infrastructure as Code (IaC)
|
||||
|
||||
The provisioning platform uses **declarative configuration** to manage infrastructure. Instead of manually creating resources, you define what you
|
||||
want in Nickel configuration files, and the system makes it happen.
|
||||
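As a minimal sketch (the file path and field names are illustrative, not the exact schema), a declarative definition looks roughly like this:

```nickel
# infra/web.ncl -- illustrative sketch only
let config = {
  infra = {
    name = "web",
    provider = "local",
    servers = [
      # desired state: one small server named web-01
      { name = "web-01", plan = "small" }
    ]
  }
} in config
```

You then hand the file to the CLI (for example `provisioning server create --infra web`) and the platform reconciles real resources to match it.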
|
||||
### Mode-Based Architecture
|
||||
|
||||
The system supports four operational modes:
|
||||
|
||||
- **Solo**: Single developer local development
|
||||
- **Multi-user**: Team collaboration with shared services
|
||||
- **CI/CD**: Automated pipeline execution
|
||||
- **Enterprise**: Production deployment with strict compliance
|
||||
|
||||
### Extension System
|
||||
|
||||
Extensibility through:
|
||||
|
||||
- **Providers**: Cloud platform integrations (AWS, UpCloud, Local)
|
||||
- **Task Services**: Infrastructure components (Kubernetes, databases, etc.)
|
||||
- **Clusters**: Complete deployment configurations
|
||||
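For illustration only (field names are assumptions, not the real schema), a single infrastructure definition can pull in all three extension types:

```nickel
# Hypothetical sketch combining the three extension types
let config = {
  infra = {
    name = "demo",
    provider = "upcloud",                      # provider extension
    taskservs = ["containerd", "kubernetes"],  # task service extensions
    cluster = "web-stack"                      # cluster extension
  }
} in config
```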
|
||||
### OCI-Native Distribution
|
||||
|
||||
Extensions and packages distributed as OCI artifacts, enabling:
|
||||
|
||||
- Industry-standard packaging
|
||||
- Efficient caching and bandwidth
|
||||
- Version pinning and rollback
|
||||
- Air-gapped deployments
|
||||
|
||||
---
|
||||
|
||||
## Documentation by Role
|
||||
|
||||
### For New Users
|
||||
|
||||
1. Start with **[Installation Guide](getting-started/installation-guide.md)**
|
||||
2. Read **[Getting Started](getting-started/getting-started.md)**
|
||||
3. Follow **[From Scratch Guide](guides/from-scratch.md)**
|
||||
4. Reference **[Quickstart Cheatsheet](guides/quickstart-cheatsheet.md)**
|
||||
|
||||
### For Developers
|
||||
|
||||
1. Review **[System Overview](architecture/system-overview.md)**
|
||||
2. Study **[Design Principles](architecture/design-principles.md)**
|
||||
3. Read relevant **[ADRs](architecture/)**
|
||||
4. Follow **[Development Guide](development/README.md)**
|
||||
5. Reference **Nickel Quick Reference**
|
||||
|
||||
### For Operators
|
||||
|
||||
1. Understand **[Mode System](infrastructure/mode-system)**
|
||||
2. Learn **[Service Management](operations/service-management-guide.md)**
|
||||
3. Review **[Infrastructure Management](infrastructure/infrastructure-management.md)**
|
||||
4. Study **[OCI Registry](integration/oci-registry-guide.md)**
|
||||
|
||||
### For Architects
|
||||
|
||||
1. Read **[System Overview](architecture/system-overview.md)**
|
||||
2. Study all **[ADRs](architecture/)**
|
||||
3. Review **[Integration Patterns](architecture/integration-patterns.md)**
|
||||
4. Understand **[Multi-Repo Architecture](architecture/multi-repo-architecture.md)**
|
||||
|
||||
---
|
||||
|
||||
## System Capabilities
|
||||
|
||||
### ✅ Infrastructure Automation
|
||||
|
||||
- Multi-cloud support (AWS, UpCloud, Local)
|
||||
- Declarative configuration with Nickel
|
||||
- Automated dependency resolution
|
||||
- Batch operations with rollback
|
||||
|
||||
### ✅ Workflow Orchestration
|
||||
|
||||
- Hybrid Rust/Nushell orchestration
|
||||
- Checkpoint-based recovery
|
||||
- Parallel execution with limits
|
||||
- Real-time monitoring
|
||||
|
||||
### ✅ Test Environments
|
||||
|
||||
- Containerized testing
|
||||
- Multi-node cluster simulation
|
||||
- Topology templates
|
||||
- Automated cleanup
|
||||
|
||||
### ✅ Mode-Based Operation
|
||||
|
||||
- Solo: Local development
|
||||
- Multi-user: Team collaboration
|
||||
- CI/CD: Automated pipelines
|
||||
- Enterprise: Production deployment
|
||||
|
||||
### ✅ Extension Management
|
||||
|
||||
- OCI-native distribution
|
||||
- Automatic dependency resolution
|
||||
- Version management
|
||||
- Local and remote sources
|
||||
|
||||
---
|
||||
|
||||
## Key Achievements
|
||||
|
||||
### 🚀 Batch Workflow System (v3.1.0)
|
||||
|
||||
- Provider-agnostic batch operations
|
||||
- Mixed provider support (UpCloud + AWS + local)
|
||||
- Dependency resolution with soft/hard dependencies
|
||||
- Real-time monitoring and rollback
|
||||
|
||||
### 🏗️ Hybrid Orchestrator (v3.0.0)
|
||||
|
||||
- Solves Nushell deep call stack limitations
|
||||
- Preserves all business logic
|
||||
- REST API for external integration
|
||||
- Checkpoint-based state management
|
||||
|
||||
### ⚙️ Configuration System (v2.0.0)
|
||||
|
||||
- Migrated from ENV to config-driven
|
||||
- Hierarchical configuration loading
|
||||
- Variable interpolation
|
||||
- True IaC without hardcoded fallbacks
|
||||
|
||||
### 🎯 Modular CLI (v3.2.0)
|
||||
|
||||
- 84% reduction in main file size
|
||||
- Domain-driven handlers
|
||||
- 80+ shortcuts
|
||||
- Bi-directional help system
|
||||
|
||||
### 🧪 Test Environment Service (v3.4.0)
|
||||
|
||||
- Automated containerized testing
|
||||
- Multi-node cluster topologies
|
||||
- CI/CD integration ready
|
||||
- Template-based configurations
|
||||
|
||||
### 🔄 Workspace Switching (v2.0.5)
|
||||
|
||||
- Centralized workspace management
|
||||
- Single-command workspace switching
|
||||
- Active workspace tracking
|
||||
- User preference system
|
||||
|
||||
---
|
||||
|
||||
## Technology Stack
|
||||
|
||||
| Component | Technology | Purpose |
|
||||
| ----------- | ------------ | --------- |
|
||||
| **Core CLI** | Nushell 0.107.1 | Shell and scripting |
|
||||
| **Configuration** | Nickel | Type-safe IaC |
|
||||
| **Orchestrator** | Rust | High-performance coordination |
|
||||
| **Templates** | Jinja2 (nu_plugin_tera) | Code generation |
|
||||
| **Secrets** | SOPS 3.10.2 + Age 1.2.1 | Encryption |
|
||||
| **Distribution** | OCI (skopeo/crane/oras) | Artifact management |
|
||||
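A quick way to confirm the stack above is available locally (a minimal sketch, assuming the tools are installed and on PATH; substitute whichever OCI client you use):

```bash
# Print the versions of the core tools (assumes they are on PATH).
nu --version        # Nushell
nickel --version    # Nickel
sops --version      # SOPS
age --version       # Age
oras version        # ORAS (one of the supported OCI clients)
```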
|
||||
---
|
||||
|
||||
## Support
|
||||
|
||||
### Getting Help
|
||||
|
||||
- **Documentation**: You're reading it!
|
||||
- **Quick Reference**: Run `provisioning sc` or `provisioning guide quickstart`
|
||||
- **Help System**: Run `provisioning help` or `provisioning <command> help`
|
||||
- **Interactive Shell**: Run `provisioning nu` for Nushell REPL
|
||||
|
||||
### Reporting Issues
|
||||
|
||||
- Check **[Troubleshooting Guide](infrastructure/troubleshooting-guide.md)**
|
||||
- Review **[FAQ](troubleshooting/troubleshooting-guide.md)**
|
||||
- Enable debug mode: `provisioning --debug <command>`
|
||||
- Check logs: `provisioning platform logs <service>`
|
||||
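The last two items typically combine into a single debugging pass; a minimal sketch (the command and service name below are placeholders, substitute your own):

```bash
# Re-run the failing command with debug output, then inspect the service logs.
# "workspace init" and "orchestrator" are placeholders.
provisioning --debug workspace init
provisioning platform logs orchestrator
```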
|
||||
---
|
||||
|
||||
## Contributing
|
||||
|
||||
This project welcomes contributions! See **[Development Guide](development/README.md)** for:
|
||||
|
||||
- Development setup
|
||||
- Code style guidelines
|
||||
- Testing requirements
|
||||
- Pull request process
|
||||
|
||||
---
|
||||
|
||||
## License
|
||||
|
||||
[Add license information]
|
||||
|
||||
---
|
||||
|
||||
## Version History
|
||||
|
||||
| Version | Date | Major Changes |
|
||||
| --------- | ------ | --------------- |
|
||||
| **3.5.0** | 2025-10-06 | Mode system, OCI registry, comprehensive documentation |
|
||||
| **3.4.0** | 2025-10-06 | Test environment service |
|
||||
| **3.3.0** | 2025-09-30 | Interactive guides system |
|
||||
| **3.2.0** | 2025-09-30 | Modular CLI refactoring |
|
||||
| **3.1.0** | 2025-09-25 | Batch workflow system |
|
||||
| **3.0.0** | 2025-09-25 | Hybrid orchestrator architecture |
|
||||
| **2.0.5** | 2025-10-02 | Workspace switching system |
|
||||
| **2.0.0** | 2025-09-23 | Configuration system migration |
|
||||
|
||||
---
|
||||
|
||||
**Maintained By**: Provisioning Team
|
||||
**Last Review**: 2025-10-06
|
||||
**Next Review**: 2026-01-06
|
||||
@ -21,6 +21,26 @@
|
||||
|
||||
---
|
||||
|
||||
## AI Integration
|
||||
|
||||
- [Overview](ai/README.md)
|
||||
- [Architecture](ai/architecture.md)
|
||||
- [RAG System](ai/rag-system.md)
|
||||
- [MCP Integration](ai/mcp-integration.md)
|
||||
- [Configuration Guide](ai/configuration.md)
|
||||
- [Security Policies](ai/security-policies.md)
|
||||
- [Troubleshooting with AI](ai/troubleshooting-with-ai.md)
|
||||
- [Cost Management](ai/cost-management.md)
|
||||
|
||||
### Planned Features (Q2 2025)
|
||||
|
||||
- [Natural Language Configuration](ai/natural-language-config.md)
|
||||
- [Configuration Generation](ai/config-generation.md)
|
||||
- [AI-Assisted Forms](ai/ai-assisted-forms.md)
|
||||
- [AI Agents](ai/ai-agents.md)
|
||||
|
||||
---
|
||||
|
||||
## Architecture & Design
|
||||
|
||||
- [System Overview](architecture/system-overview.md)
|
||||
@ -51,8 +71,8 @@
|
||||
- [ADR-007: KMS Simplification](architecture/adr/adr-007-kms-simplification.md)
|
||||
- [ADR-008: Cedar Authorization](architecture/adr/adr-008-cedar-authorization.md)
|
||||
- [ADR-009: Security System Complete](architecture/adr/adr-009-security-system-complete.md)
|
||||
- [ADR-010: Configuration Format Strategy](architecture/adr/ADR-010-configuration-format-strategy.md)
|
||||
- [ADR-011: Nickel Migration](architecture/adr/ADR-011-nickel-migration.md)
|
||||
- [ADR-010: Configuration Format Strategy](architecture/adr/adr-010-configuration-format-strategy.md)
|
||||
- [ADR-011: Nickel Migration](architecture/adr/adr-011-nickel-migration.md)
|
||||
- [ADR-012: Nushell Nickel Plugin CLI Wrapper](architecture/adr/adr-012-nushell-nickel-plugin-cli-wrapper.md)
|
||||
- [ADR-013: Typdialog Web UI Backend Integration](architecture/adr/adr-013-typdialog-integration.md)
|
||||
- [ADR-014: SecretumVault Integration](architecture/adr/adr-014-secretumvault-integration.md)
|
||||
@ -60,21 +80,12 @@
|
||||
|
||||
---
|
||||
|
||||
## AI Integration
|
||||
## Roadmap & Future Features
|
||||
|
||||
- [Overview](ai/README.md)
|
||||
- [Architecture](ai/architecture.md)
|
||||
- [Natural Language Configuration](ai/natural-language-config.md)
|
||||
- [AI-Assisted Forms](ai/ai-assisted-forms.md)
|
||||
- [AI Agents](ai/ai-agents.md)
|
||||
- [Configuration Generation](ai/config-generation.md)
|
||||
- [RAG System](ai/rag-system.md)
|
||||
- [MCP Integration](ai/mcp-integration.md)
|
||||
- [Security Policies](ai/security-policies.md)
|
||||
- [Troubleshooting with AI](ai/troubleshooting-with-ai.md)
|
||||
- [API Reference](ai/api-reference.md)
|
||||
- [Configuration](ai/configuration.md)
|
||||
- [Cost Management](ai/cost-management.md)
|
||||
- [Overview](roadmap/README.md)
|
||||
- [AI Integration (Planned)](roadmap/ai-integration.md)
|
||||
- [Native Plugins (Partial)](roadmap/native-plugins.md)
|
||||
- [Nickel Workflows (Planned)](roadmap/nickel-workflows.md)
|
||||
|
||||
---
|
||||
|
||||
@ -93,33 +104,39 @@
|
||||
|
||||
## Development
|
||||
|
||||
- [Extension Development](development/extension-development.md)
|
||||
- [Infrastructure-Specific Extensions](development/infrastructure-specific-extensions.md)
|
||||
- [Quick Provider Guide](development/quick-provider-guide.md)
|
||||
- [Command Handler Guide](development/command-handler-guide.md)
|
||||
- [Configuration](development/configuration.md)
|
||||
- [Workflow](development/workflow.md)
|
||||
- [Integration](development/integration.md)
|
||||
- [Build System](development/build-system.md)
|
||||
- [Extensions](development/extensions.md)
|
||||
- [Distribution Process](development/distribution-process.md)
|
||||
- [Implementation Guide](development/implementation-guide.md)
|
||||
- [TaskServ Developer Guide](development/taskserv-developer-guide.md)
|
||||
- [TaskServ Quick Guide](development/taskserv-quick-guide.md)
|
||||
- [Project Structure](development/project-structure.md)
|
||||
- [Provider Agnostic Architecture](development/provider-agnostic-architecture.md)
|
||||
- [Ctrl-C Implementation Notes](development/ctrl-c-implementation-notes.md)
|
||||
- [Auth Metadata Guide](development/auth-metadata-guide.md)
|
||||
- [Migration Guide](development/migration-guide.md)
|
||||
- [KMS Simplification](development/kms-simplification.md)
|
||||
- [Migration Example](development/migration-example.md)
|
||||
- [Glossary](development/glossary.md)
|
||||
- [Provider Distribution Guide](development/provider-distribution-guide.md)
|
||||
- [TaskServ Categorization](development/taskserv-categorization.md)
|
||||
- [Extension Registry](development/extension-registry.md)
|
||||
- [MCP Server](development/mcp-server.md)
|
||||
- [TypeDialog Platform Config Guide](development/typedialog-platform-config-guide.md)
|
||||
- [Provider Comparison Matrix](development/provider-comparison.md)
|
||||
|
||||
### Extensions
|
||||
|
||||
- [Overview](development/extensions/README.md)
|
||||
- [Extension Development](development/extensions/extension-development.md)
|
||||
- [Extension Registry](development/extensions/extension-registry.md)
|
||||
|
||||
### Providers
|
||||
|
||||
- [Quick Provider Guide](development/providers/quick-provider-guide.md)
|
||||
- [Provider Agnostic Architecture](development/providers/provider-agnostic-architecture.md)
|
||||
- [Provider Development Guide](development/providers/provider-development-guide.md)
|
||||
- [Provider Distribution Guide](development/providers/provider-distribution-guide.md)
|
||||
- [Provider Comparison Matrix](development/providers/provider-comparison.md)
|
||||
|
||||
### TaskServs
|
||||
|
||||
- [TaskServ Quick Guide](development/taskservs/taskserv-quick-guide.md)
|
||||
- [TaskServ Categorization](development/taskservs/taskserv-categorization.md)
|
||||
|
||||
---
|
||||
|
||||
@ -128,11 +145,7 @@
|
||||
- [Platform Deployment Guide](operations/deployment-guide.md)
|
||||
- [Service Management Guide](operations/service-management-guide.md)
|
||||
- [Monitoring & Alerting Setup](operations/monitoring-alerting-setup.md)
|
||||
- [Service Management Quick Reference](operations/service-management-quickref.md)
|
||||
- [CoreDNS Guide](operations/coredns-guide.md)
|
||||
- [Backup Recovery](operations/backup-recovery.md)
|
||||
- [Deployment](operations/deployment.md)
|
||||
- [Monitoring](operations/monitoring.md)
|
||||
- [Production Readiness Checklist](operations/production-readiness-checklist.md)
|
||||
- [Break Glass Training Guide](operations/break-glass-training-guide.md)
|
||||
- [Cedar Policies Production Guide](operations/cedar-policies-production-guide.md)
|
||||
@ -154,20 +167,23 @@
|
||||
- [Batch Workflow Multi-Provider Examples](infrastructure/batch-workflow-multi-provider.md)
|
||||
- [CLI Architecture](infrastructure/cli-architecture.md)
|
||||
- [Configuration System](infrastructure/configuration-system.md)
|
||||
- [Workspace Setup](infrastructure/workspace-setup.md)
|
||||
- [Workspace Switching Guide](infrastructure/workspace-switching-guide.md)
|
||||
- [Workspace Switching System](infrastructure/workspace-switching-system.md)
|
||||
- [CLI Reference](infrastructure/cli-reference.md)
|
||||
- [Workspace Config Architecture](infrastructure/workspace-config-architecture.md)
|
||||
- [Dynamic Secrets Guide](infrastructure/dynamic-secrets-guide.md)
|
||||
- [Mode System Guide](infrastructure/mode-system-guide.md)
|
||||
- [Workspace Guide](infrastructure/workspace-guide.md)
|
||||
- [Workspace Enforcement Guide](infrastructure/workspace-enforcement-guide.md)
|
||||
- [Workspace Infra Reference](infrastructure/workspace-infra-reference.md)
|
||||
- [Workspace Config Commands](infrastructure/workspace-config-commands.md)
|
||||
- [Config Rendering Guide](infrastructure/config-rendering-guide.md)
|
||||
- [Configuration](infrastructure/configuration.md)
|
||||
|
||||
### Workspaces
|
||||
|
||||
- [Workspace Setup](infrastructure/workspaces/workspace-setup.md)
|
||||
- [Workspace Guide](infrastructure/workspaces/workspace-guide.md)
|
||||
- [Workspace Switching Guide](infrastructure/workspaces/workspace-switching-guide.md)
|
||||
- [Workspace Switching System](infrastructure/workspaces/workspace-switching-system.md)
|
||||
- [Workspace Config Architecture](infrastructure/workspaces/workspace-config-architecture.md)
|
||||
- [Workspace Config Commands](infrastructure/workspaces/workspace-config-commands.md)
|
||||
- [Workspace Enforcement Guide](infrastructure/workspaces/workspace-enforcement-guide.md)
|
||||
- [Workspace Infra Reference](infrastructure/workspaces/workspace-infra-reference.md)
|
||||
|
||||
---
|
||||
|
||||
## Security
|
||||
@ -183,8 +199,6 @@
|
||||
- [NuShell Plugins System](security/nushell-plugins-system.md)
|
||||
- [Plugin Usage Guide](security/plugin-usage-guide.md)
|
||||
- [Secrets Management Guide](security/secrets-management-guide.md)
|
||||
- [Auth Quick Reference](security/auth-quick-reference.md)
|
||||
- [Config Encryption Quick Reference](security/config-encryption-quickref.md)
|
||||
- [KMS Service](security/kms-service.md)
|
||||
|
||||
---
|
||||
@ -203,7 +217,6 @@
|
||||
## Testing
|
||||
|
||||
- [Test Environment Guide](testing/test-environment-guide.md)
|
||||
- [Test Environment Usage](testing/test-environment-usage.md)
|
||||
- [Test Environment System](testing/test-environment-system.md)
|
||||
- [TaskServ Validation Guide](testing/taskserv-validation-guide.md)
|
||||
|
||||
@ -224,7 +237,6 @@
|
||||
- [Extension Development Quickstart](guides/extension-development-quickstart.md)
|
||||
- [Guide System](guides/guide-system.md)
|
||||
- [Workspace Generation Quick Reference](guides/workspace-generation-quick-reference.md)
|
||||
- [Workspace Documentation Migration](guides/workspace-documentation-migration.md)
|
||||
|
||||
### Multi-Provider Deployment Guides
|
||||
|
||||
@ -255,4 +267,3 @@
|
||||
## Configuration
|
||||
|
||||
- [Config Validation](configuration/config-validation.md)
|
||||
- [Workspace Config Architecture](configuration/workspace-config-architecture.md)
|
||||
|
||||
@ -1,6 +1,7 @@
|
||||
# AI Integration - Intelligent Infrastructure Provisioning
|
||||
|
||||
The provisioning platform integrates AI capabilities to provide intelligent assistance for infrastructure configuration, deployment, and troubleshooting.
|
||||
The provisioning platform integrates AI capabilities to provide intelligent assistance for infrastructure configuration, deployment, and
|
||||
troubleshooting.
|
||||
This section documents the AI system architecture, features, and usage patterns.
|
||||
|
||||
## Overview
|
||||
@ -19,7 +20,7 @@ The AI integration consists of multiple components working together to provide i
|
||||
### Natural Language Configuration
|
||||
|
||||
Generate infrastructure configurations from plain English descriptions:
|
||||
```bash
|
||||
```
|
||||
provisioning ai generate "Create a production PostgreSQL cluster with encryption and daily backups"
|
||||
```
|
||||
|
||||
@ -30,7 +31,7 @@ Real-time suggestions and explanations as you fill out configuration forms via t
|
||||
### Intelligent Troubleshooting
|
||||
|
||||
AI analyzes deployment failures and suggests fixes:
|
||||
```bash
|
||||
```
|
||||
provisioning ai troubleshoot deployment-12345
|
||||
```
|
||||
|
||||
@ -38,13 +39,13 @@ provisioning ai troubleshoot deployment-12345
|
||||
|
||||
### Configuration Optimization
|
||||
AI reviews configurations and suggests performance and security improvements:
|
||||
```bash
|
||||
```
|
||||
provisioning ai optimize workspaces/prod/config.ncl
|
||||
```
|
||||
|
||||
### Autonomous Agents
|
||||
AI agents execute multi-step workflows with minimal human intervention:
|
||||
```bash
|
||||
```
|
||||
provisioning ai agent --goal "Set up complete dev environment for Python app"
|
||||
```
|
||||
|
||||
@ -67,7 +68,7 @@ provisioning ai agent --goal "Set up complete dev environment for Python app"
|
||||
|
||||
### Enable AI Features
|
||||
|
||||
```bash
|
||||
```
|
||||
# Edit provisioning config
|
||||
vim provisioning/config/ai.toml
|
||||
|
||||
@ -85,7 +86,7 @@ troubleshooting = true
|
||||
|
||||
### Generate Configuration from Natural Language
|
||||
|
||||
```bash
|
||||
```
|
||||
# Simple generation
|
||||
provisioning ai generate "PostgreSQL database with encryption"
|
||||
|
||||
@ -98,7 +99,7 @@ provisioning ai generate \
|
||||
|
||||
### Use AI-Assisted Forms
|
||||
|
||||
```bash
|
||||
```
|
||||
# Open typdialog web UI with AI assistance
|
||||
provisioning workspace init --interactive --ai-assist
|
||||
|
||||
@ -109,7 +110,7 @@ provisioning workspace init --interactive --ai-assist
|
||||
|
||||
### Troubleshoot with AI
|
||||
|
||||
```bash
|
||||
```
|
||||
# Analyze failed deployment
|
||||
provisioning ai troubleshoot deployment-12345
|
||||
|
||||
@ -133,11 +134,11 @@ See [Security Policies](security-policies.md) for complete details.
|
||||
|
||||
## Supported LLM Providers
|
||||
|
||||
| Provider | Models | Best For |
|
||||
| ---------- | -------- | ---------- |
|
||||
| **Anthropic** | Claude Sonnet 4, Claude Opus 4 | Complex configs, long context |
|
||||
| **OpenAI** | GPT-4 Turbo, GPT-4 | Fast suggestions, tool calling |
|
||||
| **Local** | Llama 3, Mistral | Air-gapped, privacy-critical |
|
||||
| Provider | Models | Best For |
| ---------- | -------- | ---------- |
| **Anthropic** | Claude Sonnet 4, Claude Opus 4 | Complex configs, long context |
| **OpenAI** | GPT-4 Turbo, GPT-4 | Fast suggestions, tool calling |
| **Local** | Llama 3, Mistral | Air-gapped, privacy-critical |
|
||||
|
||||
## Cost Considerations
|
||||
|
||||
|
||||
@ -1 +1,532 @@
|
||||
# AI Agents
|
||||
# Autonomous AI Agents (typdialog-ag)
|
||||
|
||||
**Status**: 🔴 Planned (Q2 2025 target)
|
||||
|
||||
Autonomous AI Agents is a planned feature that enables AI agents to execute multi-step
|
||||
infrastructure provisioning workflows with minimal human intervention. Agents make
|
||||
decisions, adapt to changing conditions, and execute complex tasks while maintaining
|
||||
security and requiring human approval for critical operations.
|
||||
|
||||
## Feature Overview
|
||||
|
||||
### What It Does
|
||||
|
||||
Enable AI agents to manage complex provisioning workflows:
|
||||
|
||||
```
|
||||
User Goal:
|
||||
"Set up a complete development environment with:
|
||||
- PostgreSQL database
|
||||
- Redis cache
|
||||
- Kubernetes cluster
|
||||
- Monitoring stack
|
||||
- Logging infrastructure"
|
||||
|
||||
AI Agent executes:
|
||||
1. Analyzes requirements and constraints
|
||||
2. Plans multi-step deployment sequence
|
||||
3. Creates configurations for all components
|
||||
4. Validates configurations against policies
|
||||
5. Requests human approval for critical decisions
|
||||
6. Executes deployment in correct order
|
||||
7. Monitors for failures and adapts
|
||||
8. Reports completion and recommendations
|
||||
```
|
||||
|
||||
## Agent Capabilities
|
||||
|
||||
### Multi-Step Workflow Execution
|
||||
|
||||
Agents coordinate complex, multi-component deployments:
|
||||
|
||||
```
|
||||
Goal: "Deploy production Kubernetes cluster with managed databases"
|
||||
|
||||
Agent Plan:
|
||||
Phase 1: Infrastructure
|
||||
├─ Create VPC and networking
|
||||
├─ Set up security groups
|
||||
└─ Configure IAM roles
|
||||
|
||||
Phase 2: Kubernetes
|
||||
├─ Create EKS cluster
|
||||
├─ Configure network plugins
|
||||
├─ Set up autoscaling
|
||||
└─ Install cluster add-ons
|
||||
|
||||
Phase 3: Managed Services
|
||||
├─ Provision RDS PostgreSQL
|
||||
├─ Configure backups
|
||||
└─ Set up replicas
|
||||
|
||||
Phase 4: Observability
|
||||
├─ Deploy Prometheus
|
||||
├─ Deploy Grafana
|
||||
├─ Configure log collection
|
||||
└─ Set up alerting
|
||||
|
||||
Phase 5: Validation
|
||||
├─ Run smoke tests
|
||||
├─ Verify connectivity
|
||||
└─ Check compliance
|
||||
```
|
||||
|
||||
### Adaptive Decision Making
|
||||
|
||||
Agents adapt to conditions and make intelligent decisions:
|
||||
|
||||
```
|
||||
Scenario: Database provisioning fails due to resource quota
|
||||
|
||||
Standard approach (human):
|
||||
1. Detect failure
|
||||
2. Investigate issue
|
||||
3. Decide on fix (reduce size, change region, etc.)
|
||||
4. Update config
|
||||
5. Retry
|
||||
|
||||
Agent approach:
|
||||
1. Detect failure
|
||||
2. Analyze error: "Quota exceeded for db.r6g.xlarge"
|
||||
3. Check available options:
|
||||
- Try smaller instance: db.r6g.large (may be insufficient)
|
||||
- Try different region: different cost, latency
|
||||
- Request quota increase (requires human approval)
|
||||
4. Ask human: "Quota exceeded. Suggest: use db.r6g.large instead
|
||||
(slightly reduced performance). Approve? [yes/no/try-other]"
|
||||
5. Execute based on approval
|
||||
6. Continue workflow
|
||||
```
|
||||
|
||||
### Dependency Management
|
||||
|
||||
Agents understand resource dependencies:
|
||||
|
||||
```
|
||||
Knowledge graph of dependencies:
|
||||
|
||||
VPC ──→ Subnets ──→ EC2 Instances
|
||||
├─────────→ Security Groups
|
||||
└────→ NAT Gateway ──→ Route Tables
|
||||
|
||||
RDS ──→ DB Subnet Group ──→ VPC
|
||||
├─────────→ Security Group
|
||||
└────→ Parameter Group
|
||||
|
||||
Agent ensures:
|
||||
- VPC exists before creating subnets
|
||||
- Subnets exist before creating EC2
|
||||
- Security groups reference correct VPC
|
||||
- Deployment order respects all dependencies
|
||||
- Rollback order is reverse of creation
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Agent Design Pattern
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────────────────────┐
|
||||
│ Agent Supervisor (Orchestrator) │
|
||||
│ - Accepts user goal │
|
||||
│ - Plans workflow │
|
||||
│ - Coordinates specialist agents │
|
||||
│ - Requests human approvals │
|
||||
│ - Monitors overall progress │
|
||||
└────────────────────────────────────────────────────────┘
|
||||
↑ ↑ ↑
|
||||
│ │ │
|
||||
↓ ↓ ↓
|
||||
┌──────────────┐ ┌──────────────┐ ┌──────────────┐
|
||||
│ Database │ │ Kubernetes │ │ Monitoring │
|
||||
│ Specialist │ │ Specialist │ │ Specialist │
|
||||
│ │ │ │ │ │
|
||||
│ Tasks: │ │ Tasks: │ │ Tasks: │
|
||||
│ - Create DB │ │ - Create K8s │ │ - Deploy │
|
||||
│ - Configure │ │ - Configure │ │ Prometheus │
|
||||
│ - Validate │ │ - Validate │ │ - Deploy │
|
||||
│ - Report │ │ - Report │ │ Grafana │
|
||||
└──────────────┘ └──────────────┘ └──────────────┘
|
||||
```
|
||||
|
||||
### Agent Workflow
|
||||
|
||||
```
|
||||
Start: User Goal
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Goal Analysis & Planning │
|
||||
│ - Parse user intent │
|
||||
│ - Identify resources needed │
|
||||
│ - Plan dependency graph │
|
||||
│ - Generate task list │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Resource Generation │
|
||||
│ - Generate configs for each resource │
|
||||
│ - Validate against schemas │
|
||||
│ - Check compliance policies │
|
||||
│ - Identify potential issues │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
Human Review Point?
|
||||
├─ No issues: Continue
|
||||
└─ Issues found: Request approval/modification
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Execution Plan Verification │
|
||||
│ - Check all configs are valid │
|
||||
│ - Verify dependencies are resolvable │
|
||||
│ - Estimate costs and timeline │
|
||||
│ - Identify risks │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
Execute Workflow?
|
||||
├─ User approves: Start execution
|
||||
└─ User modifies: Return to planning
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Phase-by-Phase Execution │
|
||||
│ - Execute one logical phase │
|
||||
│ - Monitor for errors │
|
||||
│ - Report progress │
|
||||
│ - Ask for decisions if needed │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
All Phases Complete?
|
||||
├─ No: Continue to next phase
|
||||
└─ Yes: Final validation
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Final Validation & Reporting │
|
||||
│ - Smoke tests │
|
||||
│ - Connectivity tests │
|
||||
│ - Compliance verification │
|
||||
│ - Performance checks │
|
||||
│ - Generate final report │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
Success: Deployment Complete
|
||||
```
|
||||
|
||||
## Planned Agent Types
|
||||
|
||||
### 1. Database Specialist Agent
|
||||
|
||||
```
|
||||
Responsibilities:
|
||||
- Create and configure databases
|
||||
- Set up replication and backups
|
||||
- Configure encryption and security
|
||||
- Monitor database health
|
||||
- Handle database-specific issues
|
||||
|
||||
Examples:
|
||||
- Provision PostgreSQL cluster with replication
|
||||
- Set up MySQL with read replicas
|
||||
- Configure MongoDB sharding
|
||||
- Create backup pipelines
|
||||
```
|
||||
|
||||
### 2. Kubernetes Specialist Agent
|
||||
|
||||
```
|
||||
Responsibilities:
|
||||
- Create and configure Kubernetes clusters
|
||||
- Configure networking and ingress
|
||||
- Set up autoscaling policies
|
||||
- Deploy cluster add-ons
|
||||
- Manage workload placement
|
||||
|
||||
Examples:
|
||||
- Create EKS/GKE/AKS cluster
|
||||
- Configure Istio service mesh
|
||||
- Deploy Prometheus + Grafana
|
||||
- Configure auto-scaling policies
|
||||
```
|
||||
|
||||
### 3. Infrastructure Agent
|
||||
|
||||
```
|
||||
Responsibilities:
|
||||
- Create networking infrastructure
|
||||
- Configure security and firewalls
|
||||
- Set up load balancers
|
||||
- Configure DNS and CDN
|
||||
- Manage identity and access
|
||||
|
||||
Examples:
|
||||
- Create VPC with subnets
|
||||
- Configure security groups
|
||||
- Set up application load balancer
|
||||
- Configure Route53 DNS
|
||||
```
|
||||
|
||||
### 4. Monitoring Agent
|
||||
|
||||
```
|
||||
Responsibilities:
|
||||
- Deploy monitoring stack
|
||||
- Configure alerting
|
||||
- Set up logging infrastructure
|
||||
- Create dashboards
|
||||
- Configure notification channels
|
||||
|
||||
Examples:
|
||||
- Deploy Prometheus + Grafana
|
||||
- Set up CloudWatch dashboards
|
||||
- Configure log aggregation
|
||||
- Set up PagerDuty integration
|
||||
```
|
||||
|
||||
### 5. Compliance Agent
|
||||
|
||||
```
|
||||
Responsibilities:
|
||||
- Check security policies
|
||||
- Verify compliance requirements
|
||||
- Audit configurations
|
||||
- Generate compliance reports
|
||||
- Recommend security improvements
|
||||
|
||||
Examples:
|
||||
- Check PCI-DSS compliance
|
||||
- Verify encryption settings
|
||||
- Audit access controls
|
||||
- Generate compliance report
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Example 1: Development Environment Setup
|
||||
|
||||
```
|
||||
$ provisioning ai agent --goal "Set up dev environment for Python web app"
|
||||
|
||||
Agent Plan Generated:
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Environment: Development │
|
||||
│ Components: PostgreSQL + Redis + Monitoring │
|
||||
│ │
|
||||
│ Phase 1: Database (1-2 min) │
|
||||
│ - PostgreSQL 15 │
|
||||
│ - 10 GB storage │
|
||||
│ - Dev security settings │
|
||||
│ │
|
||||
│ Phase 2: Cache (1 min) │
|
||||
│ - Redis Cluster Mode disabled │
|
||||
│ - Single node │
|
||||
│ - 2 GB memory │
|
||||
│ │
|
||||
│ Phase 3: Monitoring (1-2 min) │
|
||||
│ - Prometheus (metrics) │
|
||||
│ - Grafana (dashboards) │
|
||||
│ - Log aggregation │
|
||||
│ │
|
||||
│ Estimated time: 5-10 minutes │
|
||||
│ Estimated cost: $15/month │
|
||||
│ │
|
||||
│ [Approve] [Modify] [Cancel] │
|
||||
└─────────────────────────────────────────┘
|
||||
|
||||
Agent: Approve to proceed with setup.
|
||||
|
||||
User: Approve
|
||||
|
||||
[Agent execution starts]
|
||||
Creating PostgreSQL... [████████░░] 80%
|
||||
Creating Redis... [░░░░░░░░░░] 0%
|
||||
[Waiting for PostgreSQL creation...]
|
||||
|
||||
PostgreSQL created successfully!
|
||||
Connection string: postgresql://dev:pwd@db.internal:5432/app
|
||||
|
||||
Creating Redis... [████████░░] 80%
|
||||
[Waiting for Redis creation...]
|
||||
|
||||
Redis created successfully!
|
||||
Connection string: redis://cache.internal:6379
|
||||
|
||||
Deploying monitoring... [████████░░] 80%
|
||||
[Waiting for Grafana startup...]
|
||||
|
||||
All services deployed successfully!
|
||||
Grafana dashboards: http://grafana.internal:3000
|
||||
```
|
||||
|
||||
### Example 2: Production Kubernetes Deployment
|
||||
|
||||
```
|
||||
$ provisioning ai agent --interactive \
|
||||
--goal "Deploy production Kubernetes cluster with managed databases"
|
||||
|
||||
Agent Analysis:
|
||||
- Cluster size: 3-10 nodes (auto-scaling)
|
||||
- Databases: RDS PostgreSQL + ElastiCache Redis
|
||||
- Monitoring: Full observability stack
|
||||
- Security: TLS, encryption, VPC isolation
|
||||
|
||||
Agent suggests modifications:
|
||||
1. Enable cross-AZ deployment for HA
|
||||
2. Add backup retention: 30 days
|
||||
3. Add network policies for security
|
||||
4. Enable cluster autoscaling
|
||||
Approve all? [yes/review]
|
||||
|
||||
User: Review
|
||||
|
||||
Agent points out:
|
||||
- Network policies may affect performance
|
||||
- Cross-AZ increases costs by ~20%
|
||||
- Backup retention meets compliance
|
||||
|
||||
User: Approve with modifications
|
||||
- Network policies: use audit mode first
|
||||
- Keep cross-AZ
|
||||
- Keep backups
|
||||
|
||||
[Agent creates configs with modifications]
|
||||
|
||||
Configs generated:
|
||||
✓ infrastructure/vpc.ncl
|
||||
✓ infrastructure/kubernetes.ncl
|
||||
✓ databases/postgres.ncl
|
||||
✓ databases/redis.ncl
|
||||
✓ monitoring/prometheus.ncl
|
||||
✓ monitoring/grafana.ncl
|
||||
|
||||
Estimated deployment time: 15-20 minutes
|
||||
Estimated cost: $2,500/month
|
||||
|
||||
[Start deployment?] [Review configs]
|
||||
|
||||
User: Review configs
|
||||
|
||||
[User reviews and approves]
|
||||
|
||||
[Agent executes deployment in phases]
|
||||
```
|
||||
|
||||
## Safety and Control
|
||||
|
||||
### Human-in-the-Loop Checkpoints
|
||||
|
||||
Agents stop and ask humans for approval at critical points:
|
||||
|
||||
```
|
||||
Automatic Approval (Agent decides):
|
||||
- Create configuration
|
||||
- Validate configuration
|
||||
- Check dependencies
|
||||
- Generate execution plan
|
||||
|
||||
Human Approval Required:
|
||||
- First-time resource creation
|
||||
- Cost changes > 10%
|
||||
- Security policy changes
|
||||
- Cross-region deployment
|
||||
- Data deletion operations
|
||||
- Major version upgrades
|
||||
```
|
||||
|
||||
### Decision Logging
|
||||
|
||||
All decisions logged for audit trail:
|
||||
|
||||
```
|
||||
Agent Decision Log:
|
||||
| 2025-01-13 10:00:00 | Generate database config |
|
||||
| 2025-01-13 10:00:05 | Config validation: PASS |
|
||||
| 2025-01-13 10:00:07 | Requesting human approval: "Create new PostgreSQL instance" |
|
||||
| 2025-01-13 10:00:45 | Human approval: APPROVED |
|
||||
| 2025-01-13 10:00:47 | Cost estimate: $100/month - within budget |
|
||||
| 2025-01-13 10:01:00 | Creating infrastructure... |
|
||||
| 2025-01-13 10:02:15 | Database created successfully |
|
||||
| 2025-01-13 10:02:16 | Running health checks... |
|
||||
| 2025-01-13 10:02:45 | Health check: PASSED |
|
||||
```
|
||||
|
||||
### Rollback Capability
|
||||
|
||||
Agents can rollback on failure:
|
||||
|
||||
```
|
||||
Scenario: Database creation succeeds, but Kubernetes creation fails
|
||||
|
||||
Agent behavior:
|
||||
1. Detect failure in Kubernetes phase
|
||||
2. Try recovery (retry, different configuration)
|
||||
3. Recovery fails
|
||||
4. Ask human: "Kubernetes creation failed. Rollback database creation? [yes/no]"
|
||||
5. If yes: Delete database, clean up, report failure
|
||||
6. If no: Keep database, manual cleanup needed
|
||||
|
||||
Full rollback capability if entire workflow fails before human approval.
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Agent Settings
|
||||
|
||||
```
|
||||
# In provisioning/config/ai.toml
|
||||
[ai.agents]
|
||||
enabled = true
|
||||
|
||||
# Agent decision-making
|
||||
auto_approve_threshold = 0.95 # Approve if confidence > 95%
|
||||
require_approval_for = [
|
||||
"first_resource_creation",
|
||||
"cost_change_above_percent",
|
||||
"security_policy_change",
|
||||
"data_deletion",
|
||||
]
|
||||
|
||||
cost_change_threshold_percent = 10
|
||||
|
||||
# Execution control
|
||||
max_parallel_phases = 2
|
||||
phase_timeout_minutes = 30
|
||||
execution_log_retention_days = 90
|
||||
|
||||
# Safety
|
||||
dry_run_mode = false # Set to true to always perform a dry run first
|
||||
require_final_approval = true
|
||||
rollback_on_failure = true
|
||||
|
||||
# Learning
|
||||
track_agent_decisions = true
|
||||
track_success_rate = true
|
||||
improve_from_feedback = true
|
||||
```
|
||||
|
||||
## Success Criteria (Q2 2025)
|
||||
|
||||
- ✅ Agents complete 5 standard workflows without human intervention
|
||||
- ✅ Cost estimation accuracy within 5%
|
||||
- ✅ Execution time matches or beats manual setup by 30%
|
||||
- ✅ Success rate > 95% for tested scenarios
|
||||
- ✅ Zero unapproved critical decisions
|
||||
- ✅ Full decision audit trail for all operations
|
||||
- ✅ Rollback capability tested and verified
|
||||
- ✅ User satisfaction > 8/10 in testing
|
||||
- ✅ Documentation complete with examples
|
||||
- ✅ Integration with form assistance and NLC working
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [Natural Language Config](natural-language-config.md) - Config generation
|
||||
- [AI-Assisted Forms](ai-assisted-forms.md) - Interactive forms
|
||||
- [Configuration](configuration.md) - Setup guide
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Status**: 🔴 Planned
|
||||
**Target Release**: Q2 2025
|
||||
**Last Updated**: 2025-01-13
|
||||
**Component**: typdialog-ag
|
||||
**Architecture**: Complete
|
||||
**Implementation**: In Design Phase
|
||||
|
||||
@ -1 +1,438 @@
|
||||
# AI-Assisted Forms
|
||||
# AI-Assisted Forms (typdialog-ai)
|
||||
|
||||
**Status**: 🔴 Planned (Q2 2025 target)
|
||||
|
||||
AI-Assisted Forms is a planned feature that integrates intelligent suggestions, context-aware assistance, and natural language understanding into the
|
||||
typdialog web UI. This enables users to configure infrastructure through interactive forms with real-time AI guidance.
|
||||
|
||||
## Feature Overview
|
||||
|
||||
### What It Does
|
||||
|
||||
Enhance configuration forms with AI-powered assistance:
|
||||
|
||||
```
|
||||
User typing in form field: "storage"
|
||||
↓
|
||||
AI analyzes context:
|
||||
- Current form (database configuration)
|
||||
- Field type (storage capacity)
|
||||
- Similar past configurations
|
||||
- Best practices for this workload
|
||||
↓
|
||||
Suggestions appear:
|
||||
✓ "100 GB (standard production size)"
|
||||
✓ "50 GB (development environment)"
|
||||
✓ "500 GB (large-scale analytics)"
|
||||
```
|
||||
|
||||
### Primary Use Cases
|
||||
|
||||
1. **Guided Configuration**: Step-by-step assistance filling complex forms
|
||||
2. **Error Explanation**: AI explains validation failures in plain English
|
||||
3. **Smart Autocomplete**: Suggestions based on context, not just keywords
|
||||
4. **Learning**: New users learn patterns from AI explanations
|
||||
5. **Efficiency**: Experienced users get quick suggestions
|
||||
|
||||
## Architecture
|
||||
|
||||
### User Interface Integration
|
||||
|
||||
```
|
||||
┌────────────────────────────────────────┐
|
||||
│ Typdialog Web UI (React/TypeScript) │
|
||||
│ │
|
||||
│ ┌──────────────────────────────────┐ │
|
||||
│ │ Form Fields │ │
|
||||
│ │ │ │
|
||||
│ │ Database Engine: [postgresql ▼] │ │
|
||||
│ │ Storage (GB): [100 GB ↓ ?] │ │
|
||||
│ │ AI suggestions │ │
|
||||
│ │ Encryption: [✓ enabled ] │ │
|
||||
│ │ "Required for │ │
|
||||
│ │ production" │ │
|
||||
│ │ │ │
|
||||
│ │ [← Back] [Next →] │ │
|
||||
│ └──────────────────────────────────┘ │
|
||||
│ ↓ │
|
||||
│ AI Assistance Panel │
|
||||
│ (suggestions & explanations) │
|
||||
└────────────────────────────────────────┘
|
||||
↓ ↑
|
||||
User Input AI Service
|
||||
(port 8083)
|
||||
```
|
||||
|
||||
### Suggestion Pipeline
|
||||
|
||||
```
|
||||
User Event (typing, focusing field, validation error)
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ Context Extraction │
|
||||
│ - Current field and value │
|
||||
│ - Form schema and constraints │
|
||||
│ - Other filled fields │
|
||||
│ - User role and workspace │
|
||||
└─────────────────────┬───────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ RAG Retrieval │
|
||||
│ - Find similar configs │
|
||||
│ - Get examples for field type │
|
||||
│ - Retrieve relevant documentation │
|
||||
│ - Find validation rules │
|
||||
└─────────────────────┬───────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ Suggestion Generation │
|
||||
│ - AI generates suggestions │
|
||||
│ - Rank by relevance │
|
||||
│ - Format for display │
|
||||
│ - Generate explanation │
|
||||
└─────────────────────┬───────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ Response Formatting │
|
||||
│ - Debounce (don't update too fast) │
|
||||
│ - Cache identical results │
|
||||
│ - Stream if long response │
|
||||
│ - Display to user │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Planned Features
|
||||
|
||||
### 1. Smart Field Suggestions
|
||||
|
||||
Intelligent suggestions based on context:
|
||||
|
||||
```
|
||||
Scenario: User filling database configuration form
|
||||
|
||||
1. Engine selection
|
||||
User types: "post"
|
||||
Suggestion: "postgresql" (99% match)
|
||||
Explanation: "PostgreSQL is the most popular open-source relational database"
|
||||
|
||||
2. Storage size
|
||||
User has selected: "postgresql", "production", "web-application"
|
||||
Suggestions appear:
|
||||
• "100 GB" (standard production web app database)
|
||||
• "500 GB" (if expected growth > 1000 connections)
|
||||
• "1 TB" (high-traffic SaaS platform)
|
||||
Explanation: "For typical web applications with 1000s of concurrent users, 100 GB is recommended"
|
||||
|
||||
3. Backup frequency
|
||||
User has selected: "production", "critical-data"
|
||||
Suggestions appear:
|
||||
• "Daily" (standard for critical databases)
|
||||
• "Hourly" (for data warehouses with frequent updates)
|
||||
Explanation: "Critical production data requires daily or more frequent backups"
|
||||
```
|
||||
|
||||
### 2. Validation Error Explanation
|
||||
|
||||
Human-readable error messages with fixes:
|
||||
|
||||
```
|
||||
User enters: "storage = -100"
|
||||
|
||||
Current behavior:
|
||||
✗ Error: Expected positive integer
|
||||
|
||||
Planned AI behavior:
|
||||
✗ Storage must be positive (1-65535 GB)
|
||||
|
||||
Why: Negative storage doesn't make sense.
|
||||
Storage capacity must be at least 1 GB.
|
||||
|
||||
Fix suggestions:
|
||||
• Use 100 GB (typical production size)
|
||||
• Use 50 GB (development environment)
|
||||
• Use your required size in GB
|
||||
```
|
||||
|
||||
### 3. Field-to-Field Context Awareness
|
||||
|
||||
Suggestions change based on other fields:
|
||||
|
||||
```
|
||||
Scenario: Multi-step configuration form
|
||||
|
||||
Step 1: Select environment
|
||||
User: "production"
|
||||
→ Form shows constraints: (min storage 50GB, encryption required, backup required)
|
||||
|
||||
Step 2: Select database engine
|
||||
User: "postgresql"
|
||||
→ Suggestions adapted:
|
||||
- PostgreSQL 15 recommended for production
|
||||
- Point-in-time recovery available
|
||||
- Replication options highlighted
|
||||
|
||||
Step 3: Storage size
|
||||
→ Suggestions show:
|
||||
- Minimum 50 GB for production
|
||||
- Examples from similar production configs
|
||||
- Cost estimate updates in real-time
|
||||
|
||||
Step 4: Encryption
|
||||
→ Suggestion appears: "Recommended: AES-256"
|
||||
→ Explanation: "Required for production environments"
|
||||
```
|
||||
|
||||
### 4. Inline Documentation
|
||||
|
||||
Quick access to relevant docs:
|
||||
|
||||
```
|
||||
Field: "Backup Retention Days"
|
||||
|
||||
Suggestion popup:
|
||||
┌─────────────────────────────────┐
|
||||
│ Suggested value: 30 │
|
||||
│ │
|
||||
│ Why: 30 days is the industry    │
|
||||
│ standard for compliance (PCI-DSS)│
|
||||
│ │
|
||||
│ Learn more: │
|
||||
│ → Backup best practices guide │
|
||||
│ → Your compliance requirements │
|
||||
│ → Cost vs retention trade-offs │
|
||||
└─────────────────────────────────┘
|
||||
```
|
||||
|
||||
### 5. Multi-Field Suggestions
|
||||
|
||||
Suggest multiple related fields together:
|
||||
|
||||
```
|
||||
User selects: environment = "production"
|
||||
|
||||
AI suggests completing:
|
||||
┌─────────────────────────────────┐
|
||||
│ Complete Production Setup │
|
||||
│ │
|
||||
│ Based on production environment │
|
||||
│ we recommend: │
|
||||
│ │
|
||||
│ Encryption: enabled │ ← Auto-fill
|
||||
│ Backups: daily │ ← Auto-fill
|
||||
│ Monitoring: enabled │ ← Auto-fill
|
||||
│ High availability: enabled │ ← Auto-fill
|
||||
│ Retention: 30 days │ ← Auto-fill
|
||||
│ │
|
||||
│ [Accept All] [Review] [Skip] │
|
||||
└─────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Implementation Components
|
||||
|
||||
### Frontend (typdialog-ai JavaScript/TypeScript)
|
||||
|
||||
```
|
||||
// React component for field with AI assistance
|
||||
interface AIFieldProps {
|
||||
fieldName: string;
|
||||
fieldType: string;
|
||||
currentValue: string;
|
||||
formContext: Record<string, any>;
|
||||
schema: FieldSchema;
|
||||
}
|
||||
|
||||
function AIAssistedField({fieldName, formContext, schema}: AIFieldProps) {
|
||||
const [suggestions, setSuggestions] = useState<Suggestion[]>([]);
|
||||
const [explanation, setExplanation] = useState<string>("");
|
||||
|
||||
// Debounced suggestion generation
|
||||
useEffect(() => {
|
||||
const timer = setTimeout(async () => {
|
||||
const suggestions = await ai.suggestFieldValue({
|
||||
field: fieldName,
|
||||
context: formContext,
|
||||
schema: schema,
|
||||
});
|
||||
setSuggestions(suggestions);
|
||||
setExplanation(suggestions[0]?.explanation || "");
|
||||
}, 300); // Debounce 300ms
|
||||
|
||||
return () => clearTimeout(timer);
|
||||
}, [formContext[fieldName]]);
|
||||
|
||||
return (
|
||||
<div className="ai-field">
|
||||
<input
|
||||
value={formContext[fieldName]}
|
||||
onChange={(e) => handleChange(e.target.value)}
|
||||
/>
|
||||
|
||||
{suggestions.length > 0 && (
|
||||
<div className="ai-suggestions">
|
||||
{suggestions.map((s) => (
|
||||
<button key={s.value} onClick={() => accept(s.value)}>
|
||||
{s.label}
|
||||
</button>
|
||||
))}
|
||||
{explanation && (
|
||||
<p className="ai-explanation">{explanation}</p>
|
||||
)}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
```
|
||||
|
||||
### Backend Service Integration
|
||||
|
||||
```
|
||||
// In AI Service: field suggestion endpoint
|
||||
async fn suggest_field_value(
|
||||
req: SuggestFieldRequest,
|
||||
) -> Result<Vec<Suggestion>> {
|
||||
// Build context for the suggestion
|
||||
let context = build_field_context(&req.form_context, &req.field_name)?;
|
||||
|
||||
// Retrieve relevant examples from RAG
|
||||
let examples = rag.search_by_field(&req.field_name, &context)?;
|
||||
|
||||
// Generate suggestions via LLM
|
||||
let suggestions = llm.generate_suggestions(
|
||||
&req.field_name,
|
||||
&req.field_type,
|
||||
&context,
|
||||
&examples,
|
||||
).await?;
|
||||
|
||||
// Rank and format suggestions
|
||||
let ranked = rank_suggestions(suggestions, &context);
|
||||
|
||||
Ok(ranked)
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Form Assistant Settings
|
||||
|
||||
```
|
||||
# In provisioning/config/ai.toml
|
||||
[ai.forms]
|
||||
enabled = true
|
||||
|
||||
# Suggestion delivery
|
||||
suggestions_enabled = true
|
||||
suggestions_debounce_ms = 300
|
||||
max_suggestions_per_field = 3
|
||||
|
||||
# Error explanations
|
||||
error_explanations_enabled = true
|
||||
explain_validation_errors = true
|
||||
suggest_fixes = true
|
||||
|
||||
# Field context awareness
|
||||
field_context_enabled = true
|
||||
cross_field_suggestions = true
|
||||
|
||||
# Inline documentation
|
||||
inline_docs_enabled = true
|
||||
docs_link_type = "modal" # or "sidebar", "tooltip"
|
||||
|
||||
# Performance
|
||||
cache_suggestions = true
|
||||
cache_ttl_seconds = 3600
|
||||
|
||||
# Learning
|
||||
track_accepted_suggestions = true
|
||||
track_rejected_suggestions = true
|
||||
```
|
||||
|
||||
## User Experience Flow
|
||||
|
||||
### Scenario: New User Configuring PostgreSQL
|
||||
|
||||
```
|
||||
1. User opens typdialog form
|
||||
- Form title: "Create Database"
|
||||
- First field: "Database Engine"
|
||||
- AI shows: "PostgreSQL recommended for relational data"
|
||||
|
||||
2. User types "post"
|
||||
- Autocomplete shows: "postgresql"
|
||||
- AI explains: "PostgreSQL is the most stable open-source database"
|
||||
|
||||
3. User selects "postgresql"
|
||||
- Form progresses
|
||||
- Next field: "Version"
|
||||
- AI suggests: "PostgreSQL 15 (latest stable)"
|
||||
- Explanation: "Version 15 is current stable, recommended for new deployments"
|
||||
|
||||
4. User selects version 15
|
||||
- Next field: "Environment"
|
||||
- User selects "production"
|
||||
- AI note appears: "Production environment requires encryption and backups"
|
||||
|
||||
5. Next field: "Storage (GB)"
|
||||
- Form shows: Minimum 50 GB (production requirement)
|
||||
- AI suggestions:
|
||||
• 100 GB (standard production)
|
||||
• 250 GB (high-traffic site)
|
||||
- User accepts: 100 GB
|
||||
|
||||
6. Validation error on next field
|
||||
- Old behavior: "Invalid backup_days value"
|
||||
- New behavior:
|
||||
"Backup retention must be 1-35 days. Recommended: 30 days.
|
||||
30-day retention meets compliance requirements for production systems."
|
||||
|
||||
7. User completes form
|
||||
- Summary shows all AI-assisted decisions
|
||||
- Generate button creates configuration
|
||||
```
|
||||
|
||||
## Integration with Natural Language Generation
|
||||
|
||||
NLC and form assistance share the same backend:
|
||||
|
||||
```
|
||||
Natural Language Generation AI-Assisted Forms
|
||||
↓ ↓
|
||||
"Create a PostgreSQL db" Select field values
|
||||
↓ ↓
|
||||
Intent Extraction Context Extraction
|
||||
↓ ↓
|
||||
RAG Search RAG Search (same results)
|
||||
↓ ↓
|
||||
LLM Generation LLM Suggestions
|
||||
↓ ↓
|
||||
Config Output Form Field Population
|
||||
```
|
||||
|
||||
## Success Criteria (Q2 2025)
|
||||
|
||||
- ✅ Suggestions appear within 300ms of user action
|
||||
- ✅ 80% suggestion acceptance rate in user testing
|
||||
- ✅ Error explanations clearly explain issues and fixes
|
||||
- ✅ Cross-field context awareness works for 5+ database scenarios
|
||||
- ✅ Form completion time reduced by 40% with AI
|
||||
- ✅ User satisfaction > 8/10 in testing
|
||||
- ✅ No false suggestions (all suggestions are valid)
|
||||
- ✅ Offline mode works with cached suggestions
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [Natural Language Config](natural-language-config.md) - Related generation feature
|
||||
- [RAG System](rag-system.md) - Suggestion retrieval
|
||||
- [Configuration](configuration.md) - Setup guide
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Status**: 🔴 Planned
|
||||
**Target Release**: Q2 2025
|
||||
**Last Updated**: 2025-01-13
|
||||
**Component**: typdialog-ai
|
||||
**Architecture**: Complete
|
||||
**Implementation**: In Design Phase
|
||||
|
||||
@ -1 +0,0 @@
|
||||
# API Reference
|
||||
@ -1 +1,194 @@
|
||||
# Architecture
|
||||
# AI Integration Architecture
|
||||
|
||||
## Overview
|
||||
|
||||
The provisioning platform's AI system provides intelligent capabilities for configuration generation, troubleshooting, and automation. The
|
||||
architecture consists of multiple layers designed for reliability, security, and performance.
|
||||
|
||||
## Core Components - Production-Ready
|
||||
|
||||
### 1. AI Service (`provisioning/platform/ai-service`)
|
||||
|
||||
**Status**: ✅ Production-Ready (2,500+ lines Rust code)
|
||||
|
||||
The core AI service provides:
|
||||
- Multi-provider LLM support (Anthropic Claude, OpenAI GPT-4, local models)
|
||||
- Streaming response support for real-time feedback
|
||||
- Request caching with LRU and semantic similarity
|
||||
- Rate limiting and cost control
|
||||
- Comprehensive error handling
|
||||
- HTTP REST API on port 8083
|
||||
|
||||
**Supported Models**:
|
||||
- Claude Sonnet 4, Claude Opus 4 (Anthropic)
|
||||
- GPT-4 Turbo, GPT-4 (OpenAI)
|
||||
- Llama 3, Mistral (local/on-premise)
|
||||
|
||||
### 2. RAG System (Retrieval-Augmented Generation)
|
||||
|
||||
**Status**: ✅ Production-Ready (22/22 tests passing)
|
||||
|
||||
The RAG system enables AI to access and reason over platform documentation:
|
||||
- Vector embeddings via SurrealDB vector store
|
||||
- Hybrid search: vector similarity + BM25 keyword search
|
||||
- Document chunking (code and markdown aware)
|
||||
- Relevance ranking and context selection
|
||||
- Semantic caching for repeated queries
|
||||
|
||||
**Capabilities**:
|
||||
```
|
||||
provisioning ai query "How do I set up Kubernetes?"
|
||||
provisioning ai template "Describe my infrastructure"
|
||||
```
|
||||
|
||||
### 3. MCP Server (Model Context Protocol)
|
||||
|
||||
**Status**: ✅ Production-Ready
|
||||
|
||||
Provides Model Context Protocol integration:
|
||||
- Standardized tool interface for LLMs
|
||||
- Complex workflow composition
|
||||
- Integration with external AI systems (Claude, other LLMs)
|
||||
- Tool calling for provisioning operations
|
||||
|
||||
### 4. CLI Integration
|
||||
|
||||
**Status**: ✅ Production-Ready
|
||||
|
||||
Interactive commands:
|
||||
```
|
||||
provisioning ai template --prompt "Describe infrastructure"
|
||||
provisioning ai query --prompt "Configuration question"
|
||||
provisioning ai chat # Interactive mode
|
||||
```
|
||||
|
||||
**Configuration**:
|
||||
```
|
||||
[ai]
|
||||
enabled = true
|
||||
provider = "anthropic" # or "openai" or "local"
|
||||
model = "claude-sonnet-4"
|
||||
|
||||
[ai.cache]
|
||||
enabled = true
|
||||
semantic_similarity = true
|
||||
ttl_seconds = 3600
|
||||
|
||||
[ai.limits]
|
||||
max_tokens = 4096
|
||||
temperature = 0.7
|
||||
```
|
||||
|
||||
## Planned Components - Q2 2025
|
||||
|
||||
### Autonomous Agents (typdialog-ag)
|
||||
|
||||
**Status**: 🔴 Planned
|
||||
|
||||
Self-directed agents for complex tasks:
|
||||
- Multi-step workflow execution
|
||||
- Decision making and adaptation
|
||||
- Monitoring and self-healing recommendations
|
||||
|
||||
### AI-Assisted Forms (typdialog-ai)
|
||||
|
||||
**Status**: 🔴 Planned
|
||||
|
||||
Real-time AI suggestions in configuration forms:
|
||||
- Context-aware field recommendations
|
||||
- Validation error explanations
|
||||
- Auto-completion for infrastructure patterns
|
||||
|
||||
### Advanced Features
|
||||
|
||||
- Fine-tuning capabilities for custom models
|
||||
- Autonomous workflow execution with human approval
|
||||
- Cedar authorization policies for AI actions
|
||||
- Custom knowledge bases per workspace
|
||||
|
||||
## Architecture Diagram
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ User Interface │
|
||||
│ ├── CLI (provisioning ai ...) │
|
||||
│ ├── Web UI (typdialog) │
|
||||
│ └── MCP Client (Claude, etc.) │
|
||||
└──────────────┬──────────────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────────────┐
|
||||
│ AI Service (Port 8083) │
|
||||
│ ├── Request Router │
|
||||
│ ├── Cache Layer (LRU + Semantic) │
|
||||
│ ├── Prompt Engineering │
|
||||
│ └── Response Streaming │
|
||||
└──────┬─────────────────┬─────────────────────────┘
|
||||
↓ ↓
|
||||
┌─────────────┐ ┌──────────────────┐
|
||||
│ RAG System │ │ LLM Provider │
|
||||
│ SurrealDB │ │ ├── Anthropic │
|
||||
│ Vector DB │ │ ├── OpenAI │
|
||||
│ + BM25 │ │ └── Local Model │
|
||||
└─────────────┘ └──────────────────┘
|
||||
↓ ↓
|
||||
┌──────────────────────────────────────┐
|
||||
│ Cached Responses + Real Responses │
|
||||
│ Streamed to User │
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
| Metric | Value |
| -------- | ------- |
| Cold response (cache miss) | 2-5 seconds |
| Cached response | <500ms |
| Streaming start time | <1 second |
| AI service memory usage | ~200MB at rest |
| Cache size (configurable) | Up to 500MB |
| Vector DB (SurrealDB) | Included, auto-managed |
|
||||
|
||||
## Security Model
|
||||
|
||||
### Cedar Authorization
|
||||
|
||||
All AI operations controlled by Cedar policies:
|
||||
- User role-based access control
|
||||
- Operation-specific permissions
|
||||
- Complete audit logging
|
||||
|
||||
### Secret Protection
|
||||
|
||||
- Secrets never sent to external LLMs
|
||||
- PII/sensitive data sanitized before API calls
|
||||
- Encryption at rest in local cache
|
||||
- HSM support for key storage
|
||||
|
||||
### Local Model Support
|
||||
|
||||
Air-gapped deployments:
|
||||
- On-premise LLM models (Llama 3, Mistral)
|
||||
- Zero external API calls
|
||||
- Full data privacy compliance
|
||||
- Ideal for classified environments
|
||||
|
||||
## Configuration
|
||||
|
||||
See [Configuration Guide](configuration.md) for:
|
||||
- LLM provider setup
|
||||
- Cache configuration
|
||||
- Cost limits and budgets
|
||||
- Security policies
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [RAG System](rag-system.md) - Retrieval implementation details
|
||||
- [Security Policies](security-policies.md) - Authorization and safety controls
|
||||
- [Configuration Guide](configuration.md) - Setup instructions
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-13
|
||||
**Status**: ✅ Production-Ready (core system)
|
||||
**Test Coverage**: 22/22 tests passing
|
||||
|
||||
@ -1 +1,64 @@
|
||||
# Configuration Generation
|
||||
# Configuration Generation (typdialog-prov-gen)
|
||||
|
||||
**Status**: 🔴 Planned for Q2 2025
|
||||
|
||||
## Overview
|
||||
|
||||
The Configuration Generator (typdialog-prov-gen) will provide template-based Nickel configuration generation with AI-powered customization.
|
||||
|
||||
## Planned Features
|
||||
|
||||
### Template Selection
|
||||
- Library of production-ready infrastructure templates
|
||||
- AI recommends templates based on requirements
|
||||
- Preview before generation
|
||||
|
||||
### Customization via Natural Language
|
||||
```
|
||||
provisioning ai config-gen \
|
||||
--template "kubernetes-cluster" \
|
||||
--customize "Add Prometheus monitoring, increase replicas to 5, use us-east-1"
|
||||
```
|
||||
|
||||
### Multi-Provider Support
|
||||
- AWS, Hetzner, UpCloud, local infrastructure
|
||||
- Automatic provider-specific optimizations
|
||||
- Cost estimation across providers
|
||||
|
||||
### Validation and Testing
|
||||
- Type-checking via Nickel before deployment
|
||||
- Dry-run execution for safety
|
||||
- Test data fixtures for verification
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Template Library
|
||||
↓
|
||||
Template Selection (AI + User)
|
||||
↓
|
||||
Customization Layer (NL → Nickel)
|
||||
↓
|
||||
Validation (Type + Runtime)
|
||||
↓
|
||||
Generated Configuration
|
||||
```
|
||||
|
||||
## Integration Points
|
||||
|
||||
- typdialog web UI for template browsing
|
||||
- CLI for batch generation
|
||||
- AI service for customization suggestions
|
||||
- Nickel for type-safe validation
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Natural Language Configuration](natural-language-config.md) - NL to config generation
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [Configuration Guide](configuration.md) - Setup instructions
|
||||
|
||||
---
|
||||
|
||||
**Status**: 🔴 Planned
|
||||
**Expected Release**: Q2 2025
|
||||
**Priority**: High (enables non-technical users to generate configs)
|
||||
|
||||
@ -1 +1,601 @@
|
||||
# Configuration
|
||||
# AI System Configuration Guide
|
||||
|
||||
**Status**: ✅ Production-Ready (Configuration system)
|
||||
|
||||
Complete setup guide for AI features in the provisioning platform. This guide covers LLM provider configuration, feature enablement, cache setup, cost
|
||||
controls, and security settings.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Minimal Configuration
|
||||
|
||||
```
|
||||
# provisioning/config/ai.toml
|
||||
[ai]
|
||||
enabled = true
|
||||
provider = "anthropic" # or "openai" or "local"
|
||||
model = "claude-sonnet-4"
|
||||
api_key = "sk-ant-..." # Set via PROVISIONING_AI_API_KEY env var
|
||||
|
||||
[ai.cache]
|
||||
enabled = true
|
||||
|
||||
[ai.limits]
|
||||
max_tokens = 4096
|
||||
temperature = 0.7
|
||||
```
|
||||
|
||||
### Initialize Configuration
|
||||
|
||||
```
|
||||
# Generate default configuration
|
||||
provisioning config init ai
|
||||
|
||||
# Edit configuration
|
||||
provisioning config edit ai
|
||||
|
||||
# Validate configuration
|
||||
provisioning config validate ai
|
||||
|
||||
# Show current configuration
|
||||
provisioning config show ai
|
||||
```
|
||||
|
||||
## Provider Configuration
|
||||
|
||||
### Anthropic Claude
|
||||
|
||||
```
|
||||
[ai]
|
||||
enabled = true
|
||||
provider = "anthropic"
|
||||
model = "claude-sonnet-4" # or "claude-opus-4", "claude-haiku-4"
|
||||
api_key = "${PROVISIONING_AI_API_KEY}"
|
||||
api_base = "[https://api.anthropic.com"](https://api.anthropic.com")
|
||||
|
||||
# Request parameters
|
||||
[ai.request]
|
||||
max_tokens = 4096
|
||||
temperature = 0.7
|
||||
top_p = 0.95
|
||||
top_k = 40
|
||||
|
||||
# Supported models
|
||||
# - claude-opus-4: Most capable, for complex reasoning ($15/MTok input, $45/MTok output)
|
||||
# - claude-sonnet-4: Balanced (recommended), ($3/MTok input, $15/MTok output)
|
||||
# - claude-haiku-4: Fast, for simple tasks ($0.80/MTok input, $4/MTok output)
|
||||
```
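
To make the per-MTok prices above concrete, here is a small Python sketch (illustrative only, not part of the provisioning CLI or API) that estimates the cost of a single request from its token counts:

```
# Illustrative sketch: estimate per-request cost from the per-MTok prices above.
# The price table and function are examples only, not part of the provisioning API.
PRICES_PER_MTOK = {
    "claude-opus-4": (15.00, 45.00),
    "claude-sonnet-4": (3.00, 15.00),
    "claude-haiku-4": (0.80, 4.00),
}

def estimate_cost_usd(model: str, input_tokens: int, output_tokens: int) -> float:
    """Return the approximate USD cost of one request."""
    input_price, output_price = PRICES_PER_MTOK[model]
    return (input_tokens * input_price + output_tokens * output_price) / 1_000_000

# Example: a config-generation request of ~500 input and ~200 output tokens on Sonnet
print(estimate_cost_usd("claude-sonnet-4", 500, 200))  # ≈ 0.0045 USD
```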
|
||||
|
||||
### OpenAI GPT-4
|
||||
|
||||
```
|
||||
[ai]
|
||||
enabled = true
|
||||
provider = "openai"
|
||||
model = "gpt-4-turbo" # or "gpt-4", "gpt-4o"
|
||||
api_key = "${OPENAI_API_KEY}"
|
||||
api_base = "[https://api.openai.com/v1"](https://api.openai.com/v1")
|
||||
|
||||
[ai.request]
|
||||
max_tokens = 4096
|
||||
temperature = 0.7
|
||||
top_p = 0.95
|
||||
|
||||
# Supported models
|
||||
# - gpt-4: Most capable ($0.03/1K input, $0.06/1K output)
|
||||
# - gpt-4-turbo: Better at code ($0.01/1K input, $0.03/1K output)
|
||||
# - gpt-4o: Latest, multi-modal ($5/MTok input, $15/MTok output)
|
||||
```
|
||||
|
||||
### Local Models
|
||||
|
||||
```
|
||||
[ai]
|
||||
enabled = true
|
||||
provider = "local"
|
||||
model = "llama2-70b" # or "mistral", "neural-chat"
|
||||
api_base = "[http://localhost:8000"](http://localhost:8000") # Local Ollama or LM Studio
|
||||
|
||||
# Local model support
|
||||
# - Ollama: docker run -d -v ollama:/root/.ollama -p 11434:11434 ollama/ollama
|
||||
# - LM Studio: GUI app with API
|
||||
# - vLLM: High-throughput serving
|
||||
# - llama.cpp: CPU inference
|
||||
|
||||
[ai.local]
|
||||
gpu_enabled = true
|
||||
gpu_memory_gb = 24
|
||||
max_batch_size = 4
|
||||
```
|
||||
|
||||
## Feature Configuration
|
||||
|
||||
### Enable Specific Features
|
||||
|
||||
```
|
||||
[ai.features]
|
||||
# Core features (production-ready)
|
||||
rag_search = true # Retrieval-Augmented Generation
|
||||
config_generation = true # Generate Nickel from natural language
|
||||
mcp_server = true # Model Context Protocol server
|
||||
troubleshooting = true # AI-assisted debugging
|
||||
|
||||
# Form assistance (planned Q2 2025)
|
||||
form_assistance = false # AI suggestions in forms
|
||||
form_explanations = false # AI explains validation errors
|
||||
|
||||
# Agents (planned Q2 2025)
|
||||
autonomous_agents = false # AI agents for workflows
|
||||
agent_learning = false # Agents learn from deployments
|
||||
|
||||
# Advanced features
|
||||
fine_tuning = false # Fine-tune models for domain
|
||||
knowledge_base = false # Custom knowledge base per workspace
|
||||
```
|
||||
|
||||
## Cache Configuration
|
||||
|
||||
### Cache Strategy
|
||||
|
||||
```
|
||||
[ai.cache]
|
||||
enabled = true
|
||||
cache_type = "memory" # or "redis", "disk"
|
||||
ttl_seconds = 3600 # Cache entry lifetime
|
||||
|
||||
# Memory cache (recommended for single server)
|
||||
[ai.cache.memory]
|
||||
max_size_mb = 500
|
||||
eviction_policy = "lru" # Least Recently Used
|
||||
|
||||
# Redis cache (recommended for distributed)
|
||||
[ai.cache.redis]
|
||||
url = "redis://localhost:6379"
|
||||
db = 0
|
||||
password = "${REDIS_PASSWORD}"
|
||||
ttl_seconds = 3600
|
||||
|
||||
# Disk cache (recommended for persistent caching)
|
||||
[ai.cache.disk]
|
||||
path = "/var/cache/provisioning/ai"
|
||||
max_size_mb = 5000
|
||||
|
||||
# Semantic caching (for RAG)
|
||||
[ai.cache.semantic]
|
||||
enabled = true
|
||||
similarity_threshold = 0.95 # Cache hit if query similarity > 0.95
|
||||
cache_embeddings = true # Cache embedding vectors
|
||||
```
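
The `similarity_threshold` value controls when a new query is treated as equivalent to a cached one. The sketch below shows the idea as a minimal Python cosine-similarity lookup; the embedding function and cache layout are placeholders, not the AI service's actual implementation:

```
# Minimal sketch of a semantic cache lookup; data structures are illustrative.
import math

def cosine(a, b):
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm if norm else 0.0

def lookup(query_embedding, cache, threshold=0.95):
    """Return a cached response whose query embedding is similar enough, else None."""
    best = max(cache, key=lambda e: cosine(query_embedding, e["embedding"]), default=None)
    if best and cosine(query_embedding, best["embedding"]) >= threshold:
        return best["response"]   # semantic hit: no LLM call needed
    return None                   # miss: call the provider and store the result
```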
|
||||
|
||||
### Cache Metrics
|
||||
|
||||
```
|
||||
# Monitor cache performance
|
||||
provisioning admin cache stats ai
|
||||
|
||||
# Clear cache
|
||||
provisioning admin cache clear ai
|
||||
|
||||
# Analyze cache efficiency
|
||||
provisioning admin cache analyze ai --hours 24
|
||||
```
|
||||
|
||||
## Rate Limiting and Cost Control
|
||||
|
||||
### Rate Limits
|
||||
|
||||
```
|
||||
[ai.limits]
|
||||
# Tokens per request
|
||||
max_tokens = 4096
|
||||
max_input_tokens = 8192
|
||||
max_output_tokens = 4096
|
||||
|
||||
# Requests per minute/hour
|
||||
rpm_limit = 60 # Requests per minute
|
||||
rpm_burst = 100 # Allow bursts up to 100 RPM
|
||||
|
||||
# Daily cost limit
|
||||
daily_cost_limit_usd = 100
|
||||
warn_at_percent = 80 # Warn when at 80% of daily limit
|
||||
stop_at_percent = 95 # Stop accepting requests at 95%
|
||||
|
||||
# Token usage tracking
|
||||
track_token_usage = true
|
||||
track_cost_per_request = true
|
||||
```
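
The `rpm_limit`/`rpm_burst` pair behaves like a token bucket: tokens refill at the steady per-minute rate and the bucket size bounds bursts. A minimal Python sketch of that behaviour (illustrative only):

```
# Token-bucket sketch of rpm_limit / rpm_burst semantics (illustrative, not platform code).
import time

class TokenBucket:
    def __init__(self, rpm_limit=60, rpm_burst=100):
        self.rate = rpm_limit / 60.0      # tokens added per second
        self.capacity = rpm_burst         # burst size
        self.tokens = float(rpm_burst)
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        self.tokens = min(self.capacity, self.tokens + (now - self.last) * self.rate)
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False                      # caller should back off or queue the request
```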
|
||||
|
||||
### Cost Budgeting
|
||||
|
||||
```
|
||||
[ai.budget]
|
||||
enabled = true
|
||||
monthly_limit_usd = 1000
|
||||
|
||||
# Budget alerts
|
||||
alert_at_percent = [50, 75, 90]
|
||||
alert_email = "ops@company.com"
|
||||
alert_slack = "[https://hooks.slack.com/services/..."](https://hooks.slack.com/services/...")
|
||||
|
||||
# Cost by provider
|
||||
[ai.budget.providers]
|
||||
anthropic_limit = 500
|
||||
openai_limit = 300
|
||||
local_limit = 0 # Free (run locally)
|
||||
```
|
||||
|
||||
### Track Costs
|
||||
|
||||
```
|
||||
# View cost metrics
|
||||
provisioning admin costs show ai --period month
|
||||
|
||||
# Forecast cost
|
||||
provisioning admin costs forecast ai --days 30
|
||||
|
||||
# Analyze cost by feature
|
||||
provisioning admin costs analyze ai --by feature
|
||||
|
||||
# Export cost report
|
||||
provisioning admin costs export ai --format csv --output costs.csv
|
||||
```
|
||||
|
||||
## Security Configuration
|
||||
|
||||
### Authentication
|
||||
|
||||
```
|
||||
[ai.auth]
|
||||
# API key from environment variable
|
||||
api_key = "${PROVISIONING_AI_API_KEY}"
|
||||
|
||||
# Or from secure store
|
||||
api_key_vault = "secrets/ai-api-key"
|
||||
|
||||
# Token rotation
|
||||
rotate_key_days = 90
|
||||
rotation_alert_days = 7
|
||||
|
||||
# Request signing (for cloud providers)
|
||||
sign_requests = true
|
||||
signing_method = "hmac-sha256"
|
||||
```
|
||||
|
||||
### Authorization (Cedar)
|
||||
|
||||
```
|
||||
[ai.authorization]
|
||||
enabled = true
|
||||
policy_file = "provisioning/policies/ai-policies.cedar"
|
||||
|
||||
# Example policies:
|
||||
# allow(principal, action, resource) when principal.role == "admin"
|
||||
# allow(principal == ?principal, action == "ai_generate_config", resource)
|
||||
# when principal.workspace == resource.workspace
|
||||
```
|
||||
|
||||
### Data Protection
|
||||
|
||||
```
|
||||
[ai.security]
|
||||
# Sanitize data before sending to external LLM
|
||||
sanitize_pii = true
|
||||
sanitize_secrets = true
|
||||
redact_patterns = [
|
||||
"(?i)password\\s*[:=]\\s*[^\\s]+", # Passwords
|
||||
"(?i)api[_-]?key\\s*[:=]\\s*[^\\s]+", # API keys
|
||||
"(?i)secret\\s*[:=]\\s*[^\\s]+", # Secrets
|
||||
]
|
||||
|
||||
# Encryption
|
||||
encryption_enabled = true
|
||||
encryption_algorithm = "aes-256-gcm"
|
||||
key_derivation = "argon2id"
|
||||
|
||||
# Local-only mode (never send to external LLM)
|
||||
local_only = false # Set true for air-gapped deployments
|
||||
```
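
The `redact_patterns` entries are plain regular expressions applied to outgoing text before it reaches an external provider. A minimal Python sketch of such sanitization, reusing the patterns above (the function itself is illustrative; the real sanitizer runs inside the AI service):

```
# Sketch of pre-request sanitization using the redact_patterns shown above.
import re

REDACT_PATTERNS = [
    r"(?i)password\s*[:=]\s*[^\s]+",     # passwords
    r"(?i)api[_-]?key\s*[:=]\s*[^\s]+",  # API keys
    r"(?i)secret\s*[:=]\s*[^\s]+",       # secrets
]

def sanitize(text: str) -> str:
    """Replace anything matching a redaction pattern before it is sent to an external LLM."""
    for pattern in REDACT_PATTERNS:
        text = re.sub(pattern, "[REDACTED]", text)
    return text

print(sanitize("db password = hunter2 api_key=sk-abc123"))
# -> "db [REDACTED] [REDACTED]"
```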
|
||||
|
||||
## RAG Configuration
|
||||
|
||||
### Vector Store Setup
|
||||
|
||||
```
|
||||
[ai.rag]
|
||||
enabled = true
|
||||
|
||||
# SurrealDB backend
|
||||
[ai.rag.database]
|
||||
url = "surreal://localhost:8000"
|
||||
username = "root"
|
||||
password = "${SURREALDB_PASSWORD}"
|
||||
namespace = "provisioning"
|
||||
database = "ai_rag"
|
||||
|
||||
# Embedding model
|
||||
[ai.rag.embedding]
|
||||
provider = "openai" # or "anthropic", "local"
|
||||
model = "text-embedding-3-small"
|
||||
batch_size = 100
|
||||
cache_embeddings = true
|
||||
|
||||
# Search configuration
|
||||
[ai.rag.search]
|
||||
hybrid_enabled = true
|
||||
vector_weight = 0.7 # Weight for vector search
|
||||
keyword_weight = 0.3 # Weight for BM25 search
|
||||
top_k = 5 # Number of results to return
|
||||
rerank_enabled = false # Use cross-encoder to rerank results
|
||||
|
||||
# Chunking strategy
|
||||
[ai.rag.chunking]
|
||||
markdown_chunk_size = 1024
|
||||
markdown_overlap = 256
|
||||
code_chunk_size = 512
|
||||
code_overlap = 128
|
||||
```
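
The `vector_weight`/`keyword_weight` settings blend the two retrieval scores into one ranking. A small Python sketch of that combination, assuming per-document scores already normalised to the 0-1 range (names are illustrative):

```
# Sketch of hybrid scoring with vector_weight = 0.7 and keyword_weight = 0.3.
# Assumes per-document vector and BM25 scores are already normalised to [0, 1].
def hybrid_rank(docs, vector_scores, bm25_scores, top_k=5,
                vector_weight=0.7, keyword_weight=0.3):
    combined = {
        doc: vector_weight * vector_scores[doc] + keyword_weight * bm25_scores[doc]
        for doc in docs
    }
    return sorted(combined, key=combined.get, reverse=True)[:top_k]
```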
|
||||
|
||||
### Index Management
|
||||
|
||||
```
|
||||
# Create indexes
|
||||
provisioning ai index create rag
|
||||
|
||||
# Rebuild indexes
|
||||
provisioning ai index rebuild rag
|
||||
|
||||
# Show index status
|
||||
provisioning ai index status rag
|
||||
|
||||
# Remove old indexes
|
||||
provisioning ai index cleanup rag --older-than 30days
|
||||
```
|
||||
|
||||
## MCP Server Configuration
|
||||
|
||||
### MCP Server Setup
|
||||
|
||||
```
|
||||
[ai.mcp]
|
||||
enabled = true
|
||||
port = 3000
|
||||
host = "127.0.0.1" # Change to 0.0.0.0 for network access
|
||||
|
||||
# Tool registry
|
||||
[ai.mcp.tools]
|
||||
generate_config = true
|
||||
validate_config = true
|
||||
search_docs = true
|
||||
troubleshoot_deployment = true
|
||||
get_schema = true
|
||||
check_compliance = true
|
||||
|
||||
# Rate limiting for tool calls
|
||||
rpm_limit = 30
|
||||
burst_limit = 50
|
||||
|
||||
# Tool request timeout
|
||||
timeout_seconds = 30
|
||||
```
|
||||
|
||||
### MCP Client Configuration
|
||||
|
||||
```
|
||||
~/.claude/claude_desktop_config.json:
|
||||
{
|
||||
"mcpServers": {
|
||||
"provisioning": {
|
||||
"command": "provisioning-mcp-server",
|
||||
"args": ["--config", "/etc/provisioning/ai.toml"],
|
||||
"env": {
|
||||
"PROVISIONING_API_KEY": "sk-ant-...",
|
||||
"RUST_LOG": "info"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Logging and Observability
|
||||
|
||||
### Logging Configuration
|
||||
|
||||
```
|
||||
[ai.logging]
|
||||
level = "info" # or "debug", "warn", "error"
|
||||
format = "json" # or "text"
|
||||
output = "stdout" # or "file"
|
||||
|
||||
# Log file
|
||||
[ai.logging.file]
|
||||
path = "/var/log/provisioning/ai.log"
|
||||
max_size_mb = 100
|
||||
max_backups = 10
|
||||
retention_days = 30
|
||||
|
||||
# Log filters
|
||||
[ai.logging.filters]
|
||||
log_requests = true
|
||||
log_responses = false # Don't log full responses (verbose)
|
||||
log_token_usage = true
|
||||
log_costs = true
|
||||
```
|
||||
|
||||
### Metrics and Monitoring
|
||||
|
||||
```
|
||||
# View AI service metrics
|
||||
provisioning admin metrics show ai
|
||||
|
||||
# Prometheus metrics endpoint
|
||||
curl http://localhost:8083/metrics
|
||||
|
||||
# Key metrics:
|
||||
# - ai_requests_total: Total requests by provider/model
|
||||
# - ai_request_duration_seconds: Request latency
|
||||
# - ai_token_usage_total: Token consumption by provider
|
||||
# - ai_cost_total: Cumulative cost by provider
|
||||
# - ai_cache_hits: Cache hit rate
|
||||
# - ai_errors_total: Errors by type
|
||||
```
|
||||
|
||||
## Health Checks
|
||||
|
||||
### Configuration Validation
|
||||
|
||||
```
|
||||
# Validate configuration syntax
|
||||
provisioning config validate ai
|
||||
|
||||
# Test provider connectivity
|
||||
provisioning ai test provider anthropic
|
||||
|
||||
# Test RAG system
|
||||
provisioning ai test rag
|
||||
|
||||
# Test MCP server
|
||||
provisioning ai test mcp
|
||||
|
||||
# Full health check
|
||||
provisioning ai health-check
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
### Common Settings
|
||||
|
||||
```
|
||||
# Provider configuration
|
||||
export PROVISIONING_AI_PROVIDER="anthropic"
|
||||
export PROVISIONING_AI_MODEL="claude-sonnet-4"
|
||||
export PROVISIONING_AI_API_KEY="sk-ant-..."
|
||||
|
||||
# Feature flags
|
||||
export PROVISIONING_AI_ENABLED="true"
|
||||
export PROVISIONING_AI_CACHE_ENABLED="true"
|
||||
export PROVISIONING_AI_RAG_ENABLED="true"
|
||||
|
||||
# Cost control
|
||||
export PROVISIONING_AI_DAILY_LIMIT_USD="100"
|
||||
export PROVISIONING_AI_RPM_LIMIT="60"
|
||||
|
||||
# Security
|
||||
export PROVISIONING_AI_SANITIZE_PII="true"
|
||||
export PROVISIONING_AI_LOCAL_ONLY="false"
|
||||
|
||||
# Logging
|
||||
export RUST_LOG="provisioning::ai=info"
|
||||
```
|
||||
|
||||
## Troubleshooting Configuration
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Issue**: API key not recognized
|
||||
```
|
||||
# Check environment variable is set
|
||||
echo $PROVISIONING_AI_API_KEY
|
||||
|
||||
# Test connectivity
|
||||
provisioning ai test provider anthropic
|
||||
|
||||
# Verify key format (should start with sk-ant- or sk-)
|
||||
provisioning config show ai | grep api_key
|
||||
```
|
||||
|
||||
**Issue**: Cache not working
|
||||
```
|
||||
# Check cache status
|
||||
provisioning admin cache stats ai
|
||||
|
||||
# Clear cache and restart
|
||||
provisioning admin cache clear ai
|
||||
provisioning service restart ai-service
|
||||
|
||||
# Enable cache debugging
|
||||
RUST_LOG=provisioning::cache=debug provisioning-ai-service
|
||||
```
|
||||
|
||||
**Issue**: RAG search not finding results
|
||||
```
|
||||
# Rebuild RAG indexes
|
||||
provisioning ai index rebuild rag
|
||||
|
||||
# Test search
|
||||
provisioning ai query "test query"
|
||||
|
||||
# Check index status
|
||||
provisioning ai index status rag
|
||||
```
|
||||
|
||||
## Upgrading Configuration
|
||||
|
||||
### Backward Compatibility
|
||||
|
||||
New AI versions automatically migrate old configurations:
|
||||
|
||||
```
|
||||
# Check configuration version
|
||||
provisioning config version ai
|
||||
|
||||
# Migrate configuration to latest version
|
||||
provisioning config migrate ai --auto
|
||||
|
||||
# Backup before migration
|
||||
provisioning config backup ai
|
||||
```
|
||||
|
||||
## Production Deployment
|
||||
|
||||
### Recommended Production Settings
|
||||
|
||||
```
|
||||
[ai]
|
||||
enabled = true
|
||||
provider = "anthropic"
|
||||
model = "claude-sonnet-4"
|
||||
api_key = "${PROVISIONING_AI_API_KEY}"
|
||||
|
||||
[ai.features]
|
||||
rag_search = true
|
||||
config_generation = true
|
||||
mcp_server = true
|
||||
troubleshooting = true
|
||||
|
||||
[ai.cache]
|
||||
enabled = true
|
||||
cache_type = "redis"
|
||||
ttl_seconds = 3600
|
||||
|
||||
[ai.limits]
|
||||
rpm_limit = 60
|
||||
daily_cost_limit_usd = 1000
|
||||
max_tokens = 4096
|
||||
|
||||
[ai.security]
|
||||
sanitize_pii = true
|
||||
sanitize_secrets = true
|
||||
encryption_enabled = true
|
||||
|
||||
[ai.logging]
|
||||
level = "warn" # Less verbose in production
|
||||
format = "json"
|
||||
output = "file"
|
||||
|
||||
[ai.rag.database]
|
||||
url = "surreal://surrealdb-cluster:8000"
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - System overview
|
||||
- [RAG System](rag-system.md) - Vector database setup
|
||||
- [MCP Integration](mcp-integration.md) - MCP configuration
|
||||
- [Security Policies](security-policies.md) - Authorization policies
|
||||
- [Cost Management](cost-management.md) - Budget tracking
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-13
|
||||
**Status**: ✅ Production-Ready
|
||||
**Versions Supported**: v1.0+
|
||||
|
||||
@ -1 +1,497 @@
|
||||
# Cost Management
|
||||
# AI Cost Management and Optimization
|
||||
|
||||
**Status**: ✅ Production-Ready (cost tracking, budgets, caching benefits)
|
||||
|
||||
Comprehensive guide to managing LLM API costs, optimizing usage through caching and rate limiting, and tracking spending. The provisioning platform
|
||||
includes built-in cost controls to prevent runaway spending while maximizing value.
|
||||
|
||||
## Cost Overview
|
||||
|
||||
### API Provider Pricing
|
||||
|
||||
| Provider | Model | Input | Output | Notes |
| -------- | ----- | ----- | ------ | ----- |
| **Anthropic** | Claude Sonnet 4 | $3/MTok | $15/MTok | Equivalent to $0.003 / $0.015 per 1K tokens |
| | Claude Opus 4 | $15/MTok | $45/MTok | Higher accuracy, longer context |
| | Claude Haiku 4 | $0.80/MTok | $4/MTok | Fast, for simple queries |
| **OpenAI** | GPT-4 Turbo | $0.01/1K tokens | $0.03/1K tokens | Better at code |
| | GPT-4 | $0.03/1K tokens | $0.06/1K tokens | Legacy, avoid |
| | GPT-4o | $5/MTok | $15/MTok | Latest, multi-modal |
| **Local** | Llama 2, Mistral | Free | Free | Hardware cost only |
|
||||
|
||||
### Cost Examples
|
||||
|
||||
```
|
||||
Scenario 1: Generate simple database configuration
|
||||
- Input: 500 tokens (description + schema)
|
||||
- Output: 200 tokens (generated config)
|
||||
- Cost: (500 × $3 + 200 × $15) / 1,000,000 = $0.0045
|
||||
- With caching (hit rate 50%): $0.0023
|
||||
|
||||
Scenario 2: Deep troubleshooting analysis
|
||||
- Input: 5000 tokens (logs + context)
|
||||
- Output: 2000 tokens (analysis + recommendations)
|
||||
- Cost: (5000 × $3 + 2000 × $15) / 1,000,000 = $0.045
|
||||
- With caching (hit rate 70%): $0.0135
|
||||
|
||||
Scenario 3: Monthly usage (typical organization)
|
||||
- ~1000 config generations @ $0.005 = $5
|
||||
- ~500 troubleshooting calls @ $0.045 = $22.50
|
||||
- ~2000 form assists @ $0.002 = $4
|
||||
- ~200 agent executions @ $0.10 = $20
|
||||
- **Total: ~$50-100/month for small org**
|
||||
- **Total: ~$500-1000/month for large org**
|
||||
```
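
The scenario arithmetic follows directly from the per-MTok prices. A short Python sketch that reproduces it (illustrative only; use `provisioning admin costs` for real accounting):

```
# Reproduces the scenario arithmetic above for Claude Sonnet 4 ($3/MTok in, $15/MTok out).
def request_cost(input_tokens, output_tokens, in_price=3.0, out_price=15.0):
    return (input_tokens * in_price + output_tokens * out_price) / 1_000_000

simple_config = request_cost(500, 200)        # ≈ $0.0045
troubleshooting = request_cost(5000, 2000)    # ≈ $0.045

# Effective cost with a 50% cache hit rate: only misses reach the API.
print(simple_config * 0.5)                    # ≈ $0.0023
```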
|
||||
|
||||
## Cost Control Mechanisms
|
||||
|
||||
### Request Caching
|
||||
|
||||
Caching is the primary cost reduction strategy, cutting costs by 50-80%:
|
||||
|
||||
```
|
||||
Without Caching:
|
||||
User 1: "Generate PostgreSQL config" → API call → $0.005
|
||||
User 2: "Generate PostgreSQL config" → API call → $0.005
|
||||
Total: $0.010 (2 identical requests)
|
||||
|
||||
With LRU Cache:
|
||||
User 1: "Generate PostgreSQL config" → API call → $0.005
|
||||
User 2: "Generate PostgreSQL config" → Cache hit → $0.00001
|
||||
Total: $0.00501 (the second, identical request is ~500x cheaper)
|
||||
|
||||
With Semantic Cache:
|
||||
User 1: "Generate PostgreSQL database config" → API call → $0.005
|
||||
User 2: "Create a PostgreSQL database" → Semantic hit → $0.00001
|
||||
(Slightly different wording, but same intent)
|
||||
Total: $0.00501 (the similar request is still ~500x cheaper than a fresh API call)
|
||||
```
|
||||
|
||||
### Cache Configuration
|
||||
|
||||
```
|
||||
[ai.cache]
|
||||
enabled = true
|
||||
cache_type = "redis" # Distributed cache across instances
|
||||
ttl_seconds = 3600 # 1-hour cache lifetime
|
||||
|
||||
# Cache size limits
|
||||
max_size_mb = 500
|
||||
eviction_policy = "lru" # Least Recently Used
|
||||
|
||||
# Semantic caching - cache similar queries
|
||||
[ai.cache.semantic]
|
||||
enabled = true
|
||||
similarity_threshold = 0.95 # Cache if 95%+ similar to previous query
|
||||
cache_embeddings = true # Cache embedding vectors themselves
|
||||
|
||||
# Cache metrics
|
||||
[ai.cache.metrics]
|
||||
track_hit_rate = true
|
||||
track_space_usage = true
|
||||
alert_on_low_hit_rate = true
|
||||
```
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
Prevent usage spikes from unexpected costs:
|
||||
|
||||
```
|
||||
[ai.limits]
|
||||
# Per-request limits
|
||||
max_tokens = 4096
|
||||
max_input_tokens = 8192
|
||||
max_output_tokens = 4096
|
||||
|
||||
# Throughput limits
|
||||
rpm_limit = 60 # 60 requests per minute
|
||||
rpm_burst = 100 # Allow burst to 100
|
||||
daily_request_limit = 5000 # Max 5000 requests/day
|
||||
|
||||
# Cost limits
|
||||
daily_cost_limit_usd = 100 # Stop at $100/day
|
||||
monthly_cost_limit_usd = 2000 # Stop at $2000/month
|
||||
|
||||
# Budget alerts
|
||||
warn_at_percent = 80 # Warn when at 80% of daily budget
|
||||
stop_at_percent = 95 # Stop when at 95% of budget
|
||||
```
|
||||
|
||||
### Workspace-Level Budgets
|
||||
|
||||
```
|
||||
[ai.workspace_budgets]
|
||||
# Per-workspace cost limits
|
||||
dev.daily_limit_usd = 10
|
||||
staging.daily_limit_usd = 50
|
||||
prod.daily_limit_usd = 100
|
||||
|
||||
# Can override globally for specific workspaces
|
||||
teams.team-a.monthly_limit = 500
|
||||
teams.team-b.monthly_limit = 300
|
||||
```
|
||||
|
||||
## Cost Tracking
|
||||
|
||||
### Track Spending
|
||||
|
||||
```
|
||||
# View current month spending
|
||||
provisioning admin costs show ai
|
||||
|
||||
# Forecast monthly spend
|
||||
provisioning admin costs forecast ai --days-remaining 15
|
||||
|
||||
# Analyze by feature
|
||||
provisioning admin costs analyze ai --by feature
|
||||
|
||||
# Analyze by user
|
||||
provisioning admin costs analyze ai --by user
|
||||
|
||||
# Export for billing
|
||||
provisioning admin costs export ai --format csv --output costs.csv
|
||||
```
|
||||
|
||||
### Cost Breakdown
|
||||
|
||||
```
|
||||
Month: January 2025
|
||||
|
||||
Total Spending: $285.42
|
||||
|
||||
By Feature:
|
||||
Config Generation: $150.00 (52%) [300 requests × avg $0.50]
|
||||
Troubleshooting: $95.00 (33%) [80 requests × avg $1.19]
|
||||
Form Assistance: $30.00 (11%) [5000 requests × avg $0.006]
|
||||
Agents: $10.42 (4%) [20 runs × avg $0.52]
|
||||
|
||||
By Provider:
|
||||
Anthropic (Claude): $200.00 (70%)
|
||||
OpenAI (GPT-4): $85.42 (30%)
|
||||
Local: $0 (0%)
|
||||
|
||||
By User:
|
||||
alice@company.com: $50.00 (18%)
|
||||
bob@company.com: $45.00 (16%)
|
||||
...
|
||||
other (20 users): $190.42 (67%)
|
||||
|
||||
By Workspace:
|
||||
production: $150.00 (53%)
|
||||
staging: $85.00 (30%)
|
||||
development: $50.42 (18%)
|
||||
|
||||
Cache Performance:
|
||||
Requests: 50,000
|
||||
Cache hits: 35,000 (70%)
|
||||
Cache misses: 15,000 (30%)
|
||||
Cost savings from cache: ~$175 (38% reduction)
|
||||
```
|
||||
|
||||
## Optimization Strategies
|
||||
|
||||
### Strategy 1: Increase Cache Hit Rate
|
||||
|
||||
```
|
||||
# Longer TTL = more cache hits
|
||||
[ai.cache]
|
||||
ttl_seconds = 7200 # 2 hours instead of 1 hour
|
||||
|
||||
# Semantic caching helps with slight variations
|
||||
[ai.cache.semantic]
|
||||
enabled = true
|
||||
similarity_threshold = 0.90 # Lower threshold = more hits
|
||||
|
||||
# Result: Increase hit rate from 65% → 80%
|
||||
# Cost reduction: 15% → 23%
|
||||
```
|
||||
|
||||
### Strategy 2: Use Local Models
|
||||
|
||||
```
|
||||
[ai]
|
||||
provider = "local"
|
||||
model = "mistral-7b" # Free, runs on GPU
|
||||
|
||||
# Cost: Hardware ($5-20/month) instead of API calls
|
||||
# Savings: 50-100 config generations/month × $0.005 = $0.25-0.50
|
||||
# Hardware amortized cost: <$0.50/month on existing GPU
|
||||
|
||||
# Tradeoff: Slightly lower quality, 2x slower
|
||||
```
|
||||
|
||||
### Strategy 3: Use Haiku for Simple Tasks
|
||||
|
||||
```
|
||||
Task Complexity vs Model:
|
||||
|
||||
Simple (form assist): Claude Haiku 4 ($0.80/$4)
|
||||
Medium (config gen): Claude Sonnet 4 ($3/$15)
|
||||
Complex (agents): Claude Opus 4 ($15/$45)
|
||||
|
||||
Example optimization:
|
||||
Before: All tasks use Sonnet 4
|
||||
- 5000 form assists/month: 5000 × $0.006 = $30
|
||||
|
||||
After: Route by complexity
|
||||
- 5000 form assists → Haiku: 5000 × $0.001 = $5 (83% savings)
|
||||
- 200 config gen → Sonnet: 200 × $0.005 = $1
|
||||
- 10 agent runs → Opus: 10 × $0.10 = $1
|
||||
```
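
In practice this routing can be a simple lookup from task type to model. The Python sketch below mirrors the split described above; the task categories and routing function are illustrative, not a platform API:

```
# Sketch of complexity-based model routing, mirroring the split described above.
MODEL_BY_TASK = {
    "form_assist": "claude-haiku-4",    # simple, high-volume
    "config_gen": "claude-sonnet-4",    # medium complexity
    "agent_run": "claude-opus-4",       # complex multi-step reasoning
}

def route(task_type: str) -> str:
    """Return the cheapest model that is adequate for the task type."""
    return MODEL_BY_TASK.get(task_type, "claude-sonnet-4")  # safe default

print(route("form_assist"))   # claude-haiku-4
```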
|
||||
|
||||
### Strategy 4: Batch Operations
|
||||
|
||||
```
|
||||
# Instead of individual requests, batch similar operations:
|
||||
|
||||
# Before: 100 configs, 100 separate API calls
|
||||
provisioning ai generate "PostgreSQL config" --output db1.ncl
|
||||
provisioning ai generate "PostgreSQL config" --output db2.ncl
|
||||
# ... 100 calls = $0.50
|
||||
|
||||
# After: Batch similar requests
|
||||
provisioning ai batch --input configs-list.yaml
|
||||
# Groups similar requests, reuses cache
|
||||
# ... 3-5 API calls = $0.02 (90% savings)
|
||||
```
|
||||
|
||||
### Strategy 5: Smart Feature Enablement
|
||||
|
||||
```
|
||||
[ai.features]
|
||||
# Enable high-ROI features
|
||||
config_generation = true # High value, moderate cost
|
||||
troubleshooting = true # High value, higher cost
|
||||
rag_search = true # Low cost, high value
|
||||
|
||||
# Disable low-ROI features if cost-constrained
|
||||
form_assistance = false # Low value, non-zero cost (if budget tight)
|
||||
autonomous_agents = false # Complex, requires multiple calls
|
||||
```
|
||||
|
||||
## Budget Management Workflow
|
||||
|
||||
### 1. Set Budget
|
||||
|
||||
```
|
||||
# Set monthly budget
|
||||
provisioning config set ai.budget.monthly_limit_usd 500
|
||||
|
||||
# Set daily limit
|
||||
provisioning config set ai.limits.daily_cost_limit_usd 50
|
||||
|
||||
# Set workspace limits
|
||||
provisioning config set ai.workspace_budgets.prod.monthly_limit 300
|
||||
provisioning config set ai.workspace_budgets.dev.monthly_limit 100
|
||||
```
|
||||
|
||||
### 2. Monitor Spending
|
||||
|
||||
```
|
||||
# Daily check
|
||||
provisioning admin costs show ai
|
||||
|
||||
# Weekly analysis
|
||||
provisioning admin costs analyze ai --period week
|
||||
|
||||
# Monthly review
|
||||
provisioning admin costs analyze ai --period month
|
||||
```
|
||||
|
||||
### 3. Adjust If Needed
|
||||
|
||||
```
|
||||
# If overspending:
|
||||
# - Increase cache TTL
|
||||
# - Enable local models for simple tasks
|
||||
# - Reduce form assistance (high volume, low cost but adds up)
|
||||
# - Route complex tasks to Haiku instead of Opus
|
||||
|
||||
# If underspending:
|
||||
# - Enable new features (agents, form assistance)
|
||||
# - Increase rate limits
|
||||
# - Lower cache hit requirements (broader semantic matching)
|
||||
```
|
||||
|
||||
### 4. Forecast and Plan
|
||||
|
||||
```
|
||||
# Current monthly run rate
|
||||
provisioning admin costs forecast ai
|
||||
|
||||
# If trending over budget, recommend actions:
|
||||
# - Reduce daily limit
|
||||
# - Switch to local model for 50% of tasks
|
||||
# - Increase batch processing
|
||||
|
||||
# If trending under budget:
|
||||
# - Enable agents for automation workflows
|
||||
# - Enable form assistance across all workspaces
|
||||
```
|
||||
|
||||
## Cost Allocation
|
||||
|
||||
### Chargeback Models
|
||||
|
||||
**Per-Workspace Model**:
|
||||
```
|
||||
Development workspace: $50/month
|
||||
Staging workspace: $100/month
|
||||
Production workspace: $300/month
|
||||
------
|
||||
Total: $450/month
|
||||
```
|
||||
|
||||
**Per-User Model**:
|
||||
```
|
||||
Each user charged based on their usage
|
||||
Encourages efficiency
|
||||
Difficult to track/allocate
|
||||
```
|
||||
|
||||
**Shared Pool Model**:
|
||||
```
|
||||
All teams share $1000/month budget
|
||||
Budget splits by consumption rate
|
||||
Encourages optimization
|
||||
Most flexible
|
||||
```
|
||||
|
||||
## Cost Reporting
|
||||
|
||||
### Generate Reports
|
||||
|
||||
```
|
||||
# Monthly cost report
|
||||
provisioning admin costs report ai \
|
||||
--format pdf \
|
||||
--period month \
|
||||
--output cost-report-2025-01.pdf
|
||||
|
||||
# Detailed analysis for finance
|
||||
provisioning admin costs report ai \
|
||||
--format xlsx \
|
||||
--include-forecasts \
|
||||
--include-optimization-suggestions
|
||||
|
||||
# Executive summary
|
||||
provisioning admin costs report ai \
|
||||
--format markdown \
|
||||
--summary-only
|
||||
```
|
||||
|
||||
## Cost-Benefit Analysis
|
||||
|
||||
### ROI Examples
|
||||
|
||||
```
|
||||
Scenario 1: Developer Time Savings
|
||||
Problem: Manual config creation takes 2 hours
|
||||
Solution: AI config generation, 10 minutes (12x faster)
|
||||
Time saved: 1.83 hours/config
|
||||
Hourly rate: $100
|
||||
Value: $183/config
|
||||
|
||||
AI cost: $0.005/config
|
||||
ROI: 36,600x (far exceeds cost)
|
||||
|
||||
Scenario 2: Troubleshooting Efficiency
|
||||
Problem: Manual debugging takes 4 hours
|
||||
Solution: AI troubleshooting analysis, 2 minutes
|
||||
Time saved: 3.97 hours
|
||||
Value: $397/incident
|
||||
|
||||
AI cost: $0.045/incident
|
||||
ROI: 8,822x
|
||||
|
||||
Scenario 3: Reduction in Failed Deployments
|
||||
Before: 5% of 1000 deployments fail (50 failures)
|
||||
Failure cost: $500 each (lost time, data cleanup)
|
||||
Total: $25,000/month
|
||||
|
||||
After: With AI analysis, 2% fail (20 failures)
|
||||
Total: $10,000/month
|
||||
Savings: $15,000/month
|
||||
|
||||
AI cost: $200/month
|
||||
Net savings: $14,800/month
|
||||
ROI: 74:1
|
||||
```
|
||||
|
||||
## Advanced Cost Optimization
|
||||
|
||||
### Hybrid Strategy (Recommended)
|
||||
|
||||
```
|
||||
✓ Local models for:
|
||||
- Form assistance (high volume, low complexity)
|
||||
- Simple validation checks
|
||||
- Document retrieval (RAG)
|
||||
Cost: Hardware only (~$500 setup)
|
||||
|
||||
✓ Cloud API for:
|
||||
- Complex generation (requires latest model capability)
|
||||
- Troubleshooting (needs high accuracy)
|
||||
- Agents (complex reasoning)
|
||||
Cost: $50-200/month per organization
|
||||
|
||||
Result:
|
||||
- 70% of requests → Local (free after hardware amortization)
|
||||
- 30% of requests → Cloud ($50/month)
|
||||
- 80% overall cost reduction vs cloud-only
|
||||
```
|
||||
|
||||
## Monitoring and Alerts
|
||||
|
||||
### Cost Anomaly Detection
|
||||
|
||||
```
|
||||
# Enable anomaly detection
|
||||
provisioning config set ai.monitoring.anomaly_detection true
|
||||
|
||||
# Set thresholds
|
||||
provisioning config set ai.monitoring.cost_spike_percent 150
|
||||
# Alert if daily cost is 150% of average
|
||||
|
||||
# System alerts:
|
||||
# - Daily cost exceeded by 10x normal
|
||||
# - New expensive operation (agent run)
|
||||
# - Cache hit rate dropped below 40%
|
||||
# - Rate limit nearly exhausted
|
||||
```
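
With `cost_spike_percent = 150`, the check amounts to comparing today's spend with 1.5x a trailing daily average. A minimal Python sketch of that rule (illustrative only):

```
# Sketch of the 150% spike rule described above (illustrative, not platform code).
def is_cost_spike(today_usd: float, trailing_daily_usd: list, spike_percent: int = 150) -> bool:
    """True when today's spend exceeds spike_percent of the trailing daily average."""
    if not trailing_daily_usd:
        return False
    average = sum(trailing_daily_usd) / len(trailing_daily_usd)
    return today_usd > average * (spike_percent / 100)

print(is_cost_spike(18.0, [10.0, 11.0, 9.0]))  # True: 18 > 1.5 × 10
```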
|
||||
|
||||
### Alert Configuration
|
||||
|
||||
```
|
||||
[ai.monitoring.alerts]
|
||||
enabled = true
|
||||
spike_threshold_percent = 150
|
||||
check_interval_minutes = 5
|
||||
|
||||
[ai.monitoring.alerts.channels]
|
||||
email = "ops@company.com"
|
||||
slack = "[https://hooks.slack.com/..."](https://hooks.slack.com/...")
|
||||
pagerduty = "integration-key"
|
||||
|
||||
# Alert thresholds
|
||||
[ai.monitoring.alerts.thresholds]
|
||||
daily_budget_warning_percent = 80
|
||||
daily_budget_critical_percent = 95
|
||||
monthly_budget_warning_percent = 70
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [Configuration](configuration.md) - Cost control settings
|
||||
- [Security Policies](security-policies.md) - Cost-aware policies
|
||||
- [RAG System](rag-system.md) - Caching details
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-13
|
||||
**Status**: ✅ Production-Ready
|
||||
**Average Savings**: 50-80% through caching
|
||||
**Typical Cost**: $50-500/month per organization
|
||||
**ROI**: 100:1 to 10,000:1 depending on use case
|
||||
|
||||
@ -1 +1,594 @@
|
||||
# MCP Integration
|
||||
# Model Context Protocol (MCP) Integration
|
||||
|
||||
**Status**: ✅ Production-Ready (MCP 0.6.0+, integrated with Claude, compatible with all LLMs)
|
||||
|
||||
The MCP server provides standardized Model Context Protocol integration, allowing external LLMs (Claude, GPT-4, local models) to access provisioning
|
||||
platform capabilities as tools. This enables complex multi-step workflows, tool composition, and integration with existing LLM applications.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
The MCP integration follows the Model Context Protocol specification:
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ External LLM (Claude, GPT-4, etc.) │
|
||||
└────────────────────┬─────────────────────────────────────────┘
|
||||
│
|
||||
│ Tool Calls (JSON-RPC)
|
||||
▼
|
||||
┌──────────────────────────────────────────────────────────────┐
|
||||
│ MCP Server (provisioning/platform/crates/mcp-server) │
|
||||
│ │
|
||||
│ ┌───────────────────────────────────────────────────────┐ │
|
||||
│ │ Tool Registry │ │
|
||||
│ │ - generate_config(description, schema) │ │
|
||||
│ │ - validate_config(config) │ │
|
||||
│ │ - search_docs(query) │ │
|
||||
│ │ - troubleshoot_deployment(logs) │ │
|
||||
│ │ - get_schema(name) │ │
|
||||
│ │ - check_compliance(config, policy) │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
│ │ │
|
||||
│ ▼ │
|
||||
│ ┌───────────────────────────────────────────────────────┐ │
|
||||
│ │ Implementation Layer │ │
|
||||
│ │ - AI Service client (ai-service port 8083) │ │
|
||||
│ │ - Validator client │ │
|
||||
│ │ - RAG client (SurrealDB) │ │
|
||||
│ │ - Schema loader │ │
|
||||
│ └───────────────────────────────────────────────────────┘ │
|
||||
└──────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## MCP Server Launch
|
||||
|
||||
The MCP server is started as a stdio-based service:
|
||||
|
||||
```
|
||||
# Start MCP server (stdio transport)
|
||||
provisioning-mcp-server --config /etc/provisioning/ai.toml
|
||||
|
||||
# With debug logging
|
||||
RUST_LOG=debug provisioning-mcp-server --config /etc/provisioning/ai.toml
|
||||
|
||||
# In Claude Desktop configuration
|
||||
~/.claude/claude_desktop_config.json:
|
||||
{
|
||||
"mcpServers": {
|
||||
"provisioning": {
|
||||
"command": "provisioning-mcp-server",
|
||||
"args": ["--config", "/etc/provisioning/ai.toml"],
|
||||
"env": {
|
||||
"PROVISIONING_TOKEN": "your-auth-token"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
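
Because the server speaks MCP over stdio, any client can drive it with newline-delimited JSON-RPC. The Python sketch below spawns the server and issues one `tools/call` request; real MCP clients perform an initialization handshake first and your client library may frame messages differently, so treat this as a rough illustration rather than a reference client:

```
# Rough illustration of calling the MCP server over stdio with JSON-RPC.
# Real clients initialize the session first; method and tool names follow the
# MCP spec and the tool registry above, but this is a sketch, not a reference client.
import json
import subprocess

proc = subprocess.Popen(
    ["provisioning-mcp-server", "--config", "/etc/provisioning/ai.toml"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True,
)

request = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "tools/call",
    "params": {
        "name": "generate_config",
        "arguments": {"description": "Production PostgreSQL with daily backups"},
    },
}
proc.stdin.write(json.dumps(request) + "\n")
proc.stdin.flush()
print(proc.stdout.readline())  # JSON-RPC response with the generated configuration
```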
|
||||
|
||||
## Available Tools
|
||||
|
||||
### 1. Config Generation
|
||||
|
||||
**Tool**: `generate_config`
|
||||
|
||||
Generate infrastructure configuration from natural language description.
|
||||
|
||||
```
|
||||
{
|
||||
"name": "generate_config",
|
||||
"description": "Generate a Nickel infrastructure configuration from a natural language description",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Natural language description of desired infrastructure"
|
||||
},
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "Target schema name (e.g., 'database', 'kubernetes', 'network'). Optional."
|
||||
},
|
||||
"format": {
|
||||
"type": "string",
|
||||
"enum": ["nickel", "toml"],
|
||||
"description": "Output format (default: nickel)"
|
||||
}
|
||||
},
|
||||
"required": ["description"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example Usage**:
|
||||
|
||||
```
|
||||
# Via MCP client
|
||||
mcp-client provisioning generate_config \
|
||||
--description "Production PostgreSQL cluster with encryption and daily backups" \
|
||||
--schema database
|
||||
|
||||
# Claude desktop prompt:
|
||||
# @provisioning: Generate a production PostgreSQL setup with automated backups
|
||||
```
|
||||
|
||||
**Response**:
|
||||
|
||||
```
|
||||
{
|
||||
database = {
|
||||
engine = "postgresql",
|
||||
version = "15.0",
|
||||
|
||||
instance = {
|
||||
instance_class = "db.r6g.xlarge",
|
||||
allocated_storage_gb = 100,
|
||||
iops = 3000,
|
||||
},
|
||||
|
||||
security = {
|
||||
encryption_enabled = true,
|
||||
encryption_key_id = "kms://prod-db-key",
|
||||
tls_enabled = true,
|
||||
tls_version = "1.3",
|
||||
},
|
||||
|
||||
backup = {
|
||||
enabled = true,
|
||||
retention_days = 30,
|
||||
preferred_window = "03:00-04:00",
|
||||
copy_to_region = "us-west-2",
|
||||
},
|
||||
|
||||
monitoring = {
|
||||
enhanced_monitoring_enabled = true,
|
||||
monitoring_interval_seconds = 60,
|
||||
log_exports = ["postgresql"],
|
||||
},
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Config Validation
|
||||
|
||||
**Tool**: `validate_config`
|
||||
|
||||
Validate a Nickel configuration against schemas and policies.
|
||||
|
||||
```
|
||||
{
|
||||
"name": "validate_config",
|
||||
"description": "Validate a Nickel configuration file",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"config": {
|
||||
"type": "string",
|
||||
"description": "Nickel configuration content or file path"
|
||||
},
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"description": "Schema name to validate against (optional)"
|
||||
},
|
||||
"strict": {
|
||||
"type": "boolean",
|
||||
"description": "Enable strict validation (default: true)"
|
||||
}
|
||||
},
|
||||
"required": ["config"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example Usage**:
|
||||
|
||||
```
|
||||
# Validate configuration
|
||||
mcp-client provisioning validate_config \
|
||||
--config "$(cat workspaces/prod/database.ncl)"
|
||||
|
||||
# With specific schema
|
||||
mcp-client provisioning validate_config \
|
||||
--config "workspaces/prod/kubernetes.ncl" \
|
||||
--schema kubernetes
|
||||
```
|
||||
|
||||
**Response**:
|
||||
|
||||
```
|
||||
{
|
||||
"valid": true,
|
||||
"errors": [],
|
||||
"warnings": [
|
||||
"Consider enabling automated backups for production use"
|
||||
],
|
||||
"metadata": {
|
||||
"schema": "kubernetes",
|
||||
"version": "1.28",
|
||||
"validated_at": "2025-01-13T10:45:30Z"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Documentation Search
|
||||
|
||||
**Tool**: `search_docs`
|
||||
|
||||
Search infrastructure documentation using RAG system.
|
||||
|
||||
```
|
||||
{
|
||||
"name": "search_docs",
|
||||
"description": "Search provisioning documentation for information",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Search query (natural language)"
|
||||
},
|
||||
"top_k": {
|
||||
"type": "integer",
|
||||
"description": "Number of results (default: 5)"
|
||||
},
|
||||
"doc_type": {
|
||||
"type": "string",
|
||||
"enum": ["guide", "schema", "example", "troubleshooting"],
|
||||
"description": "Filter by document type (optional)"
|
||||
}
|
||||
},
|
||||
"required": ["query"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example Usage**:
|
||||
|
||||
```
|
||||
# Search documentation
|
||||
mcp-client provisioning search_docs \
|
||||
--query "How do I configure PostgreSQL with replication?"
|
||||
|
||||
# Get examples
|
||||
mcp-client provisioning search_docs \
|
||||
--query "Kubernetes networking" \
|
||||
--doc_type example \
|
||||
--top_k 3
|
||||
```
|
||||
|
||||
**Response**:
|
||||
|
||||
```
|
||||
{
|
||||
"results": [
|
||||
{
|
||||
"source": "provisioning/docs/src/guides/database-replication.md",
|
||||
"excerpt": "PostgreSQL logical replication enables streaming of changes...",
|
||||
"relevance": 0.94,
|
||||
"section": "Setup Logical Replication"
|
||||
},
|
||||
{
|
||||
"source": "provisioning/schemas/database.ncl",
|
||||
"excerpt": "replication = { enabled = true, mode = \"logical\", ... }",
|
||||
"relevance": 0.87,
|
||||
"section": "Replication Configuration"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Deployment Troubleshooting
|
||||
|
||||
**Tool**: `troubleshoot_deployment`
|
||||
|
||||
Analyze deployment failures and suggest fixes.
|
||||
|
||||
```
|
||||
{
|
||||
"name": "troubleshoot_deployment",
|
||||
"description": "Analyze deployment logs and suggest fixes",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"deployment_id": {
|
||||
"type": "string",
|
||||
"description": "Deployment ID (e.g., 'deploy-2025-01-13-001')"
|
||||
},
|
||||
"logs": {
|
||||
"type": "string",
|
||||
"description": "Deployment logs (optional, if deployment_id not provided)"
|
||||
},
|
||||
"error_analysis_depth": {
|
||||
"type": "string",
|
||||
"enum": ["shallow", "deep"],
|
||||
"description": "Analysis depth (default: deep)"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example Usage**:
|
||||
|
||||
```
|
||||
# Troubleshoot recent deployment
|
||||
mcp-client provisioning troubleshoot_deployment \
|
||||
--deployment_id "deploy-2025-01-13-001"
|
||||
|
||||
# With custom logs
|
||||
mcp-client provisioning troubleshoot_deployment \
|
||||
| --logs "$(journalctl -u provisioning --no-pager | tail -100)" |
|
||||
```
|
||||
|
||||
**Response**:
|
||||
|
||||
```
|
||||
{
|
||||
"status": "failure",
|
||||
"root_cause": "Database connection timeout during migration phase",
|
||||
"analysis": {
|
||||
"phase": "database_migration",
|
||||
"error_type": "connectivity",
|
||||
"confidence": 0.95
|
||||
},
|
||||
"suggestions": [
|
||||
"Verify database security group allows inbound on port 5432",
|
||||
"Check database instance status (may be rebooting)",
|
||||
"Increase connection timeout in configuration"
|
||||
],
|
||||
"corrected_config": "...generated Nickel config with fixes...",
|
||||
"similar_issues": [
|
||||
"[https://docs/troubleshooting/database-connectivity.md"](https://docs/troubleshooting/database-connectivity.md")
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 5. Get Schema
|
||||
|
||||
**Tool**: `get_schema`
|
||||
|
||||
Retrieve schema definition with examples.
|
||||
|
||||
```
|
||||
{
|
||||
"name": "get_schema",
|
||||
"description": "Get a provisioning schema definition",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"schema_name": {
|
||||
"type": "string",
|
||||
"description": "Schema name (e.g., 'database', 'kubernetes')"
|
||||
},
|
||||
"format": {
|
||||
"type": "string",
|
||||
"enum": ["schema", "example", "documentation"],
|
||||
"description": "Response format (default: schema)"
|
||||
}
|
||||
},
|
||||
"required": ["schema_name"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example Usage**:
|
||||
|
||||
```
|
||||
# Get schema definition
|
||||
mcp-client provisioning get_schema --schema_name database
|
||||
|
||||
# Get example configuration
|
||||
mcp-client provisioning get_schema \
|
||||
--schema_name kubernetes \
|
||||
--format example
|
||||
```
|
||||
|
||||
### 6. Compliance Check
|
||||
|
||||
**Tool**: `check_compliance`
|
||||
|
||||
Verify configuration against compliance policies (Cedar).
|
||||
|
||||
```
|
||||
{
|
||||
"name": "check_compliance",
|
||||
"description": "Check configuration against compliance policies",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"config": {
|
||||
"type": "string",
|
||||
"description": "Configuration to check"
|
||||
},
|
||||
"policy_set": {
|
||||
"type": "string",
|
||||
"description": "Policy set to check against (e.g., 'pci-dss', 'hipaa', 'sox')"
|
||||
}
|
||||
},
|
||||
"required": ["config", "policy_set"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Example Usage**:
|
||||
|
||||
```
|
||||
# Check against PCI-DSS
|
||||
mcp-client provisioning check_compliance \
|
||||
--config "$(cat workspaces/prod/database.ncl)" \
|
||||
--policy_set pci-dss
|
||||
```
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### Claude Desktop (Most Common)
|
||||
|
||||
```
|
||||
~/.claude/claude_desktop_config.json:
|
||||
{
|
||||
"mcpServers": {
|
||||
"provisioning": {
|
||||
"command": "provisioning-mcp-server",
|
||||
"args": ["--config", "/etc/provisioning/ai.toml"],
|
||||
"env": {
|
||||
"PROVISIONING_API_KEY": "sk-...",
|
||||
"PROVISIONING_BASE_URL": "[http://localhost:8083"](http://localhost:8083")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Usage in Claude**:
|
||||
|
||||
```
|
||||
User: I need a production Kubernetes cluster in AWS with automatic scaling
|
||||
|
||||
Claude can now use provisioning tools:
|
||||
I'll help you create a production Kubernetes cluster. Let me:
|
||||
1. Search the documentation for best practices
|
||||
2. Generate a configuration template
|
||||
3. Validate it against your policies
|
||||
4. Provide the final configuration
|
||||
```
|
||||
|
||||
### OpenAI Function Calling
|
||||
|
||||
```
|
||||
import openai
|
||||
|
||||
tools = [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "generate_config",
|
||||
"description": "Generate infrastructure configuration",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Infrastructure description"
|
||||
}
|
||||
},
|
||||
"required": ["description"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
response = openai.ChatCompletion.create(
|
||||
model="gpt-4",
|
||||
messages=[{"role": "user", "content": "Create a PostgreSQL database"}],
|
||||
tools=tools
|
||||
)
|
||||
```
|
||||
|
||||
### Local LLM Integration (Ollama)
|
||||
|
||||
```
|
||||
# Start Ollama with provisioning MCP
|
||||
OLLAMA_MCP_SERVERS=provisioning://localhost:3000 \
|
||||
ollama serve
|
||||
|
||||
# Use with llama2 or mistral
|
||||
curl http://localhost:11434/api/generate \
|
||||
-d '{
|
||||
"model": "mistral",
|
||||
"prompt": "Create a Kubernetes cluster",
|
||||
"tools": [{"type": "mcp", "server": "provisioning"}]
|
||||
}'
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
Tools return consistent error responses:
|
||||
|
||||
```
|
||||
{
|
||||
"error": {
|
||||
"code": "VALIDATION_ERROR",
|
||||
"message": "Configuration has 3 validation errors",
|
||||
"details": [
|
||||
{
|
||||
"field": "database.version",
|
||||
"message": "PostgreSQL version 9.6 is deprecated",
|
||||
"severity": "error"
|
||||
},
|
||||
{
|
||||
"field": "backup.retention_days",
|
||||
"message": "Recommended minimum is 30 days for production",
|
||||
"severity": "warning"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
| Operation | Latency | Notes |
| --------- | ------- | ----- |
| generate_config | 2-5s | Depends on LLM and config complexity |
| validate_config | 500-1000ms | Parallel schema validation |
| search_docs | 300-800ms | RAG hybrid search |
| troubleshoot | 3-8s | Depends on log size and analysis depth |
| get_schema | 100-300ms | Cached schema retrieval |
| check_compliance | 500-2000ms | Policy evaluation |
|
||||
|
||||
## Configuration
|
||||
|
||||
See [Configuration Guide](configuration.md) for MCP-specific settings:
|
||||
|
||||
- MCP server port and binding
|
||||
- Tool registry customization
|
||||
- Rate limiting for tool calls
|
||||
- Access control (Cedar policies)
|
||||
|
||||
## Security
|
||||
|
||||
### Authentication
|
||||
|
||||
- Tools require valid provisioning API token
|
||||
- Token scoped to user's workspace
|
||||
- All tool calls authenticated and logged
|
||||
|
||||
### Authorization
|
||||
|
||||
- Cedar policies control which tools user can call
|
||||
- Example: `allow(principal, action, resource)` when `role == "admin"`
|
||||
- Detailed audit trail of all tool invocations
|
||||
|
||||
### Data Protection
|
||||
|
||||
- Secrets never passed through MCP
|
||||
- Configuration sanitized before analysis
|
||||
- PII removed from logs sent to external LLMs
|
||||
|
||||
## Monitoring and Debugging
|
||||
|
||||
```
|
||||
# Monitor MCP server
|
||||
provisioning admin mcp status
|
||||
|
||||
# View MCP tool calls
|
||||
provisioning admin logs --filter "mcp_tools" --tail 100
|
||||
|
||||
# Debug tool response
|
||||
RUST_LOG=provisioning::mcp=debug provisioning-mcp-server
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [RAG System](rag-system.md) - Documentation search
|
||||
- [Configuration](configuration.md) - MCP setup
|
||||
- [API Reference](api-reference.md) - Detailed API endpoints
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-13
|
||||
**Status**: ✅ Production-Ready
|
||||
**MCP Version**: 0.6.0+
|
||||
**Supported LLMs**: Claude, GPT-4, Llama, Mistral, all MCP-compatible models
|
||||
|
||||
@ -1 +1,469 @@
|
||||
# Natural Language Configuration
|
||||
# Natural Language Configuration Generation
|
||||
|
||||
**Status**: 🔴 Planned (Q2 2025 target)
|
||||
|
||||
Natural Language Configuration (NLC) is a planned feature that enables users to describe infrastructure requirements in plain English and have the
|
||||
system automatically generate validated Nickel configurations. This feature combines natural language understanding with schema-aware generation and
|
||||
validation.
|
||||
|
||||
## Feature Overview
|
||||
|
||||
### What It Does
|
||||
|
||||
Transform infrastructure descriptions into production-ready Nickel configurations:
|
||||
|
||||
```
|
||||
User Input:
|
||||
"Create a production PostgreSQL cluster with 100GB storage,
|
||||
daily backups, encryption enabled, and cross-region replication
|
||||
to us-west-2"
|
||||
|
||||
System Output:
|
||||
provisioning/schemas/database.ncl (validated, production-ready)
|
||||
```
|
||||
|
||||
### Primary Use Cases
|
||||
|
||||
1. **Rapid Prototyping**: From description to working config in seconds
|
||||
2. **Infrastructure Documentation**: Describe infrastructure as code
|
||||
3. **Configuration Templates**: Generate reusable patterns
|
||||
4. **Non-Expert Operations**: Enable junior developers to provision infrastructure
|
||||
5. **Configuration Migration**: Describe existing infrastructure to generate Nickel
|
||||
|
||||
## Architecture
|
||||
|
||||
### Generation Pipeline
|
||||
|
||||
```
|
||||
Input Description (Natural Language)
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ Understanding & Analysis │
|
||||
│ - Intent extraction │
|
||||
│ - Entity recognition │
|
||||
│ - Constraint identification │
|
||||
│ - Best practice inference │
|
||||
└─────────────────────┬───────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ RAG Context Retrieval │
|
||||
│ - Find similar configs │
|
||||
│ - Retrieve best practices │
|
||||
│ - Get schema examples │
|
||||
│ - Identify constraints │
|
||||
└─────────────────────┬───────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ Schema-Aware Generation │
|
||||
│ - Map entities to schema fields │
|
||||
│ - Apply type constraints │
|
||||
│ - Include required fields │
|
||||
│ - Generate valid Nickel │
|
||||
└─────────────────────┬───────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ Validation & Refinement │
|
||||
│ - Type checking │
|
||||
│ - Schema validation │
|
||||
│ - Policy compliance │
|
||||
│ - Security checks │
|
||||
└─────────────────────┬───────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────┐
|
||||
│ Output & Explanation │
|
||||
│ - Generated Nickel config │
|
||||
│ - Decision rationale │
|
||||
│ - Alternative suggestions │
|
||||
│ - Warnings if any │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Planned Implementation Details
|
||||
|
||||
### 1. Intent Extraction
|
||||
|
||||
Extract structured intent from natural language:
|
||||
|
||||
```
|
||||
Input: "Create a production PostgreSQL cluster with encryption and backups"
|
||||
|
||||
Extracted Intent:
|
||||
{
|
||||
resource_type: "database",
|
||||
engine: "postgresql",
|
||||
environment: "production",
|
||||
requirements: [
|
||||
{constraint: "encryption", type: "boolean", value: true},
|
||||
{constraint: "backups", type: "enabled", frequency: "daily"},
|
||||
],
|
||||
modifiers: ["production"],
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Entity Mapping
|
||||
|
||||
Map natural language entities to schema fields:
|
||||
|
||||
```
|
||||
Description Terms → Schema Fields:
|
||||
"100GB storage" → database.instance.allocated_storage_gb = 100
|
||||
"daily backups" → backup.enabled = true, backup.frequency = "daily"
|
||||
"encryption" → security.encryption_enabled = true
|
||||
"cross-region" → backup.copy_to_region = "us-west-2"
|
||||
"PostgreSQL 15" → database.engine_version = "15.0"
|
||||
```
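
In code, this mapping step could be a table of recognised phrases and value parsers keyed to schema field paths. The Python sketch below illustrates the planned behaviour only (the field paths follow the examples above; nothing here is implemented yet):

```
# Illustrative sketch of the planned entity-to-field mapping (not implemented yet).
import re

def map_entities(description: str) -> dict:
    """Map a few recognisable phrases from the description to schema field paths."""
    fields = {}
    if m := re.search(r"(\d+)\s*GB storage", description, re.I):
        fields["database.instance.allocated_storage_gb"] = int(m.group(1))
    if re.search(r"daily backups?", description, re.I):
        fields["backup.enabled"] = True
        fields["backup.frequency"] = "daily"
    if re.search(r"encryption", description, re.I):
        fields["security.encryption_enabled"] = True
    if m := re.search(r"cross-region(?: replication)? to ([\w-]+)", description, re.I):
        fields["backup.copy_to_region"] = m.group(1)
    return fields

print(map_entities("100GB storage, daily backups, encryption, cross-region replication to us-west-2"))
```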
|
||||
|
||||
### 3. Prompt Engineering
|
||||
|
||||
Sophisticated prompting for schema-aware generation:
|
||||
|
||||
```
|
||||
System Prompt:
|
||||
You are generating Nickel infrastructure configurations.
|
||||
Generate ONLY valid Nickel syntax.
|
||||
Follow these rules:
|
||||
- Use record syntax: `field = value`
|
||||
- Type annotations must be valid
|
||||
- All required fields must be present
|
||||
- Apply best practices for [ENVIRONMENT]
|
||||
|
||||
Schema Context:
|
||||
[Database schema from provisioning/schemas/database.ncl]
|
||||
|
||||
Examples:
|
||||
[3 relevant examples from RAG]
|
||||
|
||||
User Request:
|
||||
[User natural language description]
|
||||
|
||||
Generate the complete Nickel configuration.
|
||||
Start with: let { database = {
|
||||
```
|
||||
|
||||
### 4. Iterative Refinement
|
||||
|
||||
Handle generation errors through iteration:
|
||||
|
||||
```
|
||||
Attempt 1: Generate initial config
|
||||
↓ Validate
|
||||
✗ Error: field `version` type mismatch (string vs number)
|
||||
↓ Re-prompt with error
|
||||
Attempt 2: Fix with context from error
|
||||
↓ Validate
|
||||
✓ Success: Config is valid
|
||||
```
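
The refinement loop is a bounded generate-validate-retry cycle. A schematic Python sketch, where `generate` and `validate` stand in for the planned LLM call and Nickel validation (illustrative only):

```
# Schematic generate → validate → re-prompt loop for the planned refinement step.
def generate_with_refinement(prompt, generate, validate, max_attempts=3):
    errors = []
    for _ in range(max_attempts):
        config = generate(prompt, errors)   # errors from the previous attempt feed the re-prompt
        ok, errors = validate(config)       # e.g. Nickel type check + schema validation
        if ok:
            return config
    raise RuntimeError(f"configuration still invalid after {max_attempts} attempts: {errors}")
```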
|
||||
|
||||
## Command Interface
|
||||
|
||||
### CLI Usage
|
||||
|
||||
```
|
||||
# Simple generation
|
||||
provisioning ai generate "PostgreSQL database for production"
|
||||
|
||||
# With schema specification
|
||||
provisioning ai generate \
|
||||
--schema database \
|
||||
"Create PostgreSQL 15 with encryption and daily backups"
|
||||
|
||||
# Interactive generation (refine output)
|
||||
provisioning ai generate --interactive \
|
||||
"Kubernetes cluster on AWS"
|
||||
|
||||
# Generate and validate
|
||||
provisioning ai generate \
|
||||
--validate \
|
||||
"Production Redis cluster with sentinel"
|
||||
|
||||
# Generate and save directly
|
||||
provisioning ai generate \
|
||||
--schema database \
|
||||
--output workspaces/prod/database.ncl \
|
||||
"PostgreSQL production setup"
|
||||
|
||||
# Batch generation from file
|
||||
provisioning ai generate --batch descriptions.yaml
|
||||
```
|
||||
|
||||
### Interactive Refinement
|
||||
|
||||
```
|
||||
$ provisioning ai generate --interactive
|
||||
> Describe infrastructure: Create production PostgreSQL cluster
|
||||
|
||||
Generated configuration shown.
|
||||
|
||||
> Refine: Add cross-region backup to us-west-2
|
||||
Configuration updated.
|
||||
|
||||
> Refine: Use larger instance class for performance
|
||||
Configuration updated.
|
||||
|
||||
> Accept? [y/n]: y
|
||||
Configuration saved to: workspaces/prod/database.ncl
|
||||
```
|
||||
|
||||
## Example Generations
|
||||
|
||||
### Example 1: Simple Database
|
||||
|
||||
**Input**:
|
||||
```
|
||||
"PostgreSQL database with 50GB storage and encryption"
|
||||
```
|
||||
|
||||
**Output**:
|
||||
```
|
||||
let {
|
||||
database = {
|
||||
engine = "postgresql",
|
||||
version = "15.0",
|
||||
|
||||
instance = {
|
||||
instance_class = "db.t3.medium",
|
||||
allocated_storage_gb = 50,
|
||||
iops = 1000,
|
||||
},
|
||||
|
||||
security = {
|
||||
encryption_enabled = true,
|
||||
tls_enabled = true,
|
||||
tls_version = "1.3",
|
||||
},
|
||||
|
||||
backup = {
|
||||
enabled = true,
|
||||
retention_days = 7,
|
||||
},
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rationale**:
|
||||
- PostgreSQL 15 is current stable version
|
||||
- db.t3.medium suitable for 50GB with general workload
|
||||
- Encryption enabled per requirement
|
||||
- Automatic backups with 7-day retention (default)
|
||||
|
||||
### Example 2: Complex Kubernetes Setup
|
||||
|
||||
**Input**:
|
||||
```
|
||||
"Production Kubernetes cluster in AWS with 3 availability zones,
|
||||
auto-scaling from 3 to 10 nodes, managed PostgreSQL, and monitoring"
|
||||
```
|
||||
|
||||
**Output**:
|
||||
```
|
||||
let {
|
||||
kubernetes = {
|
||||
version = "1.28.0",
|
||||
|
||||
cluster = {
|
||||
name = "prod-cluster",
|
||||
region = "us-east-1",
|
||||
availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"],
|
||||
},
|
||||
|
||||
node_group = {
|
||||
min_size = 3,
|
||||
max_size = 10,
|
||||
desired_size = 3,
|
||||
instance_type = "t3.large",
|
||||
|
||||
auto_scaling = {
|
||||
enabled = true,
|
||||
target_cpu = 70,
|
||||
scale_down_delay = 300,
|
||||
},
|
||||
},
|
||||
|
||||
managed_services = {
|
||||
postgres = {
|
||||
enabled = true,
|
||||
engine = "postgresql",
|
||||
version = "15.0",
|
||||
storage_gb = 100,
|
||||
},
|
||||
},
|
||||
|
||||
monitoring = {
|
||||
prometheus = {enabled = true},
|
||||
grafana = {enabled = true},
|
||||
cloudwatch_integration = true,
|
||||
},
|
||||
|
||||
networking = {
|
||||
vpc_cidr = "10.0.0.0/16",
|
||||
enable_nat_gateway = true,
|
||||
enable_dns_hostnames = true,
|
||||
},
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Rationale**:
|
||||
- 3 AZs for high availability
|
||||
- t3.large balances cost and performance for general workload
|
||||
- Auto-scaling target 70% CPU (best practice)
|
||||
- Managed PostgreSQL reduces operational overhead
|
||||
- Full observability with Prometheus + Grafana
|
||||
|
||||
## Configuration and Constraints
|
||||
|
||||
### Configurable Generation Parameters
|
||||
|
||||
```
|
||||
# In provisioning/config/ai.toml
|
||||
[ai.generation]
|
||||
# Which schema to use by default
|
||||
default_schema = "database"
|
||||
|
||||
# Whether to require explicit environment specification
|
||||
require_environment = false
|
||||
|
||||
# Optimization targets
|
||||
optimization_target = "balanced" # or "cost", "performance"
|
||||
|
||||
# Best practices to always apply
|
||||
best_practices = [
|
||||
"encryption",
|
||||
"high_availability",
|
||||
"monitoring",
|
||||
"backup",
|
||||
]
|
||||
|
||||
# Constraints that limit generation
|
||||
[ai.generation.constraints]
|
||||
min_storage_gb = 10
|
||||
max_instances = 100
|
||||
allowed_engines = ["postgresql", "mysql", "mongodb"]
|
||||
|
||||
# Validation before accepting generated config
|
||||
[ai.generation.validation]
|
||||
strict_mode = true
|
||||
require_security_review = false
|
||||
require_compliance_check = true
|
||||
```
|
||||
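As a sketch of how a service might load this file, the structs below mirror the `[ai.generation]` tables with serde. Field names follow the TOML above, but the struct layout itself is an assumption for illustration, not the platform's actual configuration type.

```
use serde::Deserialize;

// Illustrative mirror of the [ai.generation] tables shown above.
#[derive(Debug, Deserialize)]
struct AiConfigFile {
    ai: AiSection,
}

#[derive(Debug, Deserialize)]
struct AiSection {
    generation: GenerationSettings,
}

#[derive(Debug, Deserialize)]
struct GenerationSettings {
    default_schema: String,
    require_environment: bool,
    optimization_target: String,
    best_practices: Vec<String>,
    constraints: Constraints,
    validation: Validation,
}

#[derive(Debug, Deserialize)]
struct Constraints {
    min_storage_gb: u32,
    max_instances: u32,
    allowed_engines: Vec<String>,
}

#[derive(Debug, Deserialize)]
struct Validation {
    strict_mode: bool,
    require_security_review: bool,
    require_compliance_check: bool,
}

fn load_generation_settings(toml_text: &str) -> Result<GenerationSettings, toml::de::Error> {
    let parsed: AiConfigFile = toml::from_str(toml_text)?;
    Ok(parsed.ai.generation)
}
```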
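With this in place, constraint checks (minimum storage, allowed engines, instance caps) can run against the parsed struct before a generated configuration is ever accepted.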
|
||||
### Safety Guardrails
|
||||
|
||||
1. **Required Fields**: All schema required fields must be present
|
||||
2. **Type Validation**: Generated values must match schema types
|
||||
3. **Security Checks**: Encryption/backups enabled for production
|
||||
4. **Cost Estimation**: Warn if projected cost exceeds threshold
|
||||
5. **Resource Limits**: Enforce organizational constraints
|
||||
6. **Policy Compliance**: Check against Cedar policies
|
||||
|
||||
## User Workflow
|
||||
|
||||
### Typical Usage Session
|
||||
|
||||
```
|
||||
# 1. Describe infrastructure need
|
||||
$ provisioning ai generate "I need a database for my web app"
|
||||
|
||||
# System generates basic config, suggests refinements
|
||||
# Generated config shown with explanations
|
||||
|
||||
# 2. Refine if needed
|
||||
$ provisioning ai generate --interactive
|
||||
|
||||
# 3. Review and validate
|
||||
$ provisioning ai validate workspaces/dev/database.ncl
|
||||
|
||||
# 4. Deploy
|
||||
$ provisioning workspace apply workspaces/dev
|
||||
|
||||
# 5. Monitor
|
||||
$ provisioning workspace logs database
|
||||
```
|
||||
|
||||
## Integration with Other Systems
|
||||
|
||||
### RAG Integration
|
||||
|
||||
NLC uses RAG to find similar configurations:
|
||||
|
||||
```
|
||||
User: "Create Kubernetes cluster"
|
||||
↓
|
||||
RAG searches for:
|
||||
- Existing Kubernetes configs in workspaces
|
||||
- Kubernetes documentation and examples
|
||||
- Best practices from provisioning/docs/guides/kubernetes.md
|
||||
↓
|
||||
Context fed to LLM for generation
|
||||
```
|
||||
|
||||
### Form Assistance
|
||||
|
||||
NLC and form assistance share components:
|
||||
|
||||
- Intent extraction for pre-filling forms
|
||||
- Constraint validation for form field values
|
||||
- Explanation generation for validation errors
|
||||
|
||||
### CLI Integration
|
||||
|
||||
```
|
||||
# Generate then preview
|
||||
provisioning ai generate "PostgreSQL prod" | \
|
||||
provisioning config preview
|
||||
|
||||
# Generate and apply
|
||||
provisioning ai generate \
|
||||
--apply \
|
||||
--environment prod \
|
||||
"PostgreSQL cluster"
|
||||
```
|
||||
|
||||
## Testing and Validation
|
||||
|
||||
### Test Cases (Planned)
|
||||
|
||||
1. **Simple Descriptions**: Single resource, few requirements
|
||||
- "PostgreSQL database"
|
||||
- "Redis cache"
|
||||
|
||||
2. **Complex Descriptions**: Multiple resources, constraints
|
||||
- "Kubernetes with managed database and monitoring"
|
||||
- "Multi-region deployment with failover"
|
||||
|
||||
3. **Edge Cases**:
|
||||
- Conflicting requirements
|
||||
- Ambiguous specifications
|
||||
- Deprecated technologies
|
||||
|
||||
4. **Refinement Cycles**:
|
||||
- Interactive generation with multiple refinement passes
|
||||
- Error recovery and re-prompting
|
||||
- User feedback incorporation
|
||||
|
||||
## Success Criteria (Q2 2025)
|
||||
|
||||
- ✅ Generates valid Nickel for 90% of user descriptions
|
||||
- ✅ Generated configs pass all schema validation
|
||||
- ✅ Supports top 10 infrastructure patterns
|
||||
- ✅ Interactive refinement works smoothly
|
||||
- ✅ Error messages explain issues clearly
|
||||
- ✅ User testing with non-experts succeeds
|
||||
- ✅ Documentation complete with examples
|
||||
- ✅ Integration with form assistance operational
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [AI-Assisted Forms](ai-assisted-forms.md) - Related form feature
|
||||
- [RAG System](rag-system.md) - Context retrieval
|
||||
- [Configuration](configuration.md) - Setup guide
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Status**: 🔴 Planned
|
||||
**Target Release**: Q2 2025
|
||||
**Last Updated**: 2025-01-13
|
||||
**Architecture**: Complete
|
||||
**Implementation**: In Design Phase
|
||||
|
||||
@ -1 +1,450 @@
|
||||
# RAG System
|
||||
# Retrieval-Augmented Generation (RAG) System
|
||||
|
||||
**Status**: ✅ Production-Ready (SurrealDB 1.5.0+, 22/22 tests passing)
|
||||
|
||||
The RAG system enables the AI service to access, retrieve, and reason over infrastructure documentation, schemas, and past configurations. This allows
|
||||
the AI to generate contextually accurate infrastructure configurations and provide intelligent troubleshooting advice grounded in actual platform
|
||||
knowledge.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
The RAG system consists of:
|
||||
|
||||
1. **Document Store**: SurrealDB vector store with semantic indexing
|
||||
2. **Hybrid Search**: Vector similarity + BM25 keyword search
|
||||
3. **Chunk Management**: Intelligent document chunking for code and markdown
|
||||
4. **Context Ranking**: Relevance scoring for retrieved documents
|
||||
5. **Semantic Cache**: Deduplication of repeated queries
|
||||
|
||||
## Core Components
|
||||
|
||||
### 1. Vector Embeddings
|
||||
|
||||
The system uses embedding models to convert documents into vector representations:
|
||||
|
||||
```
|
||||
┌─────────────────────┐
|
||||
│ Document Source │
|
||||
│ (Markdown, Code) │
|
||||
└──────────┬──────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Chunking & Tokenization │
|
||||
│ - Code-aware splits │
|
||||
│ - Markdown aware │
|
||||
│ - Preserves context │
|
||||
└──────────┬───────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Embedding Model │
|
||||
│ (OpenAI Ada, Anthropic, Local) │
|
||||
└──────────┬───────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Vector Storage (SurrealDB) │
|
||||
│ - Vector index │
|
||||
│ - Metadata indexed │
|
||||
│ - BM25 index for keywords │
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
### 2. SurrealDB Integration
|
||||
|
||||
SurrealDB serves as the vector database and knowledge store:
|
||||
|
||||
```
|
||||
# Configuration in provisioning/schemas/ai.ncl
|
||||
let {
|
||||
rag = {
|
||||
enabled = true,
|
||||
db_url = "surreal://localhost:8000",
|
||||
namespace = "provisioning",
|
||||
database = "ai_rag",
|
||||
|
||||
# Collections for different document types
|
||||
collections = {
|
||||
documentation = {
|
||||
chunking_strategy = "markdown",
|
||||
chunk_size = 1024,
|
||||
overlap = 256,
|
||||
},
|
||||
schemas = {
|
||||
chunking_strategy = "code",
|
||||
chunk_size = 512,
|
||||
overlap = 128,
|
||||
},
|
||||
deployments = {
|
||||
chunking_strategy = "json",
|
||||
chunk_size = 2048,
|
||||
overlap = 512,
|
||||
},
|
||||
},
|
||||
|
||||
# Embedding configuration
|
||||
embedding = {
|
||||
provider = "openai", # or "anthropic", "local"
|
||||
model = "text-embedding-3-small",
|
||||
cache_vectors = true,
|
||||
},
|
||||
|
||||
# Search configuration
|
||||
search = {
|
||||
hybrid_enabled = true,
|
||||
vector_weight = 0.7,
|
||||
keyword_weight = 0.3,
|
||||
top_k = 5, # Number of results to return
|
||||
semantic_cache = true,
|
||||
},
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Document Chunking
|
||||
|
||||
Intelligent chunking preserves context while managing token limits:
|
||||
|
||||
#### Markdown Chunking Strategy
|
||||
|
||||
```
|
||||
Input Document: provisioning/docs/src/guides/from-scratch.md
|
||||
|
||||
Chunks:
|
||||
[1] Header + first section (up to 1024 tokens)
|
||||
[2] Next logical section + overlap with [1]
|
||||
[3] Code examples preserve as atomic units
|
||||
[4] Continue with overlap...
|
||||
|
||||
Each chunk includes:
|
||||
- Original section heading (for context)
|
||||
- Content
|
||||
- Source file and line numbers
|
||||
- Metadata (doctype, category, version)
|
||||
```
|
||||
|
||||
#### Code Chunking Strategy
|
||||
|
||||
```
|
||||
Input Document: provisioning/schemas/main.ncl
|
||||
|
||||
Chunks:
|
||||
[1] Top-level let binding + comments
|
||||
[2] Function definition (atomic, preserves signature)
|
||||
[3] Type definition (atomic, preserves interface)
|
||||
[4] Implementation blocks with context overlap
|
||||
|
||||
Each chunk preserves:
|
||||
- Type signatures
|
||||
- Function signatures
|
||||
- Import statements needed for context
|
||||
- Comments and docstrings
|
||||
```
|
||||
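A simplified sketch of the overlap-based splitting described above is given below, using character counts as a stand-in for tokens. The real chunker is token-aware and heading-aware and keeps code fences atomic, so treat this purely as illustration.

```
// Sketch: fixed-size chunking with overlap, measured in characters rather
// than tokens for simplicity. Real chunking also respects headings and
// keeps code examples as atomic units.
fn chunk_with_overlap(text: &str, chunk_size: usize, overlap: usize) -> Vec<String> {
    assert!(overlap < chunk_size, "overlap must be smaller than chunk_size");

    let chars: Vec<char> = text.chars().collect();
    let step = chunk_size - overlap;
    let mut chunks = Vec::new();
    let mut start = 0;

    while start < chars.len() {
        let end = (start + chunk_size).min(chars.len());
        chunks.push(chars[start..end].iter().collect());
        if end == chars.len() {
            break;
        }
        start += step;
    }

    chunks
}
```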
|
||||
## Hybrid Search
|
||||
|
||||
The system implements dual search strategy for optimal results:
|
||||
|
||||
### Vector Similarity Search
|
||||
|
||||
```
|
||||
// Find semantically similar documents
|
||||
async fn vector_search(query: &str, top_k: usize) -> Vec<Document> {
|
||||
let embedding = embed(query).await?;
|
||||
|
||||
// Cosine similarity in SurrealDB
|
||||
db.query("
|
||||
SELECT *, vector::similarity::cosine(embedding, $embedding) AS score
|
||||
FROM documents
|
||||
WHERE embedding <~> $embedding
|
||||
ORDER BY score DESC
|
||||
LIMIT $top_k
|
||||
")
|
||||
.bind(("embedding", embedding))
|
||||
.bind(("top_k", top_k))
|
||||
.await
|
||||
}
|
||||
```
|
||||
|
||||
**Use case**: Semantic understanding of intent
|
||||
- Query: "How to configure PostgreSQL"
|
||||
- Finds: Documents about database configuration, examples, schemas
|
||||
|
||||
### BM25 Keyword Search
|
||||
|
||||
```
|
||||
// Find documents with matching keywords
|
||||
async fn keyword_search(query: &str, top_k: usize) -> Vec<Document> {
|
||||
// BM25 full-text search in SurrealDB
|
||||
db.query("
|
||||
SELECT *, search::bm25(.) AS score
|
||||
FROM documents
|
||||
WHERE text @@ $query
|
||||
ORDER BY score DESC
|
||||
LIMIT $top_k
|
||||
")
|
||||
.bind(("query", query))
|
||||
.bind(("top_k", top_k))
|
||||
.await
|
||||
}
|
||||
```
|
||||
|
||||
**Use case**: Exact term matching
|
||||
- Query: "SurrealDB configuration"
|
||||
- Finds: Documents mentioning SurrealDB specifically
|
||||
|
||||
### Hybrid Results
|
||||
|
||||
```
|
||||
async fn hybrid_search(
|
||||
query: &str,
|
||||
vector_weight: f32,
|
||||
keyword_weight: f32,
|
||||
top_k: usize,
|
||||
) -> Vec<Document> {
|
||||
let vector_results = vector_search(query, top_k * 2).await?;
|
||||
let keyword_results = keyword_search(query, top_k * 2).await?;
|
||||
|
||||
let mut scored = HashMap::new();
|
||||
|
||||
// Score from vector search
|
||||
for (i, doc) in vector_results.iter().enumerate() {
|
||||
*scored.entry(doc.id).or_insert(0.0) +=
|
||||
vector_weight * (1.0 - (i as f32 / top_k as f32));
|
||||
}
|
||||
|
||||
// Score from keyword search
|
||||
for (i, doc) in keyword_results.iter().enumerate() {
|
||||
*scored.entry(doc.id).or_insert(0.0) +=
|
||||
keyword_weight * (1.0 - (i as f32 / top_k as f32));
|
||||
}
|
||||
|
||||
// Return top-k by combined score
|
||||
let mut results: Vec<_> = scored.into_iter().collect();
|
||||
    results.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
|
||||
    Ok(results.into_iter().take(top_k).map(|(id, _)| ...).collect())
|
||||
}
|
||||
```
|
||||
|
||||
## Semantic Caching
|
||||
|
||||
Reduces API calls by caching embeddings of repeated queries:
|
||||
|
||||
```
|
||||
struct SemanticCache {
|
||||
queries: Arc<DashMap<Vec<f32>, CachedResult>>,
|
||||
similarity_threshold: f32,
|
||||
}
|
||||
|
||||
impl SemanticCache {
|
||||
async fn get(&self, query: &str) -> Option<CachedResult> {
|
||||
let embedding = embed(query).await?;
|
||||
|
||||
// Find cached query with similar embedding
|
||||
// (cosine distance < threshold)
|
||||
for entry in self.queries.iter() {
|
||||
let distance = cosine_distance(&embedding, entry.key());
|
||||
if distance < self.similarity_threshold {
|
||||
return Some(entry.value().clone());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
async fn insert(&self, query: &str, result: CachedResult) {
|
||||
let embedding = embed(query).await?;
|
||||
self.queries.insert(embedding, result);
|
||||
}
|
||||
}
|
||||
```
|
||||
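The cache above relies on a `cosine_distance` helper that is not shown in this document; a straightforward version might look like the following, assuming equal-length, non-zero vectors.

```
// Cosine distance = 1 - cosine similarity. Assumes both vectors have the
// same dimension and neither is the zero vector.
fn cosine_distance(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    1.0 - dot / (norm_a * norm_b)
}
```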
|
||||
**Benefits**:
|
||||
- 50-80% reduction in embedding API calls
|
||||
- Identical queries return in <10ms
|
||||
- Similar queries reuse cached context
|
||||
|
||||
## Ingestion Workflow
|
||||
|
||||
### Document Indexing
|
||||
|
||||
```
|
||||
# Index all documentation
|
||||
provisioning ai index-docs provisioning/docs/src
|
||||
|
||||
# Index schemas
|
||||
provisioning ai index-schemas provisioning/schemas
|
||||
|
||||
# Index past deployments
|
||||
provisioning ai index-deployments workspaces/*/deployments
|
||||
|
||||
# Watch directory for changes (development mode)
|
||||
provisioning ai watch docs provisioning/docs/src
|
||||
```
|
||||
|
||||
### Programmatic Indexing
|
||||
|
||||
```
|
||||
// In ai-service on startup
|
||||
async fn initialize_rag() -> Result<()> {
|
||||
let rag = RAGSystem::new(&config.rag).await?;
|
||||
|
||||
// Index documentation
|
||||
let docs = load_markdown_docs("provisioning/docs/src")?;
|
||||
for doc in docs {
|
||||
rag.ingest_document(&doc).await?;
|
||||
}
|
||||
|
||||
// Index schemas
|
||||
let schemas = load_nickel_schemas("provisioning/schemas")?;
|
||||
for schema in schemas {
|
||||
rag.ingest_schema(&schema).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Query the RAG System
|
||||
|
||||
```
|
||||
# Search for context-aware information
|
||||
provisioning ai query "How do I configure PostgreSQL with encryption?"
|
||||
|
||||
# Get configuration template
|
||||
provisioning ai template "Describe production Kubernetes on AWS"
|
||||
|
||||
# Interactive mode
|
||||
provisioning ai chat
|
||||
> What are the best practices for database backup?
|
||||
```
|
||||
|
||||
### AI Service Integration
|
||||
|
||||
```
|
||||
// AI service uses RAG to enhance generation
|
||||
async fn generate_config(user_request: &str) -> Result<String> {
|
||||
// Retrieve relevant context
|
||||
    let context = rag.search(user_request, 5).await?;
|
||||
|
||||
// Build prompt with context
|
||||
let prompt = build_prompt_with_context(user_request, &context);
|
||||
|
||||
// Generate configuration
|
||||
let config = llm.generate(&prompt).await?;
|
||||
|
||||
// Validate against schemas
|
||||
validate_nickel_config(&config)?;
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
```
|
||||
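The `build_prompt_with_context` step above is not defined in this document; a minimal version could simply place the retrieved chunks ahead of the user request, as in this sketch. The `Document` fields (`source`, `content`) are assumptions for illustration.

```
// Illustrative prompt assembly: retrieved chunks first, then the request.
// The Document fields (source, content) are assumptions for this sketch.
fn build_prompt_with_context(user_request: &str, context: &[Document]) -> String {
    let mut prompt = String::from("You are generating Nickel infrastructure configuration.\n\n");

    prompt.push_str("Context from documentation and past configurations:\n");
    for doc in context {
        prompt.push_str(&format!("--- {} ---\n{}\n\n", doc.source, doc.content));
    }

    prompt.push_str("User request:\n");
    prompt.push_str(user_request);
    prompt.push_str("\n\nGenerate the complete Nickel configuration.");

    prompt
}
```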
|
||||
### Form Assistance Integration
|
||||
|
||||
```
|
||||
// In typdialog-ai (JavaScript/TypeScript)
|
||||
async function suggestFieldValue(fieldName, currentInput) {
|
||||
// Query RAG for similar configurations
|
||||
const context = await rag.search(
|
||||
`Field: ${fieldName}, Input: ${currentInput}`,
|
||||
{ topK: 3, semantic: true }
|
||||
);
|
||||
|
||||
// Generate suggestion using context
|
||||
const suggestion = await ai.suggest({
|
||||
field: fieldName,
|
||||
input: currentInput,
|
||||
context: context,
|
||||
});
|
||||
|
||||
return suggestion;
|
||||
}
|
||||
```
|
||||
|
||||
## Performance Characteristics
|
||||
|
||||
| Operation | Time | Cache Hit |
| --------- | ---- | --------- |
| Vector embedding | 200-500ms | N/A |
| Vector search (cold) | 300-800ms | N/A |
| Keyword search | 50-200ms | N/A |
| Hybrid search | 500-1200ms | <100ms cached |
| Semantic cache hit | 10-50ms | Always |
|
||||
|
||||
**Typical query flow**:
|
||||
1. Embedding: 300ms
|
||||
2. Vector search: 400ms
|
||||
3. Keyword search: 100ms
|
||||
4. Ranking: 50ms
|
||||
5. **Total**: ~850ms (first call), <100ms (cached)
|
||||
|
||||
## Configuration
|
||||
|
||||
See [Configuration Guide](configuration.md) for detailed RAG setup:
|
||||
|
||||
- LLM provider for embeddings
|
||||
- SurrealDB connection
|
||||
- Chunking strategies
|
||||
- Search weights and limits
|
||||
- Cache settings and TTLs
|
||||
|
||||
## Limitations and Considerations
|
||||
|
||||
### Document Freshness
|
||||
|
||||
- RAG indexes static snapshots
|
||||
- Changes to documentation require re-indexing
|
||||
- Use watch mode during development
|
||||
|
||||
### Token Limits
|
||||
|
||||
- Large documents chunked to fit LLM context
|
||||
- Some context may be lost in chunking
|
||||
- Adjustable chunk size vs. context trade-off
|
||||
|
||||
### Embedding Quality
|
||||
|
||||
- Quality depends on embedding model
|
||||
- Domain-specific models perform better
|
||||
- Fine-tuning possible for specialized vocabularies
|
||||
|
||||
## Monitoring and Debugging
|
||||
|
||||
### Query Metrics
|
||||
|
||||
```
|
||||
# View RAG search metrics
|
||||
provisioning ai metrics show rag
|
||||
|
||||
# Analysis of search quality
|
||||
provisioning ai eval-rag --sample-queries 100
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
```
|
||||
# In provisioning/config/ai.toml
|
||||
[ai.rag.debug]
|
||||
enabled = true
|
||||
log_embeddings = true # Log embedding vectors
|
||||
log_search_scores = true # Log relevance scores
|
||||
log_context_used = true # Log context retrieved
|
||||
```
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [MCP Integration](mcp-integration.md) - RAG access via MCP
|
||||
- [Configuration](configuration.md) - RAG setup guide
|
||||
- [API Reference](api-reference.md) - RAG API endpoints
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-13
|
||||
**Status**: ✅ Production-Ready
|
||||
**Test Coverage**: 22/22 tests passing
|
||||
**Database**: SurrealDB 1.5.0+
|
||||
|
||||
@ -1 +1,535 @@
|
||||
# Security Policies
|
||||
# AI Security Policies and Cedar Authorization
|
||||
|
||||
**Status**: ✅ Production-Ready (Cedar integration, policy enforcement)
|
||||
|
||||
Comprehensive documentation of security controls, authorization policies, and data protection mechanisms for the AI system. All AI operations are
|
||||
controlled through Cedar policies and include strict secret isolation.
|
||||
|
||||
## Security Model Overview
|
||||
|
||||
### Defense in Depth
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ User Request to AI │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Layer 1: Authentication │
|
||||
│ - Verify user identity │
|
||||
│ - Validate API token/credentials │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Layer 2: Authorization (Cedar) │
|
||||
│ - Check if user can access AI features │
|
||||
│ - Verify workspace permissions │
|
||||
│ - Check role-based access │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Layer 3: Data Sanitization │
|
||||
│ - Remove secrets from data │
|
||||
│ - Redact PII │
|
||||
│ - Filter sensitive information │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Layer 4: Request Validation │
|
||||
│ - Check request parameters │
|
||||
│ - Verify resource constraints │
|
||||
│ - Apply rate limits │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Layer 5: External API Call │
|
||||
│ - Only if all previous checks pass │
|
||||
│ - Encrypted TLS connection │
|
||||
│ - No secrets in request │
|
||||
└──────────────┬──────────────────────────┘
|
||||
↓
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Layer 6: Audit Logging │
|
||||
│ - Log all AI operations │
|
||||
│ - Capture user, time, action │
|
||||
│ - Store in tamper-proof log │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
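A sketch of the enforcement point is shown below: every AI request passes an explicit authorization check, then sanitization, before any external call is made, and both decisions are audited. The `PolicyEngine`, `Principal`, `AiAction`, and related types are placeholders standing in for the real Cedar-backed services, not their actual APIs.

```
// Sketch of the layered enforcement flow. All types and helpers here are
// illustrative placeholders for the platform's real components.
async fn handle_ai_request(
    engine: &PolicyEngine,
    principal: &Principal,
    action: AiAction,
    resource: &Resource,
    request: AiRequest,
) -> Result<AiResponse, AiError> {
    // Layer 2: authorization decision
    let decision = engine.is_authorized(principal, &action, resource);

    if !decision.allowed {
        audit_log(principal, &action, resource, "deny");
        return Err(AiError::Forbidden(decision.reason));
    }
    audit_log(principal, &action, resource, "permit");

    // Layer 3: strip secrets and PII before anything leaves the platform
    let sanitized = sanitize_request(request)?;

    // Layer 5: external LLM call happens only after all checks pass
    call_llm(sanitized).await
}
```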
|
||||
## Cedar Policies
|
||||
|
||||
### Policy Engine Setup
|
||||
|
||||
```
|
||||
// File: provisioning/policies/ai-policies.cedar
|
||||
|
||||
// Core principle: Least privilege
|
||||
// All actions denied by default unless explicitly allowed
|
||||
|
||||
// Admin users can access all AI features
|
||||
permit(
|
||||
principal == ?principal,
|
||||
action == Action::"ai_generate_config",
|
||||
resource == ?resource
|
||||
)
|
||||
when {
|
||||
principal.role == "admin"
|
||||
};
|
||||
|
||||
// Developers can use AI within their workspace
|
||||
permit(
|
||||
principal == ?principal,
|
||||
action in [
|
||||
Action::"ai_query",
|
||||
Action::"ai_generate_config",
|
||||
Action::"ai_troubleshoot"
|
||||
],
|
||||
resource == ?resource
|
||||
)
|
||||
when {
|
||||
principal.role in ["developer", "senior_engineer"]
|
||||
&& principal.workspace == resource.workspace
|
||||
};
|
||||
|
||||
// Operators can access troubleshooting and queries
|
||||
permit(
|
||||
principal == ?principal,
|
||||
action in [
|
||||
Action::"ai_query",
|
||||
Action::"ai_troubleshoot"
|
||||
],
|
||||
resource == ?resource
|
||||
)
|
||||
when {
|
||||
principal.role in ["operator", "devops"]
|
||||
};
|
||||
|
||||
// Form assistance enabled for all authenticated users
|
||||
permit(
|
||||
principal == ?principal,
|
||||
action == Action::"ai_form_assistance",
|
||||
resource == ?resource
|
||||
)
|
||||
when {
|
||||
principal.authenticated == true
|
||||
};
|
||||
|
||||
// Agents (when available) require explicit approval
|
||||
permit(
|
||||
principal == ?principal,
|
||||
action == Action::"ai_agent_execute",
|
||||
resource == ?resource
|
||||
)
|
||||
when {
|
||||
principal.role == "automation_admin"
|
||||
&& resource.requires_approval == true
|
||||
};
|
||||
|
||||
// MCP tool access - restrictive by default
|
||||
permit(
|
||||
principal == ?principal,
|
||||
action == Action::"mcp_tool_call",
|
||||
resource == ?resource
|
||||
)
|
||||
when {
|
||||
principal.role == "admin"
|
||||
    || (principal.role == "developer" && resource.tool in ["generate_config", "validate_config"])
|
||||
};
|
||||
|
||||
// Cost control policies
|
||||
permit(
|
||||
principal == ?principal,
|
||||
action == Action::"ai_generate_config",
|
||||
resource == ?resource
|
||||
)
|
||||
when {
|
||||
// User must have remaining budget
|
||||
principal.ai_budget_remaining_usd > resource.estimated_cost_usd
|
||||
// Workspace must be under budget
|
||||
&& resource.workspace.ai_budget_remaining_usd > resource.estimated_cost_usd
|
||||
};
|
||||
```
|
||||
|
||||
### Policy Best Practices
|
||||
|
||||
1. **Explicit Allow**: Only allow specific actions, deny by default
|
||||
2. **Workspace Isolation**: Users can't access AI in other workspaces
|
||||
3. **Role-Based**: Use consistent role definitions
|
||||
4. **Cost-Aware**: Check budgets before operations
|
||||
5. **Audit Trail**: Log all policy decisions
|
||||
|
||||
## Data Sanitization
|
||||
|
||||
### Automatic PII Removal
|
||||
|
||||
Before sending data to external LLMs, the system removes:
|
||||
|
||||
```
|
||||
Patterns Removed:
|
||||
├─ Passwords: password="...", pwd=..., etc.
|
||||
├─ API Keys: api_key=..., api-key=..., etc.
|
||||
├─ Tokens: token=..., bearer=..., etc.
|
||||
├─ Email addresses: user@example.com (unless necessary for context)
|
||||
├─ Phone numbers: +1-555-0123 patterns
|
||||
├─ Credit cards: 4111-1111-1111-1111 patterns
|
||||
├─ SSH keys: -----BEGIN RSA PRIVATE KEY-----...
|
||||
└─ AWS/GCP/Azure: AKIA2..., AIza..., etc.
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
```
|
||||
[ai.security]
|
||||
sanitize_pii = true
|
||||
sanitize_secrets = true
|
||||
|
||||
# Custom redaction patterns
|
||||
redact_patterns = [
|
||||
# Database passwords
|
||||
"(?i)db[_-]?password\\s*[:=]\\s*'?[^'\\n]+'?",
|
||||
# Generic secrets
|
||||
"(?i)secret\\s*[:=]\\s*'?[^'\\n]+'?",
|
||||
# API endpoints that shouldn't be logged
|
||||
"https?://api[.-]secret\\..+",
|
||||
]
|
||||
|
||||
# Exceptions (patterns NOT to redact)
|
||||
preserve_patterns = [
|
||||
# Preserve example.com domain for docs
|
||||
"example\\.com",
|
||||
# Preserve placeholder emails
|
||||
"user@example\\.com",
|
||||
]
|
||||
```
|
||||
|
||||
### Example Sanitization
|
||||
|
||||
**Before**:
|
||||
```
|
||||
Error configuring database:
|
||||
connection_string: postgresql://dbadmin:MySecurePassword123@prod-db.us-east-1.rds.amazonaws.com:5432/app
|
||||
api_key: sk-ant-abc123def456
|
||||
vault_token: hvs.CAESIyg7...
|
||||
```
|
||||
|
||||
**After Sanitization**:
|
||||
```
|
||||
Error configuring database:
|
||||
connection_string: postgresql://dbadmin:[REDACTED]@prod-db.us-east-1.rds.amazonaws.com:5432/app
|
||||
api_key: [REDACTED]
|
||||
vault_token: [REDACTED]
|
||||
```
|
||||
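A reduced sketch of the redaction pass using the `regex` crate follows. The two patterns shown cover only a fraction of the categories listed above, and the production sanitizer also honours the `preserve_patterns` exceptions.

```
use regex::Regex;

// Minimal redaction pass: two illustrative patterns from the list above.
// The real sanitizer applies the full configured pattern set and skips
// anything matched by preserve_patterns.
fn sanitize(input: &str) -> String {
    let patterns = [
        r"(?i)(password|pwd)\s*[:=]\s*\S+",
        r"(?i)(api[_-]?key|token)\s*[:=]\s*\S+",
    ];

    let mut output = input.to_string();
    for pattern in patterns {
        let re = Regex::new(pattern).expect("pattern is valid");
        output = re.replace_all(&output, "${1}=[REDACTED]").to_string();
    }
    output
}
```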
|
||||
## Secret Isolation
|
||||
|
||||
### Never Access Secrets Directly
|
||||
|
||||
AI cannot directly access secrets. Instead:
|
||||
|
||||
```
|
||||
User wants: "Configure PostgreSQL with encrypted backups"
|
||||
↓
|
||||
AI generates: Configuration schema with placeholders
|
||||
↓
|
||||
User inserts: Actual secret values (connection strings, passwords)
|
||||
↓
|
||||
System encrypts: Secrets remain encrypted at rest
|
||||
↓
|
||||
Deployment: Uses secrets from secure store (Vault, AWS Secrets Manager)
|
||||
```
|
||||
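One way to realize this flow is to let generated configurations carry placeholders that are resolved only at deploy time. The `{{secret:NAME}}` placeholder syntax and the `SecretStore` trait below are illustrative assumptions, not the platform's actual contract.

```
use regex::Regex;

// Sketch: resolve "{{secret:NAME}}" placeholders at deploy time from a
// secure store. Placeholder syntax and SecretStore are assumptions.
trait SecretStore {
    fn fetch(&self, name: &str) -> Result<String, SecretError>;
}

fn resolve_placeholders(config_text: &str, store: &dyn SecretStore) -> Result<String, SecretError> {
    let re = Regex::new(r"\{\{secret:([A-Za-z0-9_./-]+)\}\}").expect("valid pattern");
    let mut resolved = String::new();
    let mut last = 0;

    for caps in re.captures_iter(config_text) {
        let whole = caps.get(0).expect("match exists");
        resolved.push_str(&config_text[last..whole.start()]);
        resolved.push_str(&store.fetch(&caps[1])?);
        last = whole.end();
    }
    resolved.push_str(&config_text[last..]);
    Ok(resolved)
}
```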
|
||||
### Secret Protection Rules
|
||||
|
||||
1. **No Direct Access**: AI never reads from Vault/Secrets Manager
|
||||
2. **Never in Logs**: Secrets never logged or stored in cache
|
||||
3. **Sanitization**: All secrets redacted before sending to LLM
|
||||
4. **Encryption**: Secrets encrypted at rest and in transit
|
||||
5. **Audit Trail**: All access to secrets logged
|
||||
6. **TTL**: Temporary secrets auto-expire
|
||||
|
||||
## Local Models Support
|
||||
|
||||
### Air-Gapped Deployments
|
||||
|
||||
For environments requiring zero external API calls:
|
||||
|
||||
```
|
||||
# Deploy local Ollama with provisioning support
|
||||
docker run -d \
|
||||
--name provisioning-ai \
|
||||
-p 11434:11434 \
|
||||
-v ollama:/root/.ollama \
|
||||
-e OLLAMA_HOST=0.0.0.0:11434 \
|
||||
ollama/ollama
|
||||
|
||||
# Pull model
|
||||
ollama pull mistral
|
||||
ollama pull llama2-70b
|
||||
|
||||
# Configure provisioning to use local model
|
||||
provisioning config edit ai
|
||||
|
||||
[ai]
|
||||
provider = "local"
|
||||
model = "mistral"
|
||||
api_base = "http://localhost:11434"
|
||||
```
|
||||
|
||||
### Benefits
|
||||
|
||||
- ✅ Zero external API calls
|
||||
- ✅ Full data privacy (no LLM vendor access)
|
||||
- ✅ Compliance with classified/regulated data
|
||||
- ✅ No API key exposure
|
||||
- ✅ Deterministic (same results each run)
|
||||
|
||||
### Performance Trade-offs
|
||||
|
||||
| Factor | Local | Cloud |
| ------ | ----- | ----- |
| Privacy | Excellent | Requires trust |
| Cost | Free (hardware) | Per token |
| Speed | 5-30s/response | 2-5s/response |
| Quality | Good (70B models) | Excellent (Opus) |
| Hardware | Requires GPU | None |
|
||||
|
||||
## HSM Integration
|
||||
|
||||
### Hardware Security Module Support
|
||||
|
||||
For highly sensitive environments:
|
||||
|
||||
```
|
||||
[ai.security.hsm]
|
||||
enabled = true
|
||||
provider = "aws-cloudhsm" # or "thales", "yubihsm"
|
||||
|
||||
[ai.security.hsm.aws]
|
||||
cluster_id = "cluster-123"
|
||||
customer_ca_cert = "/etc/provisioning/certs/customerCA.crt"
|
||||
server_cert = "/etc/provisioning/certs/server.crt"
|
||||
server_key = "/etc/provisioning/certs/server.key"
|
||||
```
|
||||
|
||||
## Encryption
|
||||
|
||||
### Data at Rest
|
||||
|
||||
```
|
||||
[ai.security.encryption]
|
||||
enabled = true
|
||||
algorithm = "aes-256-gcm"
|
||||
key_derivation = "argon2id"
|
||||
|
||||
# Key rotation
|
||||
key_rotation_enabled = true
|
||||
key_rotation_days = 90
|
||||
rotation_alert_days = 7
|
||||
|
||||
# Encrypted storage
|
||||
cache_encryption = true
|
||||
log_encryption = true
|
||||
```
|
||||
|
||||
### Data in Transit
|
||||
|
||||
```
|
||||
All external LLM API calls:
|
||||
├─ TLS 1.3 (minimum)
|
||||
├─ Certificate pinning (optional)
|
||||
├─ Mutual TLS (with cloud providers)
|
||||
└─ No plaintext transmission
|
||||
```
|
||||
|
||||
## Audit Logging
|
||||
|
||||
### What Gets Logged
|
||||
|
||||
```
|
||||
{
|
||||
"timestamp": "2025-01-13T10:30:45Z",
|
||||
"event_type": "ai_action",
|
||||
"action": "generate_config",
|
||||
"principal": {
|
||||
"user_id": "user-123",
|
||||
"role": "developer",
|
||||
"workspace": "prod"
|
||||
},
|
||||
"resource": {
|
||||
"type": "database",
|
||||
"name": "prod-postgres"
|
||||
},
|
||||
"authorization": {
|
||||
"decision": "permit",
|
||||
"policy": "ai-policies.cedar",
|
||||
"reason": "developer role in workspace"
|
||||
},
|
||||
"cost": {
|
||||
"tokens_used": 1250,
|
||||
"estimated_cost_usd": 0.037
|
||||
},
|
||||
"sanitization": {
|
||||
"items_redacted": 3,
|
||||
"patterns_matched": ["db_password", "api_key", "token"]
|
||||
},
|
||||
"status": "success"
|
||||
}
|
||||
```
|
||||
|
||||
### Audit Trail Access
|
||||
|
||||
```
|
||||
# View recent AI actions
|
||||
provisioning audit log ai --tail 100
|
||||
|
||||
# Filter by user
|
||||
provisioning audit log ai --user alice@company.com
|
||||
|
||||
# Filter by action
|
||||
provisioning audit log ai --action generate_config
|
||||
|
||||
# Filter by time range
|
||||
provisioning audit log ai --from "2025-01-01" --to "2025-01-13"
|
||||
|
||||
# Export for analysis
|
||||
provisioning audit export ai --format csv --output audit.csv
|
||||
|
||||
# Full-text search
|
||||
provisioning audit search ai "error in database configuration"
|
||||
```
|
||||
|
||||
## Compliance Frameworks
|
||||
|
||||
### Built-in Compliance Checks
|
||||
|
||||
```
|
||||
[ai.compliance]
|
||||
frameworks = ["pci-dss", "hipaa", "sox", "gdpr"]
|
||||
|
||||
[ai.compliance.pci-dss]
|
||||
enabled = true
|
||||
# Requires encryption, audit logs, access controls
|
||||
|
||||
[ai.compliance.hipaa]
|
||||
enabled = true
|
||||
# Requires local models, encrypted storage, audit logs
|
||||
|
||||
[ai.compliance.gdpr]
|
||||
enabled = true
|
||||
# Requires data deletion, consent tracking, privacy by design
|
||||
```
|
||||
|
||||
### Compliance Reports
|
||||
|
||||
```
|
||||
# Generate compliance report
|
||||
provisioning audit compliance-report \
|
||||
--framework pci-dss \
|
||||
--period month \
|
||||
--output report.pdf
|
||||
|
||||
# Verify compliance
|
||||
provisioning audit verify-compliance \
|
||||
--framework hipaa \
|
||||
--verbose
|
||||
```
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
### For Administrators
|
||||
|
||||
1. **Rotate API Keys**: Every 90 days minimum
|
||||
2. **Monitor Budget**: Set up alerts at 80% and 90%
|
||||
3. **Review Policies**: Quarterly policy audit
|
||||
4. **Audit Logs**: Weekly review of AI operations
|
||||
5. **Update Models**: Use latest stable models
|
||||
6. **Test Recovery**: Monthly rollback drills
|
||||
|
||||
### For Developers
|
||||
|
||||
1. **Use Workspace Isolation**: Never share workspace access
|
||||
2. **Don't Log Secrets**: Use sanitization, never bypass it
|
||||
3. **Validate Outputs**: Always review AI-generated configs
|
||||
4. **Report Issues**: Security issues to `security-ai@company.com`
|
||||
5. **Stay Updated**: Follow security bulletins
|
||||
|
||||
### For Operators
|
||||
|
||||
1. **Monitor Costs**: Alert if exceeding 110% of budget
|
||||
2. **Watch Errors**: Unusual error patterns may indicate attacks
|
||||
3. **Check Audit Logs**: Unauthorized access attempts
|
||||
4. **Test Policies**: Periodically verify Cedar policies work
|
||||
5. **Backup Configs**: Secure backup of policy files
|
||||
|
||||
## Incident Response
|
||||
|
||||
### Compromised API Key
|
||||
|
||||
```
|
||||
# 1. Immediately revoke key
|
||||
provisioning admin revoke-key ai-api-key-123
|
||||
|
||||
# 2. Rotate key
|
||||
provisioning admin rotate-key ai \
|
||||
--notify ops-team@company.com
|
||||
|
||||
# 3. Audit usage since compromise
|
||||
provisioning audit log ai \
|
||||
--since "2025-01-13T09:00:00Z" \
|
||||
--api-key-id ai-api-key-123
|
||||
|
||||
# 4. Review any generated configs from this period
|
||||
# Configs generated while key was compromised may need review
|
||||
```
|
||||
|
||||
### Unauthorized Access
|
||||
|
||||
```
|
||||
# Review Cedar policy logs
|
||||
provisioning audit log ai \
|
||||
--decision deny \
|
||||
--last-hour
|
||||
|
||||
# Check for pattern
|
||||
provisioning audit search ai "authorization.*deny" \
|
||||
--trend-analysis
|
||||
|
||||
# Update policies if needed
|
||||
provisioning policy update ai-policies.cedar
|
||||
```
|
||||
|
||||
## Security Checklist
|
||||
|
||||
### Pre-Production
|
||||
|
||||
- ✅ Cedar policies reviewed and tested
|
||||
- ✅ API keys rotated and secured
|
||||
- ✅ Data sanitization tested with real secrets
|
||||
- ✅ Encryption enabled for cache
|
||||
- ✅ Audit logging configured
|
||||
- ✅ Cost limits set appropriately
|
||||
- ✅ Local-only mode tested (if needed)
|
||||
- ✅ HSM configured (if required)
|
||||
|
||||
### Ongoing
|
||||
|
||||
- ✅ Monthly policy review
|
||||
- ✅ Weekly audit log review
|
||||
- ✅ Quarterly key rotation
|
||||
- ✅ Annual compliance assessment
|
||||
- ✅ Continuous budget monitoring
|
||||
- ✅ Error pattern analysis
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - System overview
|
||||
- [Configuration](configuration.md) - Security settings
|
||||
- [Cost Management](cost-management.md) - Budget controls
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-13
|
||||
**Status**: ✅ Production-Ready
|
||||
**Compliance**: PCI-DSS, HIPAA, SOX, GDPR
|
||||
**Cedar Version**: 3.0+
|
||||
|
||||
@ -1 +1,502 @@
|
||||
# Troubleshooting with AI
|
||||
# AI-Assisted Troubleshooting and Debugging
|
||||
|
||||
**Status**: ✅ Production-Ready (AI troubleshooting analysis, log parsing)
|
||||
|
||||
The AI troubleshooting system provides intelligent debugging assistance for infrastructure failures. The system analyzes deployment logs, identifies
|
||||
root causes, suggests fixes, and generates corrected configurations based on failure patterns.
|
||||
|
||||
## Feature Overview
|
||||
|
||||
### What It Does
|
||||
|
||||
Transform deployment failures into actionable insights:
|
||||
|
||||
```
|
||||
Deployment Fails with Error
|
||||
↓
|
||||
AI analyzes logs:
|
||||
- Identifies failure phase (networking, database, k8s, etc.)
|
||||
- Detects root cause (resource limits, configuration, timeout)
|
||||
- Correlates with similar past failures
|
||||
- Reviews deployment configuration
|
||||
↓
|
||||
AI generates report:
|
||||
- Root cause explanation in plain English
|
||||
- Configuration issues identified
|
||||
- Suggested fixes with rationale
|
||||
- Alternative solutions
|
||||
- Links to relevant documentation
|
||||
↓
|
||||
Developer reviews and accepts:
|
||||
- Understands what went wrong
|
||||
- Knows how to fix it
|
||||
- Can implement fix with confidence
|
||||
```
|
||||
|
||||
## Troubleshooting Workflow
|
||||
|
||||
### Automatic Detection and Analysis
|
||||
|
||||
```
|
||||
┌──────────────────────────────────────────┐
|
||||
│ Deployment Monitoring │
|
||||
│ - Watches deployment for failures │
|
||||
│ - Captures logs in real-time │
|
||||
│ - Detects failure events │
|
||||
└──────────────┬───────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────┐
|
||||
│ Log Collection │
|
||||
│ - Gather all relevant logs │
|
||||
│ - Include stack traces │
|
||||
│ - Capture metrics at failure time │
|
||||
│ - Get resource usage data │
|
||||
└──────────────┬───────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────┐
|
||||
│ Context Retrieval (RAG) │
|
||||
│ - Find similar past failures │
|
||||
│ - Retrieve troubleshooting guides │
|
||||
│ - Get schema constraints │
|
||||
│ - Find best practices │
|
||||
└──────────────┬───────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────┐
|
||||
│ AI Analysis │
|
||||
│ - Identify failure pattern │
|
||||
│ - Determine root cause │
|
||||
│ - Generate hypotheses │
|
||||
│ - Score likely causes │
|
||||
└──────────────┬───────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────┐
|
||||
│ Solution Generation │
|
||||
│ - Create fixed configuration │
|
||||
│ - Generate step-by-step fix guide │
|
||||
│ - Suggest preventative measures │
|
||||
│ - Provide alternative approaches │
|
||||
└──────────────┬───────────────────────────┘
|
||||
↓
|
||||
┌──────────────────────────────────────────┐
|
||||
│ Report and Recommendations │
|
||||
│ - Explain what went wrong │
|
||||
│ - Show how to fix it │
|
||||
│ - Provide corrected configuration │
|
||||
│ - Link to prevention strategies │
|
||||
└──────────────────────────────────────────┘
|
||||
```
|
||||
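A compressed sketch of that pipeline as a single function is shown below; the collector, RAG, and LLM types, along with the report structure, are placeholders standing in for the real services.

```
// Sketch: the troubleshooting pipeline from log collection to report.
// Collector, Rag, Llm, and the report types are illustrative placeholders.
async fn analyze_failure(
    collector: &Collector,
    rag: &Rag,
    llm: &Llm,
    deployment_id: &str,
) -> Result<TroubleshootingReport, AnalysisError> {
    // 1. Gather logs, metrics, and the deployed configuration
    let evidence = collector.collect(deployment_id).await?;

    // 2. Retrieve similar past failures and relevant guides
    let context = rag.search(&evidence.summary(), 5).await?;

    // 3. Ask the model for ranked hypotheses and a suggested fix
    let analysis = llm.analyze(&evidence, &context).await?;

    // 4. Package root cause, suggested fix, and prevention notes
    Ok(TroubleshootingReport {
        deployment_id: deployment_id.to_string(),
        root_cause: analysis.root_cause,
        suggested_fix: analysis.suggested_fix,
        prevention: analysis.prevention,
    })
}
```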
|
||||
## Usage Examples
|
||||
|
||||
### Example 1: Database Connection Timeout
|
||||
|
||||
**Failure**:
|
||||
```
|
||||
Deployment: deploy-2025-01-13-001
|
||||
Status: FAILED at phase database_migration
|
||||
Error: connection timeout after 30s connecting to postgres://...
|
||||
```
|
||||
|
||||
**Run Troubleshooting**:
|
||||
```
|
||||
$ provisioning ai troubleshoot deploy-2025-01-13-001
|
||||
|
||||
Analyzing deployment failure...
|
||||
|
||||
╔════════════════════════════════════════════════════════════════╗
|
||||
║ Root Cause Analysis: Database Connection Timeout ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ ║
|
||||
║ Phase: database_migration (occurred during migration job) ║
|
||||
║ Error: Timeout after 30 seconds connecting to database ║
|
||||
║ ║
|
||||
║ Most Likely Causes (confidence): ║
|
||||
║ 1. Database security group blocks migration job (85%) ║
|
||||
║ 2. Database instance not fully initialized yet (60%) ║
|
||||
║ 3. Network connectivity issue (40%) ║
|
||||
║ ║
|
||||
║ Analysis: ║
|
||||
║ - Database was created only 2 seconds before connection ║
|
||||
║ - Migration job started immediately (no wait time) ║
|
||||
║ - Security group: allows 5432 only from default SG ║
|
||||
║ - Migration pod uses different security group ║
|
||||
║ ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ Recommended Fix ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ ║
|
||||
║ Issue: Migration security group not in database's inbound ║
|
||||
║ ║
|
||||
║ Solution: Add migration pod security group to DB inbound ║
|
||||
║ ║
|
||||
║ database.security_group.ingress = [ ║
|
||||
║ { ║
|
||||
║ from_port = 5432, ║
|
||||
║ to_port = 5432, ║
|
||||
║ source_security_group = "migration-pods-sg" ║
|
||||
║ } ║
|
||||
║ ] ║
|
||||
║ ║
|
||||
║ Alternative: Add 30-second wait after database creation ║
|
||||
║ ║
|
||||
║ deployment.phases.database.post_actions = [ ║
|
||||
║ {action = "wait_for_database", timeout_seconds = 30} ║
|
||||
║ ] ║
|
||||
║ ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ Prevention ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ ║
|
||||
║ To prevent this in future deployments: ║
|
||||
║ ║
|
||||
║ 1. Always verify security group rules before migration ║
|
||||
║ 2. Add health check: `SELECT 1` before starting migration ║
|
||||
║ 3. Increase initial timeout: database can be slow to start ║
|
||||
║ 4. Use RDS wait condition instead of time-based wait ║
|
||||
║ ║
|
||||
║ See: docs/troubleshooting/database-connectivity.md ║
|
||||
║ docs/guides/database-migrations.md ║
|
||||
║ ║
|
||||
╚════════════════════════════════════════════════════════════════╝
|
||||
|
||||
Generate corrected configuration? [yes/no]: yes
|
||||
|
||||
Configuration generated and saved to:
|
||||
workspaces/prod/database.ncl.fixed
|
||||
|
||||
Changes made:
|
||||
✓ Added migration security group to database inbound
|
||||
✓ Added health check before migration
|
||||
✓ Increased connection timeout to 60s
|
||||
|
||||
Ready to redeploy with corrected configuration? [yes/no]: yes
|
||||
```
|
||||
|
||||
### Example 2: Kubernetes Deployment Error
|
||||
|
||||
**Failure**:
|
||||
```
|
||||
Deployment: deploy-2025-01-13-002
|
||||
Status: FAILED at phase kubernetes_workload
|
||||
Error: failed to create deployment app: Pod exceeded capacity
|
||||
```
|
||||
|
||||
**Troubleshooting**:
|
||||
```
|
||||
$ provisioning ai troubleshoot deploy-2025-01-13-002 --detailed
|
||||
|
||||
╔════════════════════════════════════════════════════════════════╗
|
||||
║ Root Cause: Pod Exceeded Node Capacity ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ ║
|
||||
║ Failure Analysis: ║
|
||||
║ ║
|
||||
║ Error: Pod requests 4CPU/8GB, but largest node has 2CPU/4GB ║
|
||||
║ Cluster: 3 nodes, each t3.medium (2CPU/4GB) ║
|
||||
║ Pod requirements: ║
|
||||
║ - CPU: 4 (requested) + 2 (reserved system) = 6 needed ║
|
||||
║ - Memory: 8Gi (requested) + 1Gi (system) = 9Gi needed ║
|
||||
║ ║
|
||||
║ Why this happened: ║
|
||||
║ Pod spec updated to 4CPU/8GB but node group wasn't ║
|
||||
║ Node group still has t3.medium (too small) ║
|
||||
║ No autoscaling configured (won't scale up automatically) ║
|
||||
║ ║
|
||||
║ Solution Options: ║
|
||||
║ 1. Reduce pod resource requests to 2CPU/4GB (simpler) ║
|
||||
║ 2. Scale up node group to t3.large (2x cost, safer) ║
|
||||
║ 3. Use both: t3.large nodes + reduce pod requests ║
|
||||
║ ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ Recommended: Option 2 (Scale up nodes) ║
|
||||
╠════════════════════════════════════════════════════════════════╣
|
||||
║ ║
|
||||
║ Reason: Pod requests are reasonable for production app ║
|
||||
║ Better to scale infrastructure than reduce resources ║
|
||||
║ ║
|
||||
║ Changes needed: ║
|
||||
║ ║
|
||||
║ kubernetes.node_group = { ║
|
||||
║ instance_type = "t3.large" # was t3.medium ║
|
||||
║ min_size = 3 ║
|
||||
║ max_size = 10 ║
|
||||
║ ║
|
||||
║ auto_scaling = { ║
|
||||
║ enabled = true ║
|
||||
║ target_cpu_percent = 70 ║
|
||||
║ } ║
|
||||
║ } ║
|
||||
║ ║
|
||||
║ Cost Impact: ║
|
||||
║ Current: 3 × t3.medium = ~$90/month ║
|
||||
║ Proposed: 3 × t3.large = ~$180/month ║
|
||||
║ With autoscaling, average: ~$150/month (some scale-down) ║
|
||||
║ ║
|
||||
╚════════════════════════════════════════════════════════════════╝
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
### Basic Troubleshooting
|
||||
|
||||
```
|
||||
# Troubleshoot recent deployment
|
||||
provisioning ai troubleshoot deploy-2025-01-13-001
|
||||
|
||||
# Get detailed analysis
|
||||
provisioning ai troubleshoot deploy-2025-01-13-001 --detailed
|
||||
|
||||
# Analyze with specific focus
|
||||
provisioning ai troubleshoot deploy-2025-01-13-001 --focus networking
|
||||
|
||||
# Get alternative solutions
|
||||
provisioning ai troubleshoot deploy-2025-01-13-001 --alternatives
|
||||
```
|
||||
|
||||
### Working with Logs
|
||||
|
||||
```
|
||||
# Troubleshoot from custom logs
|
||||
provisioning ai troubleshoot \
|
||||
  --logs "$(journalctl -u provisioning --no-pager | tail -100)"
|
||||
|
||||
# Troubleshoot from file
|
||||
provisioning ai troubleshoot --log-file /var/log/deployment.log
|
||||
|
||||
# Troubleshoot from cloud provider
|
||||
provisioning ai troubleshoot \
|
||||
--cloud-logs aws-deployment-123 \
|
||||
--region us-east-1
|
||||
```
|
||||
|
||||
### Generate Reports
|
||||
|
||||
```
|
||||
# Generate detailed troubleshooting report
|
||||
provisioning ai troubleshoot deploy-123 \
|
||||
--report \
|
||||
--output troubleshooting-report.md
|
||||
|
||||
# Generate with suggestions
|
||||
provisioning ai troubleshoot deploy-123 \
|
||||
--report \
|
||||
--include-suggestions \
|
||||
--output report-with-fixes.md
|
||||
|
||||
# Generate compliance report (PCI-DSS, HIPAA)
|
||||
provisioning ai troubleshoot deploy-123 \
|
||||
--report \
|
||||
--compliance pci-dss \
|
||||
--output compliance-report.pdf
|
||||
```
|
||||
|
||||
## Analysis Depth
|
||||
|
||||
### Shallow Analysis (Fast)
|
||||
|
||||
```
|
||||
provisioning ai troubleshoot deploy-123 --depth shallow
|
||||
|
||||
Analyzes:
|
||||
- First error message
|
||||
- Last few log lines
|
||||
- Basic pattern matching
|
||||
- Returns in 30-60 seconds
|
||||
```
|
||||
|
||||
### Deep Analysis (Thorough)
|
||||
|
||||
```
|
||||
provisioning ai troubleshoot deploy-123 --depth deep
|
||||
|
||||
Analyzes:
|
||||
- Full log context
|
||||
- Correlates multiple errors
|
||||
- Checks resource metrics
|
||||
- Compares to past failures
|
||||
- Generates alternative hypotheses
|
||||
- Returns in 5-10 seconds
|
||||
```
|
||||
|
||||
## Integration with Monitoring
|
||||
|
||||
### Automatic Troubleshooting
|
||||
|
||||
```
|
||||
# Enable auto-troubleshoot on failures
|
||||
provisioning config set ai.troubleshooting.auto_analyze true
|
||||
|
||||
# Deployments that fail automatically get analyzed
|
||||
# Reports available in provisioning dashboard
|
||||
# Alerts sent to on-call engineer with analysis
|
||||
```
|
||||
|
||||
### WebUI Integration
|
||||
|
||||
```
|
||||
Deployment Dashboard
|
||||
├─ deployment-123 [FAILED]
|
||||
│ └─ AI Analysis
|
||||
│ ├─ Root Cause: Database timeout
|
||||
│ ├─ Suggested Fix: ✓ View
|
||||
│ ├─ Corrected Config: ✓ Download
|
||||
│ └─ Alternative Solutions: 3 options
|
||||
```
|
||||
|
||||
## Learning from Failures
|
||||
|
||||
### Pattern Recognition
|
||||
|
||||
The system learns common failure patterns:
|
||||
|
||||
```
|
||||
Collected Patterns:
|
||||
├─ Database Timeouts (25% of failures)
|
||||
│ └─ Usually: Security group, connection pool, slow startup
|
||||
├─ Kubernetes Pod Failures (20%)
|
||||
│ └─ Usually: Insufficient resources, bad config
|
||||
├─ Network Connectivity (15%)
|
||||
│ └─ Usually: Security groups, routing, DNS
|
||||
└─ Other (40%)
|
||||
└─ Various causes, each analyzed individually
|
||||
```
|
||||
|
||||
### Improvement Tracking
|
||||
|
||||
```
|
||||
# See patterns in your deployments
|
||||
provisioning ai analytics failures --period month
|
||||
|
||||
Month Summary:
|
||||
Total deployments: 50
|
||||
Failed: 5 (10% failure rate)
|
||||
|
||||
Common causes:
|
||||
1. Security group rules (3 failures, 60%)
|
||||
2. Resource limits (1 failure, 20%)
|
||||
3. Configuration error (1 failure, 20%)
|
||||
|
||||
Improvement opportunities:
|
||||
- Pre-check security groups before deployment
|
||||
- Add health checks for resource sizing
|
||||
- Add configuration validation
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Troubleshooting Settings
|
||||
|
||||
```
|
||||
[ai.troubleshooting]
|
||||
enabled = true
|
||||
|
||||
# Analysis depth
|
||||
default_depth = "deep" # or "shallow" for speed
|
||||
max_analysis_time_seconds = 30
|
||||
|
||||
# Features
|
||||
auto_analyze_failed_deployments = true
|
||||
generate_corrected_config = true
|
||||
suggest_prevention = true
|
||||
|
||||
# Learning
|
||||
track_failure_patterns = true
|
||||
learn_from_similar_failures = true
|
||||
improve_suggestions_over_time = true
|
||||
|
||||
# Reporting
|
||||
auto_send_report = false # Email report to user
|
||||
report_format = "markdown" # or "json", "pdf"
|
||||
include_alternatives = true
|
||||
|
||||
# Cost impact analysis
|
||||
estimate_fix_cost = true
|
||||
estimate_alternative_costs = true
|
||||
```
|
||||
|
||||
### Failure Detection
|
||||
|
||||
```
|
||||
[ai.troubleshooting.detection]
|
||||
# Monitor logs for these patterns
|
||||
watch_patterns = [
|
||||
"error",
|
||||
"timeout",
|
||||
"failed",
|
||||
"unable to",
|
||||
"refused",
|
||||
"denied",
|
||||
"exceeded",
|
||||
"quota",
|
||||
]
|
||||
|
||||
# Minimum log lines before analyzing
|
||||
min_log_lines = 10
|
||||
|
||||
# Time window for log collection
|
||||
log_window_seconds = 300
|
||||
```
|
||||
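The detection step itself reduces to scanning the recent log window for any of the configured `watch_patterns` once enough lines have accumulated; a case-insensitive version might look like this sketch.

```
// Sketch: trigger analysis when any watch pattern appears in the recent
// log window and the window is large enough to be worth analyzing.
fn should_trigger_analysis(
    log_window: &[String],
    watch_patterns: &[&str],
    min_log_lines: usize,
) -> bool {
    if log_window.len() < min_log_lines {
        return false;
    }

    log_window.iter().any(|line| {
        let lower = line.to_lowercase();
        watch_patterns.iter().any(|pattern| lower.contains(&pattern.to_lowercase()))
    })
}
```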
|
||||
## Best Practices
|
||||
|
||||
### For Effective Troubleshooting
|
||||
|
||||
1. **Keep Detailed Logs**: Enable verbose logging in deployments
|
||||
2. **Include Context**: Share full logs, not just error snippet
|
||||
3. **Check Suggestions**: Review AI suggestions even if obvious
|
||||
4. **Learn Patterns**: Track recurring failures and address root cause
|
||||
5. **Update Configs**: Use corrected configs from AI, validate them
|
||||
|
||||
### For Prevention
|
||||
|
||||
1. **Use Health Checks**: Add database/service health checks
|
||||
2. **Test Before Deploy**: Use dry-run to catch issues early
|
||||
3. **Monitor Metrics**: Watch CPU/memory before failures occur
|
||||
4. **Review Policies**: Ensure security groups are correct
|
||||
5. **Document Changes**: When updating configs, note the change
|
||||
|
||||
## Limitations
|
||||
|
||||
### What AI Can Troubleshoot
|
||||
|
||||
✅ Configuration errors
|
||||
✅ Resource limit problems
|
||||
✅ Networking/security group issues
|
||||
✅ Database connectivity problems
|
||||
✅ Deployment ordering issues
|
||||
✅ Common application errors
|
||||
✅ Performance problems
|
||||
|
||||
### What Requires Human Review
|
||||
|
||||
⚠️ Data corruption scenarios
|
||||
⚠️ Multi-failure cascades
|
||||
⚠️ Unclear error messages
|
||||
⚠️ Custom application code failures
|
||||
⚠️ Third-party service issues
|
||||
⚠️ Physical infrastructure failures
|
||||
|
||||
## Examples and Guides
|
||||
|
||||
### Common Issues - Quick Links
|
||||
|
||||
- [Database Connectivity](../troubleshooting/database-connectivity.md)
|
||||
- [Kubernetes Pod Failures](../troubleshooting/kubernetes-pods.md)
|
||||
- [Network Configuration](../troubleshooting/networking.md)
|
||||
- [Performance Issues](../troubleshooting/performance.md)
|
||||
- [Resource Limits](../troubleshooting/resource-limits.md)
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Architecture](architecture.md) - AI system overview
|
||||
- [RAG System](rag-system.md) - Context retrieval for troubleshooting
|
||||
- [Configuration](configuration.md) - Setup guide
|
||||
- [Security Policies](security-policies.md) - Safe log handling
|
||||
- [ADR-015](../architecture/adr/adr-015-ai-integration-architecture.md) - Design decisions
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-13
|
||||
**Status**: ✅ Production-Ready
|
||||
**Success Rate**: 85-95% accuracy in root cause identification
|
||||
**Supported**: All deployment types (infrastructure, Kubernetes, database)
|
||||
|
||||
@ -12,7 +12,7 @@ API reference for programmatic access to the Provisioning Platform.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
```
|
||||
# Check API health
|
||||
curl http://localhost:9090/health
|
||||
|
||||
|
||||
@ -16,7 +16,7 @@ All extensions follow a standardized structure and API for seamless integration.
|
||||
|
||||
### Standard Directory Layout
|
||||
|
||||
```plaintext
|
||||
```
|
||||
extension-name/
|
||||
├── manifest.toml # Extension metadata
|
||||
├── schemas/ # Nickel configuration files
|
||||
@ -71,7 +71,7 @@ All providers must implement the following interface:
|
||||
|
||||
Create `schemas/settings.ncl`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Provider settings schema
|
||||
{
|
||||
ProviderSettings = {
|
||||
@ -146,7 +146,7 @@ schema ServerConfig {
|
||||
|
||||
Create `nulib/mod.nu`:
|
||||
|
||||
```nushell
|
||||
```
|
||||
use std log
|
||||
|
||||
# Provider name and version
|
||||
@ -231,7 +231,7 @@ export def "test-connection" [config: record] -> record {
|
||||
|
||||
Create `nulib/create.nu`:
|
||||
|
||||
```nushell
|
||||
```
|
||||
use std log
|
||||
use utils.nu *
|
||||
|
||||
@ -368,7 +368,7 @@ def wait-for-server-ready [server_id: string] -> string {
|
||||
|
||||
Add provider metadata in `metadata.toml`:
|
||||
|
||||
```toml
|
||||
```
|
||||
[extension]
|
||||
name = "my-provider"
|
||||
type = "provider"
|
||||
@ -429,7 +429,7 @@ Task services must implement:
|
||||
|
||||
Create `schemas/version.ncl`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Task service version configuration
|
||||
{
|
||||
taskserv_version = {
|
||||
@ -483,7 +483,7 @@ Create `schemas/version.ncl`:
|
||||
|
||||
Create `nulib/mod.nu`:
|
||||
|
||||
```nushell
|
||||
```
|
||||
use std log
|
||||
use ../../../lib_provisioning *
|
||||
|
||||
@ -697,7 +697,7 @@ Clusters orchestrate multiple components:
|
||||
|
||||
Create `schemas/cluster.ncl`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Cluster configuration schema
|
||||
{
|
||||
ClusterConfig = {
|
||||
@ -812,7 +812,7 @@ Create `schemas/cluster.ncl`:
|
||||
|
||||
Create `nulib/mod.nu`:
|
||||
|
||||
```nushell
|
||||
```
|
||||
use std log
|
||||
use ../../../lib_provisioning *
|
||||
|
||||
@ -1065,7 +1065,7 @@ Extensions should include comprehensive tests:
|
||||
|
||||
Create `tests/unit_tests.nu`:
|
||||
|
||||
```nushell
|
||||
```
|
||||
use std testing
|
||||
|
||||
export def test_provider_config_validation [] {
|
||||
@ -1096,7 +1096,7 @@ export def test_server_creation_check_mode [] {
|
||||
|
||||
Create `tests/integration_tests.nu`:
|
||||
|
||||
```nushell
|
||||
```
|
||||
use std testing
|
||||
|
||||
export def test_full_server_lifecycle [] {
|
||||
@ -1127,7 +1127,7 @@ export def test_full_server_lifecycle [] {
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
```
|
||||
# Run unit tests
|
||||
nu tests/unit_tests.nu
|
||||
|
||||
@ -1151,7 +1151,7 @@ Each extension must include:
|
||||
|
||||
### API Documentation Template
|
||||
|
||||
```markdown
|
||||
```
|
||||
# Extension Name API
|
||||
|
||||
## Overview
|
||||
|
||||
@ -18,7 +18,7 @@ Provisioning offers multiple integration points:
|
||||
|
||||
#### Full-Featured Python Client
|
||||
|
||||
```python
|
||||
```
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
@ -416,7 +416,7 @@ if __name__ == "__main__":
|
||||
|
||||
#### Complete JavaScript/TypeScript Client
|
||||
|
||||
```typescript
|
||||
```
|
||||
import axios, { AxiosInstance, AxiosResponse } from 'axios';
|
||||
import WebSocket from 'ws';
|
||||
import { EventEmitter } from 'events';
|
||||
@ -925,7 +925,7 @@ export { ProvisioningClient, Task, BatchConfig };
|
||||
|
||||
### Comprehensive Error Handling
|
||||
|
||||
```python
|
||||
```
|
||||
class ProvisioningErrorHandler:
|
||||
"""Centralized error handling for provisioning operations"""
|
||||
|
||||
@ -1028,7 +1028,7 @@ async def robust_workflow_execution():
|
||||
|
||||
### Circuit Breaker Pattern
|
||||
|
||||
```typescript
|
||||
```
|
||||
class CircuitBreaker {
|
||||
private failures = 0;
|
||||
private nextAttempt = Date.now();
|
||||
@ -1104,7 +1104,7 @@ class ResilientProvisioningClient {
|
||||
|
||||
### Connection Pooling and Caching
|
||||
|
||||
```python
|
||||
```
|
||||
import asyncio
|
||||
import aiohttp
|
||||
from cachetools import TTLCache
|
||||
@ -1222,7 +1222,7 @@ async def high_performance_workflow():
|
||||
|
||||
### WebSocket Connection Pooling
|
||||
|
||||
```javascript
|
||||
```
|
||||
class WebSocketPool {
|
||||
constructor(maxConnections = 5) {
|
||||
this.maxConnections = maxConnections;
|
||||
@ -1290,13 +1290,13 @@ The Python SDK provides a comprehensive interface for provisioning:
|
||||
|
||||
#### Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
pip install provisioning-client
|
||||
```
|
||||
|
||||
#### Quick Start
|
||||
|
||||
```python
|
||||
```
|
||||
from provisioning_client import ProvisioningClient
|
||||
|
||||
# Initialize client
|
||||
@ -1319,7 +1319,7 @@ print(f"Workflow completed: {task.status}")
|
||||
|
||||
#### Advanced Usage
|
||||
|
||||
```python
|
||||
```
|
||||
# Use with async context manager
|
||||
async with ProvisioningClient() as client:
|
||||
# Batch operations
|
||||
@ -1340,13 +1340,13 @@ async with ProvisioningClient() as client:
|
||||
|
||||
#### Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
npm install @provisioning/client
|
||||
```
|
||||
|
||||
#### Usage
|
||||
|
||||
```typescript
|
||||
```
|
||||
import { ProvisioningClient } from '@provisioning/client';
|
||||
|
||||
const client = new ProvisioningClient({
|
||||
@ -1373,7 +1373,7 @@ await client.connectWebSocket();
|
||||
|
||||
### Workflow Orchestration Pipeline
|
||||
|
||||
```python
|
||||
```
|
||||
class WorkflowPipeline:
|
||||
"""Orchestrate complex multi-step workflows"""
|
||||
|
||||
@ -1462,7 +1462,7 @@ async def complex_deployment():
|
||||
|
||||
### Event-Driven Architecture
|
||||
|
||||
```javascript
|
||||
```
|
||||
class EventDrivenWorkflowManager {
|
||||
constructor(client) {
|
||||
this.client = client;
|
||||
|
||||
@ -69,7 +69,7 @@ The provisioning platform provides a comprehensive Nushell library with reusable

## Usage Example

```nushell
# Load provisioning library
use provisioning/core/nulib/lib_provisioning *
```

@ -17,7 +17,7 @@ The path resolution system provides a hierarchical and configurable mechanism fo

The system follows a specific hierarchy for loading configuration files:

```plaintext
1. System defaults (config.defaults.toml)
2. User configuration (config.user.toml)
3. Project configuration (config.project.toml)
```

@ -30,7 +30,7 @@ The system follows a specific hierarchy for loading configuration files:

The system searches for configuration files in these locations:

```bash
# Default search paths (in order)
/usr/local/provisioning/config.defaults.toml
$HOME/.config/provisioning/config.user.toml
```
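
The precedence above can be reproduced with a small record merge. The sketch below is illustrative only: the `load-config-chain` helper and the project-file location are assumptions, not library code; later files win on key conflicts because `merge` overwrites existing fields.

```nushell
# Minimal sketch of hierarchical config loading (assumed helper, not the library implementation).
def load-config-chain [] {
    let candidates = [
        "/usr/local/provisioning/config.defaults.toml"
        ($env.HOME | path join ".config/provisioning/config.user.toml")
        "./config.project.toml"          # assumed project-level location
    ]
    $candidates
    | where {|p| $p | path exists }      # skip files that are not present
    | each {|p| open $p }                # TOML files open as records
    | reduce {|it, acc| $acc | merge $it }  # later files override earlier ones
}
```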
@ -59,7 +59,7 @@ Resolves configuration file paths using the search hierarchy.
|
||||
|
||||
**Example:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
use path-resolution.nu *
|
||||
let config_path = (resolve-config-path "config.user.toml" [])
|
||||
# Returns: "/home/user/.config/provisioning/config.user.toml"
|
||||
@ -76,7 +76,7 @@ Discovers extension paths (providers, taskservs, clusters).
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
base_path: "/usr/local/provisioning/providers/upcloud",
|
||||
schemas_path: "/usr/local/provisioning/providers/upcloud/schemas",
|
||||
@ -92,7 +92,7 @@ Gets current workspace path configuration.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
base: "/usr/local/provisioning",
|
||||
current_infra: "/workspace/infra/production",
|
||||
@ -130,7 +130,7 @@ Interpolates variables in path templates.

**Example:**

```nushell
let template = "{{paths.base}}/infra/{{env.USER}}/{{git.branch}}"
let result = (interpolate-path $template {
    paths: { base: "/usr/local/provisioning" },
```
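
For readers unfamiliar with how such interpolation can be built, here is a minimal sketch assuming a flat substitution record; the real `interpolate-path` also resolves nested context such as `env` and `git`, and the `interpolate-sketch` name is hypothetical.

```nushell
# Hypothetical helper, not the library implementation.
def interpolate-sketch [template: string, vars: record] {
    $vars
    | transpose key value
    | reduce --fold $template {|it, acc|
        # replace every occurrence of {{key}} with its value
        $acc | str replace --all ("{{" + $it.key + "}}") ($it.value | into string)
    }
}

interpolate-sketch "{{base}}/infra/{{user}}" { base: "/usr/local/provisioning", user: "admin" }
# => /usr/local/provisioning/infra/admin
```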
@ -150,7 +150,7 @@ Discovers all available providers.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
[
|
||||
{
|
||||
name: "upcloud",
|
||||
@ -185,7 +185,7 @@ Gets provider-specific configuration and paths.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
name: "upcloud",
|
||||
base_path: "/usr/local/provisioning/providers/upcloud",
|
||||
@ -214,7 +214,7 @@ Discovers all available task services.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
[
|
||||
{
|
||||
name: "kubernetes",
|
||||
@ -245,7 +245,7 @@ Gets task service configuration and version information.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
name: "kubernetes",
|
||||
path: "/usr/local/provisioning/taskservs/kubernetes",
|
||||
@ -272,7 +272,7 @@ Discovers all available cluster configurations.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
[
|
||||
{
|
||||
name: "buildkit",
|
||||
@ -312,7 +312,7 @@ Gets environment-specific configuration.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
name: "production",
|
||||
paths: {
|
||||
@ -359,7 +359,7 @@ Discovers available workspaces and infrastructure directories.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
[
|
||||
{
|
||||
name: "production",
|
||||
@ -405,7 +405,7 @@ Analyzes project structure and identifies components.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
root: "/workspace/project",
|
||||
type: "provisioning_workspace",
|
||||
@ -458,7 +458,7 @@ Gets path resolution cache statistics.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
enabled: true,
|
||||
size: 150,
|
||||
@ -485,7 +485,7 @@ Normalizes paths for cross-platform compatibility.
|
||||
|
||||
**Example:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# On Windows
|
||||
normalize-path "path/to/file" # Returns: "path\to\file"
|
||||
|
||||
@ -519,7 +519,7 @@ Validates all paths in configuration.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
valid: true,
|
||||
errors: [],
|
||||
@ -541,7 +541,7 @@ Validates extension directory structure.
|
||||
|
||||
**Returns:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
valid: true,
|
||||
required_files: [
|
||||
@ -561,7 +561,7 @@ Validates extension directory structure.
|
||||
|
||||
The path resolution API is exposed via Nushell commands:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Show current path configuration
|
||||
provisioning show paths
|
||||
|
||||
@ -584,7 +584,7 @@ provisioning workspace set /path/to/infra
|
||||
|
||||
### Python Integration
|
||||
|
||||
```python
|
||||
```
|
||||
import subprocess
|
||||
import json
|
||||
|
||||
@ -612,7 +612,7 @@ providers = resolver.discover_providers()
|
||||
|
||||
### JavaScript/Node.js Integration
|
||||
|
||||
```javascript
|
||||
```
|
||||
const { exec } = require('child_process');
|
||||
const util = require('util');
|
||||
const execAsync = util.promisify(exec);
|
||||
@ -697,7 +697,7 @@ The system provides graceful fallbacks:
|
||||
|
||||
Monitor path resolution performance:

```nushell
# Get resolution statistics
provisioning debug path-stats
```
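
As a rough illustration, the statistics compose with ordinary Nushell pipelines. The `enabled` and `size` fields come from the cache-statistics example earlier in this reference; that `path-stats` returns the same record shape is an assumption.

```nushell
# Sketch: check whether caching is enabled and how large the cache has grown (output shape assumed).
let stats = (provisioning debug path-stats)
if $stats.enabled { print $"cache entries: ($stats.size)" }
```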
|
||||
|
||||
|
||||
@ -18,7 +18,7 @@ All providers must implement the following interface:
|
||||
|
||||
### Required Functions
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Provider initialization
|
||||
export def init [] -> record { ... }
|
||||
|
||||
@ -37,7 +37,7 @@ export def get-pricing [plan: string] -> record { ... }
|
||||
|
||||
Each provider requires configuration in Nickel format:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Example: UpCloud provider configuration
|
||||
{
|
||||
provider = {
|
||||
@ -57,7 +57,7 @@ Each provider requires configuration in Nickel format:
|
||||
|
||||
### 1. Directory Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/extensions/providers/my-provider/
|
||||
├── nulib/
|
||||
│ └── my_provider.nu # Provider implementation
|
||||
@ -69,7 +69,7 @@ provisioning/extensions/providers/my-provider/
|
||||
|
||||
### 2. Implementation Template
|
||||
|
||||
```nushell
|
||||
```
|
||||
# my_provider.nu
|
||||
export def init [] {
|
||||
{
|
||||
@ -94,7 +94,7 @@ export def list-servers [] {
|
||||
|
||||
### 3. Nickel Schema
|
||||
|
||||
```nickel
|
||||
```
|
||||
# main.ncl
|
||||
{
|
||||
MyProvider = {
|
||||
@ -118,7 +118,7 @@ Providers are automatically discovered from:
|
||||
- `provisioning/extensions/providers/*/nu/*.nu`
|
||||
- User workspace: `workspace/extensions/providers/*/nu/*.nu`
|
||||
|
||||
```bash
|
||||
```
|
||||
# Discover available providers
|
||||
provisioning module discover providers
|
||||
|
||||
@ -130,7 +130,7 @@ provisioning module load providers workspace my-provider
|
||||
|
||||
### Create Servers
|
||||
|
||||
```nushell
|
||||
```
|
||||
use my_provider.nu *
|
||||
|
||||
let plan = {
|
||||
@ -144,13 +144,13 @@ create-servers $plan
|
||||
|
||||
### List Servers
|
||||
|
||||
```nushell
|
||||
```
|
||||
list-servers | where status == "running" | select hostname ip_address
|
||||
```
|
||||
|
||||
### Get Pricing
|
||||
|
||||
```nushell
|
||||
```
|
||||
get-pricing "small" | to yaml
|
||||
```
|
||||
|
||||
@ -158,7 +158,7 @@ get-pricing "small" | to yaml
|
||||
|
||||
Use the test environment system to test providers:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Test provider without real resources
|
||||
provisioning test env single my-provider --check
|
||||
```
|
||||
|
||||
@ -20,13 +20,13 @@ Provisioning exposes two main REST APIs:

All API endpoints (except health checks) require JWT authentication via the Authorization header:

```http
Authorization: Bearer <jwt_token>
```

### Getting Access Token

```http
POST /auth/login
Content-Type: application/json
```
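
Putting the two pieces together from Nushell looks roughly like this. The host/port and the `data.token` field are taken from examples elsewhere in this reference; the `/tasks` path is an assumption used only for illustration.

```nushell
# Sketch: obtain a token, then call an authenticated endpoint.
let login = (http post --content-type application/json http://localhost:9090/auth/login {
    username: "admin"
    password: "secure_password"
})
let token = $login.data.token    # field name assumed from the login response example
http get http://localhost:9090/tasks --headers ["Authorization" $"Bearer ($token)"]
```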
@ -47,7 +47,7 @@ Check orchestrator health status.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "Orchestrator is healthy"
|
||||
@ -68,7 +68,7 @@ List all workflow tasks.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
@ -99,7 +99,7 @@ Get specific task status and details.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -126,7 +126,7 @@ Submit server creation workflow.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"infra": "production",
|
||||
"settings": "config.ncl",
|
||||
@ -137,7 +137,7 @@ Submit server creation workflow.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "uuid-task-id"
|
||||
@ -150,7 +150,7 @@ Submit task service workflow.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"operation": "create",
|
||||
"taskserv": "kubernetes",
|
||||
@ -163,7 +163,7 @@ Submit task service workflow.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "uuid-task-id"
|
||||
@ -176,7 +176,7 @@ Submit cluster workflow.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"operation": "create",
|
||||
"cluster_type": "buildkit",
|
||||
@ -189,7 +189,7 @@ Submit cluster workflow.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "uuid-task-id"
|
||||
@ -204,7 +204,7 @@ Execute batch workflow operation.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"name": "multi_cloud_deployment",
|
||||
"version": "1.0.0",
|
||||
@ -235,7 +235,7 @@ Execute batch workflow operation.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -263,7 +263,7 @@ List all batch operations.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
@ -288,7 +288,7 @@ Get batch operation status.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -317,7 +317,7 @@ Cancel running batch operation.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "Operation cancelled"
|
||||
@ -336,7 +336,7 @@ Get real-time workflow progress.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -360,7 +360,7 @@ Get workflow state snapshots.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
@ -380,7 +380,7 @@ Get system-wide metrics.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -403,7 +403,7 @@ Get system health status.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -424,7 +424,7 @@ Get state manager statistics.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -444,7 +444,7 @@ Create new checkpoint.
|
||||
|
||||
**Request Body:**

```json
{
  "name": "before_major_update",
  "description": "Checkpoint before deploying v2.0.0"
}
```
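
From Nushell, the same request can be issued directly against the orchestrator. The `/state/checkpoints` path is an assumption (only the request body is shown in this excerpt), and `$token` is the JWT obtained in the authentication section.

```nushell
# Sketch: create a checkpoint before a risky change (endpoint path assumed).
http post --content-type application/json http://localhost:9090/state/checkpoints {
    name: "before_major_update"
    description: "Checkpoint before deploying v2.0.0"
} --headers ["Authorization" $"Bearer ($token)"]
```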
@ -453,7 +453,7 @@ Create new checkpoint.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "checkpoint-uuid"
|
||||
@ -466,7 +466,7 @@ List all checkpoints.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
@ -491,7 +491,7 @@ Get specific checkpoint details.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -511,7 +511,7 @@ Execute rollback operation.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"checkpoint_id": "checkpoint-uuid"
|
||||
}
|
||||
@ -519,7 +519,7 @@ Execute rollback operation.
|
||||
|
||||
Or for partial rollback:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"operation_ids": ["op-1", "op-2", "op-3"]
|
||||
}
|
||||
@ -527,7 +527,7 @@ Or for partial rollback:
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -550,7 +550,7 @@ Restore system state from checkpoint.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "State restored from checkpoint checkpoint-uuid"
|
||||
@ -563,7 +563,7 @@ Get rollback system statistics.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -585,7 +585,7 @@ Authenticate user and get JWT token.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"username": "admin",
|
||||
"password": "secure_password",
|
||||
@ -595,7 +595,7 @@ Authenticate user and get JWT token.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -617,7 +617,7 @@ Refresh JWT token.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"token": "current-jwt-token"
|
||||
}
|
||||
@ -625,7 +625,7 @@ Refresh JWT token.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -641,7 +641,7 @@ Logout and invalidate token.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "Successfully logged out"
|
||||
@ -661,7 +661,7 @@ List all users.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
@ -684,7 +684,7 @@ Create new user.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"username": "newuser",
|
||||
"email": "newuser@example.com",
|
||||
@ -696,7 +696,7 @@ Create new user.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -719,7 +719,7 @@ Update existing user.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"email": "updated@example.com",
|
||||
"roles": ["admin", "operator"],
|
||||
@ -729,7 +729,7 @@ Update existing user.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "User updated successfully"
|
||||
@ -746,7 +746,7 @@ Delete user.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "User deleted successfully"
|
||||
@ -761,7 +761,7 @@ List all policies.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
@ -783,7 +783,7 @@ Create new policy.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"name": "new_policy",
|
||||
"version": "1.0.0",
|
||||
@ -800,7 +800,7 @@ Create new policy.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
@ -821,7 +821,7 @@ Update policy.
|
||||
|
||||
**Request Body:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"name": "updated_policy",
|
||||
"rules": [...]
|
||||
@ -830,7 +830,7 @@ Update policy.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": "Policy updated successfully"
|
||||
@ -855,7 +855,7 @@ Get audit logs.
|
||||
|
||||
**Response:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": true,
|
||||
"data": [
|
||||
@ -876,7 +876,7 @@ Get audit logs.
|
||||
|
||||
All endpoints may return error responses in this format:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"success": false,
|
||||
"error": "Detailed error message"
|
||||
@ -904,7 +904,7 @@ API endpoints are rate-limited:

Rate limit headers are included in responses:

```http
X-RateLimit-Limit: 100
X-RateLimit-Remaining: 95
X-RateLimit-Reset: 1632150000
```
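
A quick way to inspect these headers from Nushell is to request the full response. The health endpoint is documented above; how the client normalizes header-name casing is an assumption, so the match below is case-insensitive.

```nushell
# Sketch: read rate-limit headers from a full HTTP response.
let resp = (http get --full http://localhost:9090/health)
$resp.headers.response | where name =~ "(?i)ratelimit"
```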
@ -918,7 +918,7 @@ Prometheus-compatible metrics endpoint.
|
||||
|
||||
**Response:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
# HELP orchestrator_tasks_total Total number of tasks
|
||||
# TYPE orchestrator_tasks_total counter
|
||||
orchestrator_tasks_total{status="completed"} 150
|
||||
@ -937,7 +937,7 @@ Real-time event streaming via WebSocket connection.
|
||||
|
||||
**Connection:**
|
||||
|
||||
```javascript
|
||||
```
|
||||
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token');
|
||||
|
||||
ws.onmessage = function(event) {
|
||||
@ -948,7 +948,7 @@ ws.onmessage = function(event) {
|
||||
|
||||
**Event Format:**
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "TaskStatusChanged",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -967,7 +967,7 @@ ws.onmessage = function(event) {
|
||||
|
||||
### Python SDK Example
|
||||
|
||||
```python
|
||||
```
|
||||
import requests
|
||||
|
||||
class ProvisioningClient:
|
||||
@ -1007,7 +1007,7 @@ print(f"Task ID: {result['data']}")
|
||||
|
||||
### JavaScript/Node.js SDK Example
|
||||
|
||||
```javascript
|
||||
```
|
||||
const axios = require('axios');
|
||||
|
||||
class ProvisioningClient {
|
||||
@ -1051,7 +1051,7 @@ The system supports webhooks for external integrations:
|
||||
|
||||
Configure webhooks in the system configuration:
|
||||
|
||||
```toml
|
||||
```
|
||||
[webhooks]
|
||||
enabled = true
|
||||
endpoints = [
|
||||
@ -1065,7 +1065,7 @@ endpoints = [
|
||||
|
||||
### Webhook Payload
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event": "task.completed",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -1087,7 +1087,7 @@ For endpoints that return lists, use pagination parameters:

Pagination metadata is included in response headers:

```http
X-Total-Count: 1500
X-Limit: 50
X-Offset: 100
```
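
Walking a paginated list endpoint then becomes a simple loop. This is only a sketch: the `/tasks` path and the `data` payload field are assumptions based on earlier examples in this reference.

```nushell
# Sketch: collect every page of a list endpoint using offset/limit parameters.
def fetch-all [endpoint: string, token: string] {
    mut offset = 0
    mut items = []
    loop {
        let page = (http get $"http://localhost:9090($endpoint)?offset=($offset)&limit=50" --headers ["Authorization" $"Bearer ($token)"])
        $items = ($items | append $page.data)
        if ($page.data | length) < 50 { break }   # short page means we reached the end
        $offset = $offset + 50
    }
    $items
}

fetch-all "/tasks" $token
```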
@ -1098,7 +1098,7 @@ Link: </api/endpoint?offset=150&limit=50>; rel="next"
|
||||
|
||||
The API uses header-based versioning:
|
||||
|
||||
```http
|
||||
```
|
||||
Accept: application/vnd.provisioning.v1+json
|
||||
```
|
||||
|
||||
@ -1108,7 +1108,7 @@ Current version: v1
|
||||
|
||||
Use the included test suite to validate API functionality:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Run API integration tests
|
||||
cd src/orchestrator
|
||||
cargo test --test api_tests
|
||||
|
||||
@ -23,7 +23,7 @@ Provisioning provides SDKs in multiple languages to facilitate integration:
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install from PyPI
|
||||
pip install provisioning-client
|
||||
|
||||
@ -33,7 +33,7 @@ pip install git+https://github.com/provisioning-systems/python-client.git
|
||||
|
||||
### Quick Start
|
||||
|
||||
```python
|
||||
```
|
||||
from provisioning_client import ProvisioningClient
|
||||
import asyncio
|
||||
|
||||
@ -79,7 +79,7 @@ if __name__ == "__main__":
|
||||
|
||||
#### WebSocket Integration
|
||||
|
||||
```python
|
||||
```
|
||||
async def monitor_workflows():
|
||||
client = ProvisioningClient()
|
||||
await client.authenticate()
|
||||
@ -103,7 +103,7 @@ async def monitor_workflows():
|
||||
|
||||
#### Batch Operations
|
||||
|
||||
```python
|
||||
```
|
||||
async def execute_batch_deployment():
|
||||
client = ProvisioningClient()
|
||||
await client.authenticate()
|
||||
@ -158,7 +158,7 @@ async def execute_batch_deployment():
|
||||
|
||||
#### Error Handling with Retries
|
||||
|
||||
```python
|
||||
```
|
||||
from provisioning_client.exceptions import (
|
||||
ProvisioningAPIError,
|
||||
AuthenticationError,
|
||||
@ -209,7 +209,7 @@ async def robust_workflow():
|
||||
|
||||
#### ProvisioningClient Class
|
||||
|
||||
```python
|
||||
```
|
||||
class ProvisioningClient:
|
||||
def __init__(self,
|
||||
base_url: str = "http://localhost:9090",
|
||||
@ -258,7 +258,7 @@ class ProvisioningClient:
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
# npm
|
||||
npm install @provisioning/client
|
||||
|
||||
@ -271,7 +271,7 @@ pnpm add @provisioning/client
|
||||
|
||||
### Quick Start
|
||||
|
||||
```typescript
|
||||
```
|
||||
import { ProvisioningClient } from '@provisioning/client';
|
||||
|
||||
async function main() {
|
||||
@ -308,7 +308,7 @@ main();
|
||||
|
||||
### React Integration
|
||||
|
||||
```tsx
|
||||
```
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import { ProvisioningClient } from '@provisioning/client';
|
||||
|
||||
@ -434,7 +434,7 @@ export default WorkflowDashboard;
|
||||
|
||||
### Node.js CLI Tool
|
||||
|
||||
```typescript
|
||||
```
|
||||
#!/usr/bin/env node
|
||||
|
||||
import { Command } from 'commander';
|
||||
@ -590,7 +590,7 @@ program.parse();
|
||||
|
||||
### API Reference
|
||||
|
||||
```typescript
|
||||
```
|
||||
interface ProvisioningClientOptions {
|
||||
baseUrl?: string;
|
||||
authUrl?: string;
|
||||
@ -644,13 +644,13 @@ class ProvisioningClient extends EventEmitter {
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
go get github.com/provisioning-systems/go-client
|
||||
```
|
||||
|
||||
### Quick Start
|
||||
|
||||
```go
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -711,7 +711,7 @@ func main() {
|
||||
|
||||
### WebSocket Integration
|
||||
|
||||
```go
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -777,7 +777,7 @@ func main() {
|
||||
|
||||
### HTTP Client with Retry Logic
|
||||
|
||||
```go
|
||||
```
|
||||
package main
|
||||
|
||||
import (
|
||||
@ -868,7 +868,7 @@ func main() {
|
||||
|
||||
Add to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
```
|
||||
[dependencies]
|
||||
provisioning-rs = "2.0.0"
|
||||
tokio = { version = "1.0", features = ["full"] }
|
||||
@ -876,7 +876,7 @@ tokio = { version = "1.0", features = ["full"] }
|
||||
|
||||
### Quick Start
|
||||
|
||||
```rust
|
||||
```
|
||||
use provisioning_rs::{ProvisioningClient, Config, CreateServerRequest};
|
||||
use tokio;
|
||||
|
||||
@ -932,7 +932,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
### WebSocket Integration
|
||||
|
||||
```rust
|
||||
```
|
||||
use provisioning_rs::{ProvisioningClient, Config, WebSocketEvent};
|
||||
use futures_util::StreamExt;
|
||||
use tokio;
|
||||
@ -988,7 +988,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
### Batch Operations
|
||||
|
||||
```rust
|
||||
```
|
||||
use provisioning_rs::{BatchOperationRequest, BatchOperation};
|
||||
|
||||
#[tokio::main]
|
||||
|
||||
@ -30,7 +30,7 @@ The main WebSocket endpoint for real-time events and monitoring.
|
||||
|
||||
**Example Connection:**
|
||||
|
||||
```javascript
|
||||
```
|
||||
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt-token&events=task,batch,system');
|
||||
```
|
||||
|
||||
@ -64,7 +64,7 @@ Live log streaming endpoint.
|
||||
|
||||
All WebSocket connections require authentication via JWT token:
|
||||
|
||||
```javascript
|
||||
```
|
||||
// Include token in connection URL
|
||||
const ws = new WebSocket('ws://localhost:9090/ws?token=' + jwtToken);
|
||||
|
||||
@ -93,7 +93,7 @@ ws.onopen = function() {
|
||||
|
||||
Fired when a workflow task status changes.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "TaskStatusChanged",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -116,7 +116,7 @@ Fired when a workflow task status changes.
|
||||
|
||||
Fired when batch operation status changes.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "BatchOperationUpdate",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -150,7 +150,7 @@ Fired when batch operation status changes.
|
||||
|
||||
Fired when system health status changes.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "SystemHealthUpdate",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -185,7 +185,7 @@ Fired when system health status changes.
|
||||
|
||||
Fired when workflow progress changes.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "WorkflowProgressUpdate",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -215,7 +215,7 @@ Fired when workflow progress changes.
|
||||
|
||||
Real-time log streaming.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "LogEntry",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -241,7 +241,7 @@ Real-time log streaming.
|
||||
|
||||
Real-time metrics streaming.
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "MetricUpdate",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -266,7 +266,7 @@ Real-time metrics streaming.
|
||||
|
||||
Applications can define custom event types:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"event_type": "CustomApplicationEvent",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -283,7 +283,7 @@ Applications can define custom event types:
|
||||
|
||||
### Connection Management
|
||||
|
||||
```javascript
|
||||
```
|
||||
class ProvisioningWebSocket {
|
||||
constructor(baseUrl, token, options = {}) {
|
||||
this.baseUrl = baseUrl;
|
||||
@ -430,7 +430,7 @@ ws.subscribe(['TaskStatusChanged', 'WorkflowProgressUpdate']);
|
||||
|
||||
### Real-Time Dashboard Example
|
||||
|
||||
```javascript
|
||||
```
|
||||
class ProvisioningDashboard {
|
||||
constructor(wsUrl, token) {
|
||||
this.ws = new ProvisioningWebSocket(wsUrl, token);
|
||||
@ -542,7 +542,7 @@ const dashboard = new ProvisioningDashboard('ws://localhost:9090', jwtToken);
|
||||
|
||||
The orchestrator implements WebSocket support using Axum and Tokio:
|
||||
|
||||
```rust
|
||||
```
|
||||
use axum::{
|
||||
extract::{ws::WebSocket, ws::WebSocketUpgrade, Query, State},
|
||||
response::Response,
|
||||
@ -702,7 +702,7 @@ fn has_event_permission(claims: &Claims, event_type: &str) -> bool {
|
||||
|
||||
### Client-Side Filtering
|
||||
|
||||
```javascript
|
||||
```
|
||||
// Subscribe to specific event types
|
||||
ws.subscribe(['TaskStatusChanged', 'WorkflowProgressUpdate']);
|
||||
|
||||
@ -741,7 +741,7 @@ Events can be filtered on the server side based on:
|
||||
|
||||
### Connection Errors
|
||||
|
||||
```javascript
|
||||
```
|
||||
ws.on('error', (error) => {
|
||||
console.error('WebSocket error:', error);
|
||||
|
||||
@ -780,7 +780,7 @@ ws.on('disconnected', (event) => {
|
||||
|
||||
### Heartbeat and Keep-Alive
|
||||
|
||||
```javascript
|
||||
```
|
||||
class ProvisioningWebSocket {
|
||||
constructor(baseUrl, token, options = {}) {
|
||||
// ... existing code ...
|
||||
@ -835,7 +835,7 @@ class ProvisioningWebSocket {
|
||||
|
||||
To improve performance, the server can batch multiple events into single WebSocket messages:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"type": "batch",
|
||||
"timestamp": "2025-09-26T10:00:00Z",
|
||||
@ -856,7 +856,7 @@ To improve performance, the server can batch multiple events into single WebSock
|
||||
|
||||
Enable message compression for large events:
|
||||
|
||||
```javascript
|
||||
```
|
||||
const ws = new WebSocket('ws://localhost:9090/ws?token=jwt&compression=true');
|
||||
```
|
||||
|
||||
|
||||
@ -28,7 +28,7 @@ The system needed a clear, maintainable structure that supports:
|
||||
|
||||
Adopt a **domain-driven hybrid structure** organized around functional boundaries:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
src/
|
||||
├── core/ # Core system and CLI entry point
|
||||
├── platform/ # High-performance coordination layer (Rust orchestrator)
|
||||
|
||||
@ -49,7 +49,7 @@ Implement a **layered distribution strategy** with clear separation between deve
|
||||
|
||||
### Distribution Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
# User Distribution
|
||||
/usr/local/bin/
|
||||
├── provisioning # Main CLI entry point
|
||||
@ -153,7 +153,7 @@ Use environment variables to control what gets installed.
|
||||
|
||||
### Configuration Hierarchy
|
||||
|
||||
```plaintext
|
||||
```
|
||||
System Defaults (lowest precedence)
|
||||
└── User Configuration
|
||||
└── Project Configuration
|
||||
|
||||
@ -33,7 +33,7 @@ Implement **isolated user workspaces** with clear boundaries and hierarchical co
|
||||
|
||||
### Workspace Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
~/workspace/provisioning/ # User workspace root
|
||||
├── config/
|
||||
│ ├── user.toml # User preferences and overrides
|
||||
@ -141,7 +141,7 @@ Store all user configuration in database.
|
||||
|
||||
### Workspace Initialization
|
||||
|
||||
```bash
|
||||
```
|
||||
# Automatic workspace creation on first run
|
||||
provisioning workspace init
|
||||
|
||||
@ -163,7 +163,7 @@ provisioning workspace validate
|
||||
|
||||
### Backup and Migration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Backup entire workspace
|
||||
provisioning workspace backup --output ~/backup/provisioning-workspace.tar.gz
|
||||
|
||||
|
||||
@ -54,7 +54,7 @@ Implement a **Hybrid Rust/Nushell Architecture** with clear separation of concer

#### Rust → Nushell Communication

```rust
// Rust orchestrator invokes Nushell scripts via process execution
let result = Command::new("nu")
    .arg("-c")
```

@ -64,7 +64,7 @@ let result = Command::new("nu")

#### Nushell → Rust Communication

```bash
# Nushell submits workflows to Rust orchestrator via HTTP API
http post "http://localhost:9090/workflows/servers/create" {
    name: "server-name",
```
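
For reference, a complete submission of the truncated call above might look as follows; the `infra` and `settings` fields come from the server-creation request body documented in the API reference, and the remaining values are illustrative.

```nushell
# Sketch: submit a server-creation workflow to the orchestrator.
http post --content-type application/json "http://localhost:9090/workflows/servers/create" {
    name: "server-name"
    infra: "production"
    settings: "config.ncl"
}
```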
|
||||
@ -45,7 +45,7 @@ Implement a **registry-based extension framework** with structured discovery and
|
||||
|
||||
### Extension Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
extensions/
|
||||
├── providers/ # Provider extensions
|
||||
│ └── custom-cloud/
|
||||
@ -75,7 +75,7 @@ extensions/
|
||||
|
||||
### Extension Manifest (extension.toml)
|
||||
|
||||
```toml
|
||||
```
|
||||
[extension]
|
||||
name = "custom-provider"
|
||||
version = "1.0.0"
|
||||
@ -186,7 +186,7 @@ Traditional plugin architecture with dynamic loading.
|
||||
|
||||
### Extension Loading Lifecycle
|
||||
|
||||
```bash
|
||||
```
|
||||
# Extension discovery and validation
|
||||
provisioning extension discover
|
||||
provisioning extension validate --extension custom-provider
|
||||
@ -208,7 +208,7 @@ provisioning extension update custom-provider
|
||||
|
||||
Extensions integrate with hierarchical configuration system:
|
||||
|
||||
```toml
|
||||
```
|
||||
# System configuration includes extension settings
|
||||
[custom_provider]
|
||||
api_endpoint = "https://api.custom-cloud.com"
|
||||
@ -238,7 +238,7 @@ timeout = 30
|
||||
|
||||
### Provider Extension Pattern
|
||||
|
||||
```nushell
|
||||
```
|
||||
# extensions/providers/custom-cloud/nulib/provider.nu
|
||||
export def list-servers [] -> table {
|
||||
http get $"($config.custom_provider.api_endpoint)/servers"
|
||||
@ -260,7 +260,7 @@ export def create-server [name: string, config: record] -> record {
|
||||
|
||||
### Task Service Extension Pattern
|
||||
|
||||
```nushell
|
||||
```
|
||||
# extensions/taskservs/custom-service/nulib/service.nu
|
||||
export def install [server: string] -> nothing {
|
||||
let manifest_data = open ./manifests/deployment.yaml
|
||||
|
||||
@ -40,7 +40,7 @@ monolithic structure created multiple critical problems:
|
||||
|
||||
We refactored the monolithic CLI into a **modular, domain-driven architecture** with the following structure:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/core/nulib/
|
||||
├── provisioning (211 lines) ⬅️ 84% reduction
|
||||
├── main_provisioning/
|
||||
@ -63,7 +63,7 @@ provisioning/core/nulib/

Single source of truth for all flag parsing and argument building:

```nushell
export def parse_common_flags [flags: record]: nothing -> record
export def build_module_args [flags: record, extra: string = ""]: nothing -> string
export def set_debug_env [flags: record]
```

@ -81,7 +81,7 @@ export def get_debug_flag [flags: record]: nothing -> string

Central routing with 80+ command mappings:

```nushell
export def get_command_registry []: nothing -> record   # 80+ shortcuts
export def dispatch_command [args: list, flags: record] # Main router
```
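
A hypothetical call sequence makes the division of labour concrete. Only the signatures above are from the source; the flag-record fields, the import path, and the exact behaviour of each helper are assumptions.

```nushell
# Sketch of how a handler might use the shared helpers (module path assumed).
# use main_provisioning/flags.nu *
let flags = { check: true, yes: false, debug: true }
let common = (parse_common_flags $flags)              # normalize shared flags into one record
set_debug_env $common                                 # assumed: exports debug-related env vars
let args = (build_module_args $common "server create")
dispatch_command ["server" "create"] $common          # route through the command registry
```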
|
||||
@ -148,7 +148,7 @@ Eliminated repetition:
|
||||
|
||||
All handlers depend on abstractions (flag records, not concrete flags):
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Handler signature
|
||||
export def handle_infrastructure_command [
|
||||
command: string
|
||||
@ -182,7 +182,7 @@ export def handle_infrastructure_command [
|
||||
|
||||
Users can now access help in multiple ways:
|
||||
|
||||
```bash
|
||||
```
|
||||
# All these work equivalently:
|
||||
provisioning help workspace
|
||||
provisioning workspace help # ⬅️ NEW: Bi-directional
|
||||
@ -192,7 +192,7 @@ provisioning help ws # ⬅️ NEW: Shortcut in help
|
||||
|
||||
**Implementation:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Intercept "command help" → "help command"
|
||||
let first_op = if ($ops_list | length) > 0 { ($ops_list | get 0) } else { "" }
|
||||
if $first_op in ["help" "h"] {
|
||||
@ -242,7 +242,7 @@ Comprehensive test suite created (`tests/test_provisioning_refactor.nu`):
|
||||
|
||||
### Test Results
|
||||
|
||||
```plaintext
|
||||
```
|
||||
📋 Testing main help... ✅
|
||||
📋 Testing category help... ✅
|
||||
🔄 Testing bi-directional help... ✅
|
||||
@ -319,7 +319,7 @@ Comprehensive test suite created (`tests/test_provisioning_refactor.nu`):
|
||||
|
||||
### Before: Repetitive Flag Handling
|
||||
|
||||
```nushell
|
||||
```
|
||||
"server" => {
|
||||
let use_check = if $check { "--check "} else { "" }
|
||||
let use_yes = if $yes { "--yes" } else { "" }
|
||||
@ -335,7 +335,7 @@ Comprehensive test suite created (`tests/test_provisioning_refactor.nu`):
|
||||
|
||||
### After: Clean, Reusable
|
||||
|
||||
```nushell
|
||||
```
|
||||
def handle_server [ops: string, flags: record] {
|
||||
let args = build_module_args $flags $ops
|
||||
run_module $args "server" --exec
|
||||
|
||||
@ -128,7 +128,7 @@ Remove support for:
|
||||
|
||||
### For Development
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Install Age
|
||||
brew install age # or apt install age
|
||||
|
||||
@ -142,7 +142,7 @@ age-keygen -y ~/.config/provisioning/age/private_key.txt > ~/.config/provisionin
|
||||
|
||||
### For Production
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Set up Cosmian KMS (cloud or self-hosted)
|
||||
# 2. Create master key in Cosmian
|
||||
# 3. Migrate secrets from Vault/AWS to Cosmian
|
||||
|
||||
@ -117,7 +117,7 @@ Use Casbin authorization library.
|
||||
|
||||
#### Architecture
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ Orchestrator │
|
||||
├─────────────────────────────────────────────────────────┤
|
||||
@ -143,7 +143,7 @@ Use Casbin authorization library.
|
||||
|
||||
#### Policy Organization
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/config/cedar-policies/
|
||||
├── schema.cedar # Entity and action definitions
|
||||
├── production.cedar # Production environment policies
|
||||
@ -154,7 +154,7 @@ provisioning/config/cedar-policies/
|
||||
|
||||
#### Rust Implementation
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/platform/orchestrator/src/security/
|
||||
├── cedar.rs # Cedar engine integration (450 lines)
|
||||
├── policy_loader.rs # Policy loading with hot reload (320 lines)
|
||||
@ -190,7 +190,7 @@ provisioning/platform/orchestrator/src/security/
|
||||
|
||||
#### Context Variables
|
||||
|
||||
```rust
|
||||
```
|
||||
AuthorizationContext {
|
||||
mfa_verified: bool, // MFA verification status
|
||||
ip_address: String, // Client IP address
|
||||
@ -204,7 +204,7 @@ AuthorizationContext {
|
||||
|
||||
#### Example Policy
|
||||
|
||||
```cedar
|
||||
```
|
||||
// Production deployments require MFA verification
|
||||
@id("prod-deploy-mfa")
|
||||
@description("All production deployments must have MFA verification")
|
||||
|
||||
@ -249,7 +249,7 @@ Implement a complete security architecture using 12 specialized components organ
|
||||
|
||||
### End-to-End Request Flow
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. User Request
|
||||
↓
|
||||
2. Rate Limiting (100 req/min per IP)
|
||||
@ -271,7 +271,7 @@ Implement a complete security architecture using 12 specialized components organ
|
||||
|
||||
### Emergency Access Flow
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. Emergency Request (reason + justification)
|
||||
↓
|
||||
2. Multi-Party Approval (2+ approvers, different teams)
|
||||
@ -382,7 +382,7 @@ Implement a complete security architecture using 12 specialized components organ
|
||||
|
||||
### Development
|
||||
|
||||
```bash
|
||||
```
|
||||
# Start all services
|
||||
cd provisioning/platform/kms-service && cargo run &
|
||||
cd provisioning/platform/orchestrator && cargo run &
|
||||
@ -391,7 +391,7 @@ cd provisioning/platform/control-center && cargo run &
|
||||
|
||||
### Production
|
||||
|
||||
```bash
|
||||
```
|
||||
# Kubernetes deployment
|
||||
kubectl apply -f k8s/security-stack.yaml
|
||||
|
||||
@ -410,7 +410,7 @@ systemctl start provisioning-control-center
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
```
|
||||
# JWT
|
||||
export JWT_ISSUER="control-center"
|
||||
export JWT_AUDIENCE="orchestrator,cli"
|
||||
@ -433,7 +433,7 @@ export MFA_WEBAUTHN_RP_ID="provisioning.example.com"
|
||||
|
||||
### Config Files
|
||||
|
||||
```toml
|
||||
```
|
||||
# provisioning/config/security.toml
|
||||
[jwt]
|
||||
issuer = "control-center"
|
||||
@ -470,7 +470,7 @@ pii_anonymization = true
|
||||
|
||||
### Run All Tests
|
||||
|
||||
```bash
|
||||
```
|
||||
# Control Center (JWT, MFA)
|
||||
cd provisioning/platform/control-center
|
||||
cargo test
|
||||
@ -489,7 +489,7 @@ nu provisioning/core/nulib/lib_provisioning/config/encryption_tests.nu
|
||||
|
||||
### Integration Tests
|
||||
|
||||
```bash
|
||||
```
|
||||
# Full security flow
|
||||
cd provisioning/platform/orchestrator
|
||||
cargo test --test security_integration_tests
|
||||
|
||||
@ -65,7 +65,7 @@ Define and document the three-format approach through:
|
||||
|
||||
**Move template files to proper directory structure and correct extensions**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Previous (KCL):
|
||||
provisioning/kcl/templates/*.k (had Nushell/Jinja2 code, not KCL)
|
||||
|
||||
@ -326,7 +326,7 @@ Current (Nickel):
|
||||
|
||||
Currently, 15/16 files in `provisioning/kcl/templates/` have `.k` extension but contain Nushell/Jinja2 code, not KCL:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/kcl/templates/
|
||||
├── server.ncl # Actually Nushell/Jinja2 template
|
||||
├── taskserv.ncl # Actually Nushell/Jinja2 template
|
||||
@ -343,7 +343,7 @@ This causes:
|
||||
|
||||
Reorganize into type-specific directories:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/templates/
|
||||
├── nushell/ # Nushell code generation (*.nu.j2)
|
||||
│ ├── server.nu.j2
|
||||
|
||||
@ -112,7 +112,7 @@ The provisioning system required:
|
||||
|
||||
**Example - UpCloud Provider**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# upcloud/nickel/main.ncl (migrated from upcloud/kcl/)
|
||||
let contracts = import "./contracts.ncl" in
|
||||
let defaults = import "./defaults.ncl" in
|
||||
@ -171,7 +171,7 @@ let defaults = import "./defaults.ncl" in
|
||||
|
||||
**File 1: Contracts** (`batch_contracts.ncl`):
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
BatchScheduler = {
|
||||
strategy | String,
|
||||
@ -184,7 +184,7 @@ let defaults = import "./defaults.ncl" in
|
||||
|
||||
**File 2: Defaults** (`batch_defaults.ncl`):
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
scheduler = {
|
||||
strategy = "dependency_first",
|
||||
@ -197,7 +197,7 @@ let defaults = import "./defaults.ncl" in
|
||||
|
||||
**File 3: Main** (`batch.ncl`):

```nickel
let contracts = import "./batch_contracts.ncl" in
let defaults = import "./batch_defaults.ncl" in
```
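
Once the three files are in place, the merged configuration can be checked from the command line. This sketch uses the `nickel export` invocation described in the plugin ADR later in this document and the `scheduler.strategy` field from the defaults file above; treat it as illustrative rather than part of the migration.

```nushell
# Sketch: export the merged batch configuration and read one field.
^nickel export batch.ncl --format json | from json | get scheduler.strategy
# => dependency_first
```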
@ -218,7 +218,7 @@ let defaults = import "./batch_defaults.ncl" in
|
||||
|
||||
### Domain-Organized Architecture
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/schemas/
|
||||
├── lib/ # Storage, TaskServDef, ClusterDef
|
||||
├── config/ # Settings, defaults, workspace_config
|
||||
@ -233,7 +233,7 @@ provisioning/schemas/
|
||||
|
||||
**Import pattern**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let provisioning = import "./main.ncl" in
|
||||
provisioning.lib # For Storage, TaskServDef
|
||||
provisioning.config.settings # For Settings, Defaults
|
||||
@ -254,7 +254,7 @@ provisioning.operations.workflows
|
||||
- No snapshot overhead
|
||||
- Usage: Local development, testing, experimentation
|
||||
|
||||
```bash
|
||||
```
|
||||
# workspace_librecloud/nickel/main.ncl
|
||||
import "../../provisioning/schemas/main.ncl"
|
||||
import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
|
||||
@ -264,13 +264,13 @@ import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
|
||||
|
||||
Create immutable snapshots for reproducible deployments:
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning workspace freeze --version "2025-12-15-prod-v1" --env production
|
||||
```
|
||||
|
||||
**Frozen structure** (`.frozen/{version}/`):
|
||||
|
||||
```plaintext
|
||||
```
|
||||
├── provisioning/schemas/ # Snapshot of central schemas
|
||||
├── extensions/ # Snapshot of all extensions
|
||||
└── workspace/ # Snapshot of workspace configs
|
||||
@ -285,7 +285,7 @@ provisioning workspace freeze --version "2025-12-15-prod-v1" --env production
|
||||
|
||||
**Deploy from frozen snapshot**:
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning deploy --frozen "2025-12-15-prod-v1" --infra wuji
|
||||
```
|
||||
|
||||
@ -308,7 +308,7 @@ provisioning deploy --frozen "2025-12-15-prod-v1" --infra wuji
|
||||
|
||||
**Key Feature**: Nickel schemas → Type-safe UIs → Nickel output
|
||||
|
||||
```bash
|
||||
```
|
||||
# Nickel schema → Interactive form
|
||||
typedialog form --schema server.ncl --output json
|
||||
|
||||
|
||||
@ -19,7 +19,7 @@ The architectural decision was whether the plugin should:
|
||||
|
||||
Nickel configurations in provisioning use the **module system**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# config/database.ncl
|
||||
import "lib/defaults" as defaults
|
||||
import "lib/validation" as valid
|
||||
@ -47,7 +47,7 @@ Implement the `nu_plugin_nickel` plugin as a **CLI wrapper** that invokes the ex
|
||||
|
||||
### Architecture Diagram
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────┐
|
||||
│ Nushell Script │
|
||||
│ │
|
||||
@ -288,7 +288,7 @@ This makes direct usage risky. The CLI is the documented, proven interface.
|
||||
|
||||
The plugin uses the **correct Nickel command syntax**:
|
||||
|
||||
```rust
|
||||
```
|
||||
// Correct:
|
||||
cmd.arg("export").arg(file).arg("--format").arg(format);
|
||||
// Results in: "nickel export /file --format json"
|
||||
@ -323,7 +323,7 @@ Plugin correctly processes JSON output:
|
||||
|
||||
This enables Nushell cell path access:
|
||||
|
||||
```nushell
|
||||
```
|
||||
nickel-export json /config.ncl | .database.host # ✅ Works
|
||||
```
|
||||
|
||||
@ -343,7 +343,7 @@ nickel-export json /config.ncl | .database.host # ✅ Works
|
||||
|
||||
**Manual Verification**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Test module imports
|
||||
nickel-export json /workspace/config.ncl
|
||||
|
||||
|
||||
@ -78,7 +78,7 @@ integration with the provisioning orchestrator.
|
||||
|
||||
### Architecture Diagram
|
||||
|
||||
```text
|
||||
```
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Nushell Script │
|
||||
│ │
|
||||
@ -167,7 +167,7 @@ integration with the provisioning orchestrator.
|
||||
|
||||
Nushell's `input` command is limited:
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Current: No validation, no security
|
||||
let password = input "Password: " # ❌ Shows in terminal
|
||||
let region = input "AWS Region: " # ❌ No autocomplete/validation
|
||||
@ -184,7 +184,7 @@ let region = input "AWS Region: " # ❌ No autocomplete/validation
|
||||
|
||||
Nickel is declarative and cannot prompt users:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Nickel defines what the config looks like, NOT how to get it
|
||||
{
|
||||
database = {
|
||||
@ -243,7 +243,7 @@ Nickel is declarative and cannot prompt users:
|
||||
### Mitigation Strategies
|
||||
|
||||
**Non-Interactive Mode**:
|
||||
```rust
|
||||
```
|
||||
// Support both interactive and non-interactive
|
||||
if terminal::is_interactive() {
|
||||
// Show TUI dialog
|
||||
@ -255,7 +255,7 @@ if terminal::is_interactive() {
|
||||
```
|
||||
|
||||
**Testing**:
|
||||
```rust
|
||||
```
|
||||
// Unit tests: Test form validation logic (no TUI)
|
||||
#[test]
|
||||
fn test_validate_workspace_name() {
|
||||
@ -267,7 +267,7 @@ fn test_validate_workspace_name() {
|
||||
```
|
||||
|
||||
**Scriptability**:
|
||||
```bash
|
||||
```
|
||||
# Batch mode: Provide config via file
|
||||
provisioning workspace init --config workspace.toml
|
||||
|
||||
@ -316,7 +316,7 @@ provisioning workspace init --interactive
|
||||
|
||||
### Form Definition Pattern
|
||||
|
||||
```rust
|
||||
```
|
||||
use typdialog::Form;
|
||||
|
||||
pub fn workspace_initialization_form() -> Result<WorkspaceConfig> {
|
||||
@ -353,7 +353,7 @@ pub fn workspace_initialization_form() -> Result<WorkspaceConfig> {
|
||||
|
||||
### Integration with Nickel
|
||||
|
||||
```rust
|
||||
```
|
||||
// 1. Get validated input from TUI dialog
|
||||
let config = workspace_initialization_form()?;
|
||||
|
||||
@ -370,7 +370,7 @@ fs::write("workspace/config.toml", config_toml)?;
|
||||
|
||||
### CLI Command Structure
|
||||
|
||||
```rust
|
||||
```
|
||||
// provisioning/core/cli/src/commands/workspace.rs
|
||||
|
||||
#[derive(Parser)]
|
||||
@ -404,7 +404,7 @@ pub fn handle_workspace_init(args: InitArgs) -> Result<()> {
|
||||
|
||||
### Validation Rules
|
||||
|
||||
```rust
|
||||
```
|
||||
pub fn validate_workspace_name(name: &str) -> Result<(), String> {
|
||||
// Alphanumeric, hyphens, 3-32 chars
|
||||
let re = Regex::new(r"^[a-z0-9-]{3,32}$").unwrap();
|
||||
@ -425,7 +425,7 @@ pub fn validate_region(region: &str) -> Result<(), String> {
|
||||
|
||||
### Security: Password Handling
|
||||
|
||||
```rust
|
||||
```
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
pub fn get_secure_password() -> Result<Zeroizing<String>> {
|
||||
@ -447,7 +447,7 @@ pub fn get_secure_password() -> Result<Zeroizing<String>> {
|
||||
## Testing Strategy
|
||||
|
||||
**Unit Tests**:
|
||||
```rust
|
||||
```
|
||||
#[test]
|
||||
fn test_workspace_name_validation() {
|
||||
assert!(validate_workspace_name("my-workspace").is_ok());
|
||||
@ -457,7 +457,7 @@ fn test_workspace_name_validation() {
|
||||
```
|
||||
|
||||
**Integration Tests**:
|
||||
```rust
|
||||
```
|
||||
// Use non-interactive mode with config files
|
||||
#[test]
|
||||
fn test_workspace_init_non_interactive() {
|
||||
@ -481,7 +481,7 @@ fn test_workspace_init_non_interactive() {
|
||||
```
|
||||
|
||||
**Manual Testing**:
|
||||
```bash
|
||||
```
|
||||
# Test interactive flow
|
||||
cargo build --release
|
||||
./target/release/provisioning workspace init --interactive
|
||||
@ -495,7 +495,7 @@ cargo build --release
|
||||
## Configuration Integration
|
||||
|
||||
**CLI Flag**:
|
||||
```toml
|
||||
```
|
||||
# provisioning/config/config.defaults.toml
|
||||
[ui]
|
||||
interactive_mode = "auto" # "auto" | "always" | "never"
|
||||
@ -503,7 +503,7 @@ dialog_theme = "default" # "default" | "minimal" | "colorful"
|
||||
```
|
||||
|
||||
**Environment Override**:
|
||||
```bash
|
||||
```
|
||||
# Force non-interactive mode (for CI/CD)
|
||||
export PROVISIONING_INTERACTIVE=false
|
||||
|
||||
@ -523,7 +523,7 @@ export PROVISIONING_INTERACTIVE=true
|
||||
- Validation rule patterns
|
||||
|
||||
**Configuration Schema**:
|
||||
```nickel
|
||||
```
|
||||
# provisioning/schemas/workspace.ncl
|
||||
{
|
||||
WorkspaceConfig = {
|
||||
|
||||
@ -93,7 +93,7 @@ Integrate **SecretumVault** as the centralized secrets management system for the
|
||||
|
||||
### Architecture Diagram
|
||||
|
||||
```text
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Provisioning CLI / Orchestrator / Services │
|
||||
│ │
|
||||
@ -273,7 +273,7 @@ SOPS is excellent for **static secrets in git**, but inadequate for:
|
||||
### Mitigation Strategies
|
||||
|
||||
**High Availability**:
|
||||
```bash
|
||||
```
|
||||
# Deploy SecretumVault cluster (3 nodes)
|
||||
provisioning deploy secretum-vault --ha --replicas 3
|
||||
|
||||
@ -282,7 +282,7 @@ provisioning deploy secretum-vault --ha --replicas 3
|
||||
```
|
||||
|
||||
**Migration from SOPS**:
|
||||
```bash
|
||||
```
|
||||
# Phase 1: Import existing SOPS secrets into SecretumVault
|
||||
provisioning secrets migrate --from-sops config/secrets.yaml
|
||||
|
||||
@ -291,7 +291,7 @@ provisioning secrets migrate --from-sops config/secrets.yaml
|
||||
```
|
||||
|
||||
**Fallback Strategy**:
|
||||
```rust
|
||||
```
|
||||
// Graceful degradation if vault unavailable
|
||||
let secret = match vault_client.get_secret("database/password").await {
|
||||
Ok(s) => s,
|
||||
@ -305,7 +305,7 @@ let secret = match vault_client.get_secret("database/password").await {
|
||||
```
|
||||
|
||||
**Operational Monitoring**:
|
||||
```toml
|
||||
```
|
||||
# prometheus metrics
|
||||
secretum_vault_request_duration_seconds
|
||||
secretum_vault_secret_lease_expiry
|
||||
@ -351,7 +351,7 @@ secretum_vault_raft_leader_changes
|
||||
|
||||
### SecretumVault Deployment
|
||||
|
||||
```bash
|
||||
```
|
||||
# Deploy via provisioning system
|
||||
provisioning deploy secretum-vault \
|
||||
--ha \
|
||||
@ -367,7 +367,7 @@ provisioning vault unseal --key-shares 5 --key-threshold 3
|
||||
|
||||
### Rust Client Library
|
||||
|
||||
```rust
|
||||
```
|
||||
// provisioning/core/libs/secretum-client/src/lib.rs
|
||||
|
||||
use secretum_vault::{Client, SecretEngine, Auth};
|
||||
@ -402,7 +402,7 @@ impl VaultClient {
|
||||
|
||||
### Nushell Integration

```nushell
# Nushell commands via Rust CLI wrapper
provisioning secrets get database/prod/password
provisioning secrets set api/keys/stripe --value "sk_live_xyz"
provisioning secrets list database/
```
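
In day-to-day scripts these commands compose with normal pipelines. The sketch below assumes `secrets get` prints the secret value as plain text, which is not stated explicitly in this ADR; the host and credentials are placeholders.

```nushell
# Sketch: pass a secret to a child process without echoing or persisting it.
let db_pass = (provisioning secrets get database/prod/password)
with-env { PGPASSWORD: $db_pass } {
    ^psql -h db.internal -U app -c "select 1"
}
```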
||||
### Nickel Configuration Integration
|
||||
|
||||
```nickel
|
||||
```
|
||||
# provisioning/schemas/database.ncl
|
||||
{
|
||||
database = {
|
||||
@ -429,7 +429,7 @@ provisioning secrets list database/
|
||||
|
||||
### Cedar Policy for Secret Access
|
||||
|
||||
```cedar
|
||||
```
|
||||
// policy: developers can read dev secrets, not prod
|
||||
permit(
|
||||
principal in Group::"developers",
|
||||
@ -455,7 +455,7 @@ permit(
|
||||
|
||||
### Dynamic Database Credentials
|
||||
|
||||
```rust
|
||||
```
|
||||
// Application requests temporary DB credentials
|
||||
let creds = vault_client
|
||||
.database()
|
||||
@ -472,7 +472,7 @@ println!("TTL: {}", creds.lease_duration); // 1h
|
||||
|
||||
### Secret Rotation Automation
|
||||
|
||||
```toml
|
||||
```
|
||||
# secretum-vault config
|
||||
[[rotation_policies]]
|
||||
path = "database/prod/password"
|
||||
@ -487,7 +487,7 @@ max_age = "90d"
|
||||
|
||||
### Audit Log Format
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"timestamp": "2025-01-08T12:34:56Z",
|
||||
"type": "request",
|
||||
@ -515,7 +515,7 @@ max_age = "90d"
|
||||
## Testing Strategy
|
||||
|
||||
**Unit Tests**:
|
||||
```rust
|
||||
```
|
||||
#[tokio::test]
|
||||
async fn test_get_secret() {
|
||||
let vault = mock_vault_client();
|
||||
@ -533,7 +533,7 @@ async fn test_dynamic_credentials_generation() {
|
||||
```
|
||||
|
||||
**Integration Tests**:
|
||||
```bash
|
||||
```
|
||||
# Test vault deployment
|
||||
provisioning deploy secretum-vault --test-mode
|
||||
provisioning vault init
|
||||
@ -551,7 +551,7 @@ provisioning secrets rotate test/secret
|
||||
```
|
||||
|
||||
**Security Tests**:
|
||||
```rust
|
||||
```
|
||||
#[tokio::test]
|
||||
async fn test_unauthorized_access_denied() {
|
||||
let vault = vault_client_with_limited_token();
|
||||
@ -563,7 +563,7 @@ async fn test_unauthorized_access_denied() {
|
||||
## Configuration Integration
|
||||
|
||||
**Provisioning Config**:
|
||||
```toml
|
||||
```
|
||||
# provisioning/config/config.defaults.toml
|
||||
[secrets]
|
||||
provider = "secretum-vault" # "secretum-vault" | "sops" | "env"
|
||||
@ -583,7 +583,7 @@ max_size = "100MB"
|
||||
```
|
||||
|
||||
**Environment Variables**:
|
||||
```bash
|
||||
```
|
||||
export VAULT_ADDR="https://vault.example.com:8200"
|
||||
export VAULT_TOKEN="s.abc123def456..."
|
||||
export VAULT_NAMESPACE="provisioning"
|
||||
|
||||
@ -100,7 +100,7 @@ All AI components are **schema-aware**, **security-enforced**, and **human-super
|
||||
|
||||
### Architecture Diagram
|
||||
|
||||
```text
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ User Interfaces │
|
||||
│ │
|
||||
@ -268,7 +268,7 @@ All AI components are **schema-aware**, **security-enforced**, and **human-super
|
||||
|
||||
Traditional AI code generation fails for infrastructure because:
|
||||
|
||||
```text
|
||||
```
|
||||
Generic AI (like GitHub Copilot):
|
||||
❌ Generates syntactically correct but semantically wrong configs
|
||||
❌ Doesn't understand cloud provider constraints
|
||||
@ -278,7 +278,7 @@ Generic AI (like GitHub Copilot):
|
||||
```
|
||||
|
||||
**Schema-aware AI** (our approach):
|
||||
```nickel
|
||||
```
|
||||
# Nickel schema provides ground truth
|
||||
{
|
||||
Database = {
|
||||
@ -303,7 +303,7 @@ Generic AI (like GitHub Copilot):
|
||||
|
||||
LLMs alone have limitations:
|
||||
|
||||
```text
|
||||
```
|
||||
Pure LLM:
|
||||
❌ Knowledge cutoff (no recent updates)
|
||||
❌ Hallucinations (invents plausible-sounding configs)
|
||||
@ -312,7 +312,7 @@ Pure LLM:
|
||||
```
|
||||
|
||||
**RAG-enhanced LLM**:
|
||||
```toml
|
||||
```
|
||||
Query: "How to configure Postgres with encryption?"
|
||||
|
||||
RAG retrieves:
|
||||
@ -332,7 +332,7 @@ LLM generates answer WITH retrieved context:
|
||||
|
||||
AI-generated infrastructure configs require human approval:
|
||||
|
||||
```rust
|
||||
```
|
||||
// All AI operations require approval
|
||||
pub async fn ai_generate_config(request: GenerateRequest) -> Result<Config> {
|
||||
let ai_generated = ai_service.generate(request).await?;
|
||||
@ -414,7 +414,7 @@ No single LLM provider is best for all tasks:
|
||||
### Mitigation Strategies
|
||||
|
||||
**Cost Control**:
|
||||
```toml
|
||||
```
|
||||
[ai.rate_limiting]
|
||||
requests_per_minute = 60
|
||||
tokens_per_day = 1000000
|
||||
@ -427,7 +427,7 @@ ttl = "1h"
|
||||
```
|
||||
|
||||
**Latency Optimization**:
|
||||
```rust
|
||||
```
|
||||
// Streaming responses for real-time feedback
|
||||
pub async fn ai_generate_stream(request: GenerateRequest) -> impl Stream<Item = String> {
|
||||
ai_service
|
||||
@ -438,7 +438,7 @@ pub async fn ai_generate_stream(request: GenerateRequest) -> impl Stream<Item =
|
||||
```
|
||||
|
||||
**Privacy (Local Models)**:
|
||||
```toml
|
||||
```
|
||||
[ai]
|
||||
provider = "local"
|
||||
model_path = "/opt/provisioning/models/llama-3-70b"
|
||||
@ -447,7 +447,7 @@ model_path = "/opt/provisioning/models/llama-3-70b"
|
||||
```
|
||||
|
||||
**Validation (Defense in Depth)**:
|
||||
```text
|
||||
```
|
||||
AI generates config
|
||||
↓
|
||||
Nickel schema validation (syntax, types, contracts)
|
||||
@ -460,7 +460,7 @@ Deployment
|
||||
```
|
||||
|
||||
**Observability**:
|
||||
```toml
|
||||
```
|
||||
[ai.observability]
|
||||
trace_all_requests = true
|
||||
store_conversations = true
|
||||
@ -510,7 +510,7 @@ conversation_retention = "30d"
|
||||
|
||||
### AI Service API
|
||||
|
||||
```rust
|
||||
```
|
||||
// platform/crates/ai-service/src/lib.rs
|
||||
|
||||
#[async_trait]
|
||||
@ -609,7 +609,7 @@ impl AIService for AIServiceImpl {
|
||||
|
||||
### MCP Server Integration
|
||||
|
||||
```rust
|
||||
```
|
||||
// platform/crates/mcp-server/src/lib.rs
|
||||
|
||||
pub struct MCPClient {
|
||||
@ -675,7 +675,7 @@ impl ToolRegistry {
|
||||
|
||||
### RAG System Implementation
|
||||
|
||||
```rust
|
||||
```
|
||||
// platform/crates/rag/src/lib.rs
|
||||
|
||||
pub struct RAGService {
|
||||
@ -750,7 +750,7 @@ pub struct QdrantStore {
|
||||
|
||||
### typdialog-ai Integration
|
||||
|
||||
```rust
|
||||
```
|
||||
// typdialog-ai/src/form_assistant.rs
|
||||
|
||||
pub struct FormAssistant {
|
||||
@ -813,7 +813,7 @@ impl FormAssistant {
|
||||
|
||||
### typdialog-ag Agents
|
||||
|
||||
```rust
|
||||
```
|
||||
// typdialog-ag/src/agent.rs
|
||||
|
||||
pub struct ProvisioningAgent {
|
||||
@ -891,7 +891,7 @@ impl ProvisioningAgent {
|
||||
|
||||
### Cedar Policies for AI
|
||||
|
||||
```cedar
|
||||
```
|
||||
// AI cannot access secrets without explicit permission
|
||||
forbid(
|
||||
principal == Service::"ai-service",
|
||||
@ -931,7 +931,7 @@ forbid(
|
||||
## Testing Strategy
|
||||
|
||||
**Unit Tests**:
|
||||
```rust
|
||||
```
|
||||
#[tokio::test]
|
||||
async fn test_ai_config_generation_validates() {
|
||||
let ai_service = mock_ai_service();
|
||||
@ -960,7 +960,7 @@ async fn test_ai_cannot_access_secrets() {
|
||||
```
|
||||
|
||||
**Integration Tests**:
|
||||
```rust
|
||||
```
|
||||
#[tokio::test]
|
||||
async fn test_end_to_end_ai_config_generation() {
|
||||
// User provides natural language
|
||||
@ -991,7 +991,7 @@ async fn test_end_to_end_ai_config_generation() {
|
||||
```
|
||||
|
||||
**RAG Quality Tests**:
|
||||
```rust
|
||||
```
|
||||
#[tokio::test]
|
||||
async fn test_rag_retrieval_accuracy() {
|
||||
let rag = rag_service();
|
||||
@ -1018,7 +1018,7 @@ async fn test_rag_retrieval_accuracy() {
|
||||
## Security Considerations
|
||||
|
||||
**AI Access Control**:
|
||||
```bash
|
||||
```
|
||||
AI Service Permissions (enforced by Cedar):
|
||||
✅ CAN: Read Nickel schemas
|
||||
✅ CAN: Generate configurations
|
||||
@ -1031,7 +1031,7 @@ AI Service Permissions (enforced by Cedar):
|
||||
```
|
||||
|
||||
**Data Privacy**:
|
||||
```toml
|
||||
```
|
||||
[ai.privacy]
|
||||
# Sanitize before sending to LLM
|
||||
sanitize_secrets = true
|
||||
@ -1048,7 +1048,7 @@ sanitize_credentials = true
|
||||
```
|
||||
|
||||
**Audit Trail**:
|
||||
```rust
|
||||
```
|
||||
// Every AI operation logged
|
||||
pub struct AIAuditLog {
|
||||
timestamp: DateTime<Utc>,
|
||||
@ -1066,7 +1066,7 @@ pub struct AIAuditLog {
|
||||
|
||||
**Estimated Costs** (per month, based on typical usage):
|
||||
|
||||
```text
|
||||
```
|
||||
Assumptions:
|
||||
- 100 active users
|
||||
- 10 AI config generations per user per day
|
||||
|
||||
@ -0,0 +1,160 @@
|
||||
# ADR-016: Schema-Driven Accessor Generation Pattern
|
||||
|
||||
**Status**: Proposed
|
||||
**Date**: 2026-01-13
|
||||
**Author**: Architecture Team
|
||||
**Supersedes**: Manual accessor maintenance in `lib_provisioning/config/accessor.nu`
|
||||
|
||||
## Context
|
||||
|
||||
The `lib_provisioning/config/accessor.nu` file contains 1567 lines across 187 accessor functions. Analysis reveals that 95% of these functions follow
|
||||
an identical mechanical pattern:
|
||||
|
||||
```
|
||||
export def get-{field-name} [--config: record] {
|
||||
config-get "{path.to.field}" {default_value} --config $config
|
||||
}
|
||||
```
|
||||
|
||||
This represents significant technical debt:
|
||||
|
||||
1. **Manual Maintenance Burden**: Adding a new config field requires manually writing a new accessor function
|
||||
2. **Schema Drift Risk**: No automated validation that accessors match the actual Nickel schema
|
||||
3. **Code Duplication**: Nearly identical functions across 187 definitions
|
||||
4. **Testing Complexity**: Each accessor requires manual testing
|
||||
|
||||
## Problem Statement
|
||||
|
||||
**Current Architecture**:
|
||||
- Nickel schemas define configuration structure (source of truth)
|
||||
- Accessor functions manually mirror the schema structure
|
||||
- No automated synchronization between schema and accessors
|
||||
- High risk of accessor-schema mismatch
|
||||
|
||||
**Key Metrics**:
|
||||
- 1567 lines of accessor code
|
||||
- 187 repetitive functions
|
||||
- ~95% code similarity
|
||||
|
||||
## Decision
|
||||
|
||||
Implement **Schema-Driven Accessor Generation**: automatically generate accessor functions from Nickel schema definitions.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Nickel Schema (contracts.ncl)
|
||||
↓
|
||||
[Parse & Extract Schema Structure]
|
||||
↓
|
||||
[Generate Nushell Functions]
|
||||
↓
|
||||
accessor_generated.nu (800 lines)
|
||||
↓
|
||||
[Validation & Integration]
|
||||
↓
|
||||
CI/CD enforces: schema hash == generated code
|
||||
```
|
||||
|
||||
### Generation Process
|
||||
|
||||
1. **Schema Parsing**: Extract field paths, types, and defaults from Nickel contracts
|
||||
2. **Code Generation**: Create accessor functions with Nushell 0.109 compliance
|
||||
3. **Validation**: Verify generated code against schema
|
||||
4. **CI Integration**: Detect schema changes, validate generated code matches
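As a minimal sketch of the generation step above (the flattened `{path, default}` input format and the helper names are assumptions; `config-get` is the existing low-level accessor):

```nushell
# Emit one accessor for a flattened schema field, e.g. {path: "sops.key_file", default: "..."}
def emit-accessor [field: record] {
    let fn_name = ($field.path | str replace --all "." "-")
    let default_literal = ($field.default | to json)
    [
        ("export def get-" + $fn_name + " [--config: record] {")
        ("    config-get \"" + $field.path + "\" " + $default_literal + " --config $config")
        "}"
        ""
    ] | str join "\n"
}

# Concatenate accessors for every field extracted from the Nickel contracts
def generate-accessors [fields: list<record>] {
    $fields | each {|f| emit-accessor $f } | str join "\n"
}
```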
|
||||
|
||||
### Compliance Requirements
|
||||
|
||||
**Nushell 0.109 Guidelines**:
|
||||
- No `try-catch` blocks (use `do-complete` pattern)
|
||||
- No `reduce --init` (use `reduce --fold`)
|
||||
- No mutable variables (use immutable bindings)
|
||||
- No type annotations on boolean flags
|
||||
- Use `each` not `map`, `is-not-empty` not `length`
|
||||
|
||||
**Nickel Compliance**:
|
||||
- Schema-first design (schema is source of truth)
|
||||
- Type contracts enforce structure
|
||||
- `| doc` before `| default` ordering
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- **Elimination of Manual Maintenance**: New config fields automatically get accessors
|
||||
- **Zero Schema Drift**: Automatic validation ensures accessors match schema
|
||||
- **Reduced Code Size**: 1567 lines → ~400 lines (manual core) + ~800 lines (generated)
|
||||
- **Type Safety**: Generated code guarantees type correctness
|
||||
- **Consistency**: All 187 functions use identical pattern
|
||||
|
||||
### Negative
|
||||
|
||||
- **Tool Complexity**: Generator must parse Nickel and emit valid Nushell
|
||||
- **CI/CD Changes**: Build must validate schema hash
|
||||
- **Initial Migration**: One-time effort to verify generated code matches manual versions
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
1. **Create Generator** (`tools/codegen/accessor_generator.nu`)
|
||||
- Parse Nickel schema files
|
||||
- Extract paths, types, defaults
|
||||
- Generate valid Nushell code
|
||||
- Emit with proper formatting
|
||||
|
||||
2. **Generate Accessors** (`lib_provisioning/config/accessor_generated.nu`)
|
||||
- Run generator on `provisioning/schemas/config/settings/contracts.ncl`
|
||||
- Output 187 accessor functions
|
||||
- Verify compatibility with existing code
|
||||
|
||||
3. **Validation**
|
||||
- Integration tests comparing manual vs generated output
|
||||
- Signature validator ensuring generated functions match patterns
|
||||
- CI check for schema hash validity (a minimal sketch follows this list)
|
||||
|
||||
4. **Gradual Adoption**
|
||||
- Keep manual accessors temporarily
|
||||
- Feature flag to switch between manual and generated
|
||||
- Gradual migration of dependent code
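The CI check from the validation step can stay small: stamp the schema hash into the generated file and compare it on every build. A hedged sketch follows; the `# schema-hash:` header format is an assumption, not an agreed convention:

```nushell
# Fail the build when the generated accessors no longer match the schema
def check-accessor-freshness [] {
    let schema_hash = (
        open --raw "provisioning/schemas/config/settings/contracts.ncl" | hash sha256
    )
    let header = (
        open --raw "provisioning/core/nulib/lib_provisioning/config/accessor_generated.nu"
        | lines
        | first
    )
    if $header != $"# schema-hash: ($schema_hash)" {
        error make { msg: "accessor_generated.nu is stale: regenerate it from the Nickel schema" }
    }
}
```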
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
1. **Unit Tests**
|
||||
- Each generated accessor returns correct type
|
||||
- Default values applied correctly
|
||||
- Path resolution handles nested fields
|
||||
|
||||
2. **Integration Tests**
|
||||
- Generated accessors produce identical output to manual versions
|
||||
- Config loading pipeline works with generated accessors
|
||||
- Fallback behavior preserved
|
||||
|
||||
3. **Regression Tests**
|
||||
- All existing config access patterns work
|
||||
- Performance within 5% of manual version
|
||||
- No breaking changes to public API
|
||||
|
||||
## Related ADRs
|
||||
|
||||
- **ADR-010**: Configuration Format Strategy (TOML/YAML/Nickel)
|
||||
- **ADR-011**: Nickel Migration (schema-first architecture)
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. Should accessors be regenerated on every build or only on schema changes?
|
||||
2. How do we handle conditional fields (if X then Y)?
|
||||
3. What's the fallback strategy if generator fails?
|
||||
|
||||
## Timeline
|
||||
|
||||
- **Phase 1**: Generator implementation (foundation)
|
||||
- **Phase 2**: Generate and validate accessor functions
|
||||
- **Phase 3**: Integration tests and feature flags
|
||||
- **Phase 4**: Full migration and manual code removal
|
||||
|
||||
## References
|
||||
|
||||
- Nickel Language: [https://nickel-lang.org/](https://nickel-lang.org/)
|
||||
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
|
||||
- Current Accessor Implementation: `provisioning/core/nulib/lib_provisioning/config/accessor.nu`
|
||||
- Schema Source: `provisioning/schemas/config/settings/contracts.ncl`
|
||||
|
||||
@ -0,0 +1,226 @@
|
||||
# ADR-017: Plugin Wrapper Abstraction Framework
|
||||
|
||||
**Status**: Proposed
|
||||
**Date**: 2026-01-13
|
||||
**Author**: Architecture Team
|
||||
**Supersedes**: Manual plugin wrapper implementations in `lib_provisioning/plugins/`
|
||||
|
||||
## Context
|
||||
|
||||
The provisioning system integrates with four critical plugins, each with its own wrapper layer:
|
||||
|
||||
1. **auth.nu** (1066 lines) - Authentication plugin wrapper
|
||||
2. **orchestrator.nu** (~500 lines) - Orchestrator plugin wrapper
|
||||
3. **secretumvault.nu** (~500 lines) - Secrets vault plugin wrapper
|
||||
4. **kms.nu** (~500 lines) - Key management service plugin wrapper
|
||||
|
||||
Analysis reveals ~90% code duplication across these wrappers:
|
||||
|
||||
```
|
||||
# Pattern repeated 4 times with minor variations:
|
||||
export def plugin-available? [] {
|
||||
# Check if plugin is installed
|
||||
}
|
||||
|
||||
export def try-plugin-call [method args] {
|
||||
# Try to call the plugin
|
||||
# On failure, fallback to HTTP
|
||||
}
|
||||
|
||||
export def http-fallback-call [endpoint method args] {
|
||||
# HTTP endpoint fallback
|
||||
}
|
||||
```
|
||||
|
||||
## Problem Statement
|
||||
|
||||
**Current Architecture**:
|
||||
- Each plugin has manual wrapper implementation
|
||||
- ~3000 total lines across 4 files
|
||||
- Boilerplate code repeated for each plugin method
|
||||
- HTTP fallback logic duplicated
|
||||
- Error handling inconsistent
|
||||
- Testing each wrapper requires custom setup
|
||||
|
||||
**Key Metrics**:
|
||||
- 3000 lines of plugin wrapper code
|
||||
- 90% code similarity
|
||||
- 85% reduction opportunity
|
||||
|
||||
## Decision
|
||||
|
||||
Implement **Plugin Wrapper Abstraction Framework**: replace manual plugin wrappers with a generic proxy framework + declarative YAML definitions.
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Plugin Definition (YAML)
|
||||
├─ plugin: auth
|
||||
├─ methods:
|
||||
│ ├─ login(username, password)
|
||||
│ ├─ logout()
|
||||
│ └─ status()
|
||||
└─ http_endpoint: http://localhost:8001
|
||||
|
||||
Generic Plugin Proxy Framework
|
||||
├─ availability() - Check if plugin installed
|
||||
├─ call() - Try plugin, fallback to HTTP
|
||||
├─ http_fallback() - HTTP call with retry
|
||||
└─ error_handler() - Consistent error handling
|
||||
|
||||
Generated Wrappers
|
||||
├─ auth_wrapper.nu (150 lines, autogenerated)
|
||||
├─ orchestrator_wrapper.nu (150 lines)
|
||||
├─ vault_wrapper.nu (150 lines)
|
||||
└─ kms_wrapper.nu (150 lines)
|
||||
```
|
||||
|
||||
### Mechanism
|
||||
|
||||
**Plugin Call Flow**:
|
||||
|
||||
1. **Check Availability**: Is plugin installed and running?
|
||||
2. **Try Plugin Call**: Execute plugin method with timeout
|
||||
3. **On Failure**: Fall back to HTTP endpoint
|
||||
4. **Error Handling**: Unified error response format
|
||||
5. **Retry Logic**: Configurable retry with exponential backoff (sketched below, after the error-handling example)
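Step 1 can stay very small. A hedged sketch, assuming every plugin ships a binary of the same name that answers a `status` subcommand (both are assumptions about the final plugin interface):

```nushell
# A plugin counts as available when its binary is on PATH and answers `status`
def plugin-available? [plugin: string] {
    if (which $plugin | is-empty) {
        false
    } else {
        (do { ^$plugin status } | complete | get exit_code) == 0
    }
}
```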
|
||||
|
||||
### Error Handling Pattern
|
||||
|
||||
**Nushell 0.109 Compliant** (do-complete pattern, no try-catch):
|
||||
|
||||
```
|
||||
def call-plugin-with-fallback [method: string args: record] {
|
||||
let plugin_result = (
|
||||
do {
|
||||
# Try plugin call
|
||||
call-plugin $method $args
|
||||
} | complete
|
||||
)
|
||||
|
||||
if $plugin_result.exit_code != 0 {
|
||||
# Fall back to HTTP
|
||||
call-http-endpoint $method $args
|
||||
} else {
|
||||
$plugin_result.stdout | from json
|
||||
}
|
||||
}
|
||||
```
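The retry logic from step 5 can wrap the same do-complete pattern. A minimal sketch, assuming the plugin is invoked as an external command; the backoff values are illustrative, not settled configuration:

```nushell
# Retry an external plugin call, doubling the delay between attempts
def call-plugin-with-retry [plugin: string, method: string, payload: string, --max-attempts: int = 3] {
    for attempt in 1..$max_attempts {
        let result = (do { ^$plugin $method $payload } | complete)
        if $result.exit_code == 0 {
            return ($result.stdout | from json)
        }
        if $attempt < $max_attempts {
            sleep (100ms * (2 ** $attempt))
        }
    }
    error make { msg: $"($plugin) ($method) failed after ($max_attempts) attempts" }
}
```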
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- **85% Code Reduction**: 3000 lines → 200 (proxy) + 600 (generated)
|
||||
- **Consistency**: All plugins use identical call pattern
|
||||
- **Maintainability**: Single proxy implementation vs 4 wrapper files
|
||||
- **Testability**: Mock proxy for testing, no plugin-specific setup needed
|
||||
- **Extensibility**: New plugins require only YAML definition
|
||||
|
||||
### Negative
|
||||
|
||||
- **Abstraction Overhead**: Proxy layer adds indirection
|
||||
- **YAML Schema**: Must maintain schema for plugin definitions
|
||||
- **Migration Risk**: Replacing working code requires careful testing
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
1. **Create Generic Proxy** (`lib_provisioning/plugins/proxy.nu`)
|
||||
- Plugin availability detection
|
||||
- Call execution with error handling
|
||||
- HTTP fallback mechanism
|
||||
- Retry logic with backoff
|
||||
|
||||
2. **Define Plugin Schema** (`lib_provisioning/plugins/definitions/plugin.schema.yaml`)
|
||||
- Plugin metadata (name, http_endpoint)
|
||||
- Method definitions (parameters, return types)
|
||||
- Fallback configuration (retry count, timeout)
|
||||
|
||||
3. **Plugin Definitions** (`lib_provisioning/plugins/definitions/`)
|
||||
- `auth.yaml` - Authentication plugin
|
||||
- `orchestrator.yaml` - Orchestrator plugin
|
||||
- `secretumvault.yaml` - Secrets vault plugin
|
||||
- `kms.yaml` - Key management service plugin
|
||||
|
||||
4. **Code Generator** (`tools/codegen/plugin_wrapper_generator.nu`)
|
||||
- Parse plugin YAML definitions
|
||||
- Generate wrapper functions
|
||||
- Ensure Nushell 0.109 compliance
|
||||
|
||||
5. **Integration**
|
||||
- Feature flag: `$env.PROVISIONING_USE_GENERATED_PLUGINS`
|
||||
- Gradual migration from manual to generated wrappers
|
||||
- Full compatibility with existing code
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
1. **Unit Tests**
|
||||
- Plugin availability detection
|
||||
- Successful plugin calls
|
||||
- HTTP fallback on plugin failure
|
||||
- Error handling and retry logic
|
||||
|
||||
2. **Integration Tests**
|
||||
- Real plugin calls with actual plugins
|
||||
- Mock HTTP server for fallback testing
|
||||
- Timeout handling
|
||||
- Retry with backoff
|
||||
|
||||
3. **Contract Tests**
|
||||
- Plugin method signatures match definitions
|
||||
- Return values have expected structure
|
||||
- Error responses consistent
|
||||
|
||||
## Plugin Definitions
|
||||
|
||||
### auth.yaml Example
|
||||
|
||||
```
|
||||
plugin: auth
|
||||
http_endpoint: http://localhost:8001
|
||||
methods:
|
||||
login:
|
||||
params:
|
||||
username: string
|
||||
password: string
|
||||
returns: {token: string}
|
||||
logout:
|
||||
params: {}
|
||||
returns: {status: string}
|
||||
status:
|
||||
params: {}
|
||||
returns: {authenticated: bool}
|
||||
```
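Given a definition like this, the generic proxy only needs the plugin name and method to dispatch. A hedged sketch; the definitions path and the reuse of `call-plugin-with-fallback` from the error-handling example are assumptions about the final layout:

```nushell
# Look up the method in the YAML definition, then route through the shared proxy
def plugin-call [plugin: string, method: string, args: record] {
    let definition = (open $"lib_provisioning/plugins/definitions/($plugin).yaml")

    if not ($method in ($definition.methods | columns)) {
        error make { msg: $"unknown method ($method) for plugin ($plugin)" }
    }

    # Try the plugin first; fall back to the declared HTTP endpoint on failure
    call-plugin-with-fallback $method ($args | merge { endpoint: $definition.http_endpoint })
}
```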
|
||||
|
||||
## Rollback Strategy
|
||||
|
||||
**Feature Flag Approach**:
|
||||
|
||||
```
|
||||
# Use original manual wrappers
|
||||
export PROVISIONING_USE_GENERATED_PLUGINS=false
|
||||
|
||||
# Use new generated proxy framework
|
||||
export PROVISIONING_USE_GENERATED_PLUGINS=true
|
||||
```
|
||||
|
||||
Allows parallel operation and gradual migration.
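Inside the wrappers, the flag can be checked at the call site. A minimal sketch with hypothetical function names (`generated-auth-login` and `manual-auth-login` stand in for the two implementations):

```nushell
export def auth-login [username: string, password: string] {
    let use_generated = (($env.PROVISIONING_USE_GENERATED_PLUGINS? | default "false") == "true")
    if $use_generated {
        generated-auth-login $username $password   # generated proxy wrapper (hypothetical name)
    } else {
        manual-auth-login $username $password      # existing hand-written wrapper (hypothetical name)
    }
}
```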
|
||||
|
||||
## Related ADRs
|
||||
|
||||
- **ADR-012**: Nushell/Nickel Plugin CLI Wrapper
|
||||
- **ADR-013**: TypeDialog Integration (forms for plugin configuration)
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. Should plugin definitions be YAML or Nickel?
|
||||
2. How do we handle plugin discovery automatically?
|
||||
3. What's the expected HTTP endpoint format for all plugins?
|
||||
4. Should retry logic be configurable per plugin?
|
||||
|
||||
## References
|
||||
|
||||
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
|
||||
- Do-Complete Pattern: Error handling without try-catch
|
||||
- Plugin Framework: `provisioning/core/nulib/lib_provisioning/plugins/`
|
||||
|
||||
@ -0,0 +1,281 @@
|
||||
# ADR-018: Help System Fluent Integration & Data-Driven Architecture
|
||||
|
||||
**Status**: Proposed
|
||||
**Date**: 2026-01-13
|
||||
**Author**: Architecture Team
|
||||
**Supersedes**: Hardcoded help strings in `main_provisioning/help_system.nu`
|
||||
|
||||
## Context
|
||||
|
||||
The current help system in `main_provisioning/help_system.nu` (1303 lines) consists almost entirely of hardcoded string concatenation with embedded
|
||||
ANSI formatting codes:
|
||||
|
||||
```
|
||||
def help-infrastructure [] {
|
||||
print "╔════════════════════════════════════════════════════╗"
|
||||
print "║ SERVER & INFRASTRUCTURE ║"
|
||||
print "╚════════════════════════════════════════════════════╝"
|
||||
}
|
||||
```
|
||||
|
||||
**Current Problems**:
|
||||
|
||||
1. **No Internationalization**: Help text trapped in English-only code
|
||||
2. **Hard to Maintain**: Updating text requires editing Nushell code
|
||||
3. **Mixed Concerns**: Content (strings) mixed with presentation (ANSI codes)
|
||||
4. **No Hot-Reload**: Changes require recompilation
|
||||
5. **Difficult to Test**: String content buried in function definitions
|
||||
|
||||
## Problem Statement
|
||||
|
||||
**Metrics**:
|
||||
- 1303 lines of code-embedded help text
|
||||
- 17 help categories with 65 strings total
|
||||
- All help functions manually maintained
|
||||
- No separation of data from presentation
|
||||
|
||||
## Decision
|
||||
|
||||
Implement **Data-Driven Help with Mozilla Fluent Integration**:
|
||||
|
||||
1. Extract help content to Fluent files (`.ftl` format)
|
||||
2. Support multilingual help (English base, Spanish translations)
|
||||
3. Implement runtime language resolution via `LANG` environment variable
|
||||
4. Reduce help_system.nu to wrapper functions only
|
||||
|
||||
### Architecture
|
||||
|
||||
```
|
||||
Help Content (Fluent Files)
|
||||
├─ en-US/help.ftl (65 strings - English base)
|
||||
└─ es-ES/help.ftl (65 strings - Spanish translations)
|
||||
|
||||
Language Detection & Loading
|
||||
├─ Check LANG environment variable
|
||||
├─ Load appropriate Fluent file
|
||||
└─ Implement fallback chain (es-ES → en-US)
|
||||
|
||||
Help System Wrapper
|
||||
├─ help-main [] - Display main menu
|
||||
├─ help-infrastructure [] - Infrastructure category
|
||||
├─ help-orchestration [] - Orchestration category
|
||||
└─ help-setup [] - Setup category
|
||||
|
||||
User Interface
|
||||
├─ LANG=en_US provisioning help infrastructure
|
||||
└─ LANG=es_ES provisioning help infrastructure
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
### 1. Fluent File Structure
|
||||
|
||||
**en-US/help.ftl**:
|
||||
|
||||
```
|
||||
help-main-title = PROVISIONING SYSTEM
|
||||
help-main-subtitle = Layered Infrastructure Automation
|
||||
help-main-categories = COMMAND CATEGORIES
|
||||
help-main-categories-hint = Use 'provisioning help <category>' for details
|
||||
help-main-infrastructure-name = infrastructure
|
||||
help-main-infrastructure-desc = Server, taskserv, cluster, VM, and infra management
|
||||
help-main-orchestration-name = orchestration
|
||||
help-main-orchestration-desc = Workflow, batch operations, and orchestrator control
|
||||
help-infrastructure-title = SERVER & INFRASTRUCTURE
|
||||
help-infra-server = Server Operations
|
||||
help-infra-server-create = Create a new server
|
||||
help-infra-server-list = List all servers
|
||||
help-infra-server-status = Show server status
|
||||
help-infra-taskserv = TaskServ Management
|
||||
help-infra-taskserv-create = Deploy taskserv to server
|
||||
help-infra-cluster = Cluster Management
|
||||
help-infra-vm = Virtual Machine Operations
|
||||
help-orchestration-title = ORCHESTRATION & WORKFLOWS
|
||||
help-orch-control = Orchestrator Management
|
||||
help-orch-start = Start orchestrator [--background]
|
||||
help-orch-workflows = Single Task Workflows
|
||||
help-orch-batch = Multi-Provider Batch Operations
|
||||
```
|
||||
|
||||
**es-ES/help.ftl** (Spanish translations):
|
||||
|
||||
```
|
||||
help-main-title = SISTEMA DE PROVISIÓN
|
||||
help-main-subtitle = Automatización de Infraestructura por Capas
|
||||
help-main-categories = CATEGORÍAS DE COMANDOS
|
||||
help-main-categories-hint = Use 'provisioning help <categoría>' para más detalles
|
||||
help-main-infrastructure-name = infraestructura
|
||||
help-main-infrastructure-desc = Gestión de servidores, taskserv, clusters, VM e infraestructura
|
||||
help-main-orchestration-name = orquestación
|
||||
help-main-orchestration-desc = Flujos de trabajo, operaciones por lotes y control del orquestador
|
||||
help-infrastructure-title = SERVIDOR E INFRAESTRUCTURA
|
||||
help-infra-server = Operaciones de Servidor
|
||||
help-infra-server-create = Crear un nuevo servidor
|
||||
help-infra-server-list = Listar todos los servidores
|
||||
help-infra-server-status = Mostrar estado del servidor
|
||||
help-infra-taskserv = Gestión de TaskServ
|
||||
help-infra-taskserv-create = Desplegar taskserv en servidor
|
||||
help-infra-cluster = Gestión de Clusters
|
||||
help-infra-vm = Operaciones de Máquinas Virtuales
|
||||
help-orchestration-title = ORQUESTACIÓN Y FLUJOS DE TRABAJO
|
||||
help-orch-control = Gestión del Orquestador
|
||||
help-orch-start = Iniciar orquestador [--background]
|
||||
help-orch-workflows = Flujos de Trabajo de Tarea Única
|
||||
help-orch-batch = Operaciones por Lotes Multi-Proveedor
|
||||
```
|
||||
|
||||
### 2. Fluent Loading in Nushell
|
||||
|
||||
```
|
||||
def load-fluent-file [category: string] {
    # Normalize LANG, e.g. "es_ES.UTF-8" -> "es-ES"
    let lang = ($env.LANG? | default "en_US" | split row "." | first | str replace "_" "-")
    let fluent_path = $"provisioning/locales/($lang)/help.ftl"

    # Parse the Fluent file and extract the strings for this category
    # Fall back to en-US when the locale file does not exist
}
|
||||
```
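Fluent syntax is richer than key/value pairs, but the 65 help strings above are all simple messages, so a first pass can parse just that subset. A hedged sketch; attributes, selectors, and multiline values are deliberately ignored:

```nushell
# Parse simple `key = value` Fluent messages into a record
def parse-fluent [path: string] {
    open --raw $path
    | lines
    | each {|line| $line | str trim }
    | where {|line| $line != "" and (not ($line | str starts-with "#")) and ($line | str contains "=") }
    | each {|line|
        {
            key: ($line | split row "=" | first | str trim)
            value: ($line | split row "=" | skip 1 | str join "=" | str trim)
        }
    }
    | reduce --fold {} {|it, acc| $acc | merge { ($it.key): $it.value } }
}

# Usage: (parse-fluent "provisioning/locales/en-US/help.ftl") | get help-main-title
```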
|
||||
|
||||
### 3. Help System Wrapper
|
||||
|
||||
```
|
||||
export def help-infrastructure [] {
|
||||
let strings = (load-fluent-file "infrastructure")
|
||||
|
||||
# Apply formatting and render
|
||||
print $"╔════════════════════════════════════════════════════╗"
|
||||
print $"║ ($strings.title | str upcase) ║"
|
||||
print $"╚════════════════════════════════════════════════════╝"
|
||||
}
|
||||
```
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- **Internationalization Ready**: Easy to add new languages (Portuguese, French, Japanese)
|
||||
- **Data/Presentation Separation**: Content in Fluent, formatting in Nushell
|
||||
- **Maintainability**: Edit Fluent files, not Nushell code
|
||||
- **Hot-Reload Support**: Can update help text without recompilation
|
||||
- **Testing**: Help content testable independently from rendering
|
||||
- **Code Reduction**: 1303 lines → ~50 lines (wrapper) + ~700 lines (Fluent data)
|
||||
|
||||
### Negative
|
||||
|
||||
- **Tool Complexity**: Need Fluent parser and loader
|
||||
- **Fallback Chain Management**: Must handle missing translations gracefully
|
||||
- **Performance**: File I/O for loading translations (mitigated by caching)
|
||||
|
||||
## Integration Strategy
|
||||
|
||||
### Phase 1: Infrastructure & Extraction
|
||||
|
||||
- ✅ Create `provisioning/locales/` directory structure
|
||||
- ✅ Create `i18n-config.toml` with locale configuration
|
||||
- ✅ Extract strings to `en-US/help.ftl` (65 strings)
|
||||
- ✅ Create Spanish translations `es-ES/help.ftl`
|
||||
|
||||
### Phase 2: Integration (This Task)
|
||||
|
||||
- [ ] Modify `help_system.nu` to load from Fluent
|
||||
- [ ] Implement language detection (`$env.LANG`)
|
||||
- [ ] Implement fallback chain logic
|
||||
- [ ] Test with `LANG=en_US` and `LANG=es_ES`
|
||||
|
||||
### Phase 3: Validation & Documentation
|
||||
|
||||
- [ ] Comprehensive integration tests
|
||||
- [ ] Performance benchmarks
|
||||
- [ ] Documentation for adding new languages
|
||||
- [ ] Examples in provisioning/docs/
|
||||
|
||||
## Language Resolution Flow
|
||||
|
||||
```
|
||||
1. Check LANG environment variable
|
||||
LANG=es_ES.UTF-8 → extract "es_ES" or "es-ES"
|
||||
|
||||
2. Check if locale file exists
|
||||
provisioning/locales/es-ES/help.ftl exists? → YES
|
||||
|
||||
3. Load locale file
|
||||
Parse and extract help strings
|
||||
|
||||
4. On missing key:
|
||||
Check fallback chain in i18n-config.toml
|
||||
es-ES → en-US
|
||||
|
||||
5. Render with formatting
|
||||
Apply ANSI codes, boxes, alignment
|
||||
```
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
|
||||
```
|
||||
# Test language detection
|
||||
LANG=en_US provisioning help infrastructure
|
||||
# Expected: English output
|
||||
|
||||
LANG=es_ES provisioning help infrastructure
|
||||
# Expected: Spanish output
|
||||
|
||||
LANG=fr_FR provisioning help infrastructure
|
||||
# Expected: Fallback to English (fr-FR not available)
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
provisioning/
|
||||
├── locales/
|
||||
│ ├── i18n-config.toml # Locale metadata & fallback chains
|
||||
│ ├── en-US/
|
||||
│ │ └── help.ftl # 65 English help strings
|
||||
│ └── es-ES/
|
||||
│ └── help.ftl # 65 Spanish help strings
|
||||
└── core/nulib/main_provisioning/
|
||||
└── help_system.nu # ~50 lines (wrapper only)
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
**i18n-config.toml** defines:
|
||||
|
||||
```
|
||||
[locales]
|
||||
default = "en-US"
|
||||
fallback = "en-US"
|
||||
|
||||
[locales.en-US]
|
||||
name = "English (United States)"
|
||||
|
||||
[locales.es-ES]
|
||||
name = "Spanish (Spain)"
|
||||
|
||||
[fallback_chains]
|
||||
es-ES = ["en-US"]
|
||||
```
|
||||
|
||||
## Related ADRs
|
||||
|
||||
- **ADR-010**: Configuration Format Strategy
|
||||
- **ADR-011**: Nickel Migration
|
||||
- **ADR-013**: TypeDialog Integration (forms also use Fluent)
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. Should help strings support Fluent attributes for metadata?
|
||||
2. Should we implement Fluent caching for performance?
|
||||
3. How do we handle dynamic help (commands not in Fluent)?
|
||||
4. Should help system auto-update when Fluent files change?
|
||||
|
||||
## References
|
||||
|
||||
- Mozilla Fluent: [https://projectfluent.org/](https://projectfluent.org/)
|
||||
- Fluent Syntax: [https://projectfluent.org/fluent/guide/](https://projectfluent.org/fluent/guide/)
|
||||
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
|
||||
- Current Help Implementation: `provisioning/core/nulib/main_provisioning/help_system.nu`
|
||||
- Fluent Files: `provisioning/locales/{en-US,es-ES}/help.ftl`
|
||||
|
||||
@ -0,0 +1,263 @@
|
||||
# ADR-019: Configuration Loader Modularization
|
||||
|
||||
**Status**: Proposed
|
||||
**Date**: 2026-01-13
|
||||
**Author**: Architecture Team
|
||||
**Supersedes**: Monolithic loader in `lib_provisioning/config/loader.nu`
|
||||
|
||||
## Context
|
||||
|
||||
The `lib_provisioning/config/loader.nu` file (2199 lines) is a monolithic implementation mixing multiple unrelated concerns:
|
||||
|
||||
```
|
||||
Current Structure (2199 lines):
|
||||
├─ Cache lookup/storage (300 lines)
|
||||
├─ Nickel evaluation (400 lines)
|
||||
├─ TOML/YAML parsing (250 lines)
|
||||
├─ Environment variable loading (200 lines)
|
||||
├─ Configuration hierarchy merging (400 lines)
|
||||
├─ Validation logic (250 lines)
|
||||
├─ Error handling (200 lines)
|
||||
└─ Helper utilities (150 lines)
|
||||
```
|
||||
|
||||
**Problems**:
|
||||
|
||||
1. **Single Responsibility Violation**: One file handling 7 different concerns
|
||||
2. **Testing Difficulty**: Can't test TOML parsing without cache setup
|
||||
3. **Change Amplification**: Modifying one component affects entire file
|
||||
4. **Code Reuse**: Hard to reuse individual loaders in other contexts
|
||||
5. **Maintenance Burden**: 2199 lines of tightly coupled code
|
||||
|
||||
## Problem Statement
|
||||
|
||||
**Metrics**:
|
||||
- 2199 lines in single file
|
||||
- 7 distinct responsibilities mixed together
|
||||
- Hard to test individual components
|
||||
- Changes in one area risk breaking others
|
||||
|
||||
## Decision
|
||||
|
||||
Implement **Layered Loader Architecture**: decompose monolithic loader into specialized, testable modules with a thin orchestrator.
|
||||
|
||||
### Target Architecture
|
||||
|
||||
```
|
||||
lib_provisioning/config/
|
||||
├── loader.nu # ORCHESTRATOR (< 300 lines)
|
||||
│ └─ Coordinates loading pipeline
|
||||
├── loaders/ # SPECIALIZED LOADERS
|
||||
│ ├── nickel_loader.nu # Nickel evaluation + cache (150 lines)
|
||||
│ ├── toml_loader.nu # TOML parsing (80 lines)
|
||||
│ ├── yaml_loader.nu # YAML parsing (80 lines)
|
||||
│ ├── env_loader.nu # Environment variables (100 lines)
|
||||
│ └── hierarchy.nu # Configuration merging (200 lines)
|
||||
├── cache/ # EXISTING - already modular
|
||||
│ ├── core.nu # Cache core
|
||||
│ ├── nickel.nu # Nickel-specific caching
|
||||
│ └── final.nu # Final config caching
|
||||
└── validation/ # EXTRACTED
|
||||
└── config_validator.nu # Validation rules (100 lines)
|
||||
```
|
||||
|
||||
### Module Responsibilities
|
||||
|
||||
**loader.nu (Orchestrator)**:
|
||||
- Define loading pipeline
|
||||
- Coordinate loaders
|
||||
- Handle high-level errors
|
||||
- Return final config
|
||||
|
||||
**nickel_loader.nu**:
|
||||
- Evaluate Nickel files
|
||||
- Apply Nickel type contracts
|
||||
- Cache Nickel evaluation results
|
||||
- Handle schema validation
|
||||
|
||||
**toml_loader.nu**:
|
||||
- Parse TOML configuration files
|
||||
- Extract key-value pairs
|
||||
- Validate TOML structure
|
||||
- Return parsed records
|
||||
|
||||
**yaml_loader.nu**:
|
||||
- Parse YAML configuration files
|
||||
- Convert to Nushell records
|
||||
- Handle YAML nesting
|
||||
- Return normalized records
|
||||
|
||||
**env_loader.nu**:
|
||||
- Load environment variables
|
||||
- Filter by prefix (PROVISIONING_*)
|
||||
- Override existing values
|
||||
- Return environment records
|
||||
|
||||
**hierarchy.nu**:
|
||||
- Merge multiple config sources
|
||||
- Apply precedence rules
|
||||
- Handle nested merging
|
||||
- Return unified config
|
||||
|
||||
**config_validator.nu**:
|
||||
- Validate against schema
|
||||
- Check required fields
|
||||
- Enforce type constraints
|
||||
- Return validation results
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- **Separation of Concerns**: Each module has single responsibility
|
||||
- **Testability**: Can unit test each loader independently
|
||||
- **Reusability**: Loaders can be used in other contexts
|
||||
- **Maintainability**: Changes isolated to specific module
|
||||
- **Debugging**: Easier to isolate issues
|
||||
- **Performance**: Can optimize individual loaders
|
||||
|
||||
### Negative
|
||||
|
||||
- **Increased Complexity**: More files to maintain
|
||||
- **Integration Overhead**: Must coordinate between modules
|
||||
- **Migration Effort**: Refactoring existing monolithic code
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### Phase 1: Extract Specialized Loaders
|
||||
|
||||
Create each loader as independent module:
|
||||
|
||||
1. **toml_loader.nu**
|
||||
```nushell
|
||||
export def load-toml [path: string] {
|
||||
let content = (open $path)
|
||||
$content
|
||||
}
|
||||
```
|
||||
|
||||
2. **yaml_loader.nu**
|
||||
```nushell
|
||||
export def load-yaml [path: string] {
|
||||
let content = (open --raw $path | from yaml)
|
||||
$content
|
||||
}
|
||||
```
|
||||
|
||||
3. **env_loader.nu**
|
||||
```nushell
|
||||
export def load-environment [] {
    # Keep only PROVISIONING_* variables and return them as a record
    $env
    | transpose key value
    | where key starts-with "PROVISIONING_"
    | reduce --fold {} {|it, acc| $acc | merge { ($it.key): $it.value } }
}
|
||||
```
|
||||
|
||||
4. **hierarchy.nu**
|
||||
```nushell
|
||||
export def merge-configs [base override] {
|
||||
$base | merge $override
|
||||
}
|
||||
```
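`merge` alone is shallow; the nested merging that `hierarchy.nu` is responsible for needs a small recursive step. A hedged sketch (lists and nulls are simply replaced, which is usually what a config hierarchy wants):

```nushell
# Recurse when both sides hold records; otherwise the override value wins
export def merge-configs-deep [base: record, override: record] {
    $override
    | columns
    | reduce --fold $base {|key, acc|
        let new_val = ($override | get $key)
        let nested = (
            ($key in ($acc | columns))
            and (($acc | get $key | describe) | str starts-with "record")
            and (($new_val | describe) | str starts-with "record")
        )
        if $nested {
            $acc | merge { ($key): (merge-configs-deep ($acc | get $key) $new_val) }
        } else {
            $acc | merge { ($key): $new_val }
        }
    }
}
```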
|
||||
|
||||
### Phase 2: Refactor Nickel Loader
|
||||
|
||||
Extract Nickel evaluation logic:
|
||||
|
||||
```
|
||||
export def evaluate-nickel [file: string] {
|
||||
let result = (
|
||||
do {
|
||||
^nickel export $file
|
||||
} | complete
|
||||
)
|
||||
|
||||
if $result.exit_code != 0 {
|
||||
error make { msg: $result.stderr }
|
||||
} else {
|
||||
$result.stdout | from json
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 3: Create Orchestrator
|
||||
|
||||
Implement thin loader.nu:
|
||||
|
||||
```
|
||||
export def load-provisioning-config [] {
    use ./loaders/env_loader.nu
    use ./loaders/toml_loader.nu
    use ./loaders/nickel_loader.nu
    use ./loaders/hierarchy.nu
    use ./validation/config_validator.nu

    let env_config = (env_loader load-environment)
    let toml_config = (toml_loader load-toml "config.toml")
    let nickel_config = (nickel_loader evaluate-nickel "main.ncl")

    # Precedence: TOML defaults < Nickel schema values < environment overrides
    let merged = (
        hierarchy merge-configs (hierarchy merge-configs $toml_config $nickel_config) $env_config
    )

    config_validator validate $merged
}
|
||||
```
|
||||
|
||||
### Phase 4: Testing
|
||||
|
||||
Create test for each module:
|
||||
|
||||
```
|
||||
tests/config/
|
||||
├── loaders/
|
||||
│ ├── test_nickel_loader.nu
|
||||
│ ├── test_toml_loader.nu
|
||||
│ ├── test_yaml_loader.nu
|
||||
│ ├── test_env_loader.nu
|
||||
│ └── test_hierarchy.nu
|
||||
└── test_orchestrator.nu
|
||||
```
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
**Baseline**: Current monolithic loader ~500ms
|
||||
|
||||
**Layered Architecture**:
|
||||
- Individual loaders: ~50-100ms each
|
||||
- Orchestration: ~50ms
|
||||
- Total expected: ~400-500ms (within 5% tolerance)
|
||||
|
||||
**Optimization**:
|
||||
- Cache Nickel evaluation (largest cost; see the sketch after this list)
|
||||
- Lazy load YAML (if rarely used)
|
||||
- Environment variable filtering
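The Nickel cache can key the exported JSON on the schema file's hash. An illustrative sketch only; the cache directory is an assumption, and `evaluate-nickel` is the loader sketched in Phase 2:

```nushell
# Reuse the exported JSON when the schema file has not changed
def evaluate-nickel-cached [file: string] {
    let hash = (open --raw $file | hash sha256)
    let cache_file = $"($env.HOME)/.provisioning/cache/nickel/($hash).json"

    if ($cache_file | path exists) {
        open $cache_file
    } else {
        let result = (evaluate-nickel $file)
        mkdir ($cache_file | path dirname)
        $result | to json | save --force $cache_file
        $result
    }
}
```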
|
||||
|
||||
## Backward Compatibility
|
||||
|
||||
**Public API Unchanged**:
|
||||
```
|
||||
# Current usage (unchanged)
|
||||
let config = (load-provisioning-config)
|
||||
```
|
||||
|
||||
**Internal Only**: Refactoring is internal to loader module, no breaking changes to consumers.
|
||||
|
||||
## Related ADRs
|
||||
|
||||
- **ADR-010**: Configuration Format Strategy
|
||||
- **ADR-011**: Nickel Migration
|
||||
- **ADR-016**: Schema-Driven Accessor Generation
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. Should each loader have its own cache layer?
|
||||
2. How do we handle circular dependencies between loaders?
|
||||
3. Should validation run after each loader or only at end?
|
||||
4. What's the rollback strategy if orchestration fails?
|
||||
|
||||
## References
|
||||
|
||||
- Current Implementation: `provisioning/core/nulib/lib_provisioning/config/loader.nu`
|
||||
- Cache System: `provisioning/core/nulib/lib_provisioning/config/cache/`
|
||||
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
|
||||
|
||||
@ -0,0 +1,313 @@
|
||||
# ADR-020: Command Handler Domain Splitting
|
||||
|
||||
**Status**: Proposed
|
||||
**Date**: 2026-01-13
|
||||
**Author**: Architecture Team
|
||||
**Supersedes**: Monolithic command handlers in `main_provisioning/commands/`
|
||||
|
||||
## Context
|
||||
|
||||
Two large monolithic command handler files mix disparate domains:
|
||||
|
||||
**commands/utilities.nu** (1112 lines):
|
||||
- SSH operations (150 lines)
|
||||
- SOPS secret editing (200 lines)
|
||||
- Cache management (180 lines)
|
||||
- Provider listing (100 lines)
|
||||
- Plugin operations (150 lines)
|
||||
- Shell information (80 lines)
|
||||
- Guide system (120 lines)
|
||||
- QR code generation (50 lines)
|
||||
|
||||
**commands/integrations.nu** (1184 lines):
|
||||
- prov-ecosystem bridge (400 lines)
|
||||
- provctl integration (350 lines)
|
||||
- External API calls (434 lines)
|
||||
|
||||
**Problem Statement**:
|
||||
|
||||
1. **Mixed Concerns**: Each file handles 7-10 unrelated domains
|
||||
2. **Navigation Difficulty**: Hard to find specific functionality
|
||||
3. **Testing Complexity**: Can't test SSH without SOPS setup
|
||||
4. **Reusability**: Command logic locked in monolithic files
|
||||
5. **Maintenance Burden**: Changes in one domain affect entire file
|
||||
|
||||
## Decision
|
||||
|
||||
Implement **Domain-Based Command Modules**: split monolithic handlers into focused domain modules organized by responsibility.
|
||||
|
||||
### Target Architecture
|
||||
|
||||
```
|
||||
main_provisioning/commands/
|
||||
├── dispatcher.nu # Routes commands to domain handlers
|
||||
├── utilities/ # Split by domain
|
||||
│ ├── ssh.nu # SSH operations (150 lines)
|
||||
│ ├── sops.nu # SOPS editing (200 lines)
|
||||
│ ├── cache.nu # Cache management (180 lines)
|
||||
│ ├── providers.nu # Provider listing (100 lines)
|
||||
│ ├── plugins.nu # Plugin operations (150 lines)
|
||||
│ ├── shell.nu # Shell information (80 lines)
|
||||
│ ├── guides.nu # Guide system (120 lines)
|
||||
│ └── qr.nu # QR code generation (50 lines)
|
||||
└── integrations/ # Split by integration
|
||||
├── prov_ecosystem.nu # Prov-ecosystem bridge (400 lines)
|
||||
├── provctl.nu # Provctl integration (350 lines)
|
||||
└── external_apis.nu # External API calls (434 lines)
|
||||
```
|
||||
|
||||
### Module Organization
|
||||
|
||||
**utilities/ssh.nu**:
|
||||
- SSH connection management
|
||||
- Key management
|
||||
- Remote command execution
|
||||
- Connection pooling
|
||||
|
||||
**utilities/sops.nu**:
|
||||
- SOPS secret file editing
|
||||
- Encryption/decryption
|
||||
- Key rotation
|
||||
- Secret validation
|
||||
|
||||
**utilities/cache.nu**:
|
||||
- Cache lookup
|
||||
- Cache invalidation
|
||||
- Cache statistics
|
||||
- Cleanup operations
|
||||
|
||||
**utilities/providers.nu**:
|
||||
- List available providers
|
||||
- Provider capabilities
|
||||
- Provider health check
|
||||
- Provider registration
|
||||
|
||||
**utilities/plugins.nu**:
|
||||
- Plugin discovery
|
||||
- Plugin loading
|
||||
- Plugin execution
|
||||
- Plugin management
|
||||
|
||||
**utilities/shell.nu**:
|
||||
- Nushell info
|
||||
- Shell configuration
|
||||
- Environment variables
|
||||
- Shell capabilities
|
||||
|
||||
**utilities/guides.nu**:
|
||||
- Guide listing
|
||||
- Guide rendering
|
||||
- Guide search
|
||||
- Interactive guides
|
||||
|
||||
**utilities/qr.nu**:
|
||||
- QR code generation
|
||||
- QR code display
|
||||
- Code formatting
|
||||
- Error handling
|
||||
|
||||
**integrations/prov_ecosystem.nu**:
|
||||
- Prov-ecosystem API calls
|
||||
- Data synchronization
|
||||
- Registry integration
|
||||
- Extension discovery
|
||||
|
||||
**integrations/provctl.nu**:
|
||||
- Provctl command bridge
|
||||
- Orchestrator integration
|
||||
- Workflow execution
|
||||
- Status monitoring
|
||||
|
||||
**integrations/external_apis.nu**:
|
||||
- Third-party API integration
|
||||
- HTTP calls
|
||||
- Data transformation
|
||||
- Error handling
|
||||
|
||||
## Consequences
|
||||
|
||||
### Positive
|
||||
|
||||
- **Single Responsibility**: Each module handles one domain
|
||||
- **Easier Navigation**: Find functionality by domain name
|
||||
- **Testable**: Can test SSH independently from SOPS
|
||||
- **Maintainable**: Changes isolated to domain module
|
||||
- **Reusable**: Modules can be imported by other components
|
||||
- **Scalable**: Easy to add new domains
|
||||
|
||||
### Negative
|
||||
|
||||
- **More Files**: 11 modules vs 2 monolithic files
|
||||
- **Import Overhead**: More module imports needed
|
||||
- **Coordination Complexity**: Dispatcher must route correctly
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### Phase 1: Extract Utilities Domain
|
||||
|
||||
Create `utilities/` directory with 8 modules:
|
||||
|
||||
1. **utilities/ssh.nu** - Extract SSH operations
|
||||
2. **utilities/sops.nu** - Extract SOPS operations
|
||||
3. **utilities/cache.nu** - Extract cache operations
|
||||
4. **utilities/providers.nu** - Extract provider operations
|
||||
5. **utilities/plugins.nu** - Extract plugin operations
|
||||
6. **utilities/shell.nu** - Extract shell operations
|
||||
7. **utilities/guides.nu** - Extract guide operations
|
||||
8. **utilities/qr.nu** - Extract QR operations
|
||||
|
||||
### Phase 2: Extract Integrations Domain
|
||||
|
||||
Create `integrations/` directory with 3 modules:
|
||||
|
||||
1. **integrations/prov_ecosystem.nu** - Extract prov-ecosystem
|
||||
2. **integrations/provctl.nu** - Extract provctl
|
||||
3. **integrations/external_apis.nu** - Extract external APIs
|
||||
|
||||
### Phase 3: Create Dispatcher
|
||||
|
||||
Implement `dispatcher.nu`:
|
||||
|
||||
```
|
||||
export def provision-ssh [args] {
|
||||
use ./utilities/ssh.nu *
|
||||
handle-ssh-command $args
|
||||
}
|
||||
|
||||
export def provision-sops [args] {
|
||||
use ./utilities/sops.nu *
|
||||
handle-sops-command $args
|
||||
}
|
||||
|
||||
export def provision-cache [args] {
|
||||
use ./utilities/cache.nu *
|
||||
handle-cache-command $args
|
||||
}
|
||||
```
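An alternative to one wrapper per command is a single router keyed on the domain name. A hedged sketch; the `handle-*-command` functions are assumed to be exported by the domain modules:

```nushell
# Route on the domain keyword instead of defining one wrapper per command
export def dispatch-command [domain: string, ...args: string] {
    match $domain {
        "ssh" => { handle-ssh-command $args }
        "sops" => { handle-sops-command $args }
        "cache" => { handle-cache-command $args }
        _ => { error make { msg: $"unknown command domain: ($domain)" } }
    }
}
```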
|
||||
|
||||
### Phase 4: Maintain Backward Compatibility
|
||||
|
||||
Keep public exports in original files for compatibility:
|
||||
|
||||
```
|
||||
# commands/utilities.nu (compatibility layer)
# Re-export every domain function flat, so the public API is unchanged
export use ./utilities/ssh.nu *
export use ./utilities/sops.nu *
export use ./utilities/cache.nu *
|
||||
```
|
||||
|
||||
### Phase 5: Testing
|
||||
|
||||
Create test structure:
|
||||
|
||||
```
|
||||
tests/commands/
|
||||
├── utilities/
|
||||
│ ├── test_ssh.nu
|
||||
│ ├── test_sops.nu
|
||||
│ ├── test_cache.nu
|
||||
│ ├── test_providers.nu
|
||||
│ ├── test_plugins.nu
|
||||
│ ├── test_shell.nu
|
||||
│ ├── test_guides.nu
|
||||
│ └── test_qr.nu
|
||||
└── integrations/
|
||||
├── test_prov_ecosystem.nu
|
||||
├── test_provctl.nu
|
||||
└── test_external_apis.nu
|
||||
```
|
||||
|
||||
## Module Interface Example
|
||||
|
||||
**utilities/ssh.nu**:
|
||||
|
||||
```
|
||||
# Check connectivity to a remote host (non-interactive probe)
export def ssh-connect [host: string --port: int = 22] {
    (do { ^ssh -p $port -o BatchMode=yes $host true } | complete | get exit_code) == 0
}

# Execute a remote command and return its output
export def ssh-exec [host: string command: string] {
    ^ssh $host $command
}

# Close a multiplexed SSH connection (assumes ControlMaster is in use)
export def ssh-close [host: string] {
    ^ssh -O exit $host
}
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
main_provisioning/commands/
|
||||
├── dispatcher.nu # Route to domain handlers
|
||||
├── utilities/
|
||||
│ ├── mod.nu # Utilities module index
|
||||
│ ├── ssh.nu # 150 lines
|
||||
│ ├── sops.nu # 200 lines
|
||||
│ ├── cache.nu # 180 lines
|
||||
│ ├── providers.nu # 100 lines
|
||||
│ ├── plugins.nu # 150 lines
|
||||
│ ├── shell.nu # 80 lines
|
||||
│ ├── guides.nu # 120 lines
|
||||
│ └── qr.nu # 50 lines
|
||||
├── integrations/
|
||||
│ ├── mod.nu # Integrations module index
|
||||
│ ├── prov_ecosystem.nu # 400 lines
|
||||
│ ├── provctl.nu # 350 lines
|
||||
│ └── external_apis.nu # 434 lines
|
||||
└── README.md # Command routing guide
|
||||
```
|
||||
|
||||
## CLI Interface (Unchanged)
|
||||
|
||||
Users see no change in CLI:
|
||||
|
||||
```
|
||||
provisioning ssh host.example.com
|
||||
provisioning sops edit config.yaml
|
||||
provisioning cache clear
|
||||
provisioning list providers
|
||||
provisioning guide from-scratch
|
||||
```
|
||||
|
||||
## Backward Compatibility Strategy
|
||||
|
||||
**Import Path Options**:
|
||||
|
||||
```
|
||||
# Option 1: Import from domain module (new way)
|
||||
use ./utilities/ssh.nu *
|
||||
connect $host
|
||||
|
||||
# Option 2: Import from compatibility layer (old way)
|
||||
use ./utilities.nu *
|
||||
connect $host
|
||||
```
|
||||
|
||||
Both paths work without breaking existing code.
|
||||
|
||||
## Related ADRs
|
||||
|
||||
- **ADR-006**: Provisioning CLI Refactoring
|
||||
- **ADR-012**: Nushell/Nickel Plugin CLI Wrapper
|
||||
|
||||
## Open Questions
|
||||
|
||||
1. Should we create a module registry for discoverability?
|
||||
2. Should domain modules be loadable as plugins?
|
||||
3. How do we handle shared utilities between domains?
|
||||
4. Should we implement hot-reloading for domain modules?
|
||||
|
||||
## References
|
||||
|
||||
- Current Implementation: `provisioning/core/nulib/main_provisioning/commands/`
|
||||
- Nushell 0.109 Guidelines: `.claude/guidelines/nushell.md`
|
||||
- Module System: Nushell module documentation
|
||||
|
||||
@ -43,7 +43,7 @@ The Provisioning Platform is a modern, cloud-native infrastructure automation sy
|
||||
|
||||
### Architecture at a Glance
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────┐
|
||||
│ Provisioning Platform │
|
||||
├─────────────────────────────────────────────────────────────────────┤
|
||||
@ -93,7 +93,7 @@ The Provisioning Platform is a modern, cloud-native infrastructure automation sy
|
||||
|
||||
### High-Level Architecture
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────────────────────┐
|
||||
│ PRESENTATION LAYER │
|
||||
├────────────────────────────────────────────────────────────────────────────┤
|
||||
@ -191,7 +191,7 @@ The system is organized into three separate repositories:
|
||||
|
||||
#### **provisioning-core**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Core system functionality
|
||||
├── CLI interface (Nushell entry point)
|
||||
├── Core libraries (lib_provisioning)
|
||||
@ -205,7 +205,7 @@ Core system functionality
|
||||
|
||||
#### **provisioning-extensions**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
All provider, taskserv, cluster extensions
|
||||
├── providers/
|
||||
│ ├── aws/
|
||||
@ -229,7 +229,7 @@ All provider, taskserv, cluster extensions
|
||||
|
||||
#### **provisioning-platform**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Platform services
|
||||
├── orchestrator/ (Rust)
|
||||
├── control-center/ (Rust/Yew)
|
||||
@ -255,7 +255,7 @@ Platform services
|
||||
|
||||
**Architecture**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Main CLI (211 lines)
|
||||
↓
|
||||
Command Dispatcher (264 lines)
|
||||
@ -281,7 +281,7 @@ Domain Handlers (7 modules)
|
||||
|
||||
**Hierarchical Loading**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. System defaults (config.defaults.toml)
|
||||
2. User config (~/.provisioning/config.user.toml)
|
||||
3. Workspace config (workspace/config/provisioning.yaml)
|
||||
@ -303,7 +303,7 @@ Domain Handlers (7 modules)
|
||||
|
||||
**Architecture**:
|
||||
|
||||
```rust
|
||||
```
|
||||
src/
|
||||
├── main.rs // Entry point
|
||||
├── api/
|
||||
@ -342,7 +342,7 @@ src/
|
||||
|
||||
**Workflow Types**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
workflows/
|
||||
├── server_create.nu // Server provisioning
|
||||
├── taskserv.nu // Task service management
|
||||
@ -371,7 +371,7 @@ workflows/
|
||||
|
||||
**Extension Structure**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
extension-name/
|
||||
├── schemas/
|
||||
│ ├── main.ncl // Main schema
|
||||
@ -401,7 +401,7 @@ Each extension packaged as OCI artifact:
|
||||
|
||||
**Module System**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Discover available extensions
|
||||
provisioning module discover taskservs
|
||||
|
||||
@ -414,7 +414,7 @@ provisioning module list taskserv my-workspace
|
||||
|
||||
**Layer System** (Configuration Inheritance):
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Layer 1: Core (provisioning/extensions/{type}/{name})
|
||||
↓
|
||||
Layer 2: Workspace (workspace/extensions/{type}/{name})
|
||||
@ -438,7 +438,7 @@ Layer 3: Infrastructure (workspace/infra/{infra}/extensions/{type}/{name})
|
||||
|
||||
**Example**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let { TaskservDependencies } = import "provisioning/dependencies.ncl" in
|
||||
{
|
||||
kubernetes = TaskservDependencies {
|
||||
@ -467,7 +467,7 @@ let { TaskservDependencies } = import "provisioning/dependencies.ncl" in
|
||||
|
||||
**Lifecycle Management**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Start all auto-start services
|
||||
provisioning platform start
|
||||
|
||||
@ -485,7 +485,7 @@ provisioning platform logs orchestrator --follow
|
||||
|
||||
**Architecture**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
User Command (CLI)
|
||||
↓
|
||||
Test Orchestrator (Rust)
|
||||
@ -520,7 +520,7 @@ The platform supports four operational modes that adapt the system from individu
|
||||
|
||||
### Mode Comparison
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌───────────────────────────────────────────────────────────────────────┐
|
||||
│ MODE ARCHITECTURE │
|
||||
├───────────────┬───────────────┬───────────────┬───────────────────────┤
|
||||
@ -562,7 +562,7 @@ The platform supports four operational modes that adapt the system from individu
|
||||
|
||||
**Switching Modes**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Check current mode
|
||||
provisioning mode current
|
||||
|
||||
@ -577,7 +577,7 @@ provisioning mode validate enterprise
|
||||
|
||||
#### Solo Mode
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Default mode, no setup needed
|
||||
provisioning workspace init
|
||||
|
||||
@ -590,7 +590,7 @@ provisioning server create
|
||||
|
||||
#### Multi-User Mode
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Switch mode and authenticate
|
||||
provisioning mode switch multi-user
|
||||
provisioning auth login
|
||||
@ -609,7 +609,7 @@ provisioning workspace unlock my-infra
|
||||
|
||||
#### CI/CD Mode
|
||||
|
||||
```yaml
|
||||
```
|
||||
# GitLab CI
|
||||
deploy:
|
||||
stage: deploy
|
||||
@ -626,7 +626,7 @@ deploy:
|
||||
|
||||
#### Enterprise Mode
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Switch to enterprise, verify K8s
|
||||
provisioning mode switch enterprise
|
||||
kubectl get pods -n provisioning-system
|
||||
@ -654,7 +654,7 @@ provisioning workspace unlock prod-deployment
|
||||
|
||||
### Service Communication
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌──────────────────────────────────────────────────────────────────────┐
|
||||
│ NETWORK LAYER │
|
||||
├──────────────────────────────────────────────────────────────────────┤
|
||||
@ -732,7 +732,7 @@ provisioning workspace unlock prod-deployment
|
||||
|
||||
### Data Storage
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌────────────────────────────────────────────────────────────────┐
|
||||
│ DATA LAYER │
|
||||
├────────────────────────────────────────────────────────────────┤
|
||||
@ -813,7 +813,7 @@ provisioning workspace unlock prod-deployment
|
||||
|
||||
**Configuration Loading**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. Load system defaults (config.defaults.toml)
|
||||
2. Merge user config (~/.provisioning/config.user.toml)
|
||||
3. Load workspace config (workspace/config/provisioning.yaml)
|
||||
@ -824,7 +824,7 @@ provisioning workspace unlock prod-deployment
|
||||
|
||||
**State Persistence**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Workflow execution
|
||||
↓
|
||||
Create checkpoint (JSON)
|
||||
@ -836,7 +836,7 @@ On failure, load checkpoint and resume
|
||||
|
||||
**OCI Artifact Flow**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. Package extension (oci-package.nu)
|
||||
2. Push to OCI registry (provisioning oci push)
|
||||
3. Extension stored as OCI artifact
|
||||
@ -850,7 +850,7 @@ On failure, load checkpoint and resume
|
||||
|
||||
### Security Layers
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ SECURITY ARCHITECTURE │
|
||||
├─────────────────────────────────────────────────────────────────┤
|
||||
@ -921,7 +921,7 @@ On failure, load checkpoint and resume
|
||||
|
||||
**SOPS Integration**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Edit encrypted file
|
||||
provisioning sops workspace/secrets/keys.yaml.enc
|
||||
|
||||
@ -931,7 +931,7 @@ provisioning sops workspace/secrets/keys.yaml.enc
|
||||
|
||||
**KMS Integration** (Enterprise):
|
||||
|
||||
```yaml
|
||||
```
|
||||
# workspace/config/provisioning.yaml
|
||||
secrets:
|
||||
provider: "kms"
|
||||
@ -945,7 +945,7 @@ secrets:
|
||||
|
||||
**CI/CD Mode** (Required):
|
||||
|
||||
```bash
|
||||
```
|
||||
# Sign OCI artifact
|
||||
cosign sign oci://registry/kubernetes:1.28.0
|
||||
|
||||
@ -955,7 +955,7 @@ cosign verify oci://registry/kubernetes:1.28.0
|
||||
|
||||
**Enterprise Mode** (Mandatory):
|
||||
|
||||
```bash
|
||||
```
|
||||
# Pull with verification
|
||||
provisioning extension pull kubernetes --verify-signature
|
||||
|
||||
@ -970,7 +970,7 @@ provisioning extension pull kubernetes --verify-signature
|
||||
|
||||
#### 1. **Binary Deployment** (Solo, Multi-user)
|
||||
|
||||
```plaintext
|
||||
```
|
||||
User Machine
|
||||
├── ~/.provisioning/bin/
|
||||
│ ├── provisioning-orchestrator
|
||||
@ -986,7 +986,7 @@ User Machine
|
||||
|
||||
#### 2. **Docker Deployment** (Multi-user, CI/CD)
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Docker Daemon
|
||||
├── Container: provisioning-orchestrator
|
||||
├── Container: provisioning-control-center
|
||||
@ -1001,7 +1001,7 @@ Docker Daemon
|
||||
|
||||
#### 3. **Docker Compose Deployment** (Multi-user)
|
||||
|
||||
```yaml
|
||||
```
|
||||
# provisioning/platform/docker-compose.yaml
|
||||
services:
|
||||
orchestrator:
|
||||
@ -1039,7 +1039,7 @@ services:
|
||||
|
||||
#### 4. **Kubernetes Deployment** (CI/CD, Enterprise)
|
||||
|
||||
```yaml
|
||||
```
|
||||
# Namespace: provisioning-system
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
@ -1085,7 +1085,7 @@ spec:
|
||||
|
||||
#### 5. **Remote Deployment** (All modes)
|
||||
|
||||
```yaml
|
||||
```
|
||||
# Connect to remotely-running services
|
||||
services:
|
||||
orchestrator:
|
||||
@ -1108,7 +1108,7 @@ services:
|
||||
|
||||
#### 1. **Hybrid Language Integration** (Rust ↔ Nushell)
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Rust Orchestrator
|
||||
↓ (HTTP API)
|
||||
Nushell CLI
|
||||
@ -1124,7 +1124,7 @@ File-based Task Queue
|
||||
|
||||
#### 2. **Provider Abstraction**

```plaintext
```
Unified Provider Interface
├── create_server(config) -> Server
├── delete_server(id) -> bool
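
Expressed as Nushell commands, the contract amounts to each provider module exporting the same signatures; the bodies below are placeholders, not a real provider.

```nushell
# Sketch of the unified interface as Nushell commands; every provider module
# (aws, upcloud, local, ...) exports the same signatures, bodies differ per provider
export def create-server [config: record] {
    # Provider-specific API call goes here; returns the created server as a record
    { id: "srv-example", name: $config.name, status: "created" }
}

export def delete-server [id: string] {
    # Provider-specific deletion; returns true on success
    true
}
```
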
@ -1139,7 +1139,7 @@ Provider Implementations:
|
||||
|
||||
#### 3. **OCI Registry Integration**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Extension Development
|
||||
↓
|
||||
Package (oci-package.nu)
|
||||
@ -1157,7 +1157,7 @@ Load into Workspace
|
||||
|
||||
#### 4. **Gitea Integration** (Multi-user, Enterprise)
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Workspace Operations
|
||||
↓
|
||||
Check Lock Status (Gitea API)
|
||||
@ -1179,7 +1179,7 @@ Release Lock (Delete lock file)
|
||||
|
||||
#### 5. **CoreDNS Integration**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Service Registration
|
||||
↓
|
||||
Update CoreDNS Corefile
|
||||
|
||||
@ -86,7 +86,7 @@ Original comprehensive loader that handles:
|
||||
|
||||
## Module Dependency Graph
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Help/Status Commands
|
||||
↓
|
||||
loader-lazy.nu
|
||||
@ -110,7 +110,7 @@ loader.nu (full configuration)
|
||||
|
||||
### Fast Path (Help Commands)
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Uses minimal loader - 23ms
|
||||
./provisioning help infrastructure
|
||||
./provisioning workspace list
|
||||
@ -119,7 +119,7 @@ loader.nu (full configuration)
|
||||
|
||||
### Medium Path (Status Operations)
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Uses minimal loader with some full config - ~50ms
|
||||
./provisioning status
|
||||
./provisioning workspace active
|
||||
@ -128,7 +128,7 @@ loader.nu (full configuration)
|
||||
|
||||
### Full Path (Infrastructure Operations)

```nushell
```
# Uses full loader - ~150ms
./provisioning server create --infra myinfra
./provisioning taskserv create kubernetes
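
These timings can be spot-checked with Nushell's built-in `timeit`; the figures in the comments are the rough numbers quoted above and will vary by machine.

```nushell
# Rough timing check of the loader paths (results vary by machine)
timeit { ^./provisioning help infrastructure }   # fast path, minimal loader (~23ms)
timeit { ^./provisioning status }                # medium path (~50ms)
# Full-path commands mutate infrastructure, so time them only against a test workspace
```
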
@ -139,7 +139,7 @@ loader.nu (full configuration)
|
||||
|
||||
### Lazy Loading Decision Logic
|
||||
|
||||
```nushell
|
||||
```
|
||||
# In loader-lazy.nu
|
||||
let is_fast_command = (
|
||||
$command == "help" or
|
||||
@ -160,7 +160,7 @@ if $is_fast_command {
|
||||
|
||||
The minimal loader returns a lightweight config record:
|
||||
|
||||
```nushell
|
||||
```
|
||||
{
|
||||
workspace: {
|
||||
name: "librecloud"
|
||||
@ -247,7 +247,7 @@ Only add if:
|
||||
|
||||
### Performance Testing
|
||||
|
||||
```bash
|
||||
```
|
||||
# Benchmark minimal loader
|
||||
time nu -n -c "use loader-minimal.nu *; get-active-workspace"
|
||||
|
||||
|
||||
@ -13,7 +13,7 @@ Control-Center uses **SurrealDB with kv-mem backend**, an embedded in-memory dat
|
||||
|
||||
### Database Configuration
|
||||
|
||||
```toml
|
||||
```
|
||||
[database]
|
||||
url = "memory" # In-memory backend
|
||||
namespace = "control_center"
|
||||
@ -24,7 +24,7 @@ database = "main"
|
||||
|
||||
**Production Alternative**: Switch to remote WebSocket connection for persistent storage:
|
||||
|
||||
```toml
|
||||
```
|
||||
[database]
|
||||
url = "ws://localhost:8000"
|
||||
namespace = "control_center"
|
||||
@ -79,7 +79,7 @@ Control-Center also supports (via Cargo.toml dependencies):
|
||||
|
||||
Orchestrator uses simple file-based storage by default:
|
||||
|
||||
```toml
|
||||
```
|
||||
[orchestrator.storage]
|
||||
type = "filesystem" # Default
|
||||
backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs"
|
||||
@ -87,7 +87,7 @@ backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs"
|
||||
|
||||
**Resolved Path**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
{{workspace.path}}/.orchestrator/data/queue.rkvs
|
||||
```
|
||||
|
||||
@ -95,7 +95,7 @@ backend_path = "{{orchestrator.paths.data_dir}}/queue.rkvs"
|
||||
|
||||
For production deployments, switch to SurrealDB:
|
||||
|
||||
```toml
|
||||
```
|
||||
[orchestrator.storage]
|
||||
type = "surrealdb-server" # or surrealdb-embedded
|
||||
|
||||
@ -115,7 +115,7 @@ password = "secret"
|
||||
|
||||
All services load configuration in this order (priority: low → high):
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. System Defaults provisioning/config/config.defaults.toml
|
||||
2. Service Defaults provisioning/platform/{service}/config.defaults.toml
|
||||
3. Workspace Config workspace/{name}/config/provisioning.yaml
|
||||
@ -128,7 +128,7 @@ All services load configuration in this order (priority: low → high):
|
||||
|
||||
Configs support dynamic variable interpolation:

```toml
```
[paths]
base = "/Users/Akasha/project-provisioning/provisioning"
data_dir = "{{paths.base}}/data" # Resolves to: /Users/.../data
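
A single interpolation pass can be sketched as follows; the real resolver supports nested references and many more keys, so treat this as illustrative only.

```nushell
# Illustrative single-pass interpolation of {{...}} placeholders
def interpolate [text: string, vars: list] {
    $vars | reduce --fold $text { |entry, acc|
        $acc | str replace --all ("{{" + $entry.key + "}}") $entry.value
    }
}

interpolate "{{paths.base}}/data" [
    { key: "paths.base", value: "/Users/Akasha/project-provisioning/provisioning" }
]
# => /Users/Akasha/project-provisioning/provisioning/data
```
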
@ -175,7 +175,7 @@ All services use workspace-aware paths:
|
||||
|
||||
**Orchestrator**:
|
||||
|
||||
```toml
|
||||
```
|
||||
[orchestrator.paths]
|
||||
base = "{{workspace.path}}/.orchestrator"
|
||||
data_dir = "{{orchestrator.paths.base}}/data"
|
||||
@ -185,7 +185,7 @@ queue_dir = "{{orchestrator.paths.data_dir}}/queue"
|
||||
|
||||
**Control-Center**:
|
||||
|
||||
```toml
|
||||
```
|
||||
[paths]
|
||||
base = "{{workspace.path}}/.control-center"
|
||||
data_dir = "{{paths.base}}/data"
|
||||
@ -194,7 +194,7 @@ logs_dir = "{{paths.base}}/logs"
|
||||
|
||||
**Result** (workspace: `workspace-librecloud`):
|
||||
|
||||
```plaintext
|
||||
```
|
||||
workspace-librecloud/
|
||||
├── .orchestrator/
|
||||
│ ├── data/
|
||||
@ -214,7 +214,7 @@ Any config value can be overridden via environment variables:
|
||||
|
||||
### Control-Center
|
||||
|
||||
```bash
|
||||
```
|
||||
# Override server port
|
||||
export CONTROL_CENTER_SERVER_PORT=8081
|
||||
|
||||
@ -227,7 +227,7 @@ export CONTROL_CENTER_JWT_ISSUER="my-issuer"
|
||||
|
||||
### Orchestrator
|
||||
|
||||
```bash
|
||||
```
|
||||
# Override orchestrator port
|
||||
export ORCHESTRATOR_SERVER_PORT=8080
|
||||
|
||||
@ -241,7 +241,7 @@ export ORCHESTRATOR_QUEUE_MAX_CONCURRENT_TASKS=10
|
||||
|
||||
### Naming Convention

```plaintext
```
{SERVICE}_{SECTION}_{KEY} = value
```

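
A hedged sketch of how such overrides can be collected from the environment is shown below; the prefix handling and the section/key split are simplified compared to the real services.

```nushell
# Sketch: collect {SERVICE}_* environment overrides into section/key/value rows
def env-overrides [service_prefix: string] {
    $env
    | transpose name value
    | where { |row| $row.name | str starts-with $"($service_prefix)_" }
    | each { |row|
        let parts = ($row.name | str replace $"($service_prefix)_" "" | split row "_")
        {
            section: ($parts | first | str downcase)
            key: ($parts | skip 1 | str join "_" | str downcase)
            value: $row.value
        }
    }
}

env-overrides "CONTROL_CENTER"
# e.g. CONTROL_CENTER_SERVER_PORT=8081 -> { section: "server", key: "port", value: "8081" }
```
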
@ -259,7 +259,7 @@ export ORCHESTRATOR_QUEUE_MAX_CONCURRENT_TASKS=10
|
||||
|
||||
**Container paths** (resolved inside container):
|
||||
|
||||
```toml
|
||||
```
|
||||
[paths]
|
||||
base = "/app/provisioning"
|
||||
data_dir = "/data" # Mounted volume
|
||||
@ -268,7 +268,7 @@ logs_dir = "/var/log/orchestrator" # Mounted volume
|
||||
|
||||
**Docker Compose volumes**:
|
||||
|
||||
```yaml
|
||||
```
|
||||
services:
|
||||
orchestrator:
|
||||
volumes:
|
||||
@ -289,7 +289,7 @@ volumes:
|
||||
|
||||
**Host paths** (macOS/Linux):
|
||||
|
||||
```toml
|
||||
```
|
||||
[paths]
|
||||
base = "/Users/Akasha/project-provisioning/provisioning"
|
||||
data_dir = "{{workspace.path}}/.orchestrator/data"
|
||||
@ -302,7 +302,7 @@ logs_dir = "{{workspace.path}}/.orchestrator/logs"
|
||||
|
||||
Check current configuration:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Show effective configuration
|
||||
provisioning env
|
||||
|
||||
@ -322,7 +322,7 @@ PROVISIONING_DEBUG=true ./orchestrator --show-config
|
||||
|
||||
**Cosmian KMS** uses its own database (when deployed):
|
||||
|
||||
```bash
|
||||
```
|
||||
# KMS database location (Docker)
|
||||
/data/kms.db # SQLite database inside KMS container
|
||||
|
||||
@ -332,7 +332,7 @@ PROVISIONING_DEBUG=true ./orchestrator --show-config
|
||||
|
||||
KMS also integrates with Control-Center's KMS hybrid backend (local + remote):
|
||||
|
||||
```toml
|
||||
```
|
||||
[kms]
|
||||
mode = "hybrid" # local, remote, or hybrid
|
||||
|
||||
|
||||
@ -32,7 +32,7 @@ without code changes. Hardcoded values defeat the purpose of IaC and create main
|
||||
|
||||
**Example**:
|
||||
|
||||
```toml
|
||||
```
|
||||
# ✅ PAP Compliant - Configuration-driven
|
||||
[providers.aws]
|
||||
regions = ["us-west-2", "us-east-1"]
|
||||
@ -62,7 +62,7 @@ configuration management and domain-specific operations.
|
||||
|
||||
**Language Responsibility Matrix**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Rust Layer:
|
||||
├── Workflow orchestration and coordination
|
||||
├── REST API servers and HTTP endpoints
|
||||
@ -111,7 +111,7 @@ flexibility while maintaining predictability.
|
||||
|
||||
**Domain Organization**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
├── core/ # Core system and library functions
|
||||
├── platform/ # High-performance coordination layer
|
||||
├── provisioning/ # Main business logic with providers and services
|
||||
@ -160,7 +160,7 @@ evolution.
|
||||
|
||||
**Recovery Strategies**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Operation Level:
|
||||
├── Atomic operations with rollback
|
||||
├── Retry logic with exponential backoff
|
||||
@ -203,7 +203,7 @@ gains.
|
||||
|
||||
**Security Implementation**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Authentication & Authorization:
|
||||
├── API authentication for external access
|
||||
├── Role-based access control for operations
|
||||
@ -234,7 +234,7 @@ the system.
|
||||
|
||||
**Testing Strategy**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Unit Testing:
|
||||
├── Configuration validation tests
|
||||
├── Individual component tests
|
||||
@ -272,7 +272,7 @@ System Testing:
|
||||
|
||||
**Error Categories**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Configuration Errors:
|
||||
├── Invalid configuration syntax
|
||||
├── Missing required configuration
|
||||
@ -300,7 +300,7 @@ System Errors:
|
||||
|
||||
**Observability Implementation**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Logging:
|
||||
├── Structured JSON logging
|
||||
├── Configurable log levels
|
||||
@ -358,7 +358,7 @@ Monitoring:
|
||||
|
||||
**Debt Management Strategy**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Assessment:
|
||||
├── Regular code quality reviews
|
||||
├── Performance profiling and optimization
|
||||
@ -382,7 +382,7 @@ Improvement:
|
||||
|
||||
**Trade-off Categories**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Performance vs. Maintainability:
|
||||
├── Rust coordination layer for performance
|
||||
├── Nushell business logic for maintainability
|
||||
|
||||
@ -19,7 +19,7 @@ This document describes the **hybrid selective integration** of prov-ecosystem a
|
||||
|
||||
### Three-Layer Integration
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ Provisioning CLI (provisioning/core/cli/) │
|
||||
│ ✅ 80+ command shortcuts │
|
||||
@ -70,7 +70,7 @@ This document describes the **hybrid selective integration** of prov-ecosystem a
|
||||
|
||||
**Key Types**:
|
||||
|
||||
```rust
|
||||
```
|
||||
pub enum ContainerRuntime {
|
||||
Docker,
|
||||
Podman,
|
||||
@ -85,7 +85,7 @@ pub struct ComposeAdapter { ... }
|
||||
|
||||
**Nushell Functions**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
runtime-detect # Auto-detect available runtime
|
||||
runtime-exec # Execute command in detected runtime
|
||||
runtime-compose # Adapt docker-compose for runtime
|
||||
@ -112,7 +112,7 @@ runtime-list # List all available runtimes
|
||||
|
||||
**Key Types**:
|
||||
|
||||
```rust
|
||||
```
|
||||
pub struct SshConfig { ... }
|
||||
pub struct SshPool { ... }
|
||||
pub enum DeploymentStrategy {
|
||||
@ -124,7 +124,7 @@ pub enum DeploymentStrategy {
|
||||
|
||||
**Nushell Functions**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
ssh-pool-connect # Create SSH pool connection
|
||||
ssh-pool-exec # Execute on SSH pool
|
||||
ssh-pool-status # Check pool status
|
||||
@ -153,7 +153,7 @@ ssh-circuit-breaker-status # Check circuit breaker
|
||||
|
||||
**Key Types**:
|
||||
|
||||
```rust
|
||||
```
|
||||
pub enum BackupBackend {
|
||||
Restic,
|
||||
Borg,
|
||||
@ -169,7 +169,7 @@ pub struct BackupManager { ... }
|
||||
|
||||
**Nushell Functions**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
backup-create # Create backup job
|
||||
backup-restore # Restore from snapshot
|
||||
backup-list # List snapshots
|
||||
@ -199,7 +199,7 @@ backup-status # Check backup status
|
||||
|
||||
**Key Types**:
|
||||
|
||||
```rust
|
||||
```
|
||||
pub enum GitProvider {
|
||||
GitHub,
|
||||
GitLab,
|
||||
@ -212,7 +212,7 @@ pub struct GitOpsOrchestrator { ... }
|
||||
|
||||
**Nushell Functions**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
gitops-rules # Load rules from config
|
||||
gitops-watch # Watch for Git events
|
||||
gitops-trigger # Manually trigger deployment
|
||||
@ -243,7 +243,7 @@ gitops-status # Get GitOps status
|
||||
|
||||
**Nushell Functions**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
service-install # Install service
|
||||
service-start # Start service
|
||||
service-stop # Stop service
|
||||
@ -300,7 +300,7 @@ All implementations follow project standards:
|
||||
|
||||
## File Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/
|
||||
├── platform/integrations/
|
||||
│ └── provisioning-bridge/ # Rust bridge crate
|
||||
@ -338,7 +338,7 @@ provisioning/
|
||||
|
||||
### Runtime Abstraction
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Auto-detect available runtime
|
||||
let runtime = (runtime-detect)
|
||||
|
||||
@ -351,7 +351,7 @@ let compose_cmd = (runtime-compose "./docker-compose.yml")
|
||||
|
||||
### SSH Advanced
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Connect to SSH pool
|
||||
let pool = (ssh-pool-connect "server01.example.com" "root" --port 22)
|
||||
|
||||
@ -364,7 +364,7 @@ ssh-circuit-breaker-status
|
||||
|
||||
### Backup System
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Schedule regular backups
|
||||
backup-schedule "daily-app-backup" "0 2 * * *" \
|
||||
--paths ["/opt/app" "/var/lib/app"] \
|
||||
@ -381,7 +381,7 @@ backup-restore "snapshot-001" --restore_path "."
|
||||
|
||||
### GitOps Events
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Load GitOps rules
|
||||
let rules = (gitops-rules "./gitops-rules.yaml")
|
||||
|
||||
@ -394,7 +394,7 @@ gitops-trigger "deploy-app" --environment "prod"
|
||||
|
||||
### Service Management
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Install service
|
||||
service-install "my-app" "/usr/local/bin/my-app" \
|
||||
--user "appuser" \
|
||||
@ -418,7 +418,7 @@ service-restart-policy "my-app" --policy "on-failure" --delay-secs 5
|
||||
|
||||
Existing `provisioning` CLI will gain new command tree:
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning runtime detect|exec|compose|info|list
|
||||
provisioning ssh pool connect|exec|status|strategies
|
||||
provisioning backup create|restore|list|schedule|retention|status
|
||||
@ -430,7 +430,7 @@ provisioning service install|start|stop|restart|status|list|policy|detect-init
|
||||
|
||||
All integrations use Nickel schemas from `provisioning/schemas/integrations/`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let { IntegrationConfig } = import "provisioning/integrations.ncl" in
|
||||
{
|
||||
runtime = { ... },
|
||||
@ -445,7 +445,7 @@ let { IntegrationConfig } = import "provisioning/integrations.ncl" in
|
||||
|
||||
Nushell plugins can be created for performance-critical operations:
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning plugin list
|
||||
# [installed]
|
||||
# nu_plugin_runtime
|
||||
@ -460,7 +460,7 @@ provisioning plugin list
|
||||
|
||||
### Rust Tests
|
||||
|
||||
```bash
|
||||
```
|
||||
cd provisioning/platform/integrations/provisioning-bridge
|
||||
cargo test --all
|
||||
cargo test -p provisioning-bridge --lib
|
||||
@ -469,7 +469,7 @@ cargo test -p provisioning-bridge --doc
|
||||
|
||||
### Nushell Tests
|
||||
|
||||
```bash
|
||||
```
|
||||
nu provisioning/core/nulib/integrations/runtime.nu
|
||||
nu provisioning/core/nulib/integrations/ssh_advanced.nu
|
||||
```
|
||||
|
||||
@ -15,7 +15,7 @@ workflows, and enable extensible functionality. This document outlines the key i
|
||||
|
||||
**Implementation**:
|
||||
|
||||
```rust
|
||||
```
|
||||
use tokio::process::Command;
|
||||
use serde_json;
|
||||
|
||||
@ -35,7 +35,7 @@ pub async fn execute_nushell_workflow(
|
||||
|
||||
**Data Exchange Format**:

```json
```
{
  "status": "success" | "error" | "partial",
  "result": {
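
On the Nushell side, a task script can emit this envelope with `to json`; the sketch below uses only the `status` and `result` fields shown above, and the example payload is made up.

```nushell
# Sketch: emit the Rust<->Nushell exchange envelope from a task script
def emit-result [status: string, result: record] {
    if $status not-in ["success" "error" "partial"] {
        error make { msg: $"invalid status: ($status)" }
    }
    { status: $status, result: $result } | to json
}

emit-result "success" { server_id: "srv-123" }   # server_id is illustrative
```
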
@ -54,7 +54,7 @@ pub async fn execute_nushell_workflow(
|
||||
|
||||
**Implementation**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
def submit-workflow [workflow: record] -> record {
|
||||
let payload = $workflow | to json
|
||||
|
||||
@ -68,7 +68,7 @@ def submit-workflow [workflow: record] -> record {
|
||||
|
||||
**API Contract**:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"workflow_id": "wf-456",
|
||||
"name": "multi_cloud_deployment",
|
||||
@ -86,7 +86,7 @@ def submit-workflow [workflow: record] -> record {
|
||||
|
||||
**Interface Definition**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Standard provider interface that all providers must implement
|
||||
export def list-servers [] -> table {
|
||||
# Provider-specific implementation
|
||||
@ -107,7 +107,7 @@ export def get-server [id: string] -> record {
|
||||
|
||||
**Configuration Integration**:
|
||||
|
||||
```toml
|
||||
```
|
||||
[providers.aws]
|
||||
region = "us-west-2"
|
||||
credentials_profile = "default"
|
||||
@ -125,7 +125,7 @@ network_mode = "bridge"
|
||||
|
||||
#### Provider Discovery and Loading
|
||||
|
||||
```nushell
|
||||
```
|
||||
def load-providers [] -> table {
|
||||
let provider_dirs = glob "providers/*/nulib"
|
||||
|
||||
@ -150,7 +150,7 @@ def load-providers [] -> table {
|
||||
|
||||
**Implementation**:
|
||||
|
||||
```nushell
|
||||
```
|
||||
def resolve-configuration [context: record] -> record {
|
||||
let base_config = open config.defaults.toml
|
||||
let user_config = if ("config.user.toml" | path exists) {
|
||||
@ -173,7 +173,7 @@ def resolve-configuration [context: record] -> record {
|
||||
|
||||
#### Variable Interpolation Pattern
|
||||
|
||||
```nushell
|
||||
```
|
||||
def interpolate-variables [config: record] -> record {
|
||||
let interpolations = {
|
||||
"{{paths.base}}": ($env.PWD),
|
||||
@ -200,7 +200,7 @@ def interpolate-variables [config: record] -> record {
|
||||
|
||||
**Implementation (Rust)**:
|
||||
|
||||
```rust
|
||||
```
|
||||
use petgraph::{Graph, Direction};
|
||||
use std::collections::HashMap;
|
||||
|
||||
@ -229,7 +229,7 @@ impl DependencyResolver {
|
||||
|
||||
#### Parallel Execution Pattern
|
||||
|
||||
```rust
|
||||
```
|
||||
use tokio::task::JoinSet;
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
|
||||
@ -265,7 +265,7 @@ pub async fn execute_parallel_batch(
|
||||
|
||||
**Implementation**:
|
||||
|
||||
```rust
|
||||
```
|
||||
#[derive(Serialize, Deserialize)]
|
||||
pub struct WorkflowCheckpoint {
|
||||
pub workflow_id: String,
|
||||
@ -309,7 +309,7 @@ impl CheckpointManager {
|
||||
|
||||
#### Rollback Pattern
|
||||
|
||||
```rust
|
||||
```
|
||||
pub struct RollbackManager {
|
||||
rollback_stack: Vec<RollbackAction>,
|
||||
}
|
||||
@ -349,7 +349,7 @@ impl RollbackManager {
|
||||
|
||||
**Event Definition**:
|
||||
|
||||
```rust
|
||||
```
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub enum SystemEvent {
|
||||
WorkflowStarted { workflow_id: String, name: String },
|
||||
@ -363,7 +363,7 @@ pub enum SystemEvent {
|
||||
|
||||
**Event Bus Implementation**:
|
||||
|
||||
```rust
|
||||
```
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
pub struct EventBus {
|
||||
@ -392,7 +392,7 @@ impl EventBus {
|
||||
|
||||
#### Extension Discovery and Loading
|
||||
|
||||
```nushell
|
||||
```
|
||||
def discover-extensions [] -> table {
|
||||
let extension_dirs = glob "extensions/*/extension.toml"
|
||||
|
||||
@ -417,7 +417,7 @@ def discover-extensions [] -> table {
|
||||
|
||||
#### Extension Interface Pattern
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Standard extension interface
|
||||
export def extension-info [] -> record {
|
||||
{
|
||||
@ -452,7 +452,7 @@ export def extension-deactivate [] -> nothing {
|
||||
|
||||
**Base API Structure**:
|
||||
|
||||
```rust
|
||||
```
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
response::Json,
|
||||
@ -473,7 +473,7 @@ pub fn create_api_router(state: AppState) -> Router {
|
||||
|
||||
**Standard Response Format**:
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"status": "success" | "error" | "pending",
|
||||
"data": { ... },
|
||||
@ -494,7 +494,7 @@ pub fn create_api_router(state: AppState) -> Router {
|
||||
|
||||
### Structured Error Pattern
|
||||
|
||||
```rust
|
||||
```
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum ProvisioningError {
|
||||
#[error("Configuration error: {message}")]
|
||||
@ -513,7 +513,7 @@ pub enum ProvisioningError {
|
||||
|
||||
### Error Recovery Pattern
|
||||
|
||||
```nushell
|
||||
```
|
||||
def with-retry [operation: closure, max_attempts: int = 3] {
|
||||
mut attempts = 0
|
||||
mut last_error = null
|
||||
@ -540,7 +540,7 @@ def with-retry [operation: closure, max_attempts: int = 3] {
|
||||
|
||||
### Caching Strategy Pattern
|
||||
|
||||
```rust
|
||||
```
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use std::collections::HashMap;
|
||||
@ -583,7 +583,7 @@ impl<T: Clone> Cache<T> {
|
||||
|
||||
### Streaming Pattern for Large Data
|
||||
|
||||
```nushell
|
||||
```
|
||||
def process-large-dataset [source: string] -> nothing {
|
||||
# Stream processing instead of loading entire dataset
|
||||
open $source
|
||||
@ -600,7 +600,7 @@ def process-large-dataset [source: string] -> nothing {
|
||||
|
||||
### Integration Test Pattern
|
||||
|
||||
```rust
|
||||
```
|
||||
#[cfg(test)]
|
||||
mod integration_tests {
|
||||
use super::*;
|
||||
|
||||
@ -24,7 +24,7 @@ distributed extension management through OCI registry integration.
|
||||
|
||||
**Purpose**: Core system functionality - CLI, libraries, base schemas
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-core/
|
||||
├── core/
|
||||
│ ├── cli/ # Command-line interface
|
||||
@ -82,7 +82,7 @@ provisioning-core/
|
||||
|
||||
**Purpose**: All provider, taskserv, and cluster extensions
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-extensions/
|
||||
├── providers/
|
||||
│ ├── aws/
|
||||
@ -143,7 +143,7 @@ Each extension published separately as OCI artifact:
|
||||
|
||||
**Extension Manifest** (`manifest.yaml`):
|
||||
|
||||
```yaml
|
||||
```
|
||||
name: kubernetes
|
||||
type: taskserv
|
||||
version: 1.28.0
|
||||
@ -183,7 +183,7 @@ min_provisioning_version: "3.0.0"
|
||||
|
||||
**Purpose**: Platform services (orchestrator, control-center, MCP server, API gateway)
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-platform/
|
||||
├── orchestrator/ # Rust orchestrator service
|
||||
│ ├── src/
|
||||
@ -238,7 +238,7 @@ Standard Docker images in OCI registry:
|
||||
|
||||
### Registry Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
OCI Registry (localhost:5000 or harbor.company.com)
|
||||
├── provisioning-core/
|
||||
│ ├── v3.5.0 # Core system artifact
|
||||
@ -263,7 +263,7 @@ OCI Registry (localhost:5000 or harbor.company.com)
|
||||
|
||||
Each extension packaged as OCI artifact:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
kubernetes-1.28.0.tar.gz
|
||||
├── schemas/ # Nickel schemas
|
||||
│ ├── kubernetes.ncl
|
||||
@ -291,7 +291,7 @@ kubernetes-1.28.0.tar.gz
|
||||
|
||||
**File**: `workspace/config/provisioning.yaml`
|
||||
|
||||
```yaml
|
||||
```
|
||||
# Core system dependency
|
||||
dependencies:
|
||||
core:
|
||||
@ -363,7 +363,7 @@ The system resolves dependencies in this order:
|
||||
|
||||
### Dependency Resolution Commands
|
||||
|
||||
```bash
|
||||
```
|
||||
# Resolve and install all dependencies
|
||||
provisioning dep resolve
|
||||
|
||||
@ -386,7 +386,7 @@ provisioning dep tree kubernetes
|
||||
|
||||
### CLI Commands
|
||||
|
||||
```bash
|
||||
```
|
||||
# Pull extension from OCI registry
|
||||
provisioning oci pull kubernetes:1.28.0
|
||||
|
||||
@ -419,7 +419,7 @@ provisioning oci copy \
|
||||
|
||||
### OCI Configuration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Show OCI configuration
|
||||
provisioning oci config
|
||||
|
||||
@ -442,7 +442,7 @@ provisioning oci config
|
||||
|
||||
### 1. Develop Extension
|
||||
|
||||
```bash
|
||||
```
|
||||
# Create new extension from template
|
||||
provisioning generate extension taskserv redis
|
||||
|
||||
@ -466,7 +466,7 @@ provisioning generate extension taskserv redis
|
||||
|
||||
### 2. Test Extension Locally
|
||||
|
||||
```bash
|
||||
```
|
||||
# Load extension from local path
|
||||
provisioning module load taskserv workspace_dev redis --source local
|
||||
|
||||
@ -479,7 +479,7 @@ provisioning test extension redis
|
||||
|
||||
### 3. Package Extension
|
||||
|
||||
```bash
|
||||
```
|
||||
# Validate extension structure
|
||||
provisioning oci package validate ./extensions/taskservs/redis
|
||||
|
||||
@ -491,7 +491,7 @@ provisioning oci package ./extensions/taskservs/redis
|
||||
|
||||
### 4. Publish Extension
|
||||
|
||||
```bash
|
||||
```
|
||||
# Login to registry (one-time)
|
||||
provisioning oci login localhost:5000
|
||||
|
||||
@ -511,7 +511,7 @@ provisioning oci tags redis
|
||||
|
||||
### 5. Use Published Extension
|
||||
|
||||
```bash
|
||||
```
|
||||
# Add to workspace configuration
|
||||
# workspace/config/provisioning.yaml:
|
||||
# dependencies:
|
||||
@ -534,7 +534,7 @@ provisioning dep resolve
|
||||
|
||||
**Using Zot (lightweight OCI registry)**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Start local OCI registry
|
||||
provisioning oci-registry start
|
||||
|
||||
@ -555,7 +555,7 @@ provisioning oci-registry status
|
||||
|
||||
**Using Harbor**:
|
||||
|
||||
```yaml
|
||||
```
|
||||
# workspace/config/provisioning.yaml
|
||||
dependencies:
|
||||
registry:
|
||||
@ -591,7 +591,7 @@ dependencies:
|
||||
|
||||
### Phase 2: Gradual Migration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Migrate extensions one by one
|
||||
for ext in (ls provisioning/extensions/taskservs); do
|
||||
provisioning oci publish $ext.name
|
||||
|
||||
@ -79,7 +79,7 @@ dependency model.
|
||||
|
||||
**Contents:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-core/
|
||||
├── nulib/ # Nushell libraries
|
||||
│ ├── lib_provisioning/ # Core library functions
|
||||
@ -120,7 +120,7 @@ provisioning-core/
|
||||
|
||||
**Installation Path:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
/usr/local/
|
||||
├── bin/provisioning
|
||||
├── lib/provisioning/
|
||||
@ -135,7 +135,7 @@ provisioning-core/
|
||||
|
||||
**Contents:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-platform/
|
||||
├── orchestrator/ # Rust orchestrator
|
||||
│ ├── src/
|
||||
@ -180,7 +180,7 @@ provisioning-platform/
|
||||
|
||||
**Installation Path:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
/usr/local/
|
||||
├── bin/
|
||||
│ ├── provisioning-orchestrator
|
||||
@ -203,7 +203,7 @@ provisioning-platform/
|
||||
|
||||
**Contents:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-extensions/
|
||||
├── registry/ # Extension registry
|
||||
│ ├── index.json # Searchable index
|
||||
@ -252,7 +252,7 @@ provisioning-extensions/
|
||||
|
||||
**Installation:**
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install extension via core CLI
|
||||
provisioning extension install mongodb
|
||||
provisioning extension install azure-provider
|
||||
@ -261,7 +261,7 @@ provisioning extension install azure-provider
|
||||
**Extension Structure:**
|
||||
Each extension is self-contained:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
mongodb/
|
||||
├── manifest.toml # Extension metadata
|
||||
├── taskserv.nu # Implementation
|
||||
@ -279,7 +279,7 @@ mongodb/
|
||||
|
||||
**Contents:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-workspace/
|
||||
├── templates/ # Workspace templates
|
||||
│ ├── minimal/ # Minimal starter
|
||||
@ -315,7 +315,7 @@ provisioning-workspace/
|
||||
|
||||
**Usage:**
|
||||
|
||||
```bash
|
||||
```
|
||||
# Create workspace from template
|
||||
provisioning workspace init my-project --template kubernetes
|
||||
|
||||
@ -333,7 +333,7 @@ provisioning workspace init
|
||||
|
||||
**Contents:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-distribution/
|
||||
├── release-automation/ # Automated release workflows
|
||||
│ ├── build-all.nu # Build all packages
|
||||
@ -385,7 +385,7 @@ provisioning-distribution/
|
||||
|
||||
### Package-Based Dependencies (Not Submodules)
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ provisioning-distribution │
|
||||
│ (Release orchestration & registry) │
|
||||
@ -416,7 +416,7 @@ provisioning-distribution/
|
||||
|
||||
**Method:** Loose coupling via CLI + REST API
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Platform calls Core CLI (subprocess)
|
||||
def create-server [name: string] {
|
||||
# Orchestrator executes Core CLI
|
||||
@ -431,7 +431,7 @@ def submit-workflow [workflow: record] {
|
||||
|
||||
**Version Compatibility:**
|
||||
|
||||
```toml
|
||||
```
|
||||
# platform/Cargo.toml
|
||||
[package.metadata.provisioning]
|
||||
core-version = "^3.0" # Compatible with core 3.x
|
||||
@ -441,7 +441,7 @@ core-version = "^3.0" # Compatible with core 3.x
|
||||
|
||||
**Method:** Plugin/module system
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Extension manifest
|
||||
# extensions/mongodb/manifest.toml
|
||||
[extension]
|
||||
@ -465,7 +465,7 @@ provisioning extension install mongodb
|
||||
|
||||
**Method:** Git templates or package templates
|
||||
|
||||
```bash
|
||||
```
|
||||
# Option 1: GitHub template repository
|
||||
gh repo create my-infra --template provisioning-workspace
|
||||
cd my-infra
|
||||
@ -486,7 +486,7 @@ provisioning workspace create my-infra --template kubernetes
|
||||
|
||||
Each repository maintains independent semantic versioning:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-core: 3.2.1
|
||||
provisioning-platform: 2.5.3
|
||||
provisioning-extensions: (per-extension versioning)
|
||||
@ -497,7 +497,7 @@ provisioning-workspace: 1.4.0
|
||||
|
||||
**`provisioning-distribution/version-management/versions.toml`:**
|
||||
|
||||
```toml
|
||||
```
|
||||
# Version compatibility matrix
|
||||
[compatibility]
|
||||
|
||||
@ -536,7 +536,7 @@ workspace = "1.3.0"
|
||||
|
||||
**Coordinated releases** for major versions:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Major release: All repos release together
|
||||
provisioning-core: 3.0.0
|
||||
provisioning-platform: 2.0.0
|
||||
@ -553,7 +553,7 @@ provisioning-platform: 2.1.0 (improves orchestrator, core stays 3.1.x)
|
||||
|
||||
### Working on Single Repository
|
||||
|
||||
```bash
|
||||
```
|
||||
# Developer working on core only
|
||||
git clone https://github.com/yourorg/provisioning-core
|
||||
cd provisioning-core
|
||||
@ -574,7 +574,7 @@ just install-dev
|
||||
|
||||
### Working Across Repositories
|
||||
|
||||
```bash
|
||||
```
|
||||
# Scenario: Adding new feature requiring core + platform changes
|
||||
|
||||
# 1. Clone both repositories
|
||||
@ -615,7 +615,7 @@ cargo test
|
||||
|
||||
### Testing Cross-Repo Integration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Integration tests in provisioning-distribution
|
||||
cd provisioning-distribution
|
||||
|
||||
@ -636,7 +636,7 @@ just test-bundle stable-3.3
|
||||
|
||||
Each repository releases independently:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Core release
|
||||
cd provisioning-core
|
||||
git tag v3.2.1
|
||||
@ -656,7 +656,7 @@ git push --tags
|
||||
|
||||
Distribution repository creates tested bundles:
|
||||
|
||||
```bash
|
||||
```
|
||||
cd provisioning-distribution
|
||||
|
||||
# Create bundle
|
||||
@ -679,7 +679,7 @@ just publish-bundle stable-3.2
|
||||
|
||||
#### Option 1: Bundle Installation (Recommended for Users)
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install stable bundle (easiest)
|
||||
curl -fsSL https://get.provisioning.io | sh
|
||||
|
||||
@ -691,7 +691,7 @@ curl -fsSL https://get.provisioning.io | sh
|
||||
|
||||
#### Option 2: Individual Component Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install only core (minimal)
|
||||
curl -fsSL https://get.provisioning.io/core | sh
|
||||
|
||||
@ -704,7 +704,7 @@ provisioning extension install mongodb
|
||||
|
||||
#### Option 3: Custom Combination
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install specific versions
|
||||
provisioning install core@3.1.0
|
||||
provisioning install platform@2.4.0
|
||||
@ -760,7 +760,7 @@ provisioning install platform@2.4.0
|
||||
|
||||
**Core CI (`provisioning-core/.github/workflows/ci.yml`):**
|
||||
|
||||
```yaml
|
||||
```
|
||||
name: Core CI
|
||||
|
||||
on: [push, pull_request]
|
||||
@ -792,7 +792,7 @@ jobs:
|
||||
|
||||
**Platform CI (`provisioning-platform/.github/workflows/ci.yml`):**
|
||||
|
||||
```yaml
|
||||
```
|
||||
name: Platform CI
|
||||
|
||||
on: [push, pull_request]
|
||||
@ -829,7 +829,7 @@ jobs:
|
||||
|
||||
**Distribution CI (`provisioning-distribution/.github/workflows/integration.yml`):**
|
||||
|
||||
```yaml
|
||||
```
|
||||
name: Integration Tests
|
||||
|
||||
on:
|
||||
@ -862,7 +862,7 @@ jobs:
|
||||
|
||||
### Monorepo Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/ (One repo, ~500 MB)
|
||||
├── core/ (Nushell)
|
||||
├── platform/ (Rust)
|
||||
@ -873,7 +873,7 @@ provisioning/ (One repo, ~500 MB)
|
||||
|
||||
### Multi-Repo Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning-core/ (Repo 1, ~50 MB)
|
||||
├── nulib/
|
||||
├── cli/
|
||||
|
||||
@ -10,7 +10,7 @@
|
||||
|
||||
### Prerequisites
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install Nickel
|
||||
brew install nickel
|
||||
# or from source: https://nickel-lang.org/getting-started/
|
||||
@ -21,7 +21,7 @@ nickel --version # Should be 1.0+
|
||||
|
||||
### Directory Structure for Examples
|
||||
|
||||
```bash
|
||||
```
|
||||
mkdir -p ~/nickel-examples/{simple,complex,production}
|
||||
cd ~/nickel-examples
|
||||
```
|
||||
@ -32,7 +32,7 @@ cd ~/nickel-examples
|
||||
|
||||
### Step 1: Create Contract File
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > simple/server_contracts.ncl << 'EOF'
|
||||
{
|
||||
ServerConfig = {
|
||||
@ -47,7 +47,7 @@ EOF
|
||||
|
||||
### Step 2: Create Defaults File
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > simple/server_defaults.ncl << 'EOF'
|
||||
{
|
||||
web_server = {
|
||||
@ -76,7 +76,7 @@ EOF
|
||||
|
||||
### Step 3: Create Main Module with Hybrid Interface
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > simple/server.ncl << 'EOF'
|
||||
let contracts = import "./server_contracts.ncl" in
|
||||
let defaults = import "./server_defaults.ncl" in
|
||||
@ -110,7 +110,7 @@ EOF
|
||||
|
||||
### Test: Export and Validate JSON
|
||||
|
||||
```bash
|
||||
```
|
||||
cd simple/
|
||||
|
||||
# Export to JSON
|
||||
@ -133,7 +133,7 @@ nickel export server.ncl --format json | jq '.production_web_server.cpu_cores'
|
||||
|
||||
### Usage in Consumer Module
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > simple/consumer.ncl << 'EOF'
|
||||
let server = import "./server.ncl" in
|
||||
|
||||
@ -162,14 +162,14 @@ nickel export consumer.ncl --format json | jq '.staging_web'
|
||||
|
||||
### Create Provider Structure
|
||||
|
||||
```bash
|
||||
```
|
||||
mkdir -p complex/upcloud/{contracts,defaults,main}
|
||||
cd complex/upcloud
|
||||
```
|
||||
|
||||
### Provider Contracts
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > upcloud_contracts.ncl << 'EOF'
|
||||
{
|
||||
StorageBackup = {
|
||||
@ -196,7 +196,7 @@ EOF
|
||||
|
||||
### Provider Defaults
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > upcloud_defaults.ncl << 'EOF'
|
||||
{
|
||||
backup = {
|
||||
@ -223,7 +223,7 @@ EOF
|
||||
|
||||
### Provider Main Module
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > upcloud_main.ncl << 'EOF'
|
||||
let contracts = import "./upcloud_contracts.ncl" in
|
||||
let defaults = import "./upcloud_defaults.ncl" in
|
||||
@ -281,7 +281,7 @@ EOF
|
||||
|
||||
### Test Provider Configuration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Export provider config
|
||||
nickel export upcloud_main.ncl --format json | jq '.production_high_availability'
|
||||
|
||||
@ -296,7 +296,7 @@ nickel export upcloud_main.ncl --format json | jq '.production_high_availability
|
||||
|
||||
### Consumer Using Provider
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > upcloud_consumer.ncl << 'EOF'
|
||||
let upcloud = import "./upcloud_main.ncl" in
|
||||
|
||||
@ -332,7 +332,7 @@ nickel export upcloud_consumer.ncl --format json | jq '.ha_stack | keys'
|
||||
|
||||
### Taskserv Contracts (from wuji)
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > production/taskserv_contracts.ncl << 'EOF'
|
||||
{
|
||||
Dependency = {
|
||||
@ -352,7 +352,7 @@ EOF
|
||||
|
||||
### Taskserv Defaults
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > production/taskserv_defaults.ncl << 'EOF'
|
||||
{
|
||||
kubernetes = {
|
||||
@ -407,7 +407,7 @@ EOF
|
||||
|
||||
### Taskserv Main
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > production/taskserv.ncl << 'EOF'
|
||||
let contracts = import "./taskserv_contracts.ncl" in
|
||||
let defaults = import "./taskserv_defaults.ncl" in
|
||||
@ -453,7 +453,7 @@ EOF
|
||||
|
||||
### Test Taskserv Setup
|
||||
|
||||
```bash
|
||||
```
|
||||
# Export stack
|
||||
nickel export taskserv.ncl --format json | jq '.wuji_k8s_stack | keys'
|
||||
# Output: ["kubernetes", "cilium", "containerd", "etcd"]
|
||||
@ -477,7 +477,7 @@ nickel export taskserv.ncl --format json | jq '.staging_stack | length'
|
||||
|
||||
### Base Infrastructure
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > production/infrastructure.ncl << 'EOF'
|
||||
let servers = import "./server.ncl" in
|
||||
let taskservs = import "./taskserv.ncl" in
|
||||
@ -520,7 +520,7 @@ nickel export infrastructure.ncl --format json | jq '.production.taskservs | key
|
||||
|
||||
### Extending Infrastructure (Nickel Advantage!)
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > production/infrastructure_extended.ncl << 'EOF'
|
||||
let infra = import "./infrastructure.ncl" in
|
||||
|
||||
@ -557,7 +557,7 @@ nickel export infrastructure_extended.ncl --format json | \
|
||||
|
||||
### Validation Functions
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > production/validation.ncl << 'EOF'
|
||||
let validate_server = fun server =>
|
||||
if server.cpu_cores <= 0 then
|
||||
@ -586,7 +586,7 @@ EOF
|
||||
|
||||
### Using Validations
|
||||
|
||||
```bash
|
||||
```
|
||||
cat > production/validated_config.ncl << 'EOF'
|
||||
let server = import "./server.ncl" in
|
||||
let taskserv = import "./taskserv.ncl" in
|
||||
@ -632,7 +632,7 @@ nickel export validated_config.ncl --format json
|
||||
|
||||
### Run All Examples
|
||||
|
||||
```bash
|
||||
```
|
||||
#!/bin/bash
|
||||
# test_all_examples.sh
|
||||
|
||||
@ -679,7 +679,7 @@ echo "=== All Tests Passed ✓ ==="
|
||||
|
||||
### Common Nickel Operations

```bash
```
# Validate Nickel syntax
nickel export config.ncl

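
The same operations compose naturally from Nushell, where the exported JSON can be parsed directly instead of piping through `jq`; the file and field names below reuse the examples from this guide.

```nushell
# Sketch: drive nickel from Nushell and inspect the exported config directly
let config = (^nickel export simple/server.ncl --format json | from json)
$config | get production_web_server.cpu_cores

# Typecheck every .ncl file under the examples directory
ls **/*.ncl | each { |f| ^nickel typecheck $f.name }
```
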
@ -711,7 +711,7 @@ nickel typecheck config.ncl
|
||||
|
||||
### Problem: "unexpected token" with multiple let
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ❌ WRONG
|
||||
let A = {x = 1}
|
||||
let B = {y = 2}
|
||||
@ -725,7 +725,7 @@ let B = {y = 2} in
|
||||
|
||||
### Problem: Function serialization fails
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ❌ WRONG - function will fail to serialize
|
||||
{
|
||||
get_value = fun x => x + 1,
|
||||
@ -741,7 +741,7 @@ let B = {y = 2} in
|
||||
|
||||
### Problem: Null values cause export issues
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ❌ WRONG
|
||||
{ optional_field = null }
|
||||
|
||||
|
||||
@ -8,7 +8,7 @@
|
||||
|
||||
## Quick Decision Tree
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Need to define infrastructure/schemas?
|
||||
├─ New platform schemas → Use Nickel ✅
|
||||
├─ New provider extensions → Use Nickel ✅
|
||||
@ -26,7 +26,7 @@ Need to define infrastructure/schemas?
|
||||
|
||||
#### KCL Approach
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema ServerDefaults:
|
||||
name: str
|
||||
cpu_cores: int = 2
|
||||
@ -51,7 +51,7 @@ server_defaults: ServerDefaults = {
|
||||
|
||||
**server_contracts.ncl**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
ServerDefaults = {
|
||||
name | String,
|
||||
@ -64,7 +64,7 @@ server_defaults: ServerDefaults = {
|
||||
|
||||
**server_defaults.ncl**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
server = {
|
||||
name = "web-server",
|
||||
@ -77,7 +77,7 @@ server_defaults: ServerDefaults = {
|
||||
|
||||
**server.ncl**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let contracts = import "./server_contracts.ncl" in
|
||||
let defaults = import "./server_defaults.ncl" in
|
||||
|
||||
@ -93,7 +93,7 @@ let defaults = import "./server_defaults.ncl" in
|
||||
|
||||
**Usage**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let server = import "./server.ncl" in
|
||||
|
||||
# Simple override
|
||||
@ -117,7 +117,7 @@ my_custom = server.defaults.server & {
|
||||
|
||||
#### KCL (from `provisioning/extensions/providers/upcloud/nickel/` - legacy approach)
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema StorageBackup:
|
||||
backup_id: str
|
||||
frequency: str
|
||||
@ -145,7 +145,7 @@ provision_upcloud: ProvisionUpcloud = {
|
||||
|
||||
**upcloud_contracts.ncl**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
StorageBackup = {
|
||||
backup_id | String,
|
||||
@ -170,7 +170,7 @@ provision_upcloud: ProvisionUpcloud = {
|
||||
|
||||
**upcloud_defaults.ncl**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
storage_backup = {
|
||||
backup_id = "",
|
||||
@ -195,7 +195,7 @@ provision_upcloud: ProvisionUpcloud = {
|
||||
|
||||
**upcloud_main.ncl** (from actual codebase):
|
||||
|
||||
```nickel
|
||||
```
|
||||
let contracts = import "./upcloud_contracts.ncl" in
|
||||
let defaults = import "./upcloud_defaults.ncl" in
|
||||
|
||||
@ -219,7 +219,7 @@ let defaults = import "./upcloud_defaults.ncl" in
|
||||
|
||||
**Usage Comparison**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# KCL way (KCL does not support this well)
|
||||
# Cannot easily extend without schema modification
|
||||
|
||||
@ -288,7 +288,7 @@ production_stack = upcloud.make_provision_upcloud {
|
||||
|
||||
**KCL (Legacy)**:
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema ServerConfig:
|
||||
name: str
|
||||
zone: str = "us-nyc1"
|
||||
@ -300,7 +300,7 @@ web_server: ServerConfig = {
|
||||
|
||||
**Nickel (Recommended)**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let defaults = import "./server_defaults.ncl" in
|
||||
web_server = defaults.make_server { name = "web-01" }
|
||||
```
|
||||
@ -313,7 +313,7 @@ web_server = defaults.make_server { name = "web-01" }
|
||||
|
||||
**KCL** (from wuji infrastructure):
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema TaskServDependency:
|
||||
name: str
|
||||
wait_for_health: bool = false
|
||||
@ -343,7 +343,7 @@ taskserv_cilium: TaskServ = {
|
||||
|
||||
**Nickel** (from wuji/main.ncl):
|
||||
|
||||
```nickel
|
||||
```
|
||||
let ts_kubernetes = import "./taskservs/kubernetes.ncl" in
|
||||
let ts_cilium = import "./taskservs/cilium.ncl" in
|
||||
let ts_containerd = import "./taskservs/containerd.ncl" in
|
||||
@ -367,7 +367,7 @@ let ts_containerd = import "./taskservs/containerd.ncl" in
|
||||
|
||||
**KCL**:
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema ServerConfig:
|
||||
name: str
|
||||
# Would need to modify schema!
|
||||
@ -379,7 +379,7 @@ schema ServerConfig:
|
||||
|
||||
**Nickel**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let server = import "./server.ncl" in
|
||||
|
||||
# Add custom fields without modifying schema!
|
||||
@ -402,7 +402,7 @@ my_server = server.defaults.server & {
|
||||
|
||||
**KCL Approach (Legacy)**:
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema ServerDefaults:
|
||||
cpu: int = 2
|
||||
memory: int = 4
|
||||
@ -423,7 +423,7 @@ server: Server = {
|
||||
|
||||
**Nickel Approach**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# defaults.ncl
|
||||
server_defaults = {
|
||||
cpu = 2,
|
||||
@ -449,7 +449,7 @@ server = make_server {
|
||||
|
||||
**KCL Validation (Legacy)** (compile-time, inline):
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema Config:
|
||||
timeout: int = 5
|
||||
|
||||
@ -465,7 +465,7 @@ schema Config:
|
||||
|
||||
**Nickel Validation** (runtime, contract-based):
|
||||
|
||||
```nickel
|
||||
```
|
||||
# contracts.ncl - Pure type definitions
|
||||
Config = {
|
||||
timeout | Number,
|
||||
@ -495,7 +495,7 @@ my_config = validate_config { timeout = 10 }
|
||||
|
||||
**Before (KCL - Legacy)**:
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema Scheduler:
|
||||
strategy: str = "fifo"
|
||||
workers: int = 4
|
||||
@ -513,7 +513,7 @@ scheduler_config: Scheduler = {
|
||||
|
||||
`scheduler_contracts.ncl`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
Scheduler = {
|
||||
strategy | String,
|
||||
@ -524,7 +524,7 @@ scheduler_config: Scheduler = {
|
||||
|
||||
`scheduler_defaults.ncl`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
scheduler = {
|
||||
strategy = "fifo",
|
||||
@ -535,7 +535,7 @@ scheduler_config: Scheduler = {
|
||||
|
||||
`scheduler.ncl`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let contracts = import "./scheduler_contracts.ncl" in
|
||||
let defaults = import "./scheduler_defaults.ncl" in
|
||||
|
||||
@ -557,7 +557,7 @@ let defaults = import "./scheduler_defaults.ncl" in
|
||||
|
||||
**Before (KCL - Legacy)**:
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema Mode:
|
||||
deployment_type: str = "solo" # "solo" | "multiuser" | "cicd" | "enterprise"
|
||||
|
||||
@ -568,7 +568,7 @@ schema Mode:
|
||||
|
||||
**After (Nickel - Current)**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# contracts.ncl
|
||||
{
|
||||
Mode = {
|
||||
@ -592,7 +592,7 @@ schema Mode:
|
||||
|
||||
**Before (KCL - Legacy)**:
|
||||
|
||||
```kcl
|
||||
```
|
||||
schema ServerDefaults:
|
||||
cpu: int = 2
|
||||
memory: int = 4
|
||||
@ -609,7 +609,7 @@ web_server: Server = {
|
||||
|
||||
**After (Nickel - Current)**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# defaults.ncl
|
||||
{
|
||||
server_defaults = {
|
||||
@ -643,7 +643,7 @@ let make_server = fun config =>
|
||||
|
||||
**Workflow**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Edit workspace config
|
||||
cd workspace_librecloud/nickel
|
||||
vim wuji/main.ncl
|
||||
@ -658,7 +658,7 @@ nickel export wuji/main.ncl # Uses updated schemas
|
||||
|
||||
**Imports** (relative, central):
|
||||
|
||||
```nickel
|
||||
```
|
||||
import "../../provisioning/schemas/main.ncl"
|
||||
import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
|
||||
```
|
||||
@ -671,7 +671,7 @@ import "../../provisioning/extensions/taskservs/kubernetes/nickel/main.ncl"
|
||||
|
||||
**Workflow**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Create immutable snapshot
|
||||
provisioning workspace freeze \
|
||||
--version "2025-12-15-prod-v1" \
|
||||
@ -696,7 +696,7 @@ provisioning deploy \
|
||||
|
||||
**Frozen Imports** (rewritten to local):
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Original in workspace
|
||||
import "../../provisioning/schemas/main.ncl"
|
||||
|
||||
@ -720,7 +720,7 @@ import "./provisioning/schemas/main.ncl"
|
||||
|
||||
**Problem**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ❌ WRONG
|
||||
let A = { x = 1 }
|
||||
let B = { y = 2 }
|
||||
@ -731,7 +731,7 @@ Error: `unexpected token`
|
||||
|
||||
**Solution**: Use `let...in` chaining:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ✅ CORRECT
|
||||
let A = { x = 1 } in
|
||||
let B = { y = 2 } in
|
||||
@ -744,7 +744,7 @@ let B = { y = 2 } in
|
||||
|
||||
**Problem**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ❌ WRONG
|
||||
let StorageVol = {
|
||||
mount_path : String | null = null,
|
||||
@ -757,7 +757,7 @@ Error: `this can't be used as a contract`
|
||||
|
||||
**Solution**: Use untyped assignment:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ✅ CORRECT
|
||||
let StorageVol = {
|
||||
mount_path = null,
|
||||
@ -770,7 +770,7 @@ let StorageVol = {
|
||||
|
||||
**Problem**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ❌ WRONG
|
||||
{
|
||||
get_value = fun x => x + 1,
|
||||
@ -782,7 +782,7 @@ Error: Functions can't be serialized
|
||||
|
||||
**Solution**: Mark helper functions `not_exported`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ✅ CORRECT
|
||||
{
|
||||
get_value | not_exported = fun x => x + 1,
|
||||
@ -796,7 +796,7 @@ Error: Functions can't be serialized
|
||||
|
||||
**Problem**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let defaults = import "./defaults.ncl" in
|
||||
defaults.scheduler_config # But file has "scheduler"
|
||||
```
|
||||
@ -805,7 +805,7 @@ Error: `field not found`
|
||||
|
||||
**Solution**: Use exact field names:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let defaults = import "./defaults.ncl" in
|
||||
defaults.scheduler # Correct name from defaults.ncl
|
||||
```
|
||||
@ -818,7 +818,7 @@ defaults.scheduler # Correct name from defaults.ncl
|
||||
|
||||
**Solution**: Check for circular references or missing `not_exported`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# ❌ Slow - functions being serialized
|
||||
{
|
||||
validate_config = fun x => x,
|
||||
@ -917,7 +917,7 @@ Type-safe prompts, forms, and schemas that **bidirectionally integrate with Nick
|
||||
|
||||
### Workflow: Nickel Schemas → Interactive UIs → Nickel Output
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Define schema in Nickel
|
||||
cat > server.ncl << 'EOF'
|
||||
let contracts = import "./contracts.ncl" in
|
||||
@ -952,7 +952,7 @@ typedialog form --input form.toml --output nickel
|
||||
|
||||
### Example: Infrastructure Wizard
|
||||
|
||||
```bash
|
||||
```
|
||||
# User runs
|
||||
provisioning init --wizard
|
||||
|
||||
@ -1014,7 +1014,7 @@ provisioning/schemas/config/workspace_config/main.ncl
|
||||
|
||||
**File**: `provisioning/schemas/main.ncl` (174 lines)
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Domain-organized architecture
|
||||
{
|
||||
lib | doc "Core library types"
|
||||
@ -1054,7 +1054,7 @@ provisioning/schemas/config/workspace_config/main.ncl
|
||||
|
||||
**Usage**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let provisioning = import "./main.ncl" in
|
||||
|
||||
provisioning.lib.Storage
|
||||
@ -1069,7 +1069,7 @@ provisioning.operations.workflows
|
||||
|
||||
**File**: `provisioning/extensions/providers/upcloud/nickel/main.ncl` (38 lines)
|
||||
|
||||
```nickel
|
||||
```
|
||||
let contracts_lib = import "./contracts.ncl" in
|
||||
let defaults_lib = import "./defaults.ncl" in
|
||||
|
||||
@ -1109,7 +1109,7 @@ let defaults_lib = import "./defaults.ncl" in
|
||||
|
||||
**File**: `workspace_librecloud/nickel/wuji/main.ncl` (53 lines)
|
||||
|
||||
```nickel
|
||||
```
|
||||
let settings_config = import "./settings.ncl" in
|
||||
let ts_cilium = import "./taskservs/cilium.ncl" in
|
||||
let ts_containerd = import "./taskservs/containerd.ncl" in
|
||||
|
||||
@ -15,7 +15,7 @@ verification, Cedar authorization, rate limiting, and audit logging) into a cohe
|
||||
|
||||
The middleware chain is applied in this specific order to ensure proper security:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ Incoming HTTP Request │
|
||||
└────────────────────────┬────────────────────────────────────────┘
|
||||
@ -90,7 +90,7 @@ The middleware chain is applied in this specific order to ensure proper security
|
||||
|
||||
**Example**:
|
||||
|
||||
```rust
|
||||
```
|
||||
pub struct SecurityContext {
|
||||
pub user_id: String,
|
||||
pub token: ValidatedToken,
|
||||
@ -164,7 +164,7 @@ impl SecurityContext {
|
||||
|
||||
**Example**:
|
||||
|
||||
```rust
|
||||
```
|
||||
fn requires_mfa(method: &str, path: &str) -> bool {
|
||||
if path.contains("/production/") { return true; }
|
||||
if method == "DELETE" { return true; }
|
||||
@ -190,7 +190,7 @@ fn requires_mfa(method: &str, path: &str) -> bool {
|
||||
|
||||
**Resource Mapping**:
|
||||
|
||||
```rust
|
||||
```
|
||||
/api/v1/servers/srv-123 → Resource::Server("srv-123")
|
||||
/api/v1/taskserv/kubernetes → Resource::TaskService("kubernetes")
|
||||
/api/v1/cluster/prod → Resource::Cluster("prod")
|
||||
@ -199,7 +199,7 @@ fn requires_mfa(method: &str, path: &str) -> bool {
|
||||
|
||||
**Action Mapping**:
|
||||
|
||||
```rust
|
||||
```
|
||||
GET → Action::Read
|
||||
POST → Action::Create
|
||||
PUT → Action::Update
|
||||
@ -223,7 +223,7 @@ DELETE → Action::Delete
|
||||
|
||||
**Configuration**:
|
||||
|
||||
```rust
|
||||
```
|
||||
pub struct RateLimitConfig {
|
||||
pub max_requests: u32, // for example, 100
|
||||
pub window_duration: Duration, // for example, 60 seconds
|
||||
@ -236,7 +236,7 @@ pub struct RateLimitConfig {
|
||||
|
||||
**Statistics**:
|
||||
|
||||
```rust
|
||||
```
|
||||
pub struct RateLimitStats {
|
||||
pub total_ips: usize, // Number of tracked IPs
|
||||
pub total_requests: u32, // Total requests made
|
||||
@ -261,7 +261,7 @@ pub struct RateLimitStats {
|
||||
|
||||
**Usage Example**:
|
||||
|
||||
```rust
|
||||
```
|
||||
use provisioning_orchestrator::security_integration::{
|
||||
SecurityComponents, SecurityConfig
|
||||
};
|
||||
@ -292,7 +292,7 @@ let secured_app = apply_security_middleware(app, &security);
|
||||
|
||||
### Updated AppState Structure
|
||||
|
||||
```rust
|
||||
```
|
||||
pub struct AppState {
|
||||
// Existing fields
|
||||
pub task_storage: Arc<dyn TaskStorage>,
|
||||
@ -317,7 +317,7 @@ pub struct AppState {
|
||||
|
||||
### Initialization in main.rs
|
||||
|
||||
```rust
|
||||
```
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let args = Args::parse();
|
||||
@ -398,7 +398,7 @@ async fn main() -> Result<()> {
|
||||
|
||||
### Step-by-Step Flow

```plaintext
```
1. CLIENT REQUEST
   ├─ Headers:
   │  ├─ Authorization: Bearer <jwt_token>
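
Seen from the client, a request entering this chain looks roughly like the following; the port and the `/api/v1/servers` path reuse values shown elsewhere in this document, and the token value is a placeholder.

```nushell
# Sketch: a client request entering the middleware chain (token value is a placeholder)
let jwt_token = "<jwt-issued-by-control-center>"

http get http://localhost:9090/api/v1/servers --headers [
    Authorization $"Bearer ($jwt_token)"
]
```
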
@ -485,7 +485,7 @@ async fn main() -> Result<()> {
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
```
|
||||
# JWT Configuration
|
||||
JWT_ISSUER=control-center
|
||||
JWT_AUDIENCE=orchestrator
|
||||
@ -513,7 +513,7 @@ AUDIT_RETENTION_DAYS=365
|
||||
|
||||
For development/testing, all security can be disabled:
|
||||
|
||||
```rust
|
||||
```
|
||||
// In main.rs
|
||||
let security = if env::var("DEVELOPMENT_MODE").unwrap_or("false".to_string()) == "true" {
|
||||
SecurityComponents::disabled(audit_logger.clone())
|
||||
@ -544,7 +544,7 @@ Location: `provisioning/platform/orchestrator/tests/security_integration_tests.r
|
||||
|
||||
**Run Tests**:
|
||||
|
||||
```bash
|
||||
```
|
||||
cd provisioning/platform/orchestrator
|
||||
cargo test security_integration_tests
|
||||
```
|
||||
|
||||
@ -54,18 +54,18 @@ http post <http://localhost:9090/workflows/servers/create> {
|
||||
|
||||
1. Orchestrator receives and queues:
|
||||
|
||||
```rust
|
||||
```
|
||||
// Orchestrator receives HTTP request
|
||||
async fn create_server_workflow(request) {
|
||||
let task = Task::new(TaskType::ServerCreate, request);
|
||||
task_queue.enqueue(task).await; // Queue for execution
|
||||
return workflow_id; // Return immediately
|
||||
}
|
||||
```text
|
||||
```
|
||||
|
||||
2. Orchestrator executes via Nushell subprocess:
|
||||
|
||||
```rust
|
||||
```
|
||||
// Orchestrator spawns Nushell to run business logic
|
||||
async fn execute_task(task: Task) {
|
||||
let output = Command::new("nu")
|
||||
@ -76,11 +76,11 @@ async fn execute_task(task: Task) {
|
||||
|
||||
// Orchestrator manages: retry, checkpointing, monitoring
|
||||
}
|
||||
```text
|
||||
```
|
||||
|
||||
3. Nushell executes the actual work:

```nu
```
# servers/create.nu

export def create-server [name: string] {

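
The body of that command is cut off by the hunk above; as a purely hypothetical sketch, such a script usually validates its input, calls the active provider, and prints the status/result envelope the orchestrator expects.

```nushell
# Hypothetical shape of servers/create.nu; the real script calls the active provider module
export def create-server [name: string] {
    if ($name | is-empty) {
        error make { msg: "server name is required" }
    }

    # In the real script this record comes from the provider API call
    let server = { id: $"srv-(random uuid)", name: $name, status: "requested" }

    # Emit the status/result envelope the orchestrator expects
    { status: "success", result: $server } | to json
}
```
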
@ -18,7 +18,7 @@ functionality.
|
||||
|
||||
**Original Issue:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Deep call stack in Nushell (template.nu:71)
|
||||
→ "Type not supported" errors
|
||||
→ Cannot handle complex nested workflows
|
||||
@ -35,7 +35,7 @@ Deep call stack in Nushell (template.nu:71)
|
||||
|
||||
### How It Works Today (Monorepo)
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ User │
|
||||
└───────────────────────────┬─────────────────────────────────┘
|
||||
@ -80,7 +80,7 @@ Deep call stack in Nushell (template.nu:71)
|
||||
|
||||
#### Mode 1: Direct Mode (Simple Operations)
|
||||
|
||||
```bash
|
||||
```
|
||||
# No orchestrator needed
|
||||
provisioning server list
|
||||
provisioning env
|
||||
@ -92,7 +92,7 @@ provisioning (CLI) → Nushell scripts → Result
|
||||
|
||||
#### Mode 2: Orchestrated Mode (Complex Operations)
|
||||
|
||||
```bash
|
||||
```
|
||||
# Uses orchestrator for coordination
|
||||
provisioning server create --orchestrated
|
||||
|
||||
@ -104,7 +104,7 @@ provisioning CLI → Orchestrator API → Task Queue → Nushell executor
|
||||
|
||||
#### Mode 3: Workflow Mode (Batch Operations)
|
||||
|
||||
```bash
|
||||
```
|
||||
# Complex workflows with dependencies
|
||||
provisioning workflow submit server-cluster.ncl
|
||||
|
||||
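A small sketch of how a wrapper could pick between these modes at runtime: use the orchestrator only when it answers its health endpoint, and fall back to direct mode otherwise. The function name is illustrative; only the health endpoint and the fallback behaviour come from this document.

```nushell
# Illustrative mode selection: orchestrated when the orchestrator is reachable, direct otherwise.
def choose-execution-mode [--orchestrated] {
    if not $orchestrated {
        return "direct"
    }
    let healthy = (try {
        http get http://localhost:9090/health | ignore
        true
    } catch {
        false
    })
    if $healthy { "orchestrated" } else { "direct" }
}
```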
@ -128,7 +128,7 @@ provisioning CLI → Orchestrator Workflow Engine → Dependency Graph
|
||||
|
||||
**Nushell CLI (`core/nulib/workflows/server_create.nu`):**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Submit server creation workflow to orchestrator
|
||||
export def server_create_workflow [
|
||||
infra_name: string
|
||||
@ -153,7 +153,7 @@ export def server_create_workflow [
|
||||
|
||||
**Rust Orchestrator (`platform/orchestrator/src/api/workflows.rs`):**
|
||||
|
||||
```rust
|
||||
```
|
||||
// Receive workflow submission from Nushell CLI
|
||||
#[axum::debug_handler]
|
||||
async fn create_server_workflow(
|
||||
@ -183,7 +183,7 @@ async fn create_server_workflow(
|
||||
|
||||
**Flow:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
User → provisioning server create --orchestrated
|
||||
↓
|
||||
Nushell CLI prepares task
|
||||
@ -201,7 +201,7 @@ User can monitor: provisioning workflow monitor <id>
|
||||
|
||||
**Orchestrator Task Executor (`platform/orchestrator/src/executor.rs`):**
|
||||
|
||||
```rust
|
||||
```
|
||||
// Orchestrator spawns Nushell to execute business logic
|
||||
pub async fn execute_task(task: Task) -> Result<TaskResult> {
|
||||
match task.task_type {
|
||||
@ -233,7 +233,7 @@ pub async fn execute_task(task: Task) -> Result<TaskResult> {
|
||||
|
||||
**Flow:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Orchestrator task queue has pending task
|
||||
↓
|
||||
Executor picks up task
|
||||
@ -253,7 +253,7 @@ User monitors via: provisioning workflow status <id>
|
||||
|
||||
**Nushell Calls Orchestrator API:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Nushell script checks orchestrator status during execution
|
||||
export def check-orchestrator-health [] {
|
||||
let response = (http get http://localhost:9090/health)
|
||||
@ -276,7 +276,7 @@ export def report-progress [task_id: string, progress: int] {
|
||||
|
||||
**Orchestrator Monitors Nushell Execution:**
|
||||
|
||||
```rust
|
||||
```
|
||||
// Orchestrator tracks Nushell subprocess
|
||||
pub async fn execute_with_monitoring(task: Task) -> Result<TaskResult> {
|
||||
let mut child = Command::new("nu")
|
||||
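Since the CLI side depends on that health endpoint before it can report progress, a bounded retry loop is a natural companion to `check-orchestrator-health`. This is a sketch only; the retry count and delay are arbitrary choices.

```nushell
# Sketch: wait for the orchestrator's /health endpoint with a bounded retry.
def wait-for-orchestrator [--retries: int = 10, --delay: duration = 2sec] {
    for _attempt in 1..$retries {
        let ok = (try { http get http://localhost:9090/health | ignore; true } catch { false })
        if $ok { return true }
        sleep $delay
    }
    error make { msg: $"orchestrator did not respond after ($retries) attempts" }
}
```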
@ -332,7 +332,7 @@ pub async fn execute_with_monitoring(task: Task) -> Result<TaskResult> {
|
||||
|
||||
**Runtime Integration (Same as Monorepo):**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
User installs both packages:
|
||||
provisioning-core-3.2.1 → /usr/local/lib/provisioning/
|
||||
provisioning-platform-2.5.3 → /usr/local/bin/provisioning-orchestrator
|
||||
@ -347,7 +347,7 @@ No code dependencies, just runtime coordination!
|
||||
|
||||
**Core Package (`provisioning-core`) config:**
|
||||
|
||||
```toml
|
||||
```
|
||||
# /usr/local/share/provisioning/config/config.defaults.toml
|
||||
|
||||
[orchestrator]
|
||||
@ -363,7 +363,7 @@ fallback_to_direct = true # Fall back if orchestrator down
|
||||
|
||||
**Platform Package (`provisioning-platform`) config:**
|
||||
|
||||
```toml
|
||||
```
|
||||
# /usr/local/share/provisioning/platform/config.toml
|
||||
|
||||
[orchestrator]
|
||||
@ -382,7 +382,7 @@ task_timeout_seconds = 3600
|
||||
|
||||
**Compatibility Matrix (`provisioning-distribution/versions.toml`):**
|
||||
|
||||
```toml
|
||||
```
|
||||
[compatibility.platform."2.5.3"]
|
||||
core = "^3.2" # Platform 2.5.3 compatible with core 3.2.x
|
||||
min-core = "3.2.0"
|
||||
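A sketch of how runtime coordination could consult this matrix before executing: the `[compatibility.platform]` keys and `min-core` field follow the excerpt above, while the installed location of `versions.toml` and the simplified version comparison are assumptions.

```nushell
# Sketch: refuse to run if the installed core is older than min-core for this platform release.
# The versions.toml install path and the zero-pad comparison are simplifications.
def check-core-compat [platform_version: string, core_version: string] {
    let matrix = (open /usr/local/share/provisioning/distribution/versions.toml)
    let entry = ($matrix.compatibility.platform | get $platform_version)
    let min_core = ($entry | get min-core)

    # Zero-pad each component so plain string comparison orders versions correctly
    def ver-key [v: string] {
        $v | split row "." | each {|p| $p | fill --alignment right --character "0" --width 5 } | str join ""
    }

    if (ver-key $core_version) < (ver-key $min_core) {
        error make { msg: $"core ($core_version) is older than required ($min_core)" }
    }
    true
}
```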
@ -402,7 +402,7 @@ orchestrator-api = "v1"
|
||||
|
||||
**No Orchestrator Needed:**
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning server list
|
||||
|
||||
# Flow:
|
||||
@ -414,7 +414,7 @@ CLI → servers/list.nu → Query state → Return results
|
||||
|
||||
**Using Orchestrator:**
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning server create --orchestrated --infra wuji
|
||||
|
||||
# Detailed Flow:
|
||||
@ -466,7 +466,7 @@ provisioning server create --orchestrated --infra wuji
|
||||
|
||||
**Complex Workflow:**
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning batch submit multi-cloud-deployment.ncl
|
||||
|
||||
# Workflow contains:
|
||||
@ -548,7 +548,7 @@ provisioning batch submit multi-cloud-deployment.ncl
|
||||
|
||||
1. **Reliable State Management**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Orchestrator maintains:
|
||||
- Task queue (survives crashes)
|
||||
- Workflow checkpoints (resume on failure)
|
||||
@ -558,7 +558,7 @@ provisioning batch submit multi-cloud-deployment.ncl
|
||||
|
||||
1. **Clean Separation**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Orchestrator (Rust): Performance, concurrency, state
|
||||
Business Logic (Nushell): Providers, taskservs, workflows
|
||||
|
||||
@ -594,7 +594,7 @@ provisioning batch submit multi-cloud-deployment.ncl
|
||||
|
||||
**User installs bundle:**
|
||||
|
||||
```bash
|
||||
```
|
||||
curl -fsSL https://get.provisioning.io | sh
|
||||
|
||||
# Installs:
|
||||
@ -614,7 +614,7 @@ curl -fsSL https://get.provisioning.io | sh
|
||||
|
||||
**Core package expects orchestrator:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# core/nulib/lib_provisioning/orchestrator/client.nu
|
||||
|
||||
# Check if orchestrator is running
|
||||
@ -644,7 +644,7 @@ export def ensure-orchestrator [] {
|
||||
|
||||
**Platform package executes core scripts:**
|
||||
|
||||
```rust
|
||||
```
|
||||
// platform/orchestrator/src/executor/nushell.rs
|
||||
|
||||
pub struct NushellExecutor {
|
||||
@ -689,7 +689,7 @@ impl NushellExecutor {
|
||||
|
||||
**`/usr/local/share/provisioning/config/config.defaults.toml`:**
|
||||
|
||||
```toml
|
||||
```
|
||||
[orchestrator]
|
||||
enabled = true
|
||||
endpoint = "http://localhost:9090"
|
||||
@ -722,7 +722,7 @@ force_direct = [
|
||||
|
||||
**`/usr/local/share/provisioning/platform/config.toml`:**
|
||||
|
||||
```toml
|
||||
```
|
||||
[server]
|
||||
host = "127.0.0.1"
|
||||
port = 8080
|
||||
@ -780,7 +780,7 @@ env_vars = { NU_LIB_DIRS = "/usr/local/lib/provisioning" }
|
||||
|
||||
The confusing example in the multi-repo doc was **oversimplified**. The real architecture is:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
✅ Orchestrator IS USED and IS ESSENTIAL
|
||||
✅ Platform (Rust) coordinates Core (Nushell) execution
|
||||
✅ Loose coupling via CLI + REST API (not code dependencies)
|
||||
|
||||
@ -1,29 +1,30 @@
|
||||
# KCL Package and Module Loader System
|
||||
# Nickel Package and Module Loader System
|
||||
|
||||
This document describes the new package-based architecture implemented for the provisioning system, replacing hardcoded extension paths with a
|
||||
flexible module discovery and loading system.
|
||||
This document describes the package-based architecture implemented for the provisioning system, replacing hardcoded extension paths with a
|
||||
flexible module discovery and loading system using Nickel for type-safe configuration.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
The new system consists of two main components:
|
||||
The system consists of two main components:
|
||||
|
||||
1. **Core KCL Package**: Distributable core provisioning schemas
|
||||
1. **Core Nickel Package**: Distributable core provisioning schemas with type safety
|
||||
2. **Module Loader System**: Dynamic discovery and loading of extensions
|
||||
|
||||
### Benefits
|
||||
|
||||
- **Type-Safe Configuration**: Nickel ensures configuration validity at evaluation time
|
||||
- **Clean Separation**: Core package is self-contained and distributable
|
||||
- **Plug-and-Play Extensions**: Taskservs, providers, and clusters can be loaded dynamically
|
||||
- **Version Management**: Core package and extensions can be versioned independently
|
||||
- **Developer Friendly**: Easy workspace setup and module management
|
||||
- **Developer Friendly**: Easy workspace setup and module management with lazy evaluation
|
||||
|
||||
## Components
|
||||
|
||||
### 1. Core KCL Package (`/provisioning/kcl/`)
|
||||
### 1. Core Nickel Package (`/provisioning/schemas/`)
|
||||
|
||||
Contains fundamental schemas for provisioning:
|
||||
|
||||
- `settings.ncl` - System settings and configuration
|
||||
- `main.ncl` - Primary provisioning configuration
|
||||
- `server.ncl` - Server definitions and schemas
|
||||
- `defaults.ncl` - Default configurations
|
||||
- `lib.ncl` - Common library schemas
|
||||
@ -33,13 +34,14 @@ Contains fundamental schemas for provisioning:
|
||||
|
||||
- No hardcoded extension paths
|
||||
- Self-contained and distributable
|
||||
- Package-based imports only
|
||||
- Type-safe package-based imports
|
||||
- Lazy evaluation of expensive computations
|
||||
|
||||
### 2. Module Discovery System
|
||||
|
||||
#### Discovery Commands
|
||||
|
||||
```bash
|
||||
```
|
||||
# Discover available modules
|
||||
module-loader discover taskservs # List all taskservs
|
||||
module-loader discover providers --format yaml # List providers as YAML
|
||||
@ -56,7 +58,7 @@ module-loader discover clusters redis # Search for redis clusters
|
||||
|
||||
#### Loading Commands
|
||||
|
||||
```bash
|
||||
```
|
||||
# Load modules into workspace
|
||||
module-loader load taskservs . [kubernetes, cilium, containerd]
|
||||
module-loader load providers . [upcloud]
|
||||
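Discovery and loading compose naturally from Nushell. The sketch below checks that the requested taskservs exist before loading them; the YAML output shape (a `name` field per module) and the bracketed-list argument syntax are taken on faith from the examples above.

```nushell
# Sketch: verify requested taskservs are discoverable, then load them into the workspace.
# Assumes `discover --format yaml` yields records with a `name` field.
def load-needed-taskservs [workspace: string, wanted: list<string>] {
    let available = (module-loader discover taskservs --format yaml | from yaml | get name)
    let missing = ($wanted | where {|t| $t not-in $available })
    if ($missing | is-not-empty) {
        error make { msg: $"taskservs not found: ($missing | str join ', ')" }
    }
    module-loader load taskservs $workspace $"[($wanted | str join ', ')]"
}
```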
@ -79,7 +81,7 @@ module-loader init workspace/infra/production \
|
||||
|
||||
### New Workspace Layout
|
||||
|
||||
```plaintext
|
||||
```
|
||||
workspace/infra/my-project/
|
||||
├── kcl.mod # Package dependencies
|
||||
├── servers.ncl # Main server configuration
|
||||
@ -108,7 +110,7 @@ workspace/infra/my-project/
|
||||
|
||||
#### Before (Old System)
|
||||
|
||||
```kcl
|
||||
```
|
||||
# Hardcoded relative paths
|
||||
import ../../../kcl/server as server
|
||||
import ../../../extensions/taskservs/kubernetes/kcl/kubernetes as k8s
|
||||
@ -116,7 +118,7 @@ import ../../../extensions/taskservs/kubernetes/kcl/kubernetes as k8s
|
||||
|
||||
#### After (New System)
|
||||
|
||||
```kcl
|
||||
```
|
||||
# Package-based imports
|
||||
import provisioning.server as server
|
||||
|
||||
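For workspaces that still carry the old relative imports, a one-off rewrite can get most of the way to the package-based form. This is an illustrative helper only: the glob and the regular expression cover just the simple `../../../kcl/<module>` case shown above, so review the diff before committing.

```nushell
# Illustrative migration helper: rewrite old relative imports to package-based imports.
def migrate-imports [workspace: string] {
    glob $"($workspace)/**/*.ncl" | each {|file|
        let updated = (open $file --raw
            | str replace --all --regex 'import \.\./(\.\./)*kcl/(\w+) as' 'import provisioning.$2 as')
        $updated | save --force $file
    }
}
```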
@ -128,7 +130,7 @@ import .taskservs.kubernetes.kubernetes as k8s
|
||||
|
||||
### Building Core Package
|
||||
|
||||
```bash
|
||||
```
|
||||
# Build distributable package
|
||||
./provisioning/tools/kcl-packager.nu build --version 1.0.0
|
||||
|
||||
@ -143,21 +145,21 @@ import .taskservs.nclubernetes.kubernetes as k8s
|
||||
|
||||
#### Method 1: Local Installation (Recommended for development)
|
||||
|
||||
```toml
|
||||
```
|
||||
[dependencies]
|
||||
provisioning = { path = "~/.kcl/packages/provisioning", version = "0.0.1" }
|
||||
```
|
||||
|
||||
#### Method 2: Git Repository (For distributed teams)
|
||||
|
||||
```toml
|
||||
```
|
||||
[dependencies]
|
||||
provisioning = { git = "https://github.com/your-org/provisioning-kcl", version = "v0.0.1" }
|
||||
```
|
||||
|
||||
#### Method 3: KCL Registry (When available)
|
||||
|
||||
```toml
|
||||
```
|
||||
[dependencies]
|
||||
provisioning = { version = "0.0.1" }
|
||||
```
|
||||
@ -166,7 +168,7 @@ provisioning = { version = "0.0.1" }
|
||||
|
||||
### 1. New Project Setup
|
||||
|
||||
```bash
|
||||
```
|
||||
# Create workspace from template
|
||||
cp -r provisioning/templates/workspaces/kubernetes ./my-k8s-cluster
|
||||
cd my-k8s-cluster
|
||||
@ -185,7 +187,7 @@ provisioning server create --infra . --check
|
||||
|
||||
### 2. Extension Development
|
||||
|
||||
```bash
|
||||
```
|
||||
# Create new taskserv
|
||||
mkdir -p extensions/taskservs/my-service/kcl
|
||||
cd extensions/taskservs/my-service/kcl
|
||||
@ -200,7 +202,7 @@ module-loader discover taskservs # Should find your service
|
||||
|
||||
### 3. Workspace Migration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Analyze existing workspace
|
||||
workspace-migrate.nu workspace/infra/old-project dry-run
|
||||
|
||||
@ -213,7 +215,7 @@ module-loader validate workspace/infra/old-project
|
||||
|
||||
### 4. Multi-Environment Management
|
||||
|
||||
```bash
|
||||
```
|
||||
# Development environment
|
||||
cd workspace/infra/dev
|
||||
module-loader load taskservs . [redis, postgres]
|
||||
@ -229,7 +231,7 @@ module-loader load providers . [upcloud, aws] # Multi-cloud
|
||||
|
||||
### Listing and Validation
|
||||
|
||||
```bash
|
||||
```
|
||||
# List loaded modules
|
||||
module-loader list taskservs .
|
||||
module-loader list providers .
|
||||
@ -244,7 +246,7 @@ workspace-init.nu . info
|
||||
|
||||
### Unloading Modules
|
||||
|
||||
```bash
|
||||
```
|
||||
# Remove specific modules
|
||||
module-loader unload taskservs . redis
|
||||
module-loader unload providers . aws
|
||||
@ -254,7 +256,7 @@ module-loader unload providers . aws
|
||||
|
||||
### Module Information
|
||||
|
||||
```bash
|
||||
```
|
||||
# Get detailed module info
|
||||
module-loader info taskservs kubernetes
|
||||
module-loader info providers upcloud
|
||||
@ -265,7 +267,7 @@ module-loader info clusters buildkit
|
||||
|
||||
### Pipeline Example
|
||||
|
||||
```bash
|
||||
```
|
||||
#!/usr/bin/env nu
|
||||
# deploy-pipeline.nu
|
||||
|
||||
@ -290,13 +292,13 @@ provisioning server create --infra $env.WORKSPACE_PATH
|
||||
|
||||
#### Module Import Errors
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Error: module not found
|
||||
```
|
||||
|
||||
**Solution**: Verify modules are loaded and regenerate imports
|
||||
|
||||
```bash
|
||||
```
|
||||
module-loader list taskservs .
|
||||
module-loader load taskservs . [kubernetes, cilium, containerd]
|
||||
```
|
||||
@ -309,14 +311,14 @@ module-loader load taskservs . [kubernetes, cilium, containerd]
|
||||
|
||||
**Solution**: Verify core package installation and kcl.mod configuration
|
||||
|
||||
```bash
|
||||
```
|
||||
kcl-packager.nu install --version latest
|
||||
kcl run --dry-run servers.ncl
|
||||
```
|
||||
|
||||
### Debug Commands
|
||||
|
||||
```bash
|
||||
```
|
||||
# Show workspace structure
|
||||
tree -a workspace/infra/my-project
|
||||
|
||||
@ -362,25 +364,25 @@ For existing workspaces, follow these steps:
|
||||
|
||||
### 1. Backup Current Workspace
|
||||
|
||||
```bash
|
||||
```
|
||||
cp -r workspace/infra/existing workspace/infra/existing-backup
|
||||
```
|
||||
|
||||
### 2. Analyze Migration Requirements
|
||||
|
||||
```bash
|
||||
```
|
||||
workspace-migrate.nu workspace/infra/existing dry-run
|
||||
```
|
||||
|
||||
### 3. Perform Migration
|
||||
|
||||
```bash
|
||||
```
|
||||
workspace-migrate.nu workspace/infra/existing
|
||||
```
|
||||
|
||||
### 4. Load Required Modules
|
||||
|
||||
```bash
|
||||
```
|
||||
cd workspace/infra/existing
|
||||
module-loader load taskservs . [kubernetes, cilium]
|
||||
module-loader load providers . [upcloud]
|
||||
@ -388,14 +390,14 @@ module-loader load providers . [upcloud]
|
||||
|
||||
### 5. Test and Validate
|
||||
|
||||
```bash
|
||||
```
|
||||
kcl run servers.ncl
|
||||
module-loader validate .
|
||||
```
|
||||
|
||||
### 6. Deploy
|
||||
|
||||
```bash
|
||||
```
|
||||
provisioning server create --infra . --check
|
||||
```
|
||||
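The six steps can also be strung together into a single helper for repeat migrations. The commands are the ones documented above; error handling is intentionally minimal and the module lists are placeholders to adjust per infrastructure.

```nushell
# Sketch: run the documented migration steps end to end for one infra.
def migrate-and-deploy [infra: string] {
    let workspace = $"workspace/infra/($infra)"

    # 1. Backup, 2. Dry-run analysis, 3. Migration
    cp -r $workspace $"($workspace)-backup"
    workspace-migrate.nu $workspace dry-run
    workspace-migrate.nu $workspace

    # 4. Load required modules (placeholder lists - adjust per infra)
    cd $workspace
    module-loader load taskservs . "[kubernetes, cilium]"
    module-loader load providers . "[upcloud]"

    # 5. Validate, 6. Deploy with a dry run first
    module-loader validate .
    provisioning server create --infra . --check
}
```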
|
||||
|
||||
@ -70,7 +70,7 @@ workflow, and user-friendly distribution.
|
||||
|
||||
### 1. Monorepo Structure
|
||||
|
||||
```plaintext
|
||||
```
|
||||
project-provisioning/
|
||||
│
|
||||
├── provisioning/ # CORE SYSTEM (distribution source)
|
||||
@ -246,7 +246,7 @@ project-provisioning/
|
||||
|
||||
**Installation:**
|
||||
|
||||
```bash
|
||||
```
|
||||
/usr/local/
|
||||
├── bin/
|
||||
│ └── provisioning
|
||||
@ -275,7 +275,7 @@ project-provisioning/
|
||||
|
||||
**Installation:**
|
||||
|
||||
```bash
|
||||
```
|
||||
/usr/local/
|
||||
├── bin/
|
||||
│ ├── provisioning-orchestrator
|
||||
@ -297,7 +297,7 @@ project-provisioning/
|
||||
|
||||
**Installation:**
|
||||
|
||||
```bash
|
||||
```
|
||||
/usr/local/lib/provisioning/extensions/
|
||||
├── taskservs/
|
||||
├── clusters/
|
||||
@ -317,7 +317,7 @@ project-provisioning/
|
||||
|
||||
**Installation:**
|
||||
|
||||
```bash
|
||||
```
|
||||
~/.config/nushell/plugins/
|
||||
```
|
||||
|
||||
@ -325,7 +325,7 @@ project-provisioning/
|
||||
|
||||
#### System Installation (Root)
|
||||
|
||||
```bash
|
||||
```
|
||||
/usr/local/
|
||||
├── bin/
|
||||
│ ├── provisioning # Main CLI
|
||||
@ -351,7 +351,7 @@ project-provisioning/
|
||||
|
||||
#### User Configuration
|
||||
|
||||
```bash
|
||||
```
|
||||
~/.provisioning/
|
||||
├── config/
|
||||
│ └── config.user.toml # User overrides
|
||||
@ -365,7 +365,7 @@ project-provisioning/
|
||||
|
||||
#### Project Workspace
|
||||
|
||||
```bash
|
||||
```
|
||||
./workspace/
|
||||
├── infra/ # Infrastructure definitions
|
||||
│ ├── my-cluster/
|
||||
@ -384,7 +384,7 @@ project-provisioning/
|
||||
|
||||
### Configuration Hierarchy
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Priority (highest to lowest):
|
||||
1. CLI flags --debug, --infra=my-cluster
|
||||
2. Runtime overrides PROVISIONING_DEBUG=true
|
||||
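One way to read this hierarchy is as a fold over configuration layers, merging lowest priority first so that later layers win. The sketch below uses the documented default and user config paths; the runtime-override handling is reduced to a single environment variable for illustration.

```nushell
# Sketch: merge config layers lowest-priority first so higher layers override.
def resolve-config [cli_flags: record] {
    let defaults = (open /usr/local/share/provisioning/config/config.defaults.toml)
    let user = (open ~/.provisioning/config/config.user.toml)
    # Runtime overrides: reduced here to the PROVISIONING_DEBUG example above
    let runtime = (if ($env.PROVISIONING_DEBUG? | default "") == "true" { { debug: true } } else { {} })
    [$defaults, $user, $runtime, $cli_flags] | reduce {|layer, acc| $acc | merge $layer }
}
```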
@ -401,7 +401,7 @@ Priority (highest to lowest):
|
||||
|
||||
**`provisioning/tools/build/`:**
|
||||
|
||||
```plaintext
|
||||
```
|
||||
build/
|
||||
├── build-system.nu # Main build orchestrator
|
||||
├── package-core.nu # Core packaging
|
||||
@ -417,7 +417,7 @@ build/
|
||||
|
||||
**`provisioning/tools/build/build-system.nu`:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
#!/usr/bin/env nu
|
||||
# Build system for provisioning project
|
||||
|
||||
@ -595,7 +595,7 @@ export def "main status" [] {
|
||||
|
||||
**`Justfile`:**
|
||||
|
||||
```makefile
|
||||
```
|
||||
# Provisioning Build System
|
||||
# Use 'just --list' to see all available commands
|
||||
|
||||
@ -727,7 +727,7 @@ audit:
|
||||
|
||||
**`distribution/installers/install.nu`:**
|
||||
|
||||
```nushell
|
||||
```
|
||||
#!/usr/bin/env nu
|
||||
# Provisioning installation script
|
||||
|
||||
@ -984,7 +984,7 @@ export def "main upgrade" [
|
||||
|
||||
**`distribution/installers/install.sh`:**
|
||||
|
||||
```bash
|
||||
```
|
||||
#!/usr/bin/env bash
|
||||
# Provisioning installation script (Bash version)
|
||||
# This script installs Nushell first, then runs the Nushell installer
|
||||
@ -1111,7 +1111,7 @@ main "$@"
|
||||
|
||||
**Commands:**
|
||||
|
||||
```bash
|
||||
```
|
||||
# Backup current state
|
||||
cp -r /Users/Akasha/project-provisioning /Users/Akasha/project-provisioning.backup
|
||||
|
||||
@ -1136,7 +1136,7 @@ fd workspace -t d > workspace-dirs.txt
|
||||
|
||||
**Commands:**
|
||||
|
||||
```bash
|
||||
```
|
||||
# Create distribution directory
|
||||
mkdir -p distribution/{packages,installers,registry}
|
||||
|
||||
@ -1410,7 +1410,7 @@ rm -rf NO/ wrks/ presentations/
|
||||
|
||||
#### Option 1: Clean Migration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Backup current workspace
|
||||
cp -r workspace workspace.backup
|
||||
|
||||
@ -1423,7 +1423,7 @@ provisioning workspace migrate --from workspace.backup --to workspace/
|
||||
|
||||
#### Option 2: In-Place Migration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Run migration script
|
||||
provisioning migrate --check # Dry run
|
||||
provisioning migrate # Execute migration
|
||||
@ -1431,7 +1431,7 @@ provisioning migrate # Execute migration
|
||||
|
||||
### For Developers
|
||||
|
||||
```bash
|
||||
```
|
||||
# Pull latest changes
|
||||
git pull origin main
|
||||
|
||||
|
||||
@ -11,7 +11,7 @@ The system solves fundamental technical challenges through architectural innovat
|
||||
|
||||
### System Diagram
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│                      User Interface Layer                       │
|
||||
├─────────────────┬─────────────────┬─────────────────────────────┤
|
||||
@ -149,7 +149,7 @@ The system solves fundamental technical challenges through architectural innovat
|
||||
|
||||
**Nickel Workflow Definitions**:
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
batch_workflow = {
|
||||
name = "multi_cloud_deployment",
|
||||
@ -247,14 +247,14 @@ The system solves fundamental technical challenges through architectural innovat
|
||||
|
||||
### Configuration Resolution Flow
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. Workspace Discovery → 2. Configuration Loading → 3. Hierarchy Merge →
|
||||
4. Variable Interpolation → 5. Schema Validation → 6. Runtime Application
|
||||
```
|
||||
|
||||
### Workflow Execution Flow
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. Workflow Submission → 2. Dependency Analysis → 3. Task Scheduling →
|
||||
4. Parallel Execution → 5. State Tracking → 6. Result Aggregation →
|
||||
7. Error Handling → 8. Cleanup/Rollback
|
||||
@ -262,7 +262,7 @@ The system solves fundamental technical challenges through architectural innovat
|
||||
|
||||
### Provider Integration Flow
|
||||
|
||||
```plaintext
|
||||
```
|
||||
1. Provider Discovery → 2. Configuration Validation → 3. Authentication →
|
||||
4. Resource Planning → 5. Operation Execution → 6. State Persistence →
|
||||
7. Result Reporting
|
||||
|
||||
@ -11,7 +11,7 @@
|
||||
|
||||
TypeDialog generates **type-safe interactive forms** from configuration schemas with **bidirectional Nickel integration**.
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Nickel Schema
|
||||
↓
|
||||
TypeDialog Form (Auto-generated)
|
||||
@ -27,7 +27,7 @@ Nickel output config (Type-safe)
|
||||
|
||||
### Three Layers
|
||||
|
||||
```plaintext
|
||||
```
|
||||
CLI/TUI/Web Layer
|
||||
↓
|
||||
TypeDialog Form Engine
|
||||
@ -39,7 +39,7 @@ Schema Contracts
|
||||
|
||||
### Data Flow
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Input (Nickel)
|
||||
↓
|
||||
Form Definition (TOML)
|
||||
@ -59,7 +59,7 @@ Output (JSON/YAML/TOML/Nickel)
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
# Clone TypeDialog
|
||||
git clone https://github.com/jesusperezlorenzo/typedialog.git
|
||||
cd typedialog
|
||||
@ -73,7 +73,7 @@ cargo install --path ./crates/typedialog
|
||||
|
||||
### Verify Installation
|
||||
|
||||
```bash
|
||||
```
|
||||
typedialog --version
|
||||
typedialog --help
|
||||
```
|
||||
@ -84,7 +84,7 @@ typedialog --help
|
||||
|
||||
### Step 1: Define Nickel Schema
|
||||
|
||||
```nickel
|
||||
```
|
||||
# server_config.ncl
|
||||
let contracts = import "./contracts.ncl" in
|
||||
let defaults = import "./defaults.ncl" in
|
||||
@ -101,7 +101,7 @@ let defaults = import "./defaults.ncl" in
|
||||
|
||||
### Step 2: Define TypeDialog Form (TOML)
|
||||
|
||||
```toml
|
||||
```
|
||||
# server_form.toml
|
||||
[form]
|
||||
title = "Server Configuration"
|
||||
@ -155,13 +155,13 @@ help = "Select applicable tags"
|
||||
|
||||
### Step 3: Render Form (CLI)
|
||||
|
||||
```bash
|
||||
```
|
||||
typedialog form --config server_form.toml --backend cli
|
||||
```
|
||||
|
||||
**Output**:
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Server Configuration
|
||||
Create a new server configuration
|
||||
|
||||
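When the form is driven from Nushell instead of the raw shell, capturing the answers as structured data keeps the rest of the pipeline type-aware. The `--output json` flag here is an assumption based on the multi-format output support described later; the field names match the form above.

```nushell
# Sketch: run the form from Nushell and use the answers as a record.
let answers = (typedialog form --config server_form.toml --backend cli --output json | from json)
print $"Provisioning ($answers.server_name) with ($answers.cpu_cores) cores"
```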
@ -179,14 +179,14 @@ Create a new server configuration
|
||||
|
||||
### Step 4: Validate Against Nickel Schema
|
||||
|
||||
```bash
|
||||
```
|
||||
# Validation happens automatically
|
||||
# If input matches Nickel contract, proceeds to output
|
||||
```
|
||||
|
||||
### Step 5: Output to Nickel
|
||||
|
||||
```bash
|
||||
```
|
||||
typedialog form \
|
||||
--config server_form.toml \
|
||||
--output nickel \
|
||||
@ -195,7 +195,7 @@ typedialog form \
|
||||
|
||||
**Output file** (`server_config_output.ncl`):
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
server_name = "web-01",
|
||||
cpu_cores = 4,
|
||||
@ -216,7 +216,7 @@ You want an interactive CLI wizard for infrastructure provisioning.
|
||||
|
||||
### Step 1: Define Nickel Schema for Infrastructure
|
||||
|
||||
```nickel
|
||||
```
|
||||
# infrastructure_schema.ncl
|
||||
{
|
||||
InfrastructureConfig = {
|
||||
@ -245,7 +245,7 @@ You want an interactive CLI wizard for infrastructure provisioning.
|
||||
|
||||
### Step 2: Create Comprehensive Form
|
||||
|
||||
```toml
|
||||
```
|
||||
# infrastructure_wizard.toml
|
||||
[form]
|
||||
title = "Infrastructure Provisioning Wizard"
|
||||
@ -334,7 +334,7 @@ placeholder = "admin@company.com"
|
||||
|
||||
### Step 3: Run Interactive Wizard
|
||||
|
||||
```bash
|
||||
```
|
||||
typedialog form \
|
||||
--config infrastructure_wizard.toml \
|
||||
--backend tui \
|
||||
@ -343,7 +343,7 @@ typedialog form \
|
||||
|
||||
**Output** (`infrastructure_config.ncl`):
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
workspace_name = "production-eu",
|
||||
deployment_mode = 'enterprise,
|
||||
@ -358,7 +358,7 @@ typedialog form \
|
||||
|
||||
### Step 4: Use Output in Infrastructure
|
||||
|
||||
```nickel
|
||||
```
|
||||
# main_infrastructure.ncl
|
||||
let config = import "./infrastructure_config.ncl" in
|
||||
let schemas = import "../../provisioning/schemas/main.ncl" in
|
||||
@ -398,7 +398,7 @@ let schemas = import "../../provisioning/schemas/main.ncl" in
|
||||
|
||||
### Form Definition (Advanced)
|
||||
|
||||
```toml
|
||||
```
|
||||
# server_advanced_form.toml
|
||||
[form]
|
||||
title = "Server Configuration"
|
||||
@ -532,7 +532,7 @@ options = ["production", "staging", "testing", "development"]
|
||||
|
||||
### Output Structure
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
# Basic
|
||||
server_name = "web-prod-01",
|
||||
@ -562,7 +562,7 @@ options = ["production", "staging", "testing", "development"]
|
||||
|
||||
### TypeDialog REST Endpoints
|
||||
|
||||
```bash
|
||||
```
|
||||
# Start TypeDialog server
|
||||
typedialog server --port 8080
|
||||
|
||||
@ -574,7 +574,7 @@ curl -X POST http://localhost:8080/forms \
|
||||
|
||||
### Response Format
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"form_id": "srv_abc123",
|
||||
"status": "rendered",
|
||||
@ -592,7 +592,7 @@ curl -X POST http://localhost:8080/forms \
|
||||
|
||||
### Submit Form
|
||||
|
||||
```bash
|
||||
```
|
||||
curl -X POST http://localhost:8080/forms/srv_abc123/submit \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
@ -607,7 +607,7 @@ curl -X POST http://localhost:8080/forms/srv_abc123/submit \
|
||||
|
||||
### Response
|
||||
|
||||
```json
|
||||
```
|
||||
{
|
||||
"status": "success",
|
||||
"validation": "passed",
|
||||
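The same submit call is easy to script from Nushell, which is convenient when the form data is computed rather than typed. The endpoint and the `form_id`/`validation` fields follow the curl example and response above; the submitted field names are placeholders.

```nushell
# Sketch: submit form answers over the REST API from Nushell.
let form_id = "srv_abc123"
let result = (http post --content-type application/json $"http://localhost:8080/forms/($form_id)/submit" {
    server_name: "web-prod-01"
    cpu_cores: 8
})
if $result.validation == "passed" {
    print $"Form ($form_id) accepted"
}
```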
@ -631,7 +631,7 @@ curl -X POST http://localhost:8080/forms/srv_abc123/submit \
|
||||
|
||||
TypeDialog validates user input against Nickel contracts:
|
||||
|
||||
```nickel
|
||||
```
|
||||
# Nickel contract
|
||||
ServerConfig = {
|
||||
cpu_cores | Number, # Must be number
|
||||
@ -645,7 +645,7 @@ ServerConfig = {
|
||||
|
||||
### Validation Rules in Form
|
||||
|
||||
```toml
|
||||
```
|
||||
[[fields]]
|
||||
name = "cpu_cores"
|
||||
type = "number"
|
||||
@ -661,7 +661,7 @@ help = "Must be 1-32 cores"
|
||||
|
||||
### Use Case: Infrastructure Initialization
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. User runs initialization
|
||||
provisioning init --wizard
|
||||
|
||||
@ -679,7 +679,7 @@ provisioning init --wizard
|
||||
|
||||
### Implementation in Nushell
|
||||
|
||||
```nushell
|
||||
```
|
||||
# provisioning/core/nulib/provisioning_init.nu
|
||||
|
||||
def provisioning_init_wizard [] {
|
||||
@ -714,7 +714,7 @@ def provisioning_init_wizard [] {
|
||||
|
||||
Show/hide fields based on user selections:
|
||||
|
||||
```toml
|
||||
```
|
||||
[[fields]]
|
||||
name = "backup_retention"
|
||||
label = "Backup Retention (days)"
|
||||
@ -726,7 +726,7 @@ visible_if = "enable_backup == true" # Only shown if backup enabled
|
||||
|
||||
Set defaults based on other fields:
|
||||
|
||||
```toml
|
||||
```
|
||||
[[fields]]
|
||||
name = "deployment_mode"
|
||||
type = "select"
|
||||
@ -741,7 +741,7 @@ default_from = "deployment_mode" # Can reference other fields
|
||||
|
||||
### Custom Validation
|
||||
|
||||
```toml
|
||||
```
|
||||
[[fields]]
|
||||
name = "memory_gb"
|
||||
type = "number"
|
||||
@ -755,7 +755,7 @@ help = "Memory must be at least 2 GB per CPU core"
|
||||
|
||||
TypeDialog can output to multiple formats:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Output to Nickel (recommended for IaC)
|
||||
typedialog form --config form.toml --output nickel
|
||||
|
||||
@ -777,7 +777,7 @@ TypeDialog supports three rendering backends:
|
||||
|
||||
### 1. CLI (Command-line prompts)
|
||||
|
||||
```bash
|
||||
```
|
||||
typedialog form --config form.toml --backend cli
|
||||
```
|
||||
|
||||
@ -786,7 +786,7 @@ typedialog form --config form.toml --backend cli
|
||||
|
||||
### 2. TUI (Terminal User Interface - Ratatui)
|
||||
|
||||
```bash
|
||||
```
|
||||
typedialog form --config form.toml --backend tui
|
||||
```
|
||||
|
||||
@ -795,7 +795,7 @@ typedialog form --config form.toml --backend tui
|
||||
|
||||
### 3. Web (HTTP Server - Axum)
|
||||
|
||||
```bash
|
||||
```
|
||||
typedialog form --config form.toml --backend web --port 3000
|
||||
# Opens http://localhost:3000
|
||||
```
|
||||
@ -813,7 +813,7 @@ typedialog form --config form.toml --backend web --port 3000
|
||||
|
||||
**Solution**: Verify field definitions match Nickel schema:
|
||||
|
||||
```toml
|
||||
```
|
||||
# Form field
|
||||
[[fields]]
|
||||
name = "cpu_cores" # Must match Nickel field name
|
||||
@ -826,7 +826,7 @@ type = "number" # Must match Nickel type
|
||||
|
||||
**Solution**: Add help text and validation rules:
|
||||
|
||||
```toml
|
||||
```
|
||||
[[fields]]
|
||||
name = "cpu_cores"
|
||||
validation_pattern = "^[1-9][0-9]*$"
|
||||
@ -839,7 +839,7 @@ help = "Must be positive integer"
|
||||
|
||||
**Solution**: Ensure all required fields in form:
|
||||
|
||||
```toml
|
||||
```
|
||||
[[fields]]
|
||||
name = "required_field"
|
||||
required = true # User must provide value
|
||||
@ -851,7 +851,7 @@ required = true # User must provide value
|
||||
|
||||
### Step 1: Define Nickel Schema
|
||||
|
||||
```nickel
|
||||
```
|
||||
# workspace_schema.ncl
|
||||
{
|
||||
workspace = {
|
||||
@ -866,7 +866,7 @@ required = true # User must provide value
|
||||
|
||||
### Step 2: Define Form
|
||||
|
||||
```toml
|
||||
```
|
||||
# workspace_form.toml
|
||||
[[fields]]
|
||||
name = "name"
|
||||
@ -895,14 +895,14 @@ required = true
|
||||
|
||||
### Step 3: User Interaction
|
||||
|
||||
```bash
|
||||
```
|
||||
$ typedialog form --config workspace_form.toml --backend tui
|
||||
# User fills form interactively
|
||||
```
|
||||
|
||||
### Step 4: Output
|
||||
|
||||
```nickel
|
||||
```
|
||||
{
|
||||
workspace = {
|
||||
name = "production",
|
||||
@ -916,7 +916,7 @@ $ typedialog form --config workspace_form.toml --backend tui
|
||||
|
||||
### Step 5: Use in Provisioning
|
||||
|
||||
```nickel
|
||||
```
|
||||
# main.ncl
|
||||
let config = import "./workspace.ncl" in
|
||||
let schemas = import "provisioning/schemas/main.ncl" in
|
||||
|
||||
@ -10,7 +10,7 @@ The new configuration system includes comprehensive schema validation to catch e
|
||||
|
||||
Ensures all required fields are present:
|
||||
|
||||
```toml
|
||||
```
|
||||
# Schema definition
|
||||
[required]
|
||||
fields = ["name", "version", "enabled"]
|
||||
@ -30,7 +30,7 @@ version = "1.0.0"
|
||||
|
||||
Validates field types:
|
||||
|
||||
```toml
|
||||
```
|
||||
# Schema
|
||||
[fields.port]
|
||||
type = "int"
|
||||
@ -54,7 +54,7 @@ port = "8080" # Error: Expected int, got string
|
||||
|
||||
Restricts values to predefined set:
|
||||
|
||||
```toml
|
||||
```
|
||||
# Schema
|
||||
[fields.environment]
|
||||
type = "string"
|
||||
@ -71,7 +71,7 @@ environment = "production" # Error: Must be one of: dev, staging, prod
|
||||
|
||||
Validates numeric ranges:
|
||||
|
||||
```toml
|
||||
```
|
||||
# Schema
|
||||
[fields.port]
|
||||
type = "int"
|
||||
@ -92,7 +92,7 @@ port = 70000 # Error: Must be <= 65535
|
||||
|
||||
Validates string patterns using regex:
|
||||
|
||||
```toml
|
||||
```
|
||||
# Schema
|
||||
[fields.email]
|
||||
type = "string"
|
||||
@ -109,7 +109,7 @@ email = "not-an-email" # Error: Does not match pattern
|
||||
|
||||
Warns about deprecated configuration:
|
||||
|
||||
```toml
|
||||
```
|
||||
# Schema
|
||||
[deprecated]
|
||||
fields = ["old_field"]
|
||||
@ -125,7 +125,7 @@ old_field = "value" # Warning: old_field is deprecated. Use new_field instead.
|
||||
|
||||
### Command Line
|
||||
|
||||
```bash
|
||||
```
|
||||
# Validate workspace config
|
||||
provisioning workspace config validate
|
||||
|
||||
@ -141,7 +141,7 @@ provisioning workspace config validate --verbose
|
||||
|
||||
### Programmatic Usage
|
||||
|
||||
```nushell
|
||||
```
|
||||
use provisioning/core/nulib/lib_provisioning/config/schema_validator.nu *
|
||||
|
||||
# Load config
|
||||
@ -171,7 +171,7 @@ if ($result.warnings | length) > 0 {
|
||||
|
||||
### Pretty Print Results
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Validate and print formatted results
|
||||
let result = (validate-workspace-config $config)
|
||||
print-validation-results $result
|
||||
@ -183,7 +183,7 @@ print-validation-results $result
|
||||
|
||||
File: `/Users/Akasha/project-provisioning/provisioning/config/workspace.schema.toml`
|
||||
|
||||
```toml
|
||||
```
|
||||
[required]
|
||||
fields = ["workspace", "paths"]
|
||||
|
||||
@ -222,7 +222,7 @@ enum = ["debug", "info", "warn", "error"]
|
||||
|
||||
File: `/Users/Akasha/project-provisioning/provisioning/extensions/providers/aws/config.schema.toml`
|
||||
|
||||
```toml
|
||||
```
|
||||
[required]
|
||||
fields = ["provider", "credentials"]
|
||||
|
||||
@ -279,7 +279,7 @@ old_region_field = "provider.region"
|
||||
|
||||
File: `/Users/Akasha/project-provisioning/provisioning/platform/orchestrator/config.schema.toml`
|
||||
|
||||
```toml
|
||||
```
|
||||
[required]
|
||||
fields = ["service", "server"]
|
||||
|
||||
@ -325,7 +325,7 @@ type = "string"
|
||||
|
||||
File: `/Users/Akasha/project-provisioning/provisioning/core/services/kms/config.schema.toml`
|
||||
|
||||
```toml
|
||||
```
|
||||
[required]
|
||||
fields = ["kms", "encryption"]
|
||||
|
||||
@ -372,7 +372,7 @@ old_kms_type = "kms.provider"
|
||||
|
||||
### 1. Development
|
||||
|
||||
```bash
|
||||
```
|
||||
# Create new config
|
||||
vim ~/workspaces/dev/config/provisioning.yaml
|
||||
|
||||
@ -386,7 +386,7 @@ provisioning workspace config validate
|
||||
|
||||
### 2. CI/CD Pipeline
|
||||
|
||||
```yaml
|
||||
```
|
||||
# GitLab CI
|
||||
validate-config:
|
||||
stage: validate
|
||||
@ -402,7 +402,7 @@ validate-config:
|
||||
|
||||
### 3. Pre-Deployment
|
||||
|
||||
```bash
|
||||
```
|
||||
# Validate all configurations before deployment
|
||||
provisioning workspace config validate --verbose
|
||||
provisioning provider validate --all
|
||||
@ -418,7 +418,7 @@ fi
|
||||
|
||||
### Clear Error Format
|
||||
|
||||
```plaintext
|
||||
```
|
||||
❌ Validation failed
|
||||
|
||||
Errors:
|
||||
@ -445,7 +445,7 @@ Each error includes:
|
||||
|
||||
### Pattern 1: Hostname Validation
|
||||
|
||||
```toml
|
||||
```
|
||||
[fields.hostname]
|
||||
type = "string"
|
||||
pattern = "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
|
||||
@ -453,7 +453,7 @@ pattern = "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
|
||||
|
||||
### Pattern 2: Email Validation
|
||||
|
||||
```toml
|
||||
```
|
||||
[fields.email]
|
||||
type = "string"
|
||||
pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
|
||||
@ -461,7 +461,7 @@ pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
|
||||
|
||||
### Pattern 3: Semantic Version
|
||||
|
||||
```toml
|
||||
```
|
||||
[fields.version]
|
||||
type = "string"
|
||||
pattern = "^\\d+\\.\\d+\\.\\d+(-[a-zA-Z0-9]+)?$"
|
||||
@ -469,7 +469,7 @@ pattern = "^\\d+\\.\\d+\\.\\d+(-[a-zA-Z0-9]+)?$"
|
||||
|
||||
### Pattern 4: URL Validation
|
||||
|
||||
```toml
|
||||
```
|
||||
[fields.url]
|
||||
type = "string"
|
||||
pattern = "^https?://[a-zA-Z0-9.-]+(:[0-9]+)?(/.*)?$"
|
||||
@ -477,7 +477,7 @@ pattern = "^https?://[a-zA-Z0-9.-]+(:[0-9]+)?(/.*)?$"
|
||||
|
||||
### Pattern 5: IPv4 Address
|
||||
|
||||
```toml
|
||||
```
|
||||
[fields.ip_address]
|
||||
type = "string"
|
||||
pattern = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"
|
||||
@ -485,7 +485,7 @@ pattern = "^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"
|
||||
|
||||
### Pattern 6: AWS Resource ID
|
||||
|
||||
```toml
|
||||
```
|
||||
[fields.instance_id]
|
||||
type = "string"
|
||||
pattern = "^i-[a-f0-9]{8,17}$"
|
||||
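Before committing patterns like these to a schema, it helps to exercise them against a few known-good and known-bad values. A quick test loop in Nushell (the sample values are made up):

```nushell
# Quick sanity check for schema regex patterns; sample values are illustrative.
let cases = [
    { pattern: '^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$', value: "10.0.0.1", expect: true }
    { pattern: '^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$', value: "not-an-ip", expect: false }
    { pattern: '^i-[a-f0-9]{8,17}$', value: "i-0abc1234def56789", expect: true }
]
$cases | each {|case|
    let matched = ($case.value =~ $case.pattern)
    { value: $case.value, matched: $matched, ok: ($matched == $case.expect) }
}
```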
@ -503,14 +503,14 @@ pattern = "^vpc-[a-f0-9]{8,17}$"
|
||||
|
||||
### Unit Tests
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Run validation test suite
|
||||
nu provisioning/tests/config_validation_tests.nu
|
||||
```
|
||||
|
||||
### Integration Tests
|
||||
|
||||
```bash
|
||||
```
|
||||
# Test with real configs
|
||||
provisioning test validate --workspace dev
|
||||
provisioning test validate --workspace staging
|
||||
@ -519,7 +519,7 @@ provisioning test validate --workspace prod
|
||||
|
||||
### Custom Validation
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Create custom validation function
|
||||
def validate-custom-config [config: record] {
|
||||
let result = (validate-workspace-config $config)
|
||||
@ -543,7 +543,7 @@ def validate-custom-config [config: record] {
|
||||
|
||||
### 1. Validate Early
|
||||
|
||||
```bash
|
||||
```
|
||||
# Validate during development
|
||||
provisioning workspace config validate
|
||||
|
||||
@ -552,7 +552,7 @@ provisioning workspace config validate
|
||||
|
||||
### 2. Use Strict Schemas
|
||||
|
||||
```toml
|
||||
```
|
||||
# Be explicit about types and constraints
|
||||
[fields.port]
|
||||
type = "int"
|
||||
@ -564,7 +564,7 @@ max = 65535
|
||||
|
||||
### 3. Document Patterns
|
||||
|
||||
```toml
|
||||
```
|
||||
# Include examples in schema
|
||||
[fields.email]
|
||||
type = "string"
|
||||
@ -574,7 +574,7 @@ pattern = "^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}$"
|
||||
|
||||
### 4. Handle Deprecation
|
||||
|
||||
```toml
|
||||
```
|
||||
# Always provide replacement guidance
|
||||
[deprecated_replacements]
|
||||
old_field = "new_field" # Clear migration path
|
||||
@ -582,7 +582,7 @@ old_field = "new_field" # Clear migration path
|
||||
|
||||
### 5. Test Schemas
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Include test cases in comments
|
||||
# Valid: "admin@example.com"
|
||||
# Invalid: "not-an-email"
|
||||
@ -592,7 +592,7 @@ old_field = "new_field" # Clear migration path
|
||||
|
||||
### Schema File Not Found
|
||||
|
||||
```bash
|
||||
```
|
||||
# Error: Schema file not found: /path/to/schema.toml
|
||||
|
||||
# Solution: Ensure schema exists
|
||||
@ -601,7 +601,7 @@ ls -la /Users/Akasha/project-provisioning/provisioning/config/*.schema.toml
|
||||
|
||||
### Pattern Not Matching
|
||||
|
||||
```bash
|
||||
```
|
||||
# Error: Field hostname does not match pattern
|
||||
|
||||
# Debug: Test pattern separately
|
||||
@ -610,7 +610,7 @@ echo "my-hostname" | grep -E "^[a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?$"
|
||||
|
||||
### Type Mismatch
|
||||
|
||||
```bash
|
||||
```
|
||||
# Error: Expected int, got string
|
||||
|
||||
# Check config
|
||||
|
||||
@ -1 +0,0 @@
|
||||
# Workspace Config Architecture
|
||||
@ -1 +0,0 @@
|
||||
# Migration Overview
|
||||
@ -28,7 +28,7 @@ This guide describes the metadata-driven authentication system implemented over
|
||||
|
||||
### System Components
|
||||
|
||||
```plaintext
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│                        User Command                         │
|
||||
└────────────────────────────────┬──────────────────────────────┘
|
||||
@ -89,7 +89,7 @@ This guide describes the metadata-driven authentication system implemented over
|
||||
|
||||
### Installation Steps
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Clone or update repository
|
||||
git clone https://github.com/your-org/project-provisioning.git
|
||||
cd project-provisioning
|
||||
@ -113,7 +113,7 @@ nu tests/test-metadata-cache-benchmark.nu
|
||||
|
||||
### Basic Commands
|
||||
|
||||
```bash
|
||||
```
|
||||
# Initialize authentication
|
||||
provisioning login
|
||||
|
||||
@ -135,7 +135,7 @@ provisioning server create --name test --check
|
||||
|
||||
### Authentication Flow
|
||||
|
||||
```bash
|
||||
```
|
||||
# 1. Login (required for production operations)
|
||||
$ provisioning login
|
||||
Username: alice@example.com
|
||||
@ -160,7 +160,7 @@ Auth check: Check auth for destructive operation
|
||||
|
||||
### Check Mode (Bypass Auth for Testing)
|
||||
|
||||
```bash
|
||||
```
|
||||
# Dry-run without auth checks
|
||||
provisioning server create --name test --check
|
||||
|
||||
@ -172,7 +172,7 @@ Dry-run mode - no changes will be made
|
||||
|
||||
### Non-Interactive CI/CD Mode
|
||||
|
||||
```bash
|
||||
```
|
||||
# Automated mode - skip confirmations
|
||||
provisioning server create --name web-01 --yes
|
||||
|
||||
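A minimal CI step in Nushell, using the non-interactive variable and `--yes` flag shown above; the server name is a placeholder and the failure handling is deliberately blunt.

```nushell
# Sketch of a CI step: non-interactive run that fails fast on errors.
with-env { PROVISIONING_NON_INTERACTIVE: "1" } {
    let result = (provisioning server create --name web-01 --yes | complete)
    if $result.exit_code != 0 {
        print --stderr $result.stderr
        exit 1
    }
}
```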
@ -189,7 +189,7 @@ PROVISIONING_NON_INTERACTIVE=1 provisioning server create --name web-02 --yes
|
||||
|
||||
**Old Pattern** (Before Fase 5):
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Hardcoded auth check
|
||||
let response = (input "Delete server? (yes/no): ")
|
||||
if $response != "yes" { exit 1 }
|
||||
@ -203,7 +203,7 @@ export def delete-server [name: string, --yes] {
|
||||
|
||||
**New Pattern** (After Fase 5):
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Metadata header
|
||||
# [command]
|
||||
# name = "server delete"
|
||||
@ -226,7 +226,7 @@ export def delete-server [name: string, --yes] {
|
||||
|
||||
1. Add metadata header after shebang:
|
||||
|
||||
```nushell
|
||||
```
|
||||
#!/usr/bin/env nu
|
||||
# [command]
|
||||
# name = "server create"
|
||||
@ -241,7 +241,7 @@ export def create-server [name: string] {
|
||||
|
||||
1. Register in `provisioning/schemas/main.ncl`:
|
||||
|
||||
```nickel
|
||||
```
|
||||
let server_create = {
|
||||
name = "server create",
|
||||
domain = "infrastructure",
|
||||
@ -259,7 +259,7 @@ server_create
|
||||
|
||||
1. Handler integration (happens in dispatcher):
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Dispatcher automatically:
|
||||
# 1. Loads metadata for "server create"
|
||||
# 2. Validates auth based on requirements
|
||||
@ -269,7 +269,7 @@ server_create
|
||||
|
||||
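A rough sketch of what that gate might look like inside the dispatcher. Only `get-command-metadata` and the `--check`/`--yes` flags appear elsewhere in this guide; the metadata field names and the login probe are assumptions, not the actual implementation.

```nushell
# Illustrative auth gate; metadata field names and the login probe are assumptions.
def gate-command [command: string, flags: record] {
    let meta = (get-command-metadata $command)          # cached metadata lookup
    if $flags.check? == true {
        return "skip-auth"                              # --check dry runs bypass auth
    }
    let logged_in = ((do { provisioning auth whoami } | complete).exit_code == 0)
    if $meta.requires_auth and (not $logged_in) {
        error make { msg: $"'($command)' requires login: run 'provisioning login' first" }
    }
    if $meta.requires_confirmation and ($flags.yes? != true) {
        let answer = (input $"Proceed with ($command)? (yes/no): ")
        if $answer != "yes" { error make { msg: "aborted by user" } }
    }
    "allowed"
}
```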
### Phase 3: Validating Migration
|
||||
|
||||
```bash
|
||||
```
|
||||
# Validate metadata headers
|
||||
nu utils/validate-metadata-headers.nu
|
||||
|
||||
@ -292,7 +292,7 @@ nu utils/search-scripts.nu list
|
||||
|
||||
**Step 1: Create metadata in main.ncl**
|
||||
|
||||
```nickel
|
||||
```
|
||||
let new_feature_command = {
|
||||
name = "feature command",
|
||||
domain = "infrastructure",
|
||||
@ -310,7 +310,7 @@ new_feature_command
|
||||
|
||||
**Step 2: Add metadata header to script**
|
||||
|
||||
```nushell
|
||||
```
|
||||
#!/usr/bin/env nu
|
||||
# [command]
|
||||
# name = "feature command"
|
||||
@ -325,7 +325,7 @@ export def feature-command [param: string] {
|
||||
|
||||
**Step 3: Implement handler function**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Handler registered in dispatcher
|
||||
export def handle-feature-command [
|
||||
action: string
|
||||
@ -342,7 +342,7 @@ export def handle-feature-command [
|
||||
|
||||
**Step 4: Test with check mode**
|
||||
|
||||
```bash
|
||||
```
|
||||
# Dry-run without auth
|
||||
provisioning feature command --check
|
||||
|
||||
@ -389,7 +389,7 @@ provisioning feature command --yes
|
||||
|
||||
**Pattern 1: For Long Operations**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Use orchestrator for operations >2 seconds
|
||||
if (get-operation-duration "my-operation") > 2000 {
|
||||
submit-to-orchestrator $operation
|
||||
@ -399,7 +399,7 @@ if (get-operation-duration "my-operation") > 2000 {
|
||||
|
||||
**Pattern 2: For Batch Operations**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Use batch workflows for multiple operations
|
||||
nu -c "
|
||||
use core/nulib/workflows/batch.nu *
|
||||
@ -409,7 +409,7 @@ batch submit workflows/batch-deploy.ncl --parallel-limit 5
|
||||
|
||||
**Pattern 3: For Metadata Overhead**
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Cache hit rate optimization
|
||||
# Current: 40-100x faster with warm cache
|
||||
# Target: >95% cache hit rate
|
||||
@ -420,7 +420,7 @@ batch submit workflows/batch-deploy.ncl --parallel-limit 5
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
```
|
||||
# End-to-End Integration Tests
|
||||
nu tests/test-fase5-e2e.nu
|
||||
|
||||
@ -456,7 +456,7 @@ for test in tests/test-*.nu { nu $test }
|
||||
|
||||
**Solution**: Ensure metadata is registered in `main.ncl`
|
||||
|
||||
```bash
|
||||
```
|
||||
# Check if command is in metadata
|
||||
grep "command_name" provisioning/schemas/main.ncl
|
||||
```
|
||||
@ -465,7 +465,7 @@ grep "command_name" provisioning/schemas/main.ncl
|
||||
|
||||
**Solution**: Verify user has required permission level
|
||||
|
||||
```bash
|
||||
```
|
||||
# Check current user permissions
|
||||
provisioning auth whoami
|
||||
|
||||
@ -480,7 +480,7 @@ get-command-metadata 'server create'
|
||||
|
||||
**Solution**: Check cache status
|
||||
|
||||
```bash
|
||||
```
|
||||
# Force cache reload
|
||||
rm ~/.cache/provisioning/command_metadata.json
|
||||
|
||||
@ -492,7 +492,7 @@ nu tests/test-metadata-cache-benchmark.nu
|
||||
|
||||
**Solution**: Run compliance check
|
||||
|
||||
```bash
|
||||
```
|
||||
# Validate Nushell compliance
|
||||
nu --ide-check 100 <file.nu>
|
||||
|
||||
@ -514,7 +514,7 @@ grep "let mut" <file.nu> # Should be empty
|
||||
|
||||
### Real-World Impact
|
||||
|
||||
```plaintext
|
||||
```
|
||||
Scenario: 20 sequential commands
|
||||
Without cache: 20 × 200 ms = 4 seconds
|
||||
With cache: 1 × 200 ms + 19 × 5 ms = 295 ms
|
||||
|
||||
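The arithmetic behind that scenario, parameterised so other command counts can be plugged in (the numbers come from the figures above):

```nushell
# Recompute the scenario above: 20 commands, 200 ms cold lookup, 5 ms warm cache hit.
def cache-savings [commands: int = 20, cold_ms: int = 200, warm_ms: int = 5] {
    let without_cache = ($commands * $cold_ms)
    let with_cache = ($cold_ms + (($commands - 1) * $warm_ms))
    { without_cache_ms: $without_cache, with_cache_ms: $with_cache, speedup: ($without_cache / $with_cache) }
}
cache-savings  # => 4000 ms without cache, 295 ms with cache
```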
@ -30,7 +30,7 @@ The build system is a comprehensive, Makefile-based solution that orchestrates:
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
```
|
||||
# Navigate to build system
|
||||
cd src/tools
|
||||
|
||||
@ -61,7 +61,7 @@ make status
|
||||
|
||||
**Variables**:
|
||||
|
||||
```makefile
|
||||
```
|
||||
# Project metadata
|
||||
PROJECT_NAME := provisioning
|
||||
VERSION := $(shell git describe --tags --always --dirty)
|
||||
@ -95,7 +95,7 @@ PARALLEL := true
|
||||
|
||||
**`make build-platform`** - Build platform binaries for all targets
|
||||
|
||||
```bash
|
||||
```
|
||||
make build-platform
|
||||
# Equivalent to:
|
||||
nu tools/build/compile-platform.nu \
|
||||
@ -107,7 +107,7 @@ nu tools/build/compile-platform.nu \
|
||||
|
||||
**`make build-core`** - Bundle core Nushell libraries
|
||||
|
||||
```bash
|
||||
```
|
||||
make build-core
|
||||
# Equivalent to:
|
||||
nu tools/build/bundle-core.nu \
|
||||
@ -119,7 +119,7 @@ nu tools/build/bundle-core.nu \
|
||||
|
||||
**`make validate-nickel`** - Validate and compile Nickel schemas
|
||||
|
||||
```bash
|
||||
```
|
||||
make validate-nickel
|
||||
# Equivalent to:
|
||||
nu tools/build/validate-nickel.nu \
|
||||
@ -142,7 +142,7 @@ nu tools/build/validate-nickel.nu \
|
||||
|
||||
**`make dist-generate`** - Generate complete distributions
|
||||
|
||||
```bash
|
||||
```
|
||||
make dist-generate
|
||||
# Advanced usage:
|
||||
make dist-generate PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete
|
||||
@ -176,7 +176,7 @@ make dist-generate PLATFORMS=linux-amd64,macos-amd64 VARIANTS=complete
|
||||
|
||||
**`make release`** - Create a complete release (requires VERSION)
|
||||
|
||||
```bash
|
||||
```
|
||||
make release VERSION=2.1.0
|
||||
```
|
||||
|
||||
@ -217,7 +217,7 @@ Features:
|
||||
|
||||
**`make dev-build`** - Quick development build
|
||||
|
||||
```bash
|
||||
```
|
||||
make dev-build
|
||||
# Fast build with minimal validation
|
||||
```
|
||||
@ -250,7 +250,7 @@ make dev-build
|
||||
|
||||
**`make docs`** - Generate documentation
|
||||
|
||||
```bash
|
||||
```
|
||||
make docs
|
||||
# Generates API docs, user guides, and examples
|
||||
```
|
||||
@ -265,7 +265,7 @@ make docs
|
||||
|
||||
**`make clean`** - Clean all build artifacts
|
||||
|
||||
```bash
|
||||
```
|
||||
make clean
|
||||
# Removes all build, distribution, and package directories
|
||||
```
|
||||
@ -290,7 +290,7 @@ make clean
|
||||
|
||||
**`make status`** - Show build system status
|
||||
|
||||
```bash
|
||||
```
|
||||
make status
|
||||
# Output:
|
||||
# Build System Status
|
||||
@ -345,21 +345,21 @@ make status
|
||||
|
||||
**`make linux`** - Build for Linux only
|
||||
|
||||
```bash
|
||||
```
|
||||
make linux
|
||||
# Sets PLATFORMS=linux-amd64
|
||||
```
|
||||
|
||||
**`make macos`** - Build for macOS only
|
||||
|
||||
```bash
|
||||
```
|
||||
make macos
|
||||
# Sets PLATFORMS=macos-amd64
|
||||
```
|
||||
|
||||
**`make windows`** - Build for Windows only
|
||||
|
||||
```bash
|
||||
```
|
||||
make windows
|
||||
# Sets PLATFORMS=windows-amd64
|
||||
```
|
||||
@ -368,7 +368,7 @@ make windows
|
||||
|
||||
**`make debug`** - Build with debug information
|
||||
|
||||
```bash
|
||||
```
|
||||
make debug
|
||||
# Sets BUILD_MODE=debug VERBOSE=true
|
||||
```
|
||||
@ -398,7 +398,7 @@ All build tools are implemented as Nushell scripts with comprehensive parameter
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu compile-platform.nu [options]
|
||||
|
||||
Options:
|
||||
@ -412,7 +412,7 @@ Options:
|
||||
|
||||
**Example**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu compile-platform.nu \
|
||||
--target x86_64-apple-darwin \
|
||||
--release \
|
||||
@ -435,7 +435,7 @@ nu compile-platform.nu \
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu bundle-core.nu [options]
|
||||
|
||||
Options:
|
||||
@ -468,7 +468,7 @@ Options:
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu validate-nickel.nu [options]
|
||||
|
||||
Options:
|
||||
@ -490,7 +490,7 @@ Options:
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu test-distribution.nu [options]
|
||||
|
||||
Options:
|
||||
@ -514,7 +514,7 @@ Options:
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu clean-build.nu [options]
|
||||
|
||||
Options:
|
||||
@ -544,7 +544,7 @@ Options:
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu generate-distribution.nu [command] [options]
|
||||
|
||||
Commands:
|
||||
@ -566,7 +566,7 @@ Options:
|
||||
|
||||
**Advanced Examples**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Complete multi-platform release
|
||||
nu generate-distribution.nu \
|
||||
--version 2.1.0 \
|
||||
@ -599,7 +599,7 @@ nu generate-distribution.nu status
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu create-installer.nu DISTRIBUTION_DIR [options]
|
||||
|
||||
Options:
|
||||
@ -660,7 +660,7 @@ Options:
|
||||
|
||||
**Usage**:
|
||||
|
||||
```bash
|
||||
```
|
||||
nu create-release.nu [options]
|
||||
|
||||
Options:
|
||||
@ -694,7 +694,7 @@ Options:
|
||||
|
||||
**Install Rust Targets**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install additional targets
|
||||
rustup target add x86_64-apple-darwin
|
||||
rustup target add x86_64-pc-windows-gnu
|
||||
@ -706,7 +706,7 @@ rustup target add aarch64-apple-darwin
|
||||
|
||||
**macOS Cross-Compilation**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install osxcross toolchain
|
||||
brew install FiloSottile/musl-cross/musl-cross
|
||||
brew install mingw-w64
|
||||
@ -714,7 +714,7 @@ brew install mingw-w64
|
||||
|
||||
**Windows Cross-Compilation**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install Windows dependencies
|
||||
brew install mingw-w64
|
||||
# or on Linux:
|
||||
@ -725,7 +725,7 @@ sudo apt-get install gcc-mingw-w64
|
||||
|
||||
**Single Platform**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Build for macOS from Linux
|
||||
make build-platform RUST_TARGET=x86_64-apple-darwin
|
||||
|
||||
@ -735,7 +735,7 @@ make build-platform RUST_TARGET=x86_64-pc-windows-gnu
|
||||
|
||||
**Multiple Platforms**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Build for all configured platforms
|
||||
make build-cross
|
||||
|
||||
@ -745,7 +745,7 @@ make build-cross PLATFORMS=linux-amd64,macos-amd64,windows-amd64
|
||||
|
||||
**Platform-Specific Targets**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Quick platform builds
|
||||
make linux # Linux AMD64
|
||||
make macos # macOS AMD64
|
||||
@ -775,7 +775,7 @@ make windows # Windows AMD64
|
||||
|
||||
**Check Dependencies**:
|
||||
|
||||
```bash
|
||||
```
|
||||
make info
|
||||
# Shows versions of all required tools
|
||||
|
||||
@ -789,7 +789,7 @@ make info
|
||||
|
||||
**Install Missing Dependencies**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Install Nushell
|
||||
cargo install nu
|
||||
|
||||
@ -810,7 +810,7 @@ cargo install cross
|
||||
|
||||
**Build Cache Management**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Clean Cargo cache
|
||||
cargo clean
|
||||
|
||||
@ -829,7 +829,7 @@ make clean SCOPE=cache
|
||||
|
||||
**Error**: `linker 'cc' not found`
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Install build essentials
|
||||
sudo apt-get install build-essential # Linux
|
||||
xcode-select --install # macOS
|
||||
@ -837,14 +837,14 @@ xcode-select --install # macOS
|
||||
|
||||
**Error**: `target not found`
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Install target
|
||||
rustup target add x86_64-unknown-linux-gnu
|
||||
```
|
||||
|
||||
**Error**: Cross-compilation linking errors
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Use cross instead of cargo
|
||||
cargo install cross
|
||||
make build-platform CROSS=true
|
||||
@ -854,7 +854,7 @@ make build-platform CROSS=true
|
||||
|
||||
**Error**: `command not found`
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Ensure Nushell is in PATH
|
||||
which nu
|
||||
export PATH="$HOME/.cargo/bin:$PATH"
|
||||
@ -862,14 +862,14 @@ export PATH="$HOME/.cargo/bin:$PATH"
|
||||
|
||||
**Error**: Permission denied
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Make scripts executable
|
||||
chmod +x src/tools/build/*.nu
|
||||
```
|
||||
|
||||
**Error**: Module not found
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Check working directory
|
||||
cd src/tools
|
||||
nu build/compile-platform.nu --help
|
||||
@ -879,7 +879,7 @@ nu build/compile-platform.nu --help
|
||||
|
||||
**Error**: `nickel command not found`
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Install Nickel
|
||||
cargo install nickel
|
||||
# or
|
||||
@ -888,7 +888,7 @@ brew install nickel
|
||||
|
||||
**Error**: Schema validation failed
|
||||
|
||||
```bash
|
||||
```
|
||||
# Solution: Check Nickel syntax
|
||||
nickel fmt schemas/
|
||||
nickel check schemas/
|
||||
@ -900,7 +900,7 @@ nickel check schemas/
|
||||
|
||||
**Optimizations**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Enable parallel builds
|
||||
make build-all PARALLEL=true
|
||||
|
||||
@ -913,7 +913,7 @@ export CARGO_BUILD_JOBS=8
|
||||
|
||||
**Cargo Configuration** (`~/.cargo/config.toml`):
|
||||
|
||||
```toml
|
||||
```
|
||||
[build]
|
||||
jobs = 8
|
||||
|
||||
@ -925,7 +925,7 @@ linker = "lld"
|
||||
|
||||
**Solutions**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Reduce parallel jobs
|
||||
export CARGO_BUILD_JOBS=2
|
||||
|
||||
@ -942,7 +942,7 @@ make clean-dist
|
||||
|
||||
**Validation**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Test distribution
|
||||
make test-dist
|
||||
|
||||
@ -954,7 +954,7 @@ nu src/tools/package/validate-package.nu dist/
|
||||
|
||||
**Optimizations**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Strip binaries
|
||||
make package-binaries STRIP=true
|
||||
|
||||
@ -969,7 +969,7 @@ make dist-generate VARIANTS=minimal
|
||||
|
||||
**Enable Debug Logging**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Set environment
|
||||
export PROVISIONING_DEBUG=true
|
||||
export RUST_LOG=debug
|
||||
@ -983,7 +983,7 @@ make build-all VERBOSE=true
|
||||
|
||||
**Debug Information**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Show debug information
|
||||
make debug-info
|
||||
|
||||
@ -1000,7 +1000,7 @@ make info
|
||||
|
||||
**Example Workflow** (`.github/workflows/build.yml`):
|
||||
|
||||
```yaml
|
||||
```
|
||||
name: Build and Test
|
||||
on: [push, pull_request]
|
||||
|
||||
@ -1034,7 +1034,7 @@ jobs:
|
||||
|
||||
**Release Workflow**:
|
||||
|
||||
```yaml
|
||||
```
|
||||
name: Release
|
||||
on:
|
||||
push:
|
||||
@ -1061,7 +1061,7 @@ jobs:
|
||||
|
||||
**Test CI Pipeline Locally**:
|
||||
|
||||
```bash
|
||||
```
|
||||
# Run CI build pipeline
|
||||
make ci-build
|
||||
|
||||
|
||||
@ -19,7 +19,7 @@ work with this architecture.
|
||||
|
||||
### Architecture Components
|
||||
|
||||
```plaintext
|
||||
```
|
||||
provisioning/core/nulib/
|
||||
├── provisioning (211 lines) - Main entry point
|
||||
├── main_provisioning/
|
||||
@ -58,7 +58,7 @@ Commands are organized by domain. Choose the appropriate handler:
|
||||
|
||||
Edit `provisioning/core/nulib/main_provisioning/commands/infrastructure.nu`:
|
||||
|
||||
```nushell
|
||||
```
|
||||
# Add to the handle_infrastructure_command match statement
|
||||
export def handle_infrastructure_command [
|
||||
command: string
|
||||
@ -102,7 +102,7 @@ If you want shortcuts like `provisioning s status`:

Edit `provisioning/core/nulib/main_provisioning/dispatcher.nu`:

```nushell
export def get_command_registry []: nothing -> record {
    {
        # Infrastructure commands
@ -127,7 +127,7 @@ Let's say you want to add better error handling to the taskserv command:

**Before:**

```nushell
def handle_taskserv [ops: string, flags: record] {
    let args = build_module_args $flags $ops
    run_module $args "taskserv" --exec
@ -136,7 +136,7 @@ def handle_taskserv [ops: string, flags: record] {

**After:**

```nushell
def handle_taskserv [ops: string, flags: record] {
    # Validate taskserv name if provided
    let first_arg = ($ops | split row " " | get -o 0)
@ -163,7 +163,7 @@ def handle_taskserv [ops: string, flags: record] {

The `flags.nu` module provides centralized flag handling:

```nushell
# Parse all flags into normalized record
let parsed_flags = (parse_common_flags {
    version: $version, v: $v, info: $info,
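    # (the record literal continues with the remaining common flags)
    # Illustrative sketch of what typically follows, using the helper names described
    # elsewhere in this guide (not a verbatim excerpt):
    # let args = build_module_args $parsed_flags $ops
    # run_module $args "server" --exec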
@ -210,7 +210,7 @@ If you need to add a new flag:

**Example: Adding `--timeout` flag**

```nushell
# 1. In provisioning main file (parameter list)
def main [
    # ... existing parameters
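    # Illustrative continuation (the remaining steps are cut from this excerpt):
    # --timeout: int           # 1. declare the new parameter on main
    # 2. normalize it in parse_common_flags (flags.nu)
    # 3. emit it from build_module_args so handlers pass it through to run_module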
@ -253,7 +253,7 @@ export def build_module_args [flags: record, extra: string = ""]: nothing -> str

Edit `provisioning/core/nulib/main_provisioning/dispatcher.nu`:

```nushell
export def get_command_registry []: nothing -> record {
    {
        # ... existing shortcuts
@ -273,7 +273,7 @@ export def get_command_registry []: nothing -> record {

### Running the Test Suite

```bash
# Run comprehensive test suite
nu tests/test_provisioning_refactor.nu
```
@ -293,7 +293,7 @@ The test suite validates:

Edit `tests/test_provisioning_refactor.nu`:

```nushell
# Add your test function
export def test_my_new_feature [] {
    print "\n🧪 Testing my new feature..."
@ -317,7 +317,7 @@ export def main [] {

### Manual Testing

```bash
# Test command execution
provisioning/core/cli/provisioning my-command test --check
@ -335,7 +335,7 @@ provisioning/core/cli/provisioning help my-command # Bi-directional

**Use Case**: Command just needs to execute a module with standard flags

```nushell
def handle_simple_command [ops: string, flags: record] {
    let args = build_module_args $flags $ops
    run_module $args "module_name" --exec
@ -346,7 +346,7 @@ def handle_simple_command [ops: string, flags: record] {

**Use Case**: Need to validate input before execution

```nushell
def handle_validated_command [ops: string, flags: record] {
    # Validate
    let first_arg = ($ops | split row " " | get -o 0)
@ -366,7 +366,7 @@ def handle_validated_command [ops: string, flags: record] {

**Use Case**: Command has multiple subcommands (like `server create`, `server delete`)

```nushell
def handle_complex_command [ops: string, flags: record] {
    let subcommand = ($ops | split row " " | get -o 0)
    let rest_ops = ($ops | split row " " | skip 1 | str join " ")
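    # Illustrative continuation (not a verbatim excerpt): route on the subcommand and
    # fall back to a descriptive error for anything unknown:
    # match $subcommand {
    #     "create" => { ... }
    #     "delete" => { ... }
    #     _ => { print $"❌ Unknown subcommand: ($subcommand)" }
    # }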
@ -388,7 +388,7 @@ def handle_complex_command [ops: string, flags: record] {

**Use Case**: Command behavior changes based on flags

```nushell
def handle_flag_routed_command [ops: string, flags: record] {
    if $flags.check_mode {
        # Dry-run mode
@ -414,7 +414,7 @@ Each handler should do **one thing well**:

### 2. Use Descriptive Error Messages

```nushell
# ❌ Bad
print "Error"
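# ✅ Good (illustrative sketch of the elided counterpart): say what failed and what to do next
# print $"❌ Unknown taskserv: ($first_arg)"
# print "Use 'provisioning taskserv list' to see all available taskservs"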
@ -433,7 +433,7 @@ print "Use 'provisioning taskserv list' to see all available taskservs"

Don't repeat code - use centralized functions:

```nushell
# ❌ Bad: Repeating flag handling
def handle_bad [ops: string, flags: record] {
    let use_check = if $flags.check_mode { "--check " } else { "" }
@ -478,7 +478,7 @@ Before committing:

**Fix**: Use relative imports with `.nu` extension:

```nushell
# ✅ Correct
use ../flags.nu *
use ../../lib_provisioning *
@ -494,7 +494,7 @@ use lib_provisioning *

**Fix**: Use proper Nushell 0.107 type signature:

```nushell
# ✅ Correct
export def my_function [param: string]: nothing -> string {
    "result"
@ -512,7 +512,7 @@ export def my_function [param: string] -> string {

**Fix**: Add to `dispatcher.nu:get_command_registry`:

```nushell
"myshortcut" => "domain command"
```
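
For orientation, here is a minimal, self-contained sketch (hypothetical helper name and registry values, not the project's actual dispatcher code) of how such a registry entry turns a shortcut into the full command before dispatch:

```nushell
# Hypothetical sketch: expand a shortcut via a registry record, falling back to the
# original token when no entry exists
def expand_shortcut [cmd: string]: nothing -> string {
    let registry = { "myshortcut": "domain command", "s": "server" }
    $registry | get -o $cmd | default $cmd
}

expand_shortcut "myshortcut"   # => "domain command"
expand_shortcut "server"       # => "server" (unchanged, no shortcut defined)
```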
@ -522,7 +522,7 @@ export def my_function [param: string] -> string {

**Fix**: Use centralized flag builder:

```nushell
let args = build_module_args $flags $ops
run_module $args "module" --exec
```
@ -531,7 +531,7 @@ run_module $args "module" --exec

### File Locations

```plaintext
provisioning/core/nulib/
├── provisioning - Main entry, flag definitions
├── main_provisioning/
@ -550,7 +550,7 @@ docs/

### Key Functions

```nushell
# In flags.nu
parse_common_flags [flags: record]: nothing -> record
build_module_args [flags: record, extra: string = ""]: nothing -> string
@ -574,7 +574,7 @@ handle_*_command [command: string, ops: string, flags: record]

### Testing Commands

```bash
# Run full test suite
nu tests/test_provisioning_refactor.nu
@ -19,7 +19,7 @@ This guide includes:

### Essential Commands

```bash
# System status
provisioning status
provisioning health