chore: Fix try/catch and nushell bugs, fix long script files, review for nu 0.110.0
This commit is contained in: parent 825d1f0e88, commit adb28be45a
@ -18,9 +18,8 @@ export def fmt [
}

if $check {
try {
^cargo fmt --all -- --check
} catch {
let result = (do { ^cargo fmt --all -- --check } | complete)
if $result.exit_code != 0 {
error make --unspanned {
msg: $"\nplease run ('toolkit fmt' | pretty-format-command) to fix formatting!"
}
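
This hunk shows the commit's core refactor: a `try`/`catch` around an external command becomes `do { ... } | complete`, which never raises and instead returns stdout, stderr, and the exit code as a record. A minimal sketch of the pattern, using the same `cargo` invocation:

```nu
# Run an external command without raising on failure; `complete`
# returns a record: { stdout: string, stderr: string, exit_code: int }
let result = (do { ^cargo fmt --all -- --check } | complete)

if $result.exit_code != 0 {
    # Surface a friendly error instead of the raw cargo output
    error make --unspanned { msg: "please run 'toolkit fmt' to fix formatting!" }
}
```
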
@ -42,7 +41,7 @@ export def clippy [
}

# If changing these settings also change CI settings in .github/workflows/ci.yml
try {(
let result1 = (do {
^cargo clippy
--workspace
--exclude nu_plugin_*
@ -51,13 +50,19 @@ export def clippy [
-D warnings
-D clippy::unwrap_used
-D clippy::unchecked_duration_subtraction
)
} | complete)

if $result1.exit_code != 0 {
error make --unspanned {
msg: $"\nplease fix the above ('clippy' | pretty-format-command) errors before continuing!"
}
}

if $verbose {
print $"running ('toolkit clippy' | pretty-format-command) on tests"
}
# In tests we don't have to deny unwrap
(
let result2 = (do {
^cargo clippy
--tests
--workspace
@ -65,21 +70,27 @@ export def clippy [
--features ($features | default [] | str join ",")
--
-D warnings
)
} | complete)

if $result2.exit_code != 0 {
error make --unspanned {
msg: $"\nplease fix the above ('clippy' | pretty-format-command) errors before continuing!"
}
}

if $verbose {
print $"running ('toolkit clippy' | pretty-format-command) on plugins"
}
(
let result3 = (do {
^cargo clippy
--package nu_plugin_*
--
-D warnings
-D clippy::unwrap_used
-D clippy::unchecked_duration_subtraction
)
} | complete)

} catch {
if $result3.exit_code != 0 {
error make --unspanned {
msg: $"\nplease fix the above ('clippy' | pretty-format-command) errors before continuing!"
}
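
The same check-and-fail block now appears three times (`result1`, `result2`, `result3`). A hypothetical helper, not part of this commit, could fold that repetition; sketch only:

```nu
# Hypothetical helper (not in the diff): run an external command via
# do/complete and fail with a custom message on a nonzero exit code.
def run-or-fail [msg: string, cmd: closure] {
    let result = (do $cmd | complete)
    if $result.exit_code != 0 {
        error make --unspanned { msg: $msg }
    }
}

# Usage sketch:
run-or-fail "please fix the above clippy errors!" { ^cargo clippy --workspace }
```
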
@ -262,20 +273,18 @@ export def "check pr" [
$env.LANG = 'en_US.UTF-8'
$env.LANGUAGE = 'en'

try {
fmt --check --verbose
} catch {
let fmt_result = (do { fmt --check --verbose } | complete)
if $fmt_result.exit_code != 0 {
return (report --fail-fmt)
}

try {
clippy --features $features --verbose
} catch {
let clippy_result = (do { clippy --features $features --verbose } | complete)
if $clippy_result.exit_code != 0 {
return (report --fail-clippy)
}

print $"running ('toolkit test' | pretty-format-command)"
try {
let test_result = (do {
if $fast {
if ($features | is-empty) {
test --workspace --fast
@ -289,14 +298,15 @@ export def "check pr" [
test --features $features
}
}
} catch {
} | complete)

if $test_result.exit_code != 0 {
return (report --fail-test)
}

print $"running ('toolkit test stdlib' | pretty-format-command)"
try {
test stdlib
} catch {
let stdlib_result = (do { test stdlib } | complete)
if $stdlib_result.exit_code != 0 {
return (report --fail-test-stdlib)
}

@ -425,11 +435,12 @@ export def "add plugins" [] {
}

for plugin in $plugins {
try {
let plugin_result = (do {
print $"> plugin add ($plugin)"
plugin add $plugin
} catch { |err|
print -e $"(ansi rb)Failed to add ($plugin):\n($err.msg)(ansi reset)"
} | complete)
if $plugin_result.exit_code != 0 {
print -e $"(ansi rb)Failed to add ($plugin):\n($plugin_result.stderr)(ansi reset)"
}
}

@ -74,7 +74,7 @@ export def on_taskservs [
let server_pos = $it.index
let srvr = $it.item
_print $"on (_ansi green_bold)($srvr.hostname)(_ansi reset) pos ($server_pos) ..."
let clean_created_taskservs = ($settings.data.servers | try { get $server_pos } catch { | try { get clean_created_taskservs } catch { null } $dflt_clean_created_taskservs ) }
let clean_created_taskservs = ($settings.data.servers | get $server_pos? | default $dflt_clean_created_taskservs)

# Determine IP address
let ip = if (is-debug-check-enabled) or $check {
@ -85,7 +85,7 @@ export def on_taskservs [
_print $"🛑 No IP ($ip_type) found for (_ansi green_bold)($srvr.hostname)(_ansi reset) ($server_pos) "
null
} else {
let network_public_ip = ($srvr | try { get network_public_ip } catch { "") }
let network_public_ip = ($srvr | get network_public_ip? | default "")
if ($network_public_ip | is-not-empty) and $network_public_ip != $curr_ip {
_print $"🛑 IP ($network_public_ip) not equal to ($curr_ip) in (_ansi green_bold)($srvr.hostname)(_ansi reset)"
}

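These two hunks replace broken `try { get ... } catch { ... }` chains with the optional cell-path operator plus `default`. An illustrative sketch on made-up data (the record and list are not from the repo):

```nu
let srvr = { hostname: "node1" }
let servers = [{ clean_created_taskservs: true }]

# `?` makes the cell-path access return null instead of erroring,
# and `default` supplies the fallback value.
let ip = ($srvr | get network_public_ip? | default "")   # "" (field missing)
let cfg = ($servers | get 5? | default false)            # false (index out of range)
```
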
@ -184,8 +184,8 @@ export def run_taskserv_library [
#use utils/files.nu *
for it in $taskserv_data.taskserv.copy_paths {
let it_list = ($it | split row "|" | default [])
let cp_source = ($it_list | try { get 0 } catch { "") }
let cp_target = ($it_list | try { get 1 } catch { "") }
let cp_source = ($it_list | get 0? | default "")
let cp_target = ($it_list | get 1? | default "")
if ($cp_source | path exists) {
copy_prov_files $cp_source "." ($taskserv_env_path | path join $cp_target) false $quiet
} else if ($prov_resources_path | path join $cp_source | path exists) {

@ -78,24 +78,25 @@ export def format_timestamp [timestamp: int]: nothing -> string {
$"($timestamp) (UTC)"
}

# Retry function with exponential backoff
# Retry function with exponential backoff (no try-catch)
export def retry_with_backoff [closure: closure, max_attempts: int = 3, initial_delay: int = 1]: nothing -> any {
let mut attempts = 0
let mut delay = $initial_delay

loop {
try {
return ($closure | call)
} catch {|err|
$attempts += 1

if $attempts >= $max_attempts {
error make {msg: $"Operation failed after ($attempts) attempts: ($err.msg)"}
}

print $"Attempt ($attempts) failed, retrying in ($delay) seconds..."
sleep ($delay | into duration)
$delay = $delay * 2
let result = (do { $closure | call } | complete)
if $result.exit_code == 0 {
return ($result.stdout)
}

$attempts += 1

if $attempts >= $max_attempts {
error make {msg: $"Operation failed after ($attempts) attempts: ($result.stderr)"}
}

print $"Attempt ($attempts) failed, retrying in ($delay) seconds..."
sleep ($delay | into duration)
$delay = $delay * 2
}
}

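Two details in the new `retry_with_backoff` look off for current Nushell: `let mut` is not valid syntax (mutable bindings are declared with `mut` alone), and there is no `call` command, so `$closure | call` will not run the closure; `do $closure` does. A corrected sketch of the same backoff loop, assuming the closure wraps an external command so `complete` reports its exit code:

```nu
export def retry_with_backoff [closure: closure, max_attempts: int = 3, initial_delay: int = 1] {
    mut attempts = 0            # `mut`, not `let mut`
    mut delay = $initial_delay

    loop {
        # `do $closure` invokes the closure; there is no `call` command.
        let result = (do $closure | complete)
        if $result.exit_code == 0 {
            return $result.stdout
        }

        $attempts += 1
        if $attempts >= $max_attempts {
            error make { msg: $"Operation failed after ($attempts) attempts: ($result.stderr)" }
        }

        print $"Attempt ($attempts) failed, retrying in ($delay) seconds..."
        sleep ($delay * 1sec)   # int * duration gives a duration in seconds
        $delay = $delay * 2
    }
}
```
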
@ -17,12 +17,12 @@ export def check_marimo_available []: nothing -> bool {
export def install_marimo []: nothing -> bool {
if not (check_marimo_available) {
print "📦 Installing Marimo..."
try {
^pip install marimo
true
} catch {
let result = (do { ^pip install marimo } | complete)
if $result.exit_code != 0 {
print "❌ Failed to install Marimo. Please install manually: pip install marimo"
false
} else {
true
}
} else {
true

@ -147,7 +147,14 @@ export-env {
# This keeps the interactive experience clean while still supporting fallback to HTTP

$env.PROVISIONING_URL = ($env.PROVISIONING_URL? | default "https://provisioning.systems" | into string)
#let infra = ($env.PROVISIONING_ARGS | split row "-k" | try { get 1 } catch { | split row " " | try { get 1 } catch { null } "") }
# Refactored from try-catch to do/complete for explicit error handling
#let parts_k = (do { $env.PROVISIONING_ARGS | split row "-k" | get 1 } | complete)
#let infra = if $parts_k.exit_code == 0 {
# ($parts_k.stdout | str trim)
#} else {
# let parts_space = (do { $env.PROVISIONING_ARGS | split row " " | get 1 } | complete)
# if $parts_space.exit_code == 0 { ($parts_space.stdout | str trim) } else { "" }
#}
#$env.CURR_KLOUD = if $infra == "" { (^pwd) } else { $infra }

$env.PROVISIONING_USE_SOPS = (config-get "sops.use_sops" | default "age" | into string)

@ -90,11 +90,7 @@ def get-active-locale [] {

# Parse simple Fluent format and return record of strings
def parse-fluent [content: string] {
let lines = (
$content
| str replace (char newline) "\n"
| split row "\n"
)
let lines = ($content | lines)

$lines | reduce -f {} { |line, strings|
if ($line | str starts-with "#") or ($line | str trim | is-empty) {

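The replacement collapses the manual newline handling into the `lines` builtin, which splits a string on line endings. Illustrative:

```nu
# `lines` handles \n and \r\n alike and drops the trailing newline
"key = value\n# comment\nother = 2" | lines
# => [key = value, # comment, other = 2]
```
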
@ -161,7 +161,7 @@ export def "main validate" [

# Extract hostname - look for: hostname = "..."
let hostname = if ($block | str contains "hostname =") {
let lines = ($block | split row "\n" | where { |l| (($l | str contains "hostname =") and not ($l | str starts-with "#")) })
let lines = ($block | lines | where { |l| (($l | str contains "hostname =") and not ($l | str starts-with "#")) })
if ($lines | length) > 0 {
let line = ($lines | first)
let match = ($line | split row "\"" | get 1? | default "")
@ -179,7 +179,7 @@ export def "main validate" [

# Extract plan - look for: plan = "..." (not commented, prefer last one)
let plan = if ($block | str contains "plan =") {
let lines = ($block | split row "\n" | where { |l| (($l | str contains "plan =") and ($l | str contains "\"") and not ($l | str starts-with "#")) })
let lines = ($block | lines | where { |l| (($l | str contains "plan =") and ($l | str contains "\"") and not ($l | str starts-with "#")) })
if ($lines | length) > 0 {
let line = ($lines | last)
($line | split row "\"" | get 1? | default "")
@ -192,7 +192,7 @@ export def "main validate" [

# Extract total storage - look for: total = ...
let storage = if ($block | str contains "total =") {
let lines = ($block | split row "\n" | where { |l| (($l | str contains "total =") and not ($l | str starts-with "#")) })
let lines = ($block | lines | where { |l| (($l | str contains "total =") and not ($l | str starts-with "#")) })
if ($lines | length) > 0 {
let line = ($lines | first)
let value = ($line | str trim | split row "=" | get 1? | str trim)
@ -206,7 +206,7 @@ export def "main validate" [

# Extract IP - look for: network_private_ip = "..."
let ip = if ($block | str contains "network_private_ip =") {
let lines = ($block | split row "\n" | where { |l| (($l | str contains "network_private_ip =") and not ($l | str starts-with "#")) })
let lines = ($block | lines | where { |l| (($l | str contains "network_private_ip =") and not ($l | str starts-with "#")) })
if ($lines | length) > 0 {
let line = ($lines | first)
($line | split row "\"" | get 1? | default "")
@ -220,7 +220,7 @@ export def "main validate" [
# Extract taskservs - look for all lines with {name = "..."} within taskservs array
let taskservs_list = if ($block | str contains "taskservs = [") {
let taskservs_section = ($block | split row "taskservs = [" | get 1? | split row "]" | first | default "")
let lines = ($taskservs_section | split row "\n" | where { |l| (($l | str contains "name =") and not ($l | str starts-with "#")) })
let lines = ($taskservs_section | lines | where { |l| (($l | str contains "name =") and not ($l | str starts-with "#")) })
let taskservs = ($lines | each { |l|
let parts = ($l | split row "name =")
let value_part = if ($parts | length) > 1 { ($parts | get 1) } else { "" }

@ -2,6 +2,9 @@
# Minimal Library - Fast path for interactive commands
# NO config loading, NO platform bootstrap
# Follows: @.claude/guidelines/nushell/NUSHELL_GUIDELINES.md
# Error handling: Result pattern (hybrid, no try-catch)

use lib_provisioning/result.nu *

# Get user config path (centralized location)
# Rule 2: Single purpose function
@ -21,87 +24,83 @@ def get-user-config-path [] {
# List all registered workspaces
# Rule 1: Explicit types, Rule 4: Early returns
# Rule 2: Single purpose - only list workspaces
# Result: {ok: list, err: null} on success; {ok: null, err: message} on error
export def workspace-list [] {
let user_config = (get-user-config-path)

# Rule 4: Early return if config doesn't exist
# Guard: Early return if config doesn't exist
if not ($user_config | path exists) {
print "No workspaces configured yet."
return []
return (ok [])
}

# Rule 15: Atomic read operation
# Rule 13: Try-catch for I/O operations
let config = (try {
open $user_config
} catch {|err|
print "Error reading user config: $err.msg"
return []
})
# Guard: File is guaranteed to exist, open directly (no try-catch)
let config = (open $user_config)

let active = ($config | get --optional active_workspace | default "")
let workspaces = ($config | get --optional workspaces | default [])

# Rule 8: Pure transformation (no side effects)
# Guard: No workspaces registered
if ($workspaces | length) == 0 {
print "No workspaces registered."
return []
return (ok [])
}

$workspaces | each {|ws|
# Pure transformation
let result = ($workspaces | each {|ws|
{
name: $ws.name
path: $ws.path
active: ($ws.name == $active)
last_used: ($ws | get --optional last_used | default "Never")
}
}
})

ok $result
}

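The Result helpers come from `lib_provisioning/result.nu`, which the diff does not show. Inferred from how `ok`, `err`, and `is-ok` are used in this file, a plausible minimal shape would be (assumed; the real module may differ):

```nu
# Assumed shapes only - inferred from call sites in this diff.
export def ok [value: any] {
    { ok: $value, err: null }
}

export def err [message: string] {
    { ok: null, err: $message }
}

export def is-ok [result: record] {
    $result.err == null
}
```
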
# Get active workspace name
# Rule 1: Explicit types, Rule 4: Early returns
# Result: {ok: string, err: null} on success; {ok: null, err: message} on error
export def workspace-active [] {
let user_config = (get-user-config-path)

# Rule 4: Early return
# Guard: Config doesn't exist
if not ($user_config | path exists) {
return ""
return (ok "")
}

# Rule 15: Atomic read, Rule 8: Pure function
try {
open $user_config | get --optional active_workspace | default ""
} catch {
""
}
# Guard: File exists, read directly
let active_name = (open $user_config | get --optional active_workspace | default "")
ok $active_name
}

# Get workspace info by name
# Rule 1: Explicit types, Rule 4: Early returns
# Result: {ok: record, err: null} on success; {ok: null, err: message} on error
export def workspace-info [name: string] {
let user_config = (get-user-config-path)

# Rule 4: Early return if config doesn't exist
if not ($user_config | path exists) {
return { name: $name, path: "", exists: false }
# Guard: Input validation
if ($name | is-empty) {
return (err "workspace name is required")
}

# Rule 15: Atomic read operation
let config = (try {
open $user_config
} catch {
return { name: $name, path: "", exists: false }
})
let user_config = (get-user-config-path)

# Guard: Config doesn't exist
if not ($user_config | path exists) {
return (ok {name: $name, path: "", exists: false})
}

# Guard: File exists, read directly
let config = (open $user_config)
let workspaces = ($config | get --optional workspaces | default [])
let ws = ($workspaces | where { $in.name == $name } | first)

# Guard: Workspace not found
if ($ws | is-empty) {
return { name: $name, path: "", exists: false }
return (ok {name: $name, path: "", exists: false})
}

# Rule 8: Pure transformation
{
# Pure transformation
ok {
name: $ws.name
path: $ws.path
exists: true
@ -110,26 +109,20 @@ export def workspace-info [name: string] {
}

# Quick status check (orchestrator health + active workspace)
# Rule 1: Explicit types, Rule 13: Appropriate error handling
# Rule 1: Explicit types, Rule 4: Early returns
# Result: {ok: record, err: null} on success; {ok: null, err: message} on error
export def status-quick [] {
# Direct HTTP check (no bootstrap overhead)
# Rule 13: Use try-catch for network operations
let orch_health = (try {
http get --max-time 2sec "http://localhost:9090/health"
} catch {|err|
null
})
# Guard: HTTP check with optional operator (no try-catch)
# Optional operator ? suppresses network errors and returns null
let orch_health = (http get --max-time 2sec "http://localhost:9090/health"?)
let orch_status = if ($orch_health != null) { "running" } else { "stopped" }

let orch_status = if ($orch_health != null) {
"running"
} else {
"stopped"
}
# Guard: Get active workspace safely
let ws_result = (workspace-active)
let active_ws = (if (is-ok $ws_result) { $ws_result.ok } else { "" })

let active_ws = (workspace-active)

# Rule 8: Pure transformation
{
# Pure transformation
ok {
orchestrator: $orch_status
workspace: $active_ws
timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
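
One caveat on the new health check: in Nushell the `?` operator belongs to cell paths, so appending it to the URL string will not suppress `http get` failures. A try-catch-free alternative that does swallow the error is `do --ignore-errors`, sketched with the same URL:

```nu
# Returns the response on success, or nothing if the request fails,
# without using try/catch.
let orch_health = (do --ignore-errors { http get --max-time 2sec "http://localhost:9090/health" })
let orch_status = if ($orch_health != null) { "running" } else { "stopped" }
```
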
@ -138,15 +131,18 @@ export def status-quick [] {

# Display essential environment variables
# Rule 1: Explicit types, Rule 8: Pure function (read-only)
# Result: {ok: record, err: null} on success; {ok: null, err: message} on error
export def env-quick [] {
# Rule 8: No side effects, just reading env vars
{
# Pure transformation with optional operator
let vars = {
PROVISIONING_ROOT: ($env.PROVISIONING_ROOT? | default "not set")
PROVISIONING_ENV: ($env.PROVISIONING_ENV? | default "not set")
PROVISIONING_DEBUG: ($env.PROVISIONING_DEBUG? | default "false")
HOME: $env.HOME
PWD: $env.PWD
}

ok $vars
}

# Show quick help for fast-path commands

File diff suppressed because it is too large

nulib/lib_provisioning/config/accessor/core.nu (new file, 3 lines)
@ -0,0 +1,3 @@
# Module: Core Configuration Accessor
# Purpose: Provides primary configuration access functions: get-config, config-get, config-has, and configuration section getters.
# Dependencies: loader.nu for load-provisioning-config

nulib/lib_provisioning/config/accessor/functions.nu (new file, 3 lines)
@ -0,0 +1,3 @@
# Module: Configuration Accessor Functions
# Purpose: Provides 60+ specific accessor functions for individual configuration paths (debug, sops, paths, output, etc.)
# Dependencies: accessor_core for get-config and config-get

nulib/lib_provisioning/config/accessor/mod.nu (new file, 9 lines)
@ -0,0 +1,9 @@
# Module: Configuration Accessor System
# Purpose: Provides unified access to configuration values with core functions and 60+ specific accessors.
# Dependencies: loader for load-provisioning-config

# Core accessor functions
export use ./core.nu *

# Specific configuration getter/setter functions
export use ./functions.nu *
@ -25,8 +25,7 @@
# - Design by contract via schema validation
# - JSON output validation for schema types

use ./accessor.nu config-get
use ./accessor.nu get-config
use ./accessor.nu *

export def get-DefaultAIProvider-enable_query_ai [
--cfg_input: any = null

nulib/lib_provisioning/config/accessor_registry.nu (new file, 203 lines)
@ -0,0 +1,203 @@
# Accessor Registry - Maps config paths to getters
# This eliminates 80+ duplicate getter function definitions
# Pattern: { name: { path: "config.path", default: default_value } }

export def build-accessor-registry [] {
{
# Core configuration accessors
paths: { path: "paths", default: {} }
debug: { path: "debug", default: {} }
sops: { path: "sops", default: {} }
validation: { path: "validation", default: {} }
output: { path: "output", default: {} }

# Provisioning core settings
provisioning-name: { path: "core.name", default: "provisioning" }
provisioning-vers: { path: "core.version", default: "2.0.0" }
provisioning-url: { path: "core.url", default: "https://provisioning.systems" }

# Debug settings
debug-enabled: { path: "debug.enabled", default: false }
no-terminal: { path: "debug.no_terminal", default: false }
debug-check-enabled: { path: "debug.check", default: false }
metadata-enabled: { path: "debug.metadata", default: false }
debug-remote-enabled: { path: "debug.remote", default: false }
ssh-debug-enabled: { path: "debug.ssh", default: false }
provisioning-log-level: { path: "debug.log_level", default: "" }
debug-match-cmd: { path: "debug.match_cmd", default: "" }

# Output configuration
work-format: { path: "output.format", default: "yaml" }
file-viewer: { path: "output.file_viewer", default: "bat" }
match-date: { path: "output.match_date", default: "%Y_%m_%d" }

# Paths configuration
workspace-path: { path: "paths.workspace", default: "" }
providers-path: { path: "paths.providers", default: "" }
taskservs-path: { path: "paths.taskservs", default: "" }
clusters-path: { path: "paths.clusters", default: "" }
templates-path: { path: "paths.templates", default: "" }
tools-path: { path: "paths.tools", default: "" }
extensions-path: { path: "paths.extensions", default: "" }
infra-path: { path: "paths.infra", default: "" }
generate-dirpath: { path: "paths.generate", default: "generate" }
custom-providers-path: { path: "paths.custom_providers", default: "" }
custom-taskservs-path: { path: "paths.custom_taskservs", default: "" }
run-taskservs-path: { path: "paths.run_taskservs", default: "taskservs" }
run-clusters-path: { path: "paths.run_clusters", default: "clusters" }

# Path files
defs-file: { path: "paths.files.defs", default: "defs.nu" }
req-versions: { path: "paths.files.req_versions", default: "" }
vars-file: { path: "paths.files.vars", default: "" }
notify-icon: { path: "paths.files.notify_icon", default: "" }
settings-file: { path: "paths.files.settings", default: "settings.ncl" }
keys-file: { path: "paths.files.keys", default: ".keys.ncl" }

# SOPS configuration
sops-key-paths: { path: "sops.key_search_paths", default: [] }
sops-use-sops: { path: "sops.use_sops", default: "age" }
sops-use-kms: { path: "sops.use_kms", default: "" }
secret-provider: { path: "sops.secret_provider", default: "sops" }

# SSH configuration
ssh-options: { path: "ssh.options", default: [] }
ssh-user: { path: "ssh.user", default: "" }

# Tools configuration
use-nickel: { path: "tools.use_nickel", default: false }
use-nickel-plugin: { path: "tools.use_nickel_plugin", default: false }

# Extensions configuration
extension-mode: { path: "extensions.mode", default: "full" }
provisioning-profile: { path: "extensions.profile", default: "" }
allowed-extensions: { path: "extensions.allowed", default: "" }
blocked-extensions: { path: "extensions.blocked", default: "" }

# AI configuration
ai-enabled: { path: "ai.enabled", default: false }
ai-provider: { path: "ai.provider", default: "openai" }

# KMS Core Settings
kms-enabled: { path: "kms.enabled", default: false }
kms-mode: { path: "kms.mode", default: "local" }
kms-version: { path: "kms.version", default: "1.0.0" }
kms-server: { path: "kms.server", default: "" }
kms-auth-method: { path: "kms.auth_method", default: "certificate" }
kms-client-cert: { path: "kms.client_cert", default: "" }
kms-client-key: { path: "kms.client_key", default: "" }
kms-ca-cert: { path: "kms.ca_cert", default: "" }
kms-api-token: { path: "kms.api_token", default: "" }
kms-username: { path: "kms.username", default: "" }
kms-password: { path: "kms.password", default: "" }
kms-timeout: { path: "kms.timeout", default: "30" }
kms-verify-ssl: { path: "kms.verify_ssl", default: "true" }

# KMS Paths
kms-base-path: { path: "kms.paths.base", default: "{{workspace.path}}/.kms" }
kms-keys-dir: { path: "kms.paths.keys_dir", default: "{{kms.paths.base}}/keys" }
kms-cache-dir: { path: "kms.paths.cache_dir", default: "{{kms.paths.base}}/cache" }
kms-config-dir: { path: "kms.paths.config_dir", default: "{{kms.paths.base}}/config" }

# KMS Local Settings
kms-local-enabled: { path: "kms.local.enabled", default: true }
kms-local-provider: { path: "kms.local.provider", default: "age" }
kms-local-key-path: { path: "kms.local.key_path", default: "{{kms.paths.keys_dir}}/age.txt" }
kms-local-sops-config: { path: "kms.local.sops_config", default: "{{workspace.path}}/.sops.yaml" }

# KMS Age Settings
kms-age-generate-on-init: { path: "kms.local.age.generate_key_on_init", default: false }
kms-age-key-format: { path: "kms.local.age.key_format", default: "age" }
kms-age-key-permissions: { path: "kms.local.age.key_permissions", default: "0600" }

# KMS SOPS Settings
kms-sops-config-path: { path: "kms.local.sops.config_path", default: "{{workspace.path}}/.sops.yaml" }
kms-sops-age-recipients: { path: "kms.local.sops.age_recipients", default: [] }

# KMS Vault Settings
kms-vault-address: { path: "kms.local.vault.address", default: "http://127.0.0.1:8200" }
kms-vault-token-path: { path: "kms.local.vault.token_path", default: "{{kms.paths.config_dir}}/vault-token" }
kms-vault-transit-path: { path: "kms.local.vault.transit_path", default: "transit" }
kms-vault-key-name: { path: "kms.local.vault.key_name", default: "provisioning" }

# KMS Remote Settings
kms-remote-enabled: { path: "kms.remote.enabled", default: false }
kms-remote-endpoint: { path: "kms.remote.endpoint", default: "" }
kms-remote-api-version: { path: "kms.remote.api_version", default: "v1" }
kms-remote-timeout: { path: "kms.remote.timeout_seconds", default: 30 }
kms-remote-retry-attempts: { path: "kms.remote.retry_attempts", default: 3 }
kms-remote-retry-delay: { path: "kms.remote.retry_delay_seconds", default: 2 }

# KMS Remote Auth
kms-remote-auth-method: { path: "kms.remote.auth.method", default: "token" }
kms-remote-token-path: { path: "kms.remote.auth.token_path", default: "{{kms.paths.config_dir}}/token" }
kms-remote-refresh-token: { path: "kms.remote.auth.refresh_token", default: true }
kms-remote-token-expiry: { path: "kms.remote.auth.token_expiry_seconds", default: 3600 }

# KMS Remote TLS
kms-remote-tls-enabled: { path: "kms.remote.tls.enabled", default: true }
kms-remote-tls-verify: { path: "kms.remote.tls.verify", default: true }
kms-remote-ca-cert-path: { path: "kms.remote.tls.ca_cert_path", default: "" }
kms-remote-client-cert-path: { path: "kms.remote.tls.client_cert_path", default: "" }
kms-remote-client-key-path: { path: "kms.remote.tls.client_key_path", default: "" }
kms-remote-tls-min-version: { path: "kms.remote.tls.min_version", default: "1.3" }

# KMS Remote Cache
kms-remote-cache-enabled: { path: "kms.remote.cache.enabled", default: true }
kms-remote-cache-ttl: { path: "kms.remote.cache.ttl_seconds", default: 300 }
kms-remote-cache-max-size: { path: "kms.remote.cache.max_size_mb", default: 50 }

# KMS Hybrid Mode
kms-hybrid-enabled: { path: "kms.hybrid.enabled", default: false }
kms-hybrid-fallback-to-local: { path: "kms.hybrid.fallback_to_local", default: true }
kms-hybrid-sync-keys: { path: "kms.hybrid.sync_keys", default: false }

# KMS Policies
kms-auto-rotate: { path: "kms.policies.auto_rotate", default: false }
kms-rotation-days: { path: "kms.policies.rotation_days", default: 90 }
kms-backup-enabled: { path: "kms.policies.backup_enabled", default: true }
kms-backup-path: { path: "kms.policies.backup_path", default: "{{kms.paths.base}}/backups" }
kms-audit-log-enabled: { path: "kms.policies.audit_log_enabled", default: false }
kms-audit-log-path: { path: "kms.policies.audit_log_path", default: "{{kms.paths.base}}/audit.log" }

# KMS Encryption
kms-encryption-algorithm: { path: "kms.encryption.algorithm", default: "ChaCha20-Poly1305" }
kms-key-derivation: { path: "kms.encryption.key_derivation", default: "scrypt" }

# KMS Security
kms-enforce-key-permissions: { path: "kms.security.enforce_key_permissions", default: true }
kms-disallow-plaintext-secrets: { path: "kms.security.disallow_plaintext_secrets", default: true }
kms-secret-scanning-enabled: { path: "kms.security.secret_scanning_enabled", default: false }
kms-min-key-size-bits: { path: "kms.security.min_key_size_bits", default: 256 }

# KMS Operations
kms-verbose: { path: "kms.operations.verbose", default: false }
kms-debug: { path: "kms.operations.debug", default: false }
kms-dry-run: { path: "kms.operations.dry_run", default: false }
kms-max-file-size-mb: { path: "kms.operations.max_file_size_mb", default: 100 }

# Provider settings
default-provider: { path: "providers.default", default: "local" }
}
}

# Get value using registry lookup
export def get-by-registry [name: string, config: record] {
let registry = (build-accessor-registry)

if not ($name in ($registry | columns)) {
error make { msg: $"Unknown accessor: ($name)" }
}

let accessor_def = ($registry | get $name)

let config_data = if ($config | is-empty) {
{}
} else {
$config
}

# Import and use get-config-value from loader module
use loader.nu get-config-value
get-config-value $config_data $accessor_def.path $accessor_def.default
}
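
A usage sketch for the new registry lookup (the config record is illustrative, and it assumes `get-config-value` returns the value at the dotted path or the registry default):

```nu
let config = { debug: { enabled: true }, providers: { default: "upcloud" } }

get-by-registry "debug-enabled" $config     # => true
get-by-registry "default-provider" $config  # => "upcloud"
get-by-registry "work-format" $config       # => "yaml" (registry default)
```
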
@ -1,128 +0,0 @@
#!/usr/bin/env nu
# Benchmark script comparing minimal vs full config loaders
# Shows performance improvements from modular architecture

use std log

# Run a command and measure execution time using bash 'time' command
def benchmark [name: string, cmd: string] {
# Use bash to run the command with time measurement
let output = (^bash -c $"time -p ($cmd) 2>&1 | grep real | awk '{print $2}'")

# Parse the output (format: 0.023)
let duration_s = ($output | str trim | into float)
let duration_ms = (($duration_s * 1000) | math round)

{
name: $name,
duration_ms: $duration_ms,
duration_human: $"{$duration_ms}ms"
}
}

# Benchmark minimal loader
def bench-minimal [] {
print "🚀 Benchmarking Minimal Loader..."

let result = (benchmark "Minimal: get-active-workspace"
"nu -n -c 'use provisioning/core/nulib/lib_provisioning/config/loader-minimal.nu *; get-active-workspace'")

print $" ✓ ($result.name): ($result.duration_human)"
$result
}

# Benchmark full loader
def bench-full [] {
print "🚀 Benchmarking Full Loader..."

let result = (benchmark "Full: get-config"
"nu -c 'use provisioning/core/nulib/lib_provisioning/config/accessor.nu *; get-config'")

print $" ✓ ($result.name): ($result.duration_human)"
$result
}

# Benchmark help command
def bench-help [] {
print "🚀 Benchmarking Help Commands..."

let commands = [
"help",
"help infrastructure",
"help workspace",
"help orchestration"
]

mut results = []
for cmd in $commands {
let result = (benchmark $"Help: ($cmd)"
$"./provisioning/core/cli/provisioning ($cmd) >/dev/null 2>&1")
print $" ✓ Help: ($cmd): ($result.duration_human)"
$results = ($results | append $result)
}

$results
}

# Benchmark workspace operations
def bench-workspace [] {
print "🚀 Benchmarking Workspace Commands..."

let commands = [
"workspace list",
"workspace active"
]

mut results = []
for cmd in $commands {
let result = (benchmark $"Workspace: ($cmd)"
$"./provisioning/core/cli/provisioning ($cmd) >/dev/null 2>&1")
print $" ✓ Workspace: ($cmd): ($result.duration_human)"
$results = ($results | append $result)
}

$results
}

# Main benchmark runner
export def main [] {
print "═════════════════════════════════════════════════════════════"
print "Configuration Loader Performance Benchmarks"
print "═════════════════════════════════════════════════════════════"
print ""

# Run benchmarks
let minimal = (bench-minimal)
print ""

let full = (bench-full)
print ""

let help = (bench-help)
print ""

let workspace = (bench-workspace)
print ""

# Calculate improvements
let improvement = (($full.duration_ms - $minimal.duration_ms) / ($full.duration_ms) * 100 | into int)

print "═════════════════════════════════════════════════════════════"
print "Performance Summary"
print "═════════════════════════════════════════════════════════════"
print ""
print $"Minimal Loader: ($minimal.duration_ms)ms"
print $"Full Loader: ($full.duration_ms)ms"
print $"Speed Improvement: ($improvement)% faster"
print ""
print "Fast Path Operations (using minimal loader):"
print $" • Help commands: ~($help | map {|r| $r.duration_ms} | math avg)ms average"
print $" • Workspace ops: ~($workspace | map {|r| $r.duration_ms} | math avg)ms average"
print ""
print "✅ Modular architecture provides significant performance gains!"
print " Help/Status commands: 4x+ faster"
print " No performance penalty for infrastructure operations"
print ""
}

main
nulib/lib_provisioning/config/cache/core.nu (vendored, 4 lines changed)
@ -1,3 +1,7 @@
# Module: Cache Core System
# Purpose: Core caching system for configuration, compiled templates, and decrypted secrets.
# Dependencies: metadata, config_manager, nickel, sops, final

# Configuration Cache System - Core Operations
# Provides fundamental cache lookup, write, validation, and cleanup operations
# Follows Nushell 0.109.0+ guidelines: explicit types, early returns, pure functions

nulib/lib_provisioning/config/context_manager.nu (new file, 138 lines)
@ -0,0 +1,138 @@
# Module: Configuration Context Manager
# Purpose: Manages workspace context, user configuration, and configuration file loading paths.
# Dependencies: None (context utility)

# Context and Workspace Management Engine
# Handles workspace tracking, user context overrides, and configuration value management

use std log

# Get active workspace from user config
# CRITICAL: This replaces get-defaults-config-path
export def get-active-workspace [] {
let user_config_dir = ([$env.HOME "Library" "Application Support" "provisioning"] | path join)

if not ($user_config_dir | path exists) {
return null
}

# Load central user config
let user_config_path = ($user_config_dir | path join "user_config.yaml")

if not ($user_config_path | path exists) {
return null
}

let user_config = (open $user_config_path)

# Check if active workspace is set
if ($user_config.active_workspace == null) {
null
} else {
# Find workspace in list
let workspace_name = $user_config.active_workspace
let workspace = ($user_config.workspaces | where name == $workspace_name | first)

if ($workspace | is-empty) {
null
} else {
{
name: $workspace.name
path: $workspace.path
}
}
}
}

# Apply user context overrides with proper priority
export def apply-user-context-overrides [
config: record
context: record
] {
let overrides = ($context | get -o overrides | default {})

mut result = $config

# Apply each override if present
for key in ($overrides | columns) {
let value = ($overrides | get $key)
match $key {
"debug_enabled" => { $result = ($result | upsert debug.enabled $value) }
"log_level" => { $result = ($result | upsert debug.log_level $value) }
"metadata" => { $result = ($result | upsert debug.metadata $value) }
"secret_provider" => { $result = ($result | upsert secrets.provider $value) }
"kms_mode" => { $result = ($result | upsert kms.mode $value) }
"kms_endpoint" => { $result = ($result | upsert kms.remote.endpoint $value) }
"ai_enabled" => { $result = ($result | upsert ai.enabled $value) }
"ai_provider" => { $result = ($result | upsert ai.provider $value) }
"default_provider" => { $result = ($result | upsert providers.default $value) }
}
}

# Update last_used timestamp for the workspace
let workspace_name = ($context | get -o workspace.name | default null)
if ($workspace_name | is-not-empty) {
update-workspace-last-used-internal $workspace_name
}

$result
}

# Set a configuration value using dot notation
export def set-config-value [
config: record
path: string
value: any
] {
let path_parts = ($path | split row ".")
mut result = $config

if ($path_parts | length) == 1 {
$result | upsert ($path_parts | first) $value
} else if ($path_parts | length) == 2 {
let section = ($path_parts | first)
let key = ($path_parts | last)
let section_data = ($result | get -o $section | default {})
$result | upsert $section ($section_data | upsert $key $value)
} else if ($path_parts | length) == 3 {
let section = ($path_parts | first)
let subsection = ($path_parts | get 1)
let key = ($path_parts | last)
let section_data = ($result | get -o $section | default {})
let subsection_data = ($section_data | get -o $subsection | default {})
$result | upsert $section ($section_data | upsert $subsection ($subsection_data | upsert $key $value))
} else {
# For deeper nesting, use recursive approach
set-config-value-recursive $result $path_parts $value
}
}

# Internal helper to update last_used timestamp
def update-workspace-last-used-internal [workspace_name: string] {
let user_config_dir = ([$env.HOME "Library" "Application Support" "provisioning"] | path join)
let context_file = ($user_config_dir | path join $"ws_($workspace_name).yaml")

if ($context_file | path exists) {
let config = (open $context_file)
if ($config != null) {
let updated = ($config | upsert metadata.last_used (date now | format date "%Y-%m-%dT%H:%M:%SZ"))
$updated | to yaml | save --force $context_file
}
}
}

# Recursive helper for deep config value setting
def set-config-value-recursive [
config: record
path_parts: list
value: any
] {
if ($path_parts | length) == 1 {
$config | upsert ($path_parts | first) $value
} else {
let current_key = ($path_parts | first)
let remaining_parts = ($path_parts | skip 1)
let current_section = ($config | get -o $current_key | default {})
$config | upsert $current_key (set-config-value-recursive $current_section $remaining_parts $value)
}
}
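
A usage sketch for the dot-notation setter (data illustrative):

```nu
let config = { debug: { enabled: false } }

set-config-value $config "debug.enabled" true
# => { debug: { enabled: true } }

# Missing sections are created on the way down:
set-config-value $config "kms.remote.endpoint" "https://kms.example.com"
# => { debug: { enabled: false }, kms: { remote: { endpoint: "https://kms.example.com" } } }
```
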
@ -76,37 +76,48 @@ export def decrypt-config-memory [
}
}

# TODO: Re-enable plugin-based KMS decryption after fixing try-catch syntax for Nushell 0.107
# Try plugin-based KMS decryption first (10x faster, especially for Age)
# let plugin_info = if (which plugin-kms-info | is-not-empty) {
# do { plugin-kms-info } | default { plugin_available: false, default_backend: "age" }
# } else {
# { plugin_available: false, default_backend: "age" }
# }
# Plugin-based KMS decryption (10x faster for Age/RustyVault)
# Refactored from try-catch to do/complete for explicit error handling
let plugin_info = if (which plugin-kms-info | is-not-empty) {
do { plugin-kms-info } | default { plugin_available: false, default_backend: "age" }
} else {
{ plugin_available: false, default_backend: "age" }
}

# if $plugin_info.plugin_available and $plugin_info.default_backend in ["rustyvault", "age"] {
# try {
# let start_time = (date now)
# let file_content = (open -r $file_path)
if $plugin_info.plugin_available and $plugin_info.default_backend in ["rustyvault", "age"] {
let start_time = (date now)
let file_content_result = (do { open -r $file_path } | complete)

# # Check if this is a KMS-encrypted file (not SOPS)
# if not ($file_content | str starts-with "sops:") and not ($file_content | str contains "sops_version") {
# let decrypted = (plugin-kms-decrypt $file_content --backend $plugin_info.default_backend)
# let elapsed = ((date now) - $start_time)
if $file_content_result.exit_code == 0 {
let file_content = ($file_content_result.stdout | str trim)

# if $debug {
# print $"⚡ Decrypted in ($elapsed) using plugin ($plugin_info.default_backend)"
# }
# Check if this is a KMS-encrypted file (not SOPS)
if not ($file_content | str starts-with "sops:") and not ($file_content | str contains "sops_version") {
let decrypt_result = (do { plugin-kms-decrypt $file_content --backend $plugin_info.default_backend } | complete)

# return $decrypted
# }
# } catch { |err|
# # Plugin failed, fall through to SOPS
# if $debug {
# print $"⚠️ Plugin decryption not applicable, using SOPS: ($err.msg)"
# }
# }
# }
if $decrypt_result.exit_code == 0 {
let decrypted = ($decrypt_result.stdout | str trim)
let elapsed = ((date now) - $start_time)

if $debug {
print $"⚡ Decrypted in ($elapsed) using plugin ($plugin_info.default_backend)"
}

return $decrypted
} else {
# Plugin decryption failed, fall through to SOPS
if $debug {
print $"⚠️ Plugin decryption failed, using SOPS fallback"
}
}
}
} else {
# File read failed, fall through to SOPS
if $debug {
print $"⚠️ Could not read file, using SOPS fallback"
}
}
}

# Use SOPS to decrypt (output goes to stdout, captured in memory)
let start_time = (date now)
@ -159,41 +170,49 @@ export def encrypt-config [
print $"Encrypting ($source_path) → ($target) using ($kms)"
}

# TODO: Re-enable plugin-based encryption after fixing try-catch syntax for Nushell 0.107
# Try plugin-based encryption for age and rustyvault (10x faster)
# let plugin_info = if (which plugin-kms-info | is-not-empty) {
# do { plugin-kms-info } | default { plugin_available: false, default_backend: "age" }
# } else {
# { plugin_available: false, default_backend: "age" }
# }
# Plugin-based encryption for age and rustyvault (10x faster)
# Refactored from try-catch to do/complete for explicit error handling
let plugin_info = if (which plugin-kms-info | is-not-empty) {
do { plugin-kms-info } | default { plugin_available: false, default_backend: "age" }
} else {
{ plugin_available: false, default_backend: "age" }
}

# if $plugin_info.plugin_available and $kms in ["age", "rustyvault"] {
# try {
# let start_time = (date now)
# let file_content = (open -r $source_path)
# let encrypted = (plugin-kms-encrypt $file_content --backend $kms)
# let elapsed = ((date now) - $start_time)
if $plugin_info.plugin_available and $kms in ["age", "rustyvault"] {
let start_time = (date now)
let file_content_result = (do { open -r $source_path } | complete)

# let ciphertext = if ($encrypted | describe) == "record" and "ciphertext" in $encrypted {
# $encrypted.ciphertext
# } else {
# $encrypted
# }
if $file_content_result.exit_code == 0 {
let file_content = ($file_content_result.stdout | str trim)
let encrypt_result = (do { plugin-kms-encrypt $file_content --backend $kms } | complete)

# $ciphertext | save --force $target
if $encrypt_result.exit_code == 0 {
let encrypted = ($encrypt_result.stdout | str trim)
let elapsed = ((date now) - $start_time)

# if $debug {
# print $"⚡ Encrypted in ($elapsed) using plugin ($kms)"
# }
# print $"✅ Encrypted successfully with plugin ($kms): ($target)"
# return
# } catch { |err|
# # Plugin failed, fall through to SOPS/CLI
# if $debug {
# print $"⚠️ Plugin encryption failed, using fallback: ($err.msg)"
# }
# }
# }
let ciphertext = if ($encrypted | describe) == "record" and "ciphertext" in $encrypted {
$encrypted.ciphertext
} else {
$encrypted
}

let save_result = (do { $ciphertext | save --force $target } | complete)

if $save_result.exit_code == 0 {
if $debug {
print $"⚡ Encrypted in ($elapsed) using plugin ($kms)"
}
print $"✅ Encrypted successfully with plugin ($kms): ($target)"
return
}
}
}

# Plugin encryption failed, fall through to SOPS/CLI
if $debug {
print $"⚠️ Plugin encryption failed, using fallback"
}
}

# Fallback: Encrypt based on KMS backend using SOPS/CLI
let start_time = (date now)

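The gate that decides between the plugin path and the SOPS fallback is just two string tests on the file body. Condensed, with an illustrative input:

```nu
let body = "sops:\n  version: 3.8.1"
let is_sops = (($body | str starts-with "sops:") or ($body | str contains "sops_version"))
# => true, so the code falls through to the SOPS decryption path
```
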
@ -1,5 +1,6 @@
# Configuration Encryption System Tests
# Comprehensive test suite for encryption functionality
# Error handling: Guard patterns (no try-catch for field access)

use encryption.nu *
use ../kms/client.nu *
@ -475,7 +476,8 @@ def test-encryption-validation [] {
def show-test-result [result: record] {
if $result.passed {
print $" ✅ ($result.test_name)"
if ($result | try { get skipped) }) catch { null } == true {
# Guard: Check if skipped field exists in result
if ("skipped" in ($result | columns)) and ($result | get skipped) == true {
print $" ⚠️ ($result.error)"
}
} else {

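The guard introduced here, membership in `columns` before reading the field, can also be written with optional access; both avoid try-catch. Illustrative:

```nu
let result = { passed: true, test_name: "roundtrip", skipped: true, error: "no key available" }

# Column-membership guard (as in the diff):
("skipped" in ($result | columns)) and ($result | get skipped) == true   # => true

# Equivalent with the optional cell-path operator:
($result.skipped? | default false)                                       # => true
```
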
311
nulib/lib_provisioning/config/interpolators.nu
Normal file
311
nulib/lib_provisioning/config/interpolators.nu
Normal file
@ -0,0 +1,311 @@
|
||||
# Module: Configuration Interpolators
|
||||
# Purpose: Handles variable substitution and interpolation in configuration values using templates and expressions.
|
||||
# Dependencies: None (core utility)
|
||||
|
||||
# Interpolation Engine - Handles variable substitution in configuration
|
||||
# Supports: environment variables, datetime, git info, SOPS config, provider references, advanced features
|
||||
|
||||
# Primary entry point: Interpolate all paths in configuration
|
||||
export def interpolate-config [
|
||||
config: record
|
||||
] {
|
||||
mut result = $config
|
||||
|
||||
# Get base path for interpolation
|
||||
let base_path = ($config | get -o paths.base | default "")
|
||||
|
||||
if ($base_path | is-not-empty) {
|
||||
# Interpolate the entire config structure
|
||||
$result = (interpolate-all-paths $result $base_path)
|
||||
}
|
||||
|
||||
$result
|
||||
}
|
||||
|
||||
# Interpolate variables in a string using ${path.to.value} syntax
|
||||
export def interpolate-string [
|
||||
text: string
|
||||
config: record
|
||||
] {
|
||||
mut result = $text
|
||||
|
||||
# Simple interpolation for {{paths.base}} pattern
|
||||
if ($result | str contains "{{paths.base}}") {
|
||||
let base_path = (get-config-value-internal $config "paths.base" "")
|
||||
$result = ($result | str replace --all "{{paths.base}}" $base_path)
|
||||
}
|
||||
|
||||
# Add more interpolation patterns as needed
|
||||
# This is a basic implementation - a full template engine would be more robust
|
||||
$result
|
||||
}
|
||||
|
||||
# Helper function to get nested configuration value using dot notation
|
||||
def get-config-value-internal [
|
||||
config: record
|
||||
path: string
|
||||
default_value: any = null
|
||||
] {
|
||||
let path_parts = ($path | split row ".")
|
||||
mut current = $config
|
||||
|
||||
for part in $path_parts {
|
||||
let immutable_current = $current
|
||||
let next_value = ($immutable_current | get -o $part | default null)
|
||||
if ($next_value | is-empty) {
|
||||
return $default_value
|
||||
}
|
||||
$current = $next_value
|
||||
}
|
||||
|
||||
$current
|
||||
}
|
||||
|
||||
# Enhanced interpolation function with comprehensive pattern support
|
||||
def interpolate-all-paths [
|
||||
config: record
|
||||
base_path: string
|
||||
] {
|
||||
# Convert to JSON for efficient string processing
|
||||
let json_str = ($config | to json)
|
||||
|
||||
# Start with existing pattern
|
||||
mut interpolated_json = ($json_str | str replace --all "{{paths.base}}" $base_path)
|
||||
|
||||
# Apply enhanced interpolation patterns
|
||||
$interpolated_json = (apply-enhanced-interpolation $interpolated_json $config)
|
||||
|
||||
# Convert back to record
|
||||
($interpolated_json | from json)
|
||||
}
|
||||
|
||||
# Apply enhanced interpolation patterns with security validation
|
||||
def apply-enhanced-interpolation [
|
||||
json_str: string
|
||||
config: record
|
||||
] {
|
||||
mut result = $json_str
|
||||
|
||||
# Environment variable interpolation with security checks
|
||||
$result = (interpolate-env-variables $result)
|
||||
|
||||
# Date and time interpolation
|
||||
$result = (interpolate-datetime $result)
|
||||
|
||||
# Git information interpolation
|
||||
$result = (interpolate-git-info $result)
|
||||
|
||||
# SOPS configuration interpolation
|
||||
$result = (interpolate-sops-config $result $config)
|
||||
|
||||
# Cross-section provider references
|
||||
$result = (interpolate-provider-refs $result $config)
|
||||
|
||||
# Advanced features: conditionals and functions
|
||||
$result = (interpolate-advanced-features $result $config)
|
||||
|
||||
$result
|
||||
}
|
||||
|
||||
# Interpolate environment variables with security validation
|
||||
def interpolate-env-variables [
|
||||
text: string
|
||||
] {
|
||||
mut result = $text
|
||||
|
||||
# Safe environment variables list (security)
|
||||
let safe_env_vars = [
|
||||
"HOME" "USER" "HOSTNAME" "PWD" "SHELL"
|
||||
"PROVISIONING" "PROVISIONING_WORKSPACE_PATH" "PROVISIONING_INFRA_PATH"
|
||||
"PROVISIONING_SOPS" "PROVISIONING_KAGE"
|
||||
]
|
||||
|
||||
for env_var in $safe_env_vars {
|
||||
let pattern = $"\\{\\{env\\.($env_var)\\}\\}"
|
||||
let env_value = ($env | get -o $env_var | default "")
|
||||
if ($env_value | is-not-empty) {
|
||||
$result = ($result | str replace --regex $pattern $env_value)
|
||||
}
|
||||
}
|
||||
|
||||
# Handle conditional environment variables like {{env.HOME || "/tmp"}}
|
||||
$result = (interpolate-conditional-env $result)
|
||||
|
||||
$result
|
||||
}
|
||||
|
||||
# Handle conditional environment variable interpolation
|
||||
def interpolate-conditional-env [
|
||||
text: string
|
||||
] {
|
||||
mut result = $text
|
||||
|
||||
# For now, implement basic conditional logic for common patterns
|
||||
if ($result | str contains "{{env.HOME || \"/tmp\"}}") {
|
||||
let home_value = ($env.HOME? | default "/tmp")
|
||||
$result = ($result | str replace --all "{{env.HOME || \"/tmp\"}}" $home_value)
|
||||
}
|
||||
|
||||
if ($result | str contains "{{env.USER || \"unknown\"}}") {
|
||||
let user_value = ($env.USER? | default "unknown")
|
||||
$result = ($result | str replace --all "{{env.USER || \"unknown\"}}" $user_value)
|
||||
}
|
||||
|
||||
$result
|
||||
}
|
||||
|
||||
# Interpolate date and time values
|
||||
def interpolate-datetime [
|
||||
text: string
|
||||
] {
|
||||
mut result = $text
|
||||
|
||||
# Current date in YYYY-MM-DD format
|
||||
let current_date = (date now | format date "%Y-%m-%d")
|
||||
$result = ($result | str replace --all "{{now.date}}" $current_date)
|
||||
|
||||
# Current timestamp (Unix timestamp)
|
||||
let current_timestamp = (date now | format date "%s")
|
||||
$result = ($result | str replace --all "{{now.timestamp}}" $current_timestamp)
|
||||
|
||||
# ISO 8601 timestamp
|
||||
let iso_timestamp = (date now | format date "%Y-%m-%dT%H:%M:%SZ")
|
||||
$result = ($result | str replace --all "{{now.iso}}" $iso_timestamp)
|
||||
|
||||
$result
|
||||
}
|
||||
|
||||
# Interpolate git information
|
||||
def interpolate-git-info [
|
||||
text: string
|
||||
] {
|
||||
mut result = $text
|
||||
|
||||
# Get git branch (skip to avoid hanging)
|
||||
let git_branch = "unknown"
|
||||
$result = ($result | str replace --all "{{git.branch}}" $git_branch)
|
||||
|
||||
# Get git commit hash (skip to avoid hanging)
|
||||
let git_commit = "unknown"
|
||||
$result = ($result | str replace --all "{{git.commit}}" $git_commit)
|
||||
|
||||
# Get git remote origin URL (skip to avoid hanging)
|
||||
# Note: Skipped due to potential hanging on network/credential prompts
|
||||
let git_origin = "unknown"
|
||||
$result = ($result | str replace --all "{{git.origin}}" $git_origin)
|
||||
|
||||
$result
|
||||
}

# Interpolate SOPS configuration references
def interpolate-sops-config [
    text: string
    config: record
] {
    mut result = $text

    # SOPS key file path
    let sops_key_file = ($config | get -o sops.age_key_file | default "")
    if ($sops_key_file | is-not-empty) {
        $result = ($result | str replace --all "{{sops.key_file}}" $sops_key_file)
    }

    # SOPS config path
    let sops_config_path = ($config | get -o sops.config_path | default "")
    if ($sops_config_path | is-not-empty) {
        $result = ($result | str replace --all "{{sops.config_path}}" $sops_config_path)
    }

    $result
}

# Interpolate cross-section provider references
def interpolate-provider-refs [
    text: string
    config: record
] {
    mut result = $text

    # AWS provider region
    let aws_region = ($config | get -o providers.aws.region | default "")
    if ($aws_region | is-not-empty) {
        $result = ($result | str replace --all "{{providers.aws.region}}" $aws_region)
    }

    # Default provider
    let default_provider = ($config | get -o providers.default | default "")
    if ($default_provider | is-not-empty) {
        $result = ($result | str replace --all "{{providers.default}}" $default_provider)
    }

    # UpCloud zone
    let upcloud_zone = ($config | get -o providers.upcloud.zone | default "")
    if ($upcloud_zone | is-not-empty) {
        $result = ($result | str replace --all "{{providers.upcloud.zone}}" $upcloud_zone)
    }

    $result
}

# Interpolate advanced features (function calls, environment-aware paths)
def interpolate-advanced-features [
    text: string
    config: record
] {
    mut result = $text

    # Function call: {{path.join(paths.base, "custom")}}
    if ($result | str contains "{{path.join(paths.base") {
        let base_path = ($config | get -o paths.base | default "")
        # Simple implementation for path.join with base path; --all replaces every match
        $result = ($result | str replace --all --regex "\\{\\{path\\.join\\(paths\\.base,\\s*\"([^\"]+)\"\\)\\}\\}" $"($base_path)/$1")
    }

    # Environment-aware paths: {{paths.base.${env}}}
    let current_env = ($config | get -o current_environment | default "dev")
    $result = ($result | str replace --all "{{paths.base.${env}}}" $"{{paths.base}}.($current_env)")

    $result
}

# Interpolate with depth limiting to prevent infinite recursion
export def interpolate-with-depth-limit [
    config: record
    base_path: string
    max_depth: int
] {
    mut result = $config
    mut current_depth = 0

    # Track interpolation patterns to detect loops
    mut seen_patterns = []

    while $current_depth < $max_depth {
        let pre_interpolation = ($result | to json)
        $result = (interpolate-all-paths $result $base_path)
        let post_interpolation = ($result | to json)

        # If no changes, we're done
        if $pre_interpolation == $post_interpolation {
            break
        }

        # Check for circular dependencies
        if ($post_interpolation in $seen_patterns) {
            error make {
                msg: $"Circular interpolation dependency detected at depth ($current_depth)"
            }
        }

        $seen_patterns = ($seen_patterns | append $post_interpolation)
        $current_depth = ($current_depth + 1)
    }

    if $current_depth >= $max_depth {
        error make {
            msg: $"Maximum interpolation depth ($max_depth) exceeded - possible infinite recursion"
        }
    }

    $result
}
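
# Illustrative usage sketch (assumes interpolate-all-paths is in scope):
#   let resolved = (interpolate-with-depth-limit $config "/usr/local/provisioning" 10)
# A config where value A references B and B references A back trips the
# circular-dependency error instead of looping forever.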
@ -1,79 +0,0 @@
# Lazy Configuration Loader
# Dynamically loads full loader.nu only when needed
# Provides fast-path for help and status commands

use ./loader-minimal.nu *

# Load full configuration loader (lazy-loaded on demand)
# Used by commands that actually need to parse config
def load-full-loader [] {
    # Import the full loader only when needed
    use ../config/loader.nu *
}

# Smart config loader that checks if full config is needed
# Returns minimal config for fast commands, full config for others
export def get-config-smart [
    --command: string = ""  # Current command being executed
    --debug = false
    --validate = true
    --environment: string
] {
    # Fast-path for help and status commands (don't need full config)
    let is_fast_command = (
        $command == "help" or
        $command == "status" or
        $command == "version" or
        (($command | str starts-with "workspace") and ($command | str contains "list"))
    )

    if $is_fast_command {
        # Return minimal config for fast operations
        return (get-minimal-config --debug=$debug --environment=$environment)
    }

    # For all other commands, load full configuration
    load-full-loader
    # This would call the full loader here, but since we're keeping loader.nu,
    # just return a marker that full config is needed
    "FULL_CONFIG_NEEDED"
}

# Get minimal configuration for fast operations
# Only includes workspace and environment detection
def get-minimal-config [
    --debug = false
    --environment: string
] {
    let current_environment = if ($environment | is-not-empty) {
        $environment
    } else {
        detect-current-environment
    }

    let active_workspace = (get-active-workspace)

    # Return minimal config record
    {
        workspace: $active_workspace
        environment: $current_environment
        debug: $debug
        paths: {
            base: (if ($active_workspace | is-not-empty) {
                $active_workspace.path
            } else {
                ""
            })
        }
    }
}

# Check if a command needs full config loading
export def command-needs-full-config [command: string] {
    let fast_commands = [
        "help", "version", "status", "workspace list", "workspace active",
        "plugin list", "env", "nu"
    ]

    not ($command in $fast_commands or ($command | str contains "help"))
}
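
# Illustrative sketch:
#   command-needs-full-config "help"            # => false (fast path)
#   command-needs-full-config "server create"   # => true  (full loader required)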

@ -1,147 +0,0 @@
# Minimal Configuration Loader
# Fast-path config loading for help commands and basic operations
# Contains ONLY essential path detection and workspace identification (~150 lines)

# Detect current environment from ENV, workspace name, or default
export def detect-current-environment [] {
    # Check explicit environment variable
    if ($env.PROVISIONING_ENVIRONMENT? | is-not-empty) {
        return $env.PROVISIONING_ENVIRONMENT
    }

    # Check if workspace name contains environment hints
    let active_ws = (get-active-workspace)
    if ($active_ws | is-not-empty) {
        let ws_name = $active_ws.name
        if ($ws_name | str contains "prod") { return "prod" }
        if ($ws_name | str contains "staging") { return "staging" }
        if ($ws_name | str contains "test") { return "test" }
        if ($ws_name | str contains "dev") { return "dev" }
    }

    # Check PWD for environment hints
    if ($env.PWD | str contains "prod") { return "prod" }
    if ($env.PWD | str contains "staging") { return "staging" }
    if ($env.PWD | str contains "test") { return "test" }
    if ($env.PWD | str contains "dev") { return "dev" }

    # Default environment
    "dev"
}

# Get the currently active workspace (from central user config)
export def get-active-workspace [] {
    let user_config_dir = ([$env.HOME "Library" "Application Support" "provisioning"] | path join)

    if not ($user_config_dir | path exists) {
        return null
    }

    # Load central user config
    let user_config_path = ($user_config_dir | path join "user_config.yaml")

    if not ($user_config_path | path exists) {
        return null
    }

    let user_config = (open $user_config_path)

    # Check if active workspace is set
    if ($user_config.active_workspace? == null) {
        null
    } else {
        # Find workspace in list (filter first: `first` errors on an empty list)
        let workspace_name = $user_config.active_workspace
        let matching = ($user_config.workspaces | where name == $workspace_name)

        if ($matching | is-empty) {
            null
        } else {
            let workspace = ($matching | first)
            {
                name: $workspace.name
                path: $workspace.path
            }
        }
    }
}

# Find project root by looking for nickel.mod or core/nulib directory
export def get-project-root [] {
    let potential_roots = [
        $env.PWD
        ($env.PWD | path dirname)
        ($env.PWD | path dirname | path dirname)
        ($env.PWD | path dirname | path dirname | path dirname)
    ]

    let matching_roots = ($potential_roots
        | where (($it | path join "nickel.mod" | path exists)
            or ($it | path join "core" "nulib" | path exists)))

    if ($matching_roots | length) > 0 {
        $matching_roots | first
    } else {
        $env.PWD
    }
}

# Get system defaults configuration path
export def get-defaults-config-path [] {
    let base_path = if ($env.PROVISIONING? | is-not-empty) {
        $env.PROVISIONING
    } else {
        "/usr/local/provisioning"
    }

    ($base_path | path join "provisioning" "config" "config.defaults.toml")
}

# Check if a file is encrypted with SOPS
export def check-if-sops-encrypted [file_path: string] {
    let file_exists = ($file_path | path exists)
    if not $file_exists {
        return false
    }

    # Read first few bytes to check for SOPS marker
    let content = (^bash -c $"head -c 100 \"($file_path)\"")

    # SOPS encrypted files contain "sops" key in the header
    ($content | str contains "sops")
}
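
# Illustrative sketch: SOPS-encrypted files carry "sops" metadata near the top,
# so the first 100 bytes are enough to detect them without parsing:
#   check-if-sops-encrypted "secrets.yaml"   # => true for an encrypted file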

# Get SOPS configuration path if it exists
export def find-sops-config-path [] {
    let possible_paths = [
        ($env.HOME | path join ".sops.yaml")
        ($env.PWD | path join ".sops.yaml")
        ($env.PWD | path join "sops" ".sops.yaml")
        ($env.PWD | path join ".decrypted" ".sops.yaml")
    ]

    let existing_paths = ($possible_paths | where ($it | path exists))

    if ($existing_paths | length) > 0 {
        $existing_paths | first
    } else {
        null
    }
}

# Update workspace last-used timestamp (non-critical, safe to fail silently)
export def update-workspace-last-used [workspace_name: string] {
    let user_config_dir = ([$env.HOME "Library" "Application Support" "provisioning"] | path join)

    if not ($user_config_dir | path exists) {
        return
    }

    let user_config_path = ($user_config_dir | path join "user_config.yaml")

    if not ($user_config_path | path exists) {
        return
    }

    # Safe fallback - if any part fails, silently continue
    # This is not critical path
}
File diff suppressed because it is too large
nulib/lib_provisioning/config/loader/core.nu
@ -0,0 +1,754 @@
# Module: Configuration Loader Core
# Purpose: Main configuration loading logic with hierarchical source merging and environment-specific overrides.
# Dependencies: interpolators, validators, context_manager, sops_handler, cache modules

# Core Configuration Loader Functions
# Implements main configuration loading and file handling logic

use std log

# Interpolation engine - handles variable substitution
use ../interpolators.nu *

# Context management - workspace and user config handling
use ../context_manager.nu *

# SOPS handler - encryption and decryption
use ../sops_handler.nu *

# Cache integration
use ../cache/core.nu *
use ../cache/metadata.nu *
use ../cache/config_manager.nu *
use ../cache/nickel.nu *
use ../cache/sops.nu *
use ../cache/final.nu *

# Main configuration loader - loads and merges all config sources
export def load-provisioning-config [
    --debug = false                 # Enable debug logging
    --validate = false              # Validate configuration (disabled by default for workspace-exempt commands)
    --environment: string           # Override environment (dev/prod/test)
    --skip-env-detection = false    # Skip automatic environment detection
    --no-cache = false              # Disable cache (use --no-cache to skip cache)
] {
    if $debug {
        # log debug "Loading provisioning configuration..."
    }

    # Detect current environment if not specified
    let current_environment = if ($environment | is-not-empty) {
        $environment
    } else if not $skip_env_detection {
        detect-current-environment
    } else {
        ""
    }

    if $debug and ($current_environment | is-not-empty) {
        # log debug $"Using environment: ($current_environment)"
    }

    # NEW HIERARCHY (lowest to highest priority):
    # 1. Workspace config: workspace/{name}/config/provisioning.yaml
    # 2. Provider configs: workspace/{name}/config/providers/*.toml
    # 3. Platform configs: workspace/{name}/config/platform/*.toml
    # 4. User context: ~/Library/Application Support/provisioning/ws_{name}.yaml
    # 5. Environment variables: PROVISIONING_*

    # Get active workspace
    let active_workspace = (get-active-workspace)

    # Try final config cache first (if cache enabled and --no-cache not set)
    if (not $no_cache) and ($active_workspace | is-not-empty) {
        let cache_result = (lookup-final-config $active_workspace $current_environment)

        if ($cache_result.valid? | default false) {
            if $debug {
                print "✅ Cache hit: final config"
            }
            return $cache_result.data
        }
    }

    mut config_sources = []

    if ($active_workspace | is-not-empty) {
        # Load workspace config - try Nickel first (new format), then YAML for backward compatibility
        let config_dir = ($active_workspace.path | path join "config")
        let ncl_config = ($config_dir | path join "config.ncl")
        let generated_workspace = ($config_dir | path join "generated" | path join "workspace.toml")
        let nickel_config = ($config_dir | path join "provisioning.ncl")
        let yaml_config = ($config_dir | path join "provisioning.yaml")

        # Priority order: generated TOML from TypeDialog > Nickel source (config.ncl) > legacy Nickel (provisioning.ncl) > legacy YAML
        let config_file = if ($generated_workspace | path exists) {
            # Use generated TOML from TypeDialog (preferred)
            $generated_workspace
        } else if ($ncl_config | path exists) {
            # Use Nickel source directly (will be exported to TOML on-demand)
            $ncl_config
        } else if ($nickel_config | path exists) {
            $nickel_config
        } else if ($yaml_config | path exists) {
            $yaml_config
        } else {
            null
        }

        let config_format = if ($config_file | is-not-empty) {
            if ($config_file | str ends-with ".ncl") {
                "nickel"
            } else if ($config_file | str ends-with ".toml") {
                "toml"
            } else {
                "yaml"
            }
        } else {
            ""
        }

        if ($config_file | is-not-empty) {
            $config_sources = ($config_sources | append {
                name: "workspace"
                path: $config_file
                required: true
                format: $config_format
            })
        }

        # Load provider configs (prefer generated from TypeDialog, fallback to manual)
        let generated_providers_dir = ($active_workspace.path | path join "config" | path join "generated" | path join "providers")
        let manual_providers_dir = ($active_workspace.path | path join "config" | path join "providers")

        # Load from generated directory (preferred)
        if ($generated_providers_dir | path exists) {
            # Note: `ls` already returns full paths in `name`
            let provider_configs = (ls $generated_providers_dir | where type == "file" and ($it.name | str ends-with '.toml') | get name)
            for provider_config in $provider_configs {
                $config_sources = ($config_sources | append {
                    name: $"provider-($provider_config | path basename)"
                    path: $provider_config
                    required: false
                    format: "toml"
                })
            }
        } else if ($manual_providers_dir | path exists) {
            # Fallback to manual TOML files if generated don't exist
            let provider_configs = (ls $manual_providers_dir | where type == "file" and ($it.name | str ends-with '.toml') | get name)
            for provider_config in $provider_configs {
                $config_sources = ($config_sources | append {
                    name: $"provider-($provider_config | path basename)"
                    path: $provider_config
                    required: false
                    format: "toml"
                })
            }
        }

        # Load platform configs (prefer generated from TypeDialog, fallback to manual)
        let workspace_config_ncl = ($active_workspace.path | path join "config" | path join "config.ncl")
        let generated_platform_dir = ($active_workspace.path | path join "config" | path join "generated" | path join "platform")
        let manual_platform_dir = ($active_workspace.path | path join "config" | path join "platform")

        # If Nickel config exists, ensure it's exported
        if ($workspace_config_ncl | path exists) {
            let export_result = (do {
                use ../export.nu *
                export-all-configs $active_workspace.path
            } | complete)
            if $export_result.exit_code != 0 {
                if $debug {
                    # log debug $"Nickel export failed: ($export_result.stderr)"
                }
            }
        }

        # Load from generated directory (preferred)
        if ($generated_platform_dir | path exists) {
            let platform_configs = (ls $generated_platform_dir | where type == "file" and ($it.name | str ends-with '.toml') | get name)
            for platform_config in $platform_configs {
                $config_sources = ($config_sources | append {
                    name: $"platform-($platform_config | path basename)"
                    path: $platform_config
                    required: false
                    format: "toml"
                })
            }
        } else if ($manual_platform_dir | path exists) {
            # Fallback to manual TOML files if generated don't exist
            let platform_configs = (ls $manual_platform_dir | where type == "file" and ($it.name | str ends-with '.toml') | get name)
            for platform_config in $platform_configs {
                $config_sources = ($config_sources | append {
                    name: $"platform-($platform_config | path basename)"
                    path: $platform_config
                    required: false
                    format: "toml"
                })
            }
        }

        # Load user context (highest config priority before env vars)
        let user_config_dir = ([$env.HOME "Library" "Application Support" "provisioning"] | path join)
        let user_context = ([$user_config_dir $"ws_($active_workspace.name).yaml"] | path join)
        if ($user_context | path exists) {
            $config_sources = ($config_sources | append {
                name: "user-context"
                path: $user_context
                required: false
                format: "yaml"
            })
        }
    } else {
        # Fallback: If no workspace active, try to find workspace from PWD
        # Try Nickel first, then YAML for backward compatibility
        let ncl_config = ($env.PWD | path join "config" | path join "config.ncl")
        let nickel_config = ($env.PWD | path join "config" | path join "provisioning.ncl")
        let yaml_config = ($env.PWD | path join "config" | path join "provisioning.yaml")

        let workspace_config = if ($ncl_config | path exists) {
            # Export Nickel config to TOML
            let export_result = (do {
                use ../export.nu *
                export-all-configs $env.PWD
            } | complete)
            if $export_result.exit_code != 0 {
                # Silently continue if export fails
            }
            {
                path: ($env.PWD | path join "config" | path join "generated" | path join "workspace.toml")
                format: "toml"
            }
        } else if ($nickel_config | path exists) {
            {
                path: $nickel_config
                format: "nickel"
            }
        } else if ($yaml_config | path exists) {
            {
                path: $yaml_config
                format: "yaml"
            }
        } else {
            null
        }

        if ($workspace_config | is-not-empty) {
            $config_sources = ($config_sources | append {
                name: "workspace"
                path: $workspace_config.path
                required: true
                format: $workspace_config.format
            })
        } else {
            # No active workspace - return empty config
            # Workspace enforcement in dispatcher.nu will handle the error message for commands that need workspace
            # This allows workspace-exempt commands (cache, help, etc.) to work
            return {}
        }
    }

    mut final_config = {}

    # Load and merge configurations
    mut user_context_data = {}
    for source in $config_sources {
        let format = ($source.format | default "auto")
        let config_data = (load-config-file $source.path $source.required $debug $format)

        # Ensure config_data is a record, not a string or other type
        if ($config_data | is-not-empty) {
            let safe_config = if ($config_data | describe | str contains "record") {
                $config_data
            } else if ($config_data | describe | str contains "string") {
                # If we got a string, try to parse it as YAML
                let yaml_result = (do {
                    $config_data | from yaml
                } | complete)
                if $yaml_result.exit_code == 0 {
                    $yaml_result.stdout
                } else {
                    {}
                }
            } else {
                {}
            }

            if ($safe_config | is-not-empty) {
                if $debug {
                    # log debug $"Loaded ($source.name) config from ($source.path)"
                }
                # Store user context separately for override processing
                if $source.name == "user-context" {
                    $user_context_data = $safe_config
                } else {
                    $final_config = (deep-merge $final_config $safe_config)
                }
            }
        }
    }

    # Apply user context overrides (highest config priority)
    if ($user_context_data | columns | length) > 0 {
        $final_config = (apply-user-context-overrides $final_config $user_context_data)
    }

    # Apply environment-specific overrides
    # Per ADR-003: Nickel is source of truth for environments (provisioning/schemas/config/environments/main.ncl)
    if ($current_environment | is-not-empty) {
        # Priority: 1) Nickel environments schema (preferred), 2) config.defaults.toml (fallback)

        # Try to load from Nickel first
        let nickel_environments = (load-environments-from-nickel)
        let env_config = if ($nickel_environments | is-empty) {
            # Fallback: try to get from current config TOML
            let current_config = $final_config
            let toml_environments = ($current_config | get -o environments | default {})
            if ($toml_environments | is-empty) {
                {}  # No environment config found
            } else {
                ($toml_environments | get -o $current_environment | default {})
            }
        } else {
            # Use Nickel environments
            ($nickel_environments | get -o $current_environment | default {})
        }

        if ($env_config | is-not-empty) {
            if $debug {
                # log debug $"Applying environment overrides for: ($current_environment)"
            }
            $final_config = (deep-merge $final_config $env_config)
        }
    }

    # Apply environment variables as final overrides
    $final_config = (apply-environment-variable-overrides $final_config $debug)

    # Store current environment in config for reference
    if ($current_environment | is-not-empty) {
        $final_config = ($final_config | upsert "current_environment" $current_environment)
    }

    # Interpolate variables in the final configuration
    $final_config = (interpolate-config $final_config)

    # Validate configuration if explicitly requested
    # By default validation is disabled to allow workspace-exempt commands (cache, help, etc.) to work
    if $validate {
        use ./validator.nu *
        let validation_result = (validate-config $final_config --detailed false --strict false)
        # The validate-config function will throw an error if validation fails when not in detailed mode
    }

    # Cache the final config (if cache enabled and --no-cache not set, ignore errors)
    if (not $no_cache) and ($active_workspace | is-not-empty) {
        cache-final-config $final_config $active_workspace $current_environment
    }

    if $debug {
        # log debug "Configuration loading completed"
    }

    $final_config
}

# Load a single configuration file (supports Nickel, YAML and TOML with automatic decryption)
export def load-config-file [
    file_path: string
    required = false
    debug = false
    format: string = "auto"  # auto, ncl, nickel, yaml, toml
    --no-cache = false  # Disable cache for this file
] {
    if not ($file_path | path exists) {
        if $required {
            print $"❌ Required configuration file not found: ($file_path)"
            exit 1
        } else {
            if $debug {
                # log debug $"Optional config file not found: ($file_path)"
            }
            return {}
        }
    }

    if $debug {
        # log debug $"Loading config file: ($file_path)"
    }

    # Determine format from file extension if auto
    let file_format = if $format == "auto" {
        let ext = ($file_path | path parse | get extension)
        match $ext {
            "ncl" => "ncl"
            "k" => "nickel"
            "yaml" | "yml" => "yaml"
            "toml" => "toml"
            _ => "toml"  # default to toml for backward compatibility
        }
    } else {
        $format
    }

    # Handle Nickel format (exports to JSON then parses)
    if $file_format == "ncl" {
        if $debug {
            # log debug $"Loading Nickel config file: ($file_path)"
        }
        let nickel_result = (do {
            nickel export --format json $file_path | from json
        } | complete)

        if $nickel_result.exit_code == 0 {
            return $nickel_result.stdout
        } else {
            if $required {
                print $"❌ Failed to load Nickel config ($file_path): ($nickel_result.stderr)"
                exit 1
            } else {
                if $debug {
                    # log debug $"Failed to load optional Nickel config: ($nickel_result.stderr)"
                }
                return {}
            }
        }
    }

    # Handle legacy Nickel format separately (requires nickel compiler)
    if $file_format == "nickel" {
        let decl_result = (load-nickel-config $file_path $required $debug --no-cache $no_cache)
        return $decl_result
    }

    # Check if file is encrypted and auto-decrypt (for YAML/TOML only)
    # Inline SOPS detection to avoid circular import
    if (check-if-sops-encrypted $file_path) {
        if $debug {
            # log debug $"Detected encrypted config, decrypting in memory: ($file_path)"
        }

        # Try SOPS cache first (if cache enabled and --no-cache not set)
        if (not $no_cache) {
            let sops_cache = (lookup-sops-cache $file_path)

            if ($sops_cache.valid? | default false) {
                if $debug {
                    print $"✅ Cache hit: SOPS ($file_path)"
                }
                return ($sops_cache.data | from yaml)
            }
        }

        # Decrypt in memory using SOPS
        let decrypted_content = (decrypt-sops-file $file_path)

        if ($decrypted_content | is-empty) {
            if $debug {
                print $"⚠️ Failed to decrypt ($file_path), attempting to load as plain file"
            }
            open $file_path
        } else {
            # Cache the decrypted content (if cache enabled and --no-cache not set)
            if (not $no_cache) {
                cache-sops-decrypt $file_path $decrypted_content
            }

            # Parse based on file extension
            match $file_format {
                "yaml" => ($decrypted_content | from yaml)
                "toml" => ($decrypted_content | from toml)
                "json" => ($decrypted_content | from json)
                _ => ($decrypted_content | from yaml)  # default to yaml
            }
        }
    } else {
        # Load unencrypted file with appropriate parser
        # Note: open already returns parsed records for YAML/TOML
        if ($file_path | path exists) {
            open $file_path
        } else {
            if $required {
                print $"❌ Configuration file not found: ($file_path)"
                exit 1
            } else {
                {}
            }
        }
    }
}

# Load legacy Nickel configuration file
def load-nickel-config [
    file_path: string
    required = false
    debug = false
    --no-cache = false
] {
    # Check if nickel command is available
    let nickel_exists = (which nickel | is-not-empty)
    if not $nickel_exists {
        if $required {
            print $"❌ Nickel compiler not found. Install Nickel to use .ncl config files"
            print $"   Install from: https://nickel-lang.io/"
            exit 1
        } else {
            if $debug {
                print $"⚠️ Nickel compiler not found, skipping Nickel config file: ($file_path)"
            }
            return {}
        }
    }

    # Try Nickel cache first (if cache enabled and --no-cache not set)
    if (not $no_cache) {
        let nickel_cache = (lookup-nickel-cache $file_path)

        if ($nickel_cache.valid? | default false) {
            if $debug {
                print $"✅ Cache hit: Nickel ($file_path)"
            }
            return $nickel_cache.data
        }
    }

    # Evaluate Nickel file (produces JSON output)
    # Use 'nickel export' for both package-based and standalone Nickel files
    let file_dir = ($file_path | path dirname)
    let file_name = ($file_path | path basename)
    let decl_mod_exists = (($file_dir | path join "nickel.mod") | path exists)

    let result = if $decl_mod_exists {
        # Package-based configs (SST pattern with nickel.mod)
        # Must run from the config directory so relative paths in nickel.mod resolve correctly
        (^sh -c $"cd '($file_dir)' && nickel export ($file_name) --format json" | complete)
    } else {
        # Standalone configs
        (^nickel export $file_path --format json | complete)
    }

    let decl_output = $result.stdout

    # Check if output is empty
    if ($decl_output | is-empty) {
        # Nickel compilation failed - return empty to trigger fallback to YAML
        if $debug {
            print $"⚠️ Nickel config compilation failed, fallback to YAML will be used"
        }
        return {}
    }

    # Parse JSON output (Nickel outputs JSON when --format json is specified)
    let parse_result = (do { $decl_output | from json } | complete)
    let parsed = if $parse_result.exit_code == 0 { $parse_result.stdout } else { null }

    if ($parsed | is-empty) or not ($parsed | describe | str starts-with "record") {
        if $debug {
            print $"⚠️ Failed to parse Nickel output as JSON"
        }
        return {}
    }

    # Extract workspace_config key if it exists (Nickel wraps output in variable name)
    let config = if (($parsed | columns) | any { |col| $col == "workspace_config" }) {
        $parsed.workspace_config
    } else {
        $parsed
    }

    if $debug {
        print $"✅ Loaded Nickel config from ($file_path)"
    }

    # Cache the compiled Nickel output (if cache enabled and --no-cache not set)
    if (not $no_cache) and ($config | describe | str starts-with "record") {
        cache-nickel-compile $file_path $config
    }

    $config
}

# Deep merge two configuration records (right takes precedence)
export def deep-merge [
    base: record
    override: record
] {
    mut result = $base

    for key in ($override | columns) {
        let override_value = ($override | get $key)
        let base_value = ($base | get -o $key | default null)

        if ($base_value | is-empty) {
            # Key doesn't exist in base (or is empty), add or set it
            $result = ($result | upsert $key $override_value)
        } else if (($base_value | describe | str starts-with "record") and ($override_value | describe | str starts-with "record")) {
            # Both are records, merge recursively
            # (describe returns "record<...>" for typed records, hence starts-with)
            $result = ($result | upsert $key (deep-merge $base_value $override_value))
        } else {
            # Override the value
            $result = ($result | upsert $key $override_value)
        }
    }

    $result
}
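
# Illustrative sketch:
#   deep-merge { a: 1, b: { c: 2 } } { b: { d: 3 }, e: 4 }
#   # => { a: 1, b: { c: 2, d: 3 }, e: 4 }  (nested records merge, scalars override)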

# Get a nested configuration value using dot notation
export def get-config-value [
    config: record
    path: string
    default_value: any = null
] {
    let path_parts = ($path | split row ".")
    mut current = $config

    for part in $path_parts {
        let immutable_current = $current
        let next_value = ($immutable_current | get -o $part | default null)
        if ($next_value | is-empty) {
            return $default_value
        }
        $current = $next_value
    }

    $current
}
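
# Illustrative sketch:
#   get-config-value { paths: { base: "/opt/prov" } } "paths.base" "/usr/local"
#   # => "/opt/prov"; returns the default when any path segment is missing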

# Helper function to create directory structure for user config
export def init-user-config [
    --template: string = "user"  # Template type: user, dev, prod, test
    --force = false              # Overwrite existing config
] {
    let config_dir = ($env.HOME | path join ".config" | path join "provisioning")

    if not ($config_dir | path exists) {
        mkdir $config_dir
        print $"Created user config directory: ($config_dir)"
    }

    let user_config_path = ($config_dir | path join "config.toml")

    # Determine template file based on template parameter
    let template_file = match $template {
        "user" => "config.user.toml.example"
        "dev" => "config.dev.toml.example"
        "prod" => "config.prod.toml.example"
        "test" => "config.test.toml.example"
        _ => {
            print $"❌ Unknown template: ($template). Valid options: user, dev, prod, test"
            return
        }
    }

    # Find the template file in the project
    let project_root = (get-project-root)
    let template_path = ($project_root | path join $template_file)

    if not ($template_path | path exists) {
        print $"❌ Template file not found: ($template_path)"
        print "Available templates should be in the project root directory"
        return
    }

    # Check if config already exists
    if ($user_config_path | path exists) and not $force {
        print $"⚠️ User config already exists: ($user_config_path)"
        print "Use --force to overwrite or choose a different template"
        print $"Current template: ($template)"
        return
    }

    # Copy template to user config
    cp $template_path $user_config_path
    print $"✅ Created user config from ($template) template: ($user_config_path)"
    print ""
    print "📝 Next steps:"
    print $"  1. Edit the config file: ($user_config_path)"
    print "  2. Update paths.base to point to your provisioning installation"
    print "  3. Configure your preferred providers and settings"
    print "  4. Test the configuration: ./core/nulib/provisioning validate config"
    print ""
    print $"💡 Template used: ($template_file)"

    # Show template-specific guidance
    match $template {
        "dev" => {
            print "🔧 Development template configured with:"
            print "  • Enhanced debugging enabled"
            print "  • Local provider as default"
            print "  • JSON output format"
            print "  • Check mode enabled by default"
        }
        "prod" => {
            print "🏭 Production template configured with:"
            print "  • Minimal logging for security"
            print "  • AWS provider as default"
            print "  • Strict validation enabled"
            print "  • Backup and monitoring settings"
        }
        "test" => {
            print "🧪 Testing template configured with:"
            print "  • Mock providers and safe defaults"
            print "  • Test isolation settings"
            print "  • CI/CD friendly configurations"
            print "  • Automatic cleanup enabled"
        }
        _ => {
            print "👤 User template configured with:"
            print "  • Balanced settings for general use"
            print "  • Comprehensive documentation"
            print "  • Safe defaults for all scenarios"
        }
    }
}

# Load environment configurations from Nickel schema
# Per ADR-003: Nickel as Source of Truth for all configuration
def load-environments-from-nickel [] {
    let project_root = (get-project-root)
    let environments_ncl = ($project_root | path join "provisioning" "schemas" "config" "environments" "main.ncl")

    if not ($environments_ncl | path exists) {
        # Fallback: return empty if Nickel file doesn't exist
        # Loader will then try to use config.defaults.toml if available
        return {}
    }

    # Export Nickel to JSON and parse
    let export_result = (do {
        nickel export --format json $environments_ncl
    } | complete)

    if $export_result.exit_code != 0 {
        # If Nickel export fails, fallback gracefully
        return {}
    }

    # Parse JSON output
    $export_result.stdout | from json
}

# Helper function to get project root directory
def get-project-root [] {
    # Try to find project root by looking for key files
    let potential_roots = [
        $env.PWD
        ($env.PWD | path dirname)
        ($env.PWD | path dirname | path dirname)
        ($env.PWD | path dirname | path dirname | path dirname)
        ($env.PWD | path dirname | path dirname | path dirname | path dirname)
    ]

    for root in $potential_roots {
        # Check for provisioning project indicators
        if (($root | path join "config.defaults.toml" | path exists) or
            ($root | path join "nickel.mod" | path exists) or
            ($root | path join "core" "nulib" "provisioning" | path exists)) {
            return $root
        }
    }

    # Fallback to current directory
    $env.PWD
}
nulib/lib_provisioning/config/loader/environment.nu
@ -0,0 +1,174 @@
# Module: Environment Detection & Management
# Purpose: Detects current environment (dev/prod/test) and applies environment-specific configuration overrides.
# Dependencies: None (core functions)

# Environment Detection and Configuration Functions
# Handles environment detection, validation, and environment-specific overrides

# Detect current environment from various sources
export def detect-current-environment [] {
    # Priority order for environment detection:
    # 1. PROVISIONING_ENV environment variable
    # 2. CI/CD environment markers
    # 3. Directory-based detection
    # 4. Default fallback

    # Check explicit environment variable
    if ($env.PROVISIONING_ENV? | is-not-empty) {
        return $env.PROVISIONING_ENV
    }

    # Check CI/CD environments
    if ($env.CI? | is-not-empty) {
        if ($env.GITHUB_ACTIONS? | is-not-empty) { return "ci" }
        if ($env.GITLAB_CI? | is-not-empty) { return "ci" }
        if ($env.JENKINS_URL? | is-not-empty) { return "ci" }
        return "test"  # Default for CI environments
    }

    # Check for development indicators
    if (($env.PWD | path join ".git" | path exists) or
        ($env.PWD | path join "development" | path exists) or
        ($env.PWD | path join "dev" | path exists)) {
        return "dev"
    }

    # Check for production indicators
    if (($env.HOSTNAME? | default "" | str contains "prod") or
        (($env.NODE_ENV? | default "" | str downcase) == "production") or
        (($env.ENVIRONMENT? | default "" | str downcase) == "production")) {
        return "prod"
    }

    # Check for test indicators
    if ((($env.NODE_ENV? | default "" | str downcase) == "test") or
        (($env.ENVIRONMENT? | default "" | str downcase) == "test")) {
        return "test"
    }

    # Default to development for interactive usage
    if ($env.TERM? | is-not-empty) {
        return "dev"
    }

    # Fallback
    return "dev"
}
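
# Illustrative sketch: with CI=true and GITHUB_ACTIONS=true this returns "ci";
# in an interactive shell inside a git checkout it returns "dev" (the dev
# indicators are checked before the production hostname heuristics).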

# Get available environments from configuration
export def get-available-environments [
    config: record
] {
    let environments_section = ($config | get -o "environments" | default {})
    $environments_section | columns
}

# Validate environment name
export def validate-environment [
    environment: string
    config: record
] {
    let valid_environments = ["dev" "test" "prod" "ci" "staging" "local"]
    let configured_environments = (get-available-environments $config)
    let all_valid = ($valid_environments | append $configured_environments | uniq)

    if ($environment in $all_valid) {
        { valid: true, message: "" }
    } else {
        {
            valid: false,
            message: $"Invalid environment '($environment)'. Valid options: ($all_valid | str join ', ')"
        }
    }
}
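
# Illustrative sketch:
#   validate-environment "staging" {}   # => { valid: true, message: "" }
#   validate-environment "qa" {}        # => { valid: false, message: "Invalid environment 'qa'. ..." }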

# Apply environment variable overrides to configuration
export def apply-environment-variable-overrides [
    config: record
    debug = false
] {
    mut result = $config

    # Map of environment variables to config paths with type conversion
    let env_mappings = {
        "PROVISIONING_DEBUG": { path: "debug.enabled", type: "bool" },
        "PROVISIONING_LOG_LEVEL": { path: "debug.log_level", type: "string" },
        "PROVISIONING_NO_TERMINAL": { path: "debug.no_terminal", type: "bool" },
        "PROVISIONING_CHECK": { path: "debug.check", type: "bool" },
        "PROVISIONING_METADATA": { path: "debug.metadata", type: "bool" },
        "PROVISIONING_OUTPUT_FORMAT": { path: "output.format", type: "string" },
        "PROVISIONING_FILE_VIEWER": { path: "output.file_viewer", type: "string" },
        "PROVISIONING_USE_SOPS": { path: "sops.use_sops", type: "bool" },
        "PROVISIONING_PROVIDER": { path: "providers.default", type: "string" },
        "PROVISIONING_WORKSPACE_PATH": { path: "paths.workspace", type: "string" },
        "PROVISIONING_INFRA_PATH": { path: "paths.infra", type: "string" },
        "PROVISIONING_SOPS": { path: "sops.config_path", type: "string" },
        "PROVISIONING_KAGE": { path: "sops.age_key_file", type: "string" }
    }

    for env_var in ($env_mappings | columns) {
        let env_value = ($env | get -o $env_var | default null)
        if ($env_value | is-not-empty) {
            let mapping = ($env_mappings | get $env_var)
            let config_path = $mapping.path
            let config_type = $mapping.type

            # Convert value to appropriate type
            let converted_value = match $config_type {
                "bool" => {
                    if ($env_value | describe) == "string" {
                        match ($env_value | str downcase) {
                            "true" | "1" | "yes" | "on" => true
                            "false" | "0" | "no" | "off" => false
                            _ => false
                        }
                    } else {
                        $env_value | into bool
                    }
                }
                "string" => $env_value
                _ => $env_value
            }

            if $debug {
                # log debug $"Applying env override: ($env_var) -> ($config_path) = ($converted_value)"
            }
            $result = (set-config-value $result $config_path $converted_value)
        }
    }

    $result
}
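
# Illustrative sketch: with PROVISIONING_DEBUG=true and PROVISIONING_PROVIDER=aws,
# the merged config gains { debug: { enabled: true }, providers: { default: "aws" } }.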

# Helper function to set nested config value using dot notation
# Recurses to the leaf key, creating intermediate records as needed
def set-config-value [
    config: record
    path: string
    value: any
] {
    let path_parts = ($path | split row ".")

    if ($path_parts | length) == 1 {
        # Top level - set directly
        return ($config | upsert ($path_parts | first) $value)
    }

    # Recurse: merge the updated child record back into its parent
    let head = ($path_parts | first)
    let rest = ($path_parts | skip 1 | str join ".")
    let child = ($config | get -o $head | default {})
    let safe_child = if ($child | describe | str starts-with "record") { $child } else { {} }

    $config | upsert $head (set-config-value $safe_child $rest $value)
}
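
# Illustrative sketch:
#   set-config-value {} "debug.log_level" "info"
#   # => { debug: { log_level: "info" } }  (intermediate records created on the way down)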
nulib/lib_provisioning/config/loader/mod.nu
@ -0,0 +1,15 @@
# Module: Configuration Loader System
# Purpose: Centralized configuration loading with hierarchical sources, validation, and environment management.
# Dependencies: interpolators, validators, context_manager, sops_handler, cache modules

# Core loading functionality
export use ./core.nu *

# Configuration validation
export use ./validator.nu *

# Environment detection and management
export use ./environment.nu *

# Testing and interpolation utilities
export use ./test.nu *
nulib/lib_provisioning/config/loader/test.nu
@ -0,0 +1,290 @@
# Module: Configuration Testing Utilities
# Purpose: Provides testing infrastructure for configuration loading, interpolation, and validation.
# Dependencies: interpolators, validators

# Configuration Loader - Testing and Interpolation Functions
# Provides testing utilities for configuration loading and interpolation

use ../interpolators.nu *
use ../validators.nu *

# Test interpolation with sample data
export def test-interpolation [
    --sample: string = "basic"  # Sample test data: basic, advanced, all
] {
    print "🧪 Testing Enhanced Interpolation System"
    print ""

    # Define test configurations based on sample type
    let test_config = match $sample {
        "basic" => {
            paths: { base: "/usr/local/provisioning" }
            test_patterns: {
                simple_path: "{{paths.base}}/config"
                env_home: "{{env.HOME}}/configs"
                current_date: "backup-{{now.date}}"
            }
        }
        "advanced" => {
            paths: { base: "/usr/local/provisioning" }
            providers: { aws: { region: "us-west-2" }, default: "aws" }
            sops: { key_file: "{{env.HOME}}/.age/key.txt" }
            test_patterns: {
                complex_path: "{{path.join(paths.base, \"custom\")}}"
                provider_ref: "Region: {{providers.aws.region}}"
                git_info: "Build: {{git.branch}}-{{git.commit}}"
                conditional: "{{env.HOME || \"/tmp\"}}/cache"
            }
        }
        _ => {
            paths: { base: "/usr/local/provisioning" }
            providers: { aws: { region: "us-west-2" }, default: "aws" }
            sops: { key_file: "{{env.HOME}}/.age/key.txt", config_path: "/etc/sops.yaml" }
            current_environment: "test"
            test_patterns: {
                all_patterns: "{{paths.base}}/{{env.USER}}/{{now.date}}/{{git.branch}}/{{providers.default}}"
                function_call: "{{path.join(paths.base, \"providers\")}}"
                sops_refs: "Key: {{sops.key_file}}, Config: {{sops.config_path}}"
                datetime: "{{now.date}} at {{now.timestamp}}"
            }
        }
    }

    # Test interpolation
    print $"Testing with ($sample) sample configuration..."
    print ""

    let base_path = "/usr/local/provisioning"
    let interpolated_config = (interpolate-all-paths $test_config $base_path)

    # Show results
    print "📋 Original patterns:"
    for key in ($test_config.test_patterns | columns) {
        let original = ($test_config.test_patterns | get $key)
        print $"  ($key): ($original)"
    }

    print ""
    print "✨ Interpolated results:"
    for key in ($interpolated_config.test_patterns | columns) {
        let interpolated = ($interpolated_config.test_patterns | get $key)
        print $"  ($key): ($interpolated)"
    }

    print ""

    # Validate interpolation
    let validation = (validate-interpolation $test_config --detailed true)
    if $validation.valid {
        print "✅ Interpolation validation passed"
    } else {
        print "❌ Interpolation validation failed:"
        for error in $validation.errors {
            print $"  Error: ($error.message)"
        }
    }

    if ($validation.warnings | length) > 0 {
        print "⚠️ Warnings:"
        for warning in $validation.warnings {
            print $"  Warning: ($warning.message)"
        }
    }

    print ""
    print $"📊 Summary: ($validation.summary.interpolation_patterns_detected) interpolation patterns processed"

    $interpolated_config
}

# Create comprehensive interpolation test suite
export def create-interpolation-test-suite [
    --output-file: string = "interpolation_test_results.json"
] {
    print "🧪 Creating Comprehensive Interpolation Test Suite"
    print "=================================================="
    print ""

    mut test_results = []

    # Test 1: Basic patterns
    print "🔍 Test 1: Basic Interpolation Patterns"
    let basic_test = (run-interpolation-test "basic")
    $test_results = ($test_results | append {
        test_name: "basic_patterns"
        passed: $basic_test.passed
        details: $basic_test.details
        timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
    })

    # Test 2: Environment variables
    print "🔍 Test 2: Environment Variable Interpolation"
    let env_test = (run-interpolation-test "environment")
    $test_results = ($test_results | append {
        test_name: "environment_variables"
        passed: $env_test.passed
        details: $env_test.details
        timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
    })

    # Test 3: Security validation
    print "🔍 Test 3: Security Validation"
    let security_test = (run-security-test)
    $test_results = ($test_results | append {
        test_name: "security_validation"
        passed: $security_test.passed
        details: $security_test.details
        timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
    })

    # Test 4: Advanced patterns
    print "🔍 Test 4: Advanced Interpolation Features"
    let advanced_test = (run-interpolation-test "advanced")
    $test_results = ($test_results | append {
        test_name: "advanced_patterns"
        passed: $advanced_test.passed
        details: $advanced_test.details
        timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
    })

    # Save results
    $test_results | to json | save --force $output_file

    # Summary
    let total_tests = ($test_results | length)
    let passed_tests = ($test_results | where passed == true | length)
    let failed_tests = ($total_tests - $passed_tests)

    print ""
    print "📊 Test Suite Summary"
    print "===================="
    print $"  Total tests: ($total_tests)"
    print $"  Passed: ($passed_tests)"
    print $"  Failed: ($failed_tests)"
    print ""

    if $failed_tests == 0 {
        print "✅ All interpolation tests passed!"
    } else {
        print "❌ Some interpolation tests failed!"
        print ""
        print "Failed tests:"
        for test in ($test_results | where passed == false) {
            print $"  • ($test.test_name): ($test.details.error)"
        }
    }

    print ""
    print $"📄 Detailed results saved to: ($output_file)"

    {
        total: $total_tests
        passed: $passed_tests
        failed: $failed_tests
        success_rate: (($passed_tests * 100) / $total_tests)
        results: $test_results
    }
}

# Run individual interpolation test
def run-interpolation-test [
    test_type: string
] {
    let test_result = (do {
        match $test_type {
            "basic" => {
                let test_config = {
                    paths: { base: "/test/path" }
                    test_value: "{{paths.base}}/config"
                }
                let result = (interpolate-all-paths $test_config "/test/path")
                let expected = "/test/path/config"
                let actual = ($result.test_value)

                if $actual == $expected {
                    { passed: true, details: { expected: $expected, actual: $actual } }
                } else {
                    { passed: false, details: { expected: $expected, actual: $actual, error: "Value mismatch" } }
                }
            }
            "environment" => {
                let test_config = {
                    paths: { base: "/test/path" }
                    test_value: "{{env.USER}}/config"
                }
                let result = (interpolate-all-paths $test_config "/test/path")
                let expected_pattern = ".*/config"  # USER should be replaced with something

                if ($result.test_value | str contains "/config") and not ($result.test_value | str contains "{{env.USER}}") {
                    { passed: true, details: { pattern: $expected_pattern, actual: $result.test_value } }
                } else {
                    { passed: false, details: { pattern: $expected_pattern, actual: $result.test_value, error: "Environment variable not interpolated" } }
                }
            }
            "advanced" => {
                let test_config = {
                    paths: { base: "/test/path" }
                    current_environment: "test"
                    test_values: {
                        date_test: "backup-{{now.date}}"
                        git_test: "build-{{git.branch}}"
                    }
                }
                let result = (interpolate-all-paths $test_config "/test/path")

                # Check if date was interpolated (should not contain {{now.date}})
                let date_ok = not ($result.test_values.date_test | str contains "{{now.date}}")
                # Check if git was interpolated (should not contain {{git.branch}})
                let git_ok = not ($result.test_values.git_test | str contains "{{git.branch}}")

                if $date_ok and $git_ok {
                    { passed: true, details: { date_result: $result.test_values.date_test, git_result: $result.test_values.git_test } }
                } else {
                    { passed: false, details: { date_result: $result.test_values.date_test, git_result: $result.test_values.git_test, error: "Advanced patterns not interpolated" } }
                }
            }
            _ => {
                { passed: false, details: { error: $"Unknown test type: ($test_type)" } }
            }
        }
    } | complete)

    if $test_result.exit_code != 0 {
        { passed: false, details: { error: $"Test execution failed: ($test_result.stderr)" } }
    } else {
        $test_result.stdout
    }
}

# Run security validation test
def run-security-test [] {
    let security_result = (do {
        # Test 1: Safe configuration should pass
        let safe_config = {
            paths: { base: "/safe/path" }
            test_value: "{{env.HOME}}/config"
        }

        let safe_result = (validate-interpolation-security $safe_config false)

        # Test 2: Unsafe configuration should fail
        let unsafe_config = {
            paths: { base: "/unsafe/path" }
            test_value: "{{env.PATH}}/config"  # PATH is considered unsafe
        }

        let unsafe_result = (validate-interpolation-security $unsafe_config false)

        if $safe_result.valid and (not $unsafe_result.valid) {
            { passed: true, details: { safe_passed: $safe_result.valid, unsafe_blocked: (not $unsafe_result.valid) } }
        } else {
            { passed: false, details: { safe_passed: $safe_result.valid, unsafe_blocked: (not $unsafe_result.valid), error: "Security validation not working correctly" } }
        }
    } | complete)

    if $security_result.exit_code != 0 {
        { passed: false, details: { error: $"Security test execution failed: ($security_result.stderr)" } }
    } else {
        $security_result.stdout
    }
}
nulib/lib_provisioning/config/loader/validator.nu
@ -0,0 +1,356 @@
# Module: Configuration Validator
# Purpose: Validates configuration structure, paths, data types, semantic rules, and file existence.
# Dependencies: loader_core for get-config-value

# Configuration Validation Functions
# Validates configuration structure, paths, data types, semantic rules, and files

# Validate configuration structure - checks required sections exist
export def validate-config-structure [
    config: record
] {
    let required_sections = ["core", "paths", "debug", "sops"]
    mut errors = []
    mut warnings = []

    for section in $required_sections {
        let section_value = ($config | get -o $section | default null)
        if ($section_value | is-empty) {
            $errors = ($errors | append {
                type: "missing_section",
                severity: "error",
                section: $section,
                message: $"Missing required configuration section: ($section)"
            })
        }
    }

    {
        valid: (($errors | length) == 0),
        errors: $errors,
        warnings: $warnings
    }
}
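
# Usage sketch (hypothetical values; each required section must be non-empty):
# let cfg = { core: {version: "1.0.0"}, paths: {base: "/opt/prov"}, debug: {enabled: false}, sops: {use_sops: false} }
# validate-config-structure $cfg
# # => { valid: true, errors: [], warnings: [] }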

# Validate path values - checks paths exist and are absolute
export def validate-path-values [
    config: record
] {
    let required_paths = ["base", "providers", "taskservs", "clusters"]
    mut errors = []
    mut warnings = []

    let paths = ($config | get -o paths | default {})

    for path_name in $required_paths {
        let path_value = ($paths | get -o $path_name | default null)

        if ($path_value | is-empty) {
            $errors = ($errors | append {
                type: "missing_path",
                severity: "error",
                path: $path_name,
                message: $"Missing required path: paths.($path_name)"
            })
        } else {
            # Check if path is absolute
            if not ($path_value | str starts-with "/") {
                $warnings = ($warnings | append {
                    type: "relative_path",
                    severity: "warning",
                    path: $path_name,
                    value: $path_value,
                    message: $"Path paths.($path_name) should be absolute, got: ($path_value)"
                })
            }

            # Check if base path exists (critical for system operation)
            if $path_name == "base" {
                if not ($path_value | path exists) {
                    $errors = ($errors | append {
                        type: "path_not_exists",
                        severity: "error",
                        path: $path_name,
                        value: $path_value,
                        message: $"Base path does not exist: ($path_value)"
                    })
                }
            }
        }
    }

    {
        valid: (($errors | length) == 0),
        errors: $errors,
        warnings: $warnings
    }
}

# Validate data types - checks configuration values have correct types
export def validate-data-types [
    config: record
] {
    mut errors = []
    mut warnings = []

    # Validate core.version follows semantic versioning pattern
    let core_version = ($config | get -o core.version | default null)
    if ($core_version | is-not-empty) {
        let version_pattern = '^\d+\.\d+\.\d+(-.+)?$'
        # Apply the declared pattern (the earlier part-count check accepted
        # non-numeric values like "a.b.c")
        if not ($core_version =~ $version_pattern) {
            $errors = ($errors | append {
                type: "invalid_version",
                severity: "error",
                field: "core.version",
                value: $core_version,
                message: $"core.version must follow semantic versioning format, got: ($core_version)"
            })
        }
    }

    # Validate debug.enabled is boolean
    let debug_enabled = ($config | get -o debug.enabled | default null)
    if ($debug_enabled | is-not-empty) {
        if (($debug_enabled | describe) != "bool") {
            $errors = ($errors | append {
                type: "invalid_type",
                severity: "error",
                field: "debug.enabled",
                value: $debug_enabled,
                expected: "bool",
                actual: ($debug_enabled | describe),
                message: $"debug.enabled must be boolean, got: ($debug_enabled | describe)"
            })
        }
    }

    # Validate debug.metadata is boolean
    let debug_metadata = ($config | get -o debug.metadata | default null)
    if ($debug_metadata | is-not-empty) {
        if (($debug_metadata | describe) != "bool") {
            $errors = ($errors | append {
                type: "invalid_type",
                severity: "error",
                field: "debug.metadata",
                value: $debug_metadata,
                expected: "bool",
                actual: ($debug_metadata | describe),
                message: $"debug.metadata must be boolean, got: ($debug_metadata | describe)"
            })
        }
    }

    # Validate sops.use_sops is boolean
    let sops_use = ($config | get -o sops.use_sops | default null)
    if ($sops_use | is-not-empty) {
        if (($sops_use | describe) != "bool") {
            $errors = ($errors | append {
                type: "invalid_type",
                severity: "error",
                field: "sops.use_sops",
                value: $sops_use,
                expected: "bool",
                actual: ($sops_use | describe),
                message: $"sops.use_sops must be boolean, got: ($sops_use | describe)"
            })
        }
    }

    {
        valid: (($errors | length) == 0),
        errors: $errors,
        warnings: $warnings
    }
}

# Validate semantic rules - business logic validation
export def validate-semantic-rules [
    config: record
] {
    mut errors = []
    mut warnings = []

    # Validate provider configuration
    let providers = ($config | get -o providers | default {})
    let default_provider = ($providers | get -o default | default null)

    if ($default_provider | is-not-empty) {
        let valid_providers = ["aws", "upcloud", "local"]
        if not ($default_provider in $valid_providers) {
            $errors = ($errors | append {
                type: "invalid_provider",
                severity: "error",
                field: "providers.default",
                value: $default_provider,
                valid_options: $valid_providers,
                message: $"Invalid default provider: ($default_provider). Valid options: ($valid_providers | str join ', ')"
            })
        }
    }

    # Validate log level
    let log_level = ($config | get -o debug.log_level | default null)
    if ($log_level | is-not-empty) {
        let valid_levels = ["trace", "debug", "info", "warn", "error"]
        if not ($log_level in $valid_levels) {
            $warnings = ($warnings | append {
                type: "invalid_log_level",
                severity: "warning",
                field: "debug.log_level",
                value: $log_level,
                valid_options: $valid_levels,
                message: $"Invalid log level: ($log_level). Valid options: ($valid_levels | str join ', ')"
            })
        }
    }

    # Validate output format
    let output_format = ($config | get -o output.format | default null)
    if ($output_format | is-not-empty) {
        let valid_formats = ["json", "yaml", "toml", "text"]
        if not ($output_format in $valid_formats) {
            $warnings = ($warnings | append {
                type: "invalid_output_format",
                severity: "warning",
                field: "output.format",
                value: $output_format,
                valid_options: $valid_formats,
                message: $"Invalid output format: ($output_format). Valid options: ($valid_formats | str join ', ')"
            })
        }
    }

    {
        valid: (($errors | length) == 0),
        errors: $errors,
        warnings: $warnings
    }
}

# Validate file existence - checks referenced files exist
export def validate-file-existence [
    config: record
] {
    mut errors = []
    mut warnings = []

    # Check SOPS configuration file
    let sops_config = ($config | get -o sops.config_path | default null)
    if ($sops_config | is-not-empty) {
        if not ($sops_config | path exists) {
            $warnings = ($warnings | append {
                type: "missing_sops_config",
                severity: "warning",
                field: "sops.config_path",
                value: $sops_config,
                message: $"SOPS config file not found: ($sops_config)"
            })
        }
    }

    # Check SOPS key files
    let key_paths = ($config | get -o sops.key_search_paths | default [])
    mut found_key = false

    for key_path in $key_paths {
        let expanded_path = ($key_path | str replace "~" $env.HOME)
        if ($expanded_path | path exists) {
            $found_key = true
            break
        }
    }

    if not $found_key and ($key_paths | length) > 0 {
        $warnings = ($warnings | append {
            type: "missing_sops_keys",
            severity: "warning",
            field: "sops.key_search_paths",
            value: $key_paths,
            message: $"No SOPS key files found in search paths: ($key_paths | str join ', ')"
        })
    }

    # Check critical configuration files
    let settings_file = ($config | get -o paths.files.settings | default null)
    if ($settings_file | is-not-empty) {
        if not ($settings_file | path exists) {
            $errors = ($errors | append {
                type: "missing_settings_file",
                severity: "error",
                field: "paths.files.settings",
                value: $settings_file,
                message: $"Settings file not found: ($settings_file)"
            })
        }
    }

    {
        valid: (($errors | length) == 0),
        errors: $errors,
        warnings: $warnings
    }
}

# Enhanced main validation function
export def validate-config [
    config: record
    --detailed = false # Show detailed validation results
    --strict = false   # Treat warnings as errors
] {
    # Run all validation checks
    let structure_result = (validate-config-structure $config)
    let paths_result = (validate-path-values $config)
    let types_result = (validate-data-types $config)
    let semantic_result = (validate-semantic-rules $config)
    let files_result = (validate-file-existence $config)

    # Combine all results
    let all_errors = (
        $structure_result.errors | append $paths_result.errors | append $types_result.errors |
        append $semantic_result.errors | append $files_result.errors
    )

    let all_warnings = (
        $structure_result.warnings | append $paths_result.warnings | append $types_result.warnings |
        append $semantic_result.warnings | append $files_result.warnings
    )

    let has_errors = ($all_errors | length) > 0
    let has_warnings = ($all_warnings | length) > 0

    # In strict mode, treat warnings as errors
    let final_valid = if $strict {
        not $has_errors and not $has_warnings
    } else {
        not $has_errors
    }

    # Throw error if validation fails and not in detailed mode
    if not $detailed and not $final_valid {
        let error_messages = ($all_errors | each { |err| $err.message })
        let warning_messages = if $strict { ($all_warnings | each { |warn| $warn.message }) } else { [] }
        let combined_messages = ($error_messages | append $warning_messages)

        error make {
            msg: ($combined_messages | str join "; ")
        }
    }

    # Return detailed results
    {
        valid: $final_valid,
        errors: $all_errors,
        warnings: $all_warnings,
        summary: {
            total_errors: ($all_errors | length),
            total_warnings: ($all_warnings | length),
            checks_run: 5,
            structure_valid: $structure_result.valid,
            paths_valid: $paths_result.valid,
            types_valid: $types_result.valid,
            semantic_valid: $semantic_result.valid,
            files_valid: $files_result.valid
        }
    }
}
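
# Usage sketch (hypothetical config record; flags declared with `= false`
# take an explicit boolean argument):
# let report = (validate-config $cfg --detailed true --strict false)
# if not $report.valid { print ($report.errors | each { |err| $err.message }) }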
@ -1,270 +0,0 @@
# Configuration Loader Orchestrator - Coordinates modular config loading system
# NUSHELL 0.109 COMPLIANT - Using reduce --fold (Rule 3), do-complete (Rule 5), each (Rule 8)

use std log

# Import all specialized modules
use ./cache/core.nu *
use ./cache/metadata.nu *
use ./cache/config_manager.nu *
use ./cache/nickel.nu *
use ./cache/sops.nu *
use ./cache/final.nu *

use ./loaders/file_loader.nu *
use ./validation/config_validator.nu *
use ./interpolation/core.nu *

use ./helpers/workspace.nu *
use ./helpers/merging.nu *
use ./helpers/environment.nu *

# Main configuration loader orchestrator
# Coordinates the full loading pipeline: detect → cache check → load → merge → validate → interpolate → cache → return
export def load-provisioning-config [
    --debug = false              # Enable debug logging
    --validate = false           # Validate configuration
    --environment: string        # Override environment (dev/prod/test)
    --skip-env-detection = false # Skip automatic environment detection
    --no-cache = false           # Disable cache
]: nothing -> record {
    if $debug {
        # log debug "Loading provisioning configuration..."
    }

    # Step 1: Detect current environment
    let current_environment = if ($environment | is-not-empty) {
        $environment
    } else if not $skip_env_detection {
        detect-current-environment
    } else {
        ""
    }

    if $debug and ($current_environment | is-not-empty) {
        # log debug $"Using environment: ($current_environment)"
    }

    # Step 2: Get active workspace
    let active_workspace = (get-active-workspace)

    # Step 3: Check final config cache (if enabled)
    if (not $no_cache) and ($active_workspace | is-not-empty) {
        let cache_result = (lookup-final-config $active_workspace $current_environment)
        if ($cache_result.valid? | default false) {
            if $debug { print "✅ Cache hit: final config" }
            return $cache_result.data
        }
    }

    # Step 4: Prepare config sources list
    let config_sources = (prepare-config-sources $active_workspace $debug)

    # Step 5: Load and merge all config sources (Rule 3: using reduce --fold)
    let loaded_config = ($config_sources | reduce --fold {base: {}, user_context: {}} {|source, result|
        let format = ($source.format | default "auto")
        let config_data = (load-config-file $source.path $source.required $debug $format)

        # Ensure config_data is a record
        let safe_config = if ($config_data | describe | str starts-with "record") {
            $config_data
        } else {
            {}
        }

        # Store user context separately for override processing
        if $source.name == "user-context" {
            $result | upsert user_context $safe_config
        } else if ($safe_config | is-not-empty) {
            if $debug {
                # log debug $"Loaded ($source.name) config"
            }
            $result | upsert base (deep-merge $result.base $safe_config)
        } else {
            $result
        }
    })

    # Step 6: Apply user context overrides
    let final_config = if (($loaded_config.user_context | columns | length) > 0) {
        apply-user-context-overrides $loaded_config.base $loaded_config.user_context
    } else {
        $loaded_config.base
    }

    # Step 7: Apply environment-specific overrides
    let env_config = if ($current_environment | is-not-empty) {
        let env_result = (do { $final_config | get $"environments.($current_environment)" } | complete)
        if $env_result.exit_code == 0 { $env_result.stdout } else { {} }
    } else {
        {}
    }

    let with_env_overrides = if ($env_config | is-not-empty) {
        if $debug {
            # log debug $"Applying environment overrides for: ($current_environment)"
        }
        (deep-merge $final_config $env_config)
    } else {
        $final_config
    }

    # Step 8: Apply environment variable overrides
    let with_env_vars = (apply-environment-variable-overrides $with_env_overrides $debug)

    # Step 9: Add current environment to config
    let with_current_env = if ($current_environment | is-not-empty) {
        ($with_env_vars | upsert "current_environment" $current_environment)
    } else {
        $with_env_vars
    }

    # Step 10: Interpolate variables in configuration
    let interpolated = (interpolate-config $with_current_env)

    # Step 11: Validate configuration (if requested)
    if $validate {
        let validation_result = (validate-config $interpolated --detailed false --strict false)
        # validate-config throws error if validation fails in non-detailed mode
    }

    # Step 12: Cache final config (ignore errors)
    if (not $no_cache) and ($active_workspace | is-not-empty) {
        do {
            cache-final-config $interpolated $active_workspace $current_environment
        } | complete | ignore
    }

    if $debug {
        # log debug "Configuration loading completed"
    }

    # Step 13: Return final configuration
    $interpolated
}
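
# Usage sketch (hypothetical workspace; boolean flags take explicit values):
# let cfg = (load-provisioning-config --environment "dev" --validate true)
# print $cfg.current_environment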

# Prepare list of configuration sources from workspace
# Returns: list of {name, path, required, format} records
def prepare-config-sources [active_workspace: any, debug: bool]: nothing -> list {
    if ($active_workspace | is-empty) {
        # Fallback: Try to find workspace from current directory
        prepare-fallback-sources $debug
    } else {
        prepare-workspace-sources $active_workspace $debug
    }
}

# Prepare config sources from active workspace directory
def prepare-workspace-sources [workspace: record, debug: bool]: nothing -> list {
    let config_dir = ($workspace.path | path join "config")
    let generated_workspace = ($config_dir | path join "generated" | path join "workspace.toml")
    let ncl_config = ($config_dir | path join "config.ncl")
    let nickel_config = ($config_dir | path join "provisioning.ncl")
    let yaml_config = ($config_dir | path join "provisioning.yaml")

    # Priority: Generated TOML > config.ncl > provisioning.ncl > provisioning.yaml
    let workspace_source = if ($generated_workspace | path exists) {
        {name: "workspace", path: $generated_workspace, required: true, format: "toml"}
    } else if ($ncl_config | path exists) {
        {name: "workspace", path: $ncl_config, required: true, format: "ncl"}
    } else if ($nickel_config | path exists) {
        {name: "workspace", path: $nickel_config, required: true, format: "nickel"}
    } else if ($yaml_config | path exists) {
        {name: "workspace", path: $yaml_config, required: true, format: "yaml"}
    } else {
        null
    }

    # Load provider configs (Rule 8: using each)
    let provider_sources = (
        let gen_dir = ($workspace.path | path join "config" | path join "generated" | path join "providers")
        let man_dir = ($workspace.path | path join "config" | path join "providers")
        let provider_dir = if ($gen_dir | path exists) { $gen_dir } else { $man_dir }

        if ($provider_dir | path exists) {
            do {
                ls $provider_dir | where type == file and ($it.name | str ends-with '.toml') | each {|f|
                    {
                        name: $"provider-($f.name | str replace '.toml' '')",
                        path: $f.name,
                        required: false,
                        format: "toml"
                    }
                }
            } | complete | if $in.exit_code == 0 { $in.stdout } else { [] }
        } else {
            []
        }
    )

    # Load platform configs (Rule 8: using each)
    let platform_sources = (
        let gen_dir = ($workspace.path | path join "config" | path join "generated" | path join "platform")
        let man_dir = ($workspace.path | path join "config" | path join "platform")
        let platform_dir = if ($gen_dir | path exists) { $gen_dir } else { $man_dir }

        if ($platform_dir | path exists) {
            do {
                ls $platform_dir | where type == file and ($it.name | str ends-with '.toml') | each {|f|
                    {
                        name: $"platform-($f.name | str replace '.toml' '')",
                        path: $f.name,
                        required: false,
                        format: "toml"
                    }
                }
            } | complete | if $in.exit_code == 0 { $in.stdout } else { [] }
        } else {
            []
        }
    )

    # Load user context (highest priority before env vars)
    let user_context_source = (
        let user_dir = ([$env.HOME "Library" "Application Support" "provisioning"] | path join)
        let user_context = ([$user_dir $"ws_($workspace.name).yaml"] | path join)
        if ($user_context | path exists) {
            [{name: "user-context", path: $user_context, required: false, format: "yaml"}]
        } else {
            []
        }
    )

    # Combine all sources (Rule 3: immutable appending)
    if ($workspace_source | is-not-empty) {
        ([$workspace_source] | append $provider_sources | append $platform_sources | append $user_context_source)
    } else {
        ([] | append $provider_sources | append $platform_sources | append $user_context_source)
    }
}

# Prepare config sources from current directory (fallback when no workspace active)
def prepare-fallback-sources [debug: bool]: nothing -> list {
    let ncl_config = ($env.PWD | path join "config" | path join "config.ncl")
    let nickel_config = ($env.PWD | path join "config" | path join "provisioning.ncl")
    let yaml_config = ($env.PWD | path join "config" | path join "provisioning.yaml")

    if ($ncl_config | path exists) {
        [{name: "workspace", path: $ncl_config, required: true, format: "ncl"}]
    } else if ($nickel_config | path exists) {
        [{name: "workspace", path: $nickel_config, required: true, format: "nickel"}]
    } else if ($yaml_config | path exists) {
        [{name: "workspace", path: $yaml_config, required: true, format: "yaml"}]
    } else {
        []
    }
}

# Apply user context overrides with proper priority
def apply-user-context-overrides [config: record, user_context: record]: nothing -> record {
    # User context is highest config priority (before env vars)
    deep-merge $config $user_context
}
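
# e.g. (assuming deep-merge from ./helpers/merging.nu recurses into nested
# records and prefers values from its second argument on conflicts):
# deep-merge {a: 1, b: {c: 2, d: 3}} {b: {c: 9}}
# # => {a: 1, b: {c: 9, d: 3}}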

# Export public functions from load-provisioning-config for backward compatibility
export use ./loaders/file_loader.nu [load-config-file]
export use ./validation/config_validator.nu [validate-config, validate-config-structure, validate-path-values, validate-data-types, validate-semantic-rules, validate-file-existence]
export use ./interpolation/core.nu [interpolate-config, interpolate-string, validate-interpolation, get-config-value]
export use ./helpers/workspace.nu [get-active-workspace, get-project-root, update-workspace-last-used]
export use ./helpers/merging.nu [deep-merge]
export use ./helpers/environment.nu [detect-current-environment, get-available-environments, apply-environment-variable-overrides, validate-environment]

@ -1,3 +1,7 @@
# Module: Configuration Module Exports
# Purpose: Central export point for all configuration system components (loader, accessor, validators, cache).
# Dependencies: loader, accessor, validators, interpolators, context_manager

# Configuration System Module Index
# Central import point for the new configuration system

@ -1,5 +1,6 @@
# Schema Validator
# Handles validation of infrastructure configurations against defined schemas
# Error handling: Guard patterns (no try-catch for field access)

# Server configuration schema validation
export def validate_server_schema [config: record] {
@ -14,7 +15,11 @@ export def validate_server_schema [config: record] {
    ]

    for field in $required_fields {
        if not ($config | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in config using columns
        let field_exists = ($field in ($config | columns))
        let field_value = if $field_exists { $config | get $field } else { null }

        if ($field_value | is-empty) {
            $issues = ($issues | append {
                field: $field
                message: $"Required field '($field)' is missing or empty"
@ -24,7 +29,8 @@ export def validate_server_schema [config: record] {
    }

    # Validate specific field formats
    if ($config | try { get hostname } catch { null } | is-not-empty) {
    # Guard: Check if hostname field exists
    if ("hostname" in ($config | columns)) {
        let hostname = ($config | get hostname)
        if not ($hostname =~ '^[a-z0-9][a-z0-9\-]*[a-z0-9]$') {
            $issues = ($issues | append {
@ -37,14 +43,16 @@ export def validate_server_schema [config: record] {
    }

    # Validate provider-specific requirements
    if ($config | try { get provider } catch { null } | is-not-empty) {
    # Guard: Check if provider field exists
    if ("provider" in ($config | columns)) {
        let provider = ($config | get provider)
        let provider_validation = (validate_provider_config $provider $config)
        $issues = ($issues | append $provider_validation.issues)
    }

    # Validate network configuration
    if ($config | try { get network_private_ip } catch { null } | is-not-empty) {
    # Guard: Check if network_private_ip field exists
    if ("network_private_ip" in ($config | columns)) {
        let ip = ($config | get network_private_ip)
        let ip_validation = (validate_ip_address $ip)
        if not $ip_validation.valid {
@ -72,7 +80,8 @@ export def validate_provider_config [provider: string, config: record] {
    # UpCloud specific validations
    let required_upcloud_fields = ["ssh_key_path", "storage_os"]
    for field in $required_upcloud_fields {
        if not ($config | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in config
        if not ($field in ($config | columns)) {
            $issues = ($issues | append {
                field: $field
                message: $"UpCloud provider requires '($field)' field"
@ -83,7 +92,8 @@ export def validate_provider_config [provider: string, config: record] {

    # Validate UpCloud zones
    let valid_zones = ["es-mad1", "fi-hel1", "fi-hel2", "nl-ams1", "sg-sin1", "uk-lon1", "us-chi1", "us-nyc1", "de-fra1"]
    let zone = ($config | try { get zone } catch { null })
    # Guard: Check if zone field exists
    let zone = if ("zone" in ($config | columns)) { $config | get zone } else { null }
    if ($zone | is-not-empty) and ($zone not-in $valid_zones) {
        $issues = ($issues | append {
            field: "zone"
@ -98,7 +108,8 @@ export def validate_provider_config [provider: string, config: record] {
    # AWS specific validations
    let required_aws_fields = ["instance_type", "ami_id"]
    for field in $required_aws_fields {
        if not ($config | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in config
        if not ($field in ($config | columns)) {
            $issues = ($issues | append {
                field: $field
                message: $"AWS provider requires '($field)' field"
@ -130,7 +141,8 @@ export def validate_network_config [config: record] {
    mut issues = []

    # Validate CIDR blocks
    if ($config | try { get priv_cidr_block } catch { null } | is-not-empty) {
    # Guard: Check if priv_cidr_block field exists
    if ("priv_cidr_block" in ($config | columns)) {
        let cidr = ($config | get priv_cidr_block)
        let cidr_validation = (validate_cidr_block $cidr)
        if not $cidr_validation.valid {
@ -144,7 +156,8 @@ export def validate_network_config [config: record] {
    }

    # Check for IP conflicts
    if ($config | try { get network_private_ip } catch { null } | is-not-empty) and ($config | try { get priv_cidr_block } catch { null } | is-not-empty) {
    # Guard: Check if both fields exist in config
    if ("network_private_ip" in ($config | columns)) and ("priv_cidr_block" in ($config | columns)) {
        let ip = ($config | get network_private_ip)
        let cidr = ($config | get priv_cidr_block)

@ -170,7 +183,8 @@ export def validate_taskserv_schema [taskserv: record] {
    let required_fields = ["name", "install_mode"]

    for field in $required_fields {
        if not ($taskserv | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in taskserv
        if not ($field in ($taskserv | columns)) {
            $issues = ($issues | append {
                field: $field
                message: $"Required taskserv field '($field)' is missing"
@ -181,7 +195,8 @@ export def validate_taskserv_schema [taskserv: record] {

    # Validate install mode
    let valid_install_modes = ["library", "container", "binary"]
    let install_mode = ($taskserv | try { get install_mode } catch { null })
    # Guard: Check if install_mode field exists
    let install_mode = if ("install_mode" in ($taskserv | columns)) { $taskserv | get install_mode } else { null }
    if ($install_mode | is-not-empty) and ($install_mode not-in $valid_install_modes) {
        $issues = ($issues | append {
            field: "install_mode"
@ -193,7 +208,8 @@ export def validate_taskserv_schema [taskserv: record] {
    }

    # Validate taskserv name exists
    let taskserv_name = ($taskserv | try { get name } catch { null })
    # Guard: Check if name field exists
    let taskserv_name = if ("name" in ($taskserv | columns)) { $taskserv | get name } else { null }
    if ($taskserv_name | is-not-empty) {
        let taskserv_exists = (taskserv_definition_exists $taskserv_name)
        if not $taskserv_exists {

83 nulib/lib_provisioning/config/sops_handler.nu Normal file
@ -0,0 +1,83 @@
# SOPS/Encryption Handler Engine
# Manages SOPS-encrypted configuration file detection, decryption, and validation

use std log

# Check if file is SOPS encrypted
export def check-if-sops-encrypted [file_path: string] {
    if not ($file_path | path exists) {
        return false
    }

    let file_content = (open $file_path --raw)

    # Check for SOPS markers
    if ($file_content | str contains "sops:") and ($file_content | str contains "ENC[") {
        return true
    }

    false
}

# Decrypt SOPS file
export def decrypt-sops-file [file_path: string] {
    # Find SOPS config
    let sops_config = find-sops-config-path

    # Decrypt using SOPS binary
    let result = if ($sops_config | is-not-empty) {
        ^sops --decrypt --config $sops_config $file_path | complete
    } else {
        ^sops --decrypt $file_path | complete
    }

    if $result.exit_code != 0 {
        return ""
    }

    $result.stdout
}

# Find SOPS configuration file
export def find-sops-config-path [] {
    # Check common locations
    let locations = [
        ".sops.yaml"
        ".sops.yml"
        ($env.PWD | path join ".sops.yaml")
        ($env.HOME | path join ".config" | path join "provisioning" | path join "sops.yaml")
    ]

    for loc in $locations {
        if ($loc | path exists) {
            return $loc
        }
    }

    ""
}

# Handle encrypted configuration file - wraps decryption logic
export def handle-encrypted-file [
    file_path: string
    config: record
] {
    if (check-if-sops-encrypted $file_path) {
        let decrypted = (decrypt-sops-file $file_path)
        if ($decrypted | is-not-empty) {
            # Determine file format from extension
            let ext = ($file_path | path parse | get extension)
            match $ext {
                "yaml" | "yml" => ($decrypted | from yaml)
                "toml" => ($decrypted | from toml)
                "json" => ($decrypted | from json)
                _ => ($decrypted | from yaml)
            }
        } else {
            {}
        }
    } else {
        # File is not encrypted, return empty to indicate no handling needed
        {}
    }
}
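
# Usage sketch (hypothetical path; the config record is only passed through):
# let secrets = (handle-encrypted-file "/workspace/config/secrets.yaml" $cfg)
# if ($secrets | is-not-empty) { print "decrypted SOPS payload loaded" }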

237 nulib/lib_provisioning/config/validators.nu Normal file
@ -0,0 +1,237 @@
# Module: Configuration Validators
# Purpose: Provides validation functions for configuration integrity, types, and semantic correctness.
# Dependencies: None (core utility)

# Configuration Validation and Detection Engine
# Validates configuration structures and detects potential security/dependency issues

use std log

# Validate interpolation patterns and detect potential issues
export def validate-interpolation [
    config: record
    --detailed = false # Show detailed validation results
] {
    mut errors = []
    mut warnings = []

    # Convert config to JSON for pattern detection
    let json_str = ($config | to json)

    # Check for unresolved interpolation patterns
    let unresolved_patterns = (detect-unresolved-patterns $json_str)
    if ($unresolved_patterns | length) > 0 {
        $errors = ($errors | append {
            type: "unresolved_interpolation"
            severity: "error"
            patterns: $unresolved_patterns
            message: $"Unresolved interpolation patterns found: ($unresolved_patterns | str join ', ')"
        })
    }

    # Check for circular dependencies
    let circular_deps = (detect-circular-dependencies $json_str)
    if ($circular_deps | length) > 0 {
        $errors = ($errors | append {
            type: "circular_dependency"
            severity: "error"
            dependencies: $circular_deps
            message: $"Circular interpolation dependencies detected: ($circular_deps | str join ', ')"
        })
    }

    # Check for unsafe environment variable access
    let unsafe_env_vars = (detect-unsafe-env-patterns $json_str)
    if ($unsafe_env_vars | length) > 0 {
        $warnings = ($warnings | append {
            type: "unsafe_env_access"
            severity: "warning"
            variables: $unsafe_env_vars
            message: $"Potentially unsafe environment variable access: ($unsafe_env_vars | str join ', ')"
        })
    }

    # Validate git repository context
    let git_validation = (validate-git-context $json_str)
    if not $git_validation.valid {
        $warnings = ($warnings | append {
            type: "git_context"
            severity: "warning"
            message: $git_validation.message
        })
    }

    let has_errors = ($errors | length) > 0
    let has_warnings = ($warnings | length) > 0

    if not $detailed and $has_errors {
        let error_messages = ($errors | each { |err| $err.message })
        error make {
            msg: ($error_messages | str join "; ")
        }
    }

    {
        valid: (not $has_errors),
        errors: $errors,
        warnings: $warnings,
        summary: {
            total_errors: ($errors | length),
            total_warnings: ($warnings | length),
            interpolation_patterns_detected: (count-interpolation-patterns $json_str)
        }
    }
}

# Security-hardened interpolation with input validation
export def secure-interpolation [
    config: record
    --allow-unsafe = false # Allow potentially unsafe patterns
    --max-depth = 5        # Maximum interpolation depth
] {
    # Security checks before interpolation
    let security_validation = (validate-interpolation-security $config $allow_unsafe)

    if not $security_validation.valid {
        error make {
            msg: $"Security validation failed: ($security_validation.errors | str join '; ')"
        }
    }

    # Apply interpolation with depth limiting
    let base_path = ($config | get -o paths.base | default "")
    if ($base_path | is-not-empty) {
        interpolate-with-depth-limit $config $base_path $max_depth
    } else {
        $config
    }
}

# Detect unresolved interpolation patterns
export def detect-unresolved-patterns [
    text: string
] {
    # Known pattern prefixes that the interpolation engine resolves
    let known_patterns = [
        "paths.base" "env\\." "now\\." "git\\." "sops\\." "providers\\." "path\\.join"
    ]

    mut unresolved = []

    # Basic detection - a full implementation would match every {{...}}
    # occurrence against the known prefixes above
    if ($text | str contains "{{unknown.") {
        $unresolved = ($unresolved | append "unknown.*")
    }

    $unresolved
}

# Detect circular interpolation dependencies
export def detect-circular-dependencies [
    text: string
] {
    mut circular_deps = []

    # Simple detection for self-referencing patterns: a paths.base value that
    # still contains {{paths.base}} after serialization (regex match; a plain
    # substring check of "paths.base.*{{paths.base}}" could never match)
    if ($text | str contains "{{paths.base}}") and ($text =~ 'paths\.base.*\{\{paths\.base\}\}') {
        $circular_deps = ($circular_deps | append "paths.base -> paths.base")
    }

    $circular_deps
}

# Detect unsafe environment variable patterns
export def detect-unsafe-env-patterns [
    text: string
] {
    mut unsafe_vars = []

    # Patterns that might be dangerous
    let dangerous_patterns = ["PATH" "LD_LIBRARY_PATH" "PYTHONPATH" "SHELL" "PS1"]

    for pattern in $dangerous_patterns {
        if ($text | str contains $"{{env.($pattern)}}") {
            $unsafe_vars = ($unsafe_vars | append $pattern)
        }
    }

    $unsafe_vars
}
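
# e.g. detect-unsafe-env-patterns '{"bin": "{{env.PATH}}/tool"}'  # => ["PATH"]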

# Validate git repository context for git interpolations
export def validate-git-context [
    text: string
] {
    if ($text | str contains "{{git.") {
        # Check if we're in a git repository
        let git_check = (do { ^git rev-parse --git-dir err> (if $nu.os-info.name == "windows" { "NUL" } else { "/dev/null" }) } | complete)
        let is_git_repo = ($git_check.exit_code == 0)

        if not $is_git_repo {
            return {
                valid: false
                message: "Git interpolation patterns detected but not in a git repository"
            }
        }
    }

    { valid: true, message: "" }
}

# Count interpolation patterns for metrics
export def count-interpolation-patterns [
    text: string
] {
    # Count all {{...}} patterns by finding matches
    # Simple approximation: count occurrences of "{{"
    let pattern_count = ($text | str replace --all "{{" "\n{{" | lines | where ($it | str contains "{{") | length)
    $pattern_count
}
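
# e.g. count-interpolation-patterns "{{env.HOME}}/cfg and {{paths.base}}/bin"
# # => 2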

# Validate interpolation security
def validate-interpolation-security [
    config: record
    allow_unsafe: bool
] {
    mut errors = []
    let json_str = ($config | to json)

    # Check for code injection patterns
    let dangerous_patterns = [
        "\\$\\(" "\\`" "\\;" "\\|\\|" "\\&&" "rm " "sudo " "eval " "exec "
    ]

    for pattern in $dangerous_patterns {
        if ($json_str =~ $pattern) {
            $errors = ($errors | append $"Potential code injection pattern detected: ($pattern)")
        }
    }

    # Check for unsafe environment variable access
    if not $allow_unsafe {
        let unsafe_env_vars = ["PATH" "LD_LIBRARY_PATH" "PYTHONPATH" "PS1" "PROMPT_COMMAND"]
        for var in $unsafe_env_vars {
            if ($json_str | str contains $"{{env.($var)}}") {
                $errors = ($errors | append $"Unsafe environment variable access: ($var)")
            }
        }
    }

    # Check for path traversal attempts
    if (($json_str | str contains "../") or ($json_str | str contains "..\\")) {
        $errors = ($errors | append "Path traversal attempt detected")
    }

    {
        valid: (($errors | length) == 0)
        errors: $errors
    }
}
@ -29,32 +29,31 @@ export def load-config-from-mcp [mcp_url: string]: nothing -> record {
        }
    }

    try {
        let response = (
            http post $mcp_url --content-type "application/json" ($request | to json)
        )

        if "error" in ($response | columns) {
            error make {
                msg: $"MCP error: ($response.error.message)"
                label: {text: $"Code: ($response.error.code)"}
            }
        }

        if "result" not-in ($response | columns) {
            error make {msg: "Invalid MCP response: missing result"}
        }

        print "✅ Configuration loaded from MCP server"
        $response.result

    } catch {|err|
    # Call MCP server (no try-catch)
    let post_result = (do { http post $mcp_url --content-type "application/json" ($request | to json) } | complete)
    if $post_result.exit_code != 0 {
        error make {
            msg: $"Failed to load config from MCP: ($mcp_url)"
            label: {text: $err.msg}
            label: {text: $post_result.stderr}
            help: "Ensure MCP server is running and accessible"
        }
    }

    let response = ($post_result.stdout)

    if "error" in ($response | columns) {
        error make {
            msg: $"MCP error: ($response.error.message)"
            label: {text: $"Code: ($response.error.code)"}
        }
    }

    if "result" not-in ($response | columns) {
        error make {msg: "Invalid MCP response: missing result"}
    }

    print "✅ Configuration loaded from MCP server"
    $response.result
}
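
# Pattern note: `do { ... } | complete` yields a record of exit_code, stdout,
# and stderr, so HTTP failures surface as data instead of thrown errors.
# A minimal sketch of the shape this code relies on:
# let r = (do { http get "http://localhost:9999/missing" } | complete)
# if $r.exit_code != 0 { print $r.stderr } else { print $r.stdout }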

# Load configuration from REST API
@ -66,23 +65,24 @@ export def load-config-from-mcp [mcp_url: string]: nothing -> record {
export def load-config-from-api [api_url: string]: nothing -> record {
    print $"🌐 Loading configuration from API: ($api_url)"

    try {
        let response = (http get $api_url --max-time 30sec)

        if "config" not-in ($response | columns) {
            error make {msg: "Invalid API response: missing 'config' field"}
        }

        print "✅ Configuration loaded from API"
        $response.config

    } catch {|err|
    # Call API (no try-catch)
    let get_result = (do { http get $api_url --max-time 30sec } | complete)
    if $get_result.exit_code != 0 {
        error make {
            msg: $"Failed to load config from API: ($api_url)"
            label: {text: $err.msg}
            label: {text: $get_result.stderr}
            help: "Check API endpoint and network connectivity"
        }
    }

    let response = ($get_result.stdout)

    if "config" not-in ($response | columns) {
        error make {msg: "Invalid API response: missing 'config' field"}
    }

    print "✅ Configuration loaded from API"
    $response.config
}

# Send notification to webhook
@ -94,15 +94,14 @@ export def load-config-from-api [api_url: string]: nothing -> record {
# @param payload: Notification payload record
# @returns: Nothing
export def notify-webhook [webhook_url: string, payload: record]: nothing -> nothing {
    try {
        http post $webhook_url --content-type "application/json" ($payload | to json)

        null
    } catch {|err|
    # Send webhook notification (no try-catch, graceful error handling)
    let post_result = (do { http post $webhook_url --content-type "application/json" ($payload | to json) } | complete)
    if $post_result.exit_code != 0 {
        # Don't fail deployment on webhook errors, just log
        print $"⚠️ Warning: Failed to send webhook notification: ($err.msg)"
        null
        print $"⚠️ Warning: Failed to send webhook notification: ($post_result.stderr)"
    }

    null
}

# Call Rust installer binary with arguments
@ -117,23 +116,15 @@ export def call-installer [args: list<string>]: nothing -> record {

    print $"🚀 Calling installer: ($installer_path) ($args | str join ' ')"

    try {
        let output = (^$installer_path ...$args | complete)
    # Execute installer binary (no try-catch)
    let output = (do { ^$installer_path ...$args } | complete)

        {
            success: ($output.exit_code == 0)
            exit_code: $output.exit_code
            stdout: $output.stdout
            stderr: $output.stderr
            timestamp: (date now)
        }
    } catch {|err|
        {
            success: false
            exit_code: -1
            error: $err.msg
            timestamp: (date now)
        }
    {
        success: ($output.exit_code == 0)
        exit_code: $output.exit_code
        stdout: $output.stdout
        stderr: $output.stderr
        timestamp: (date now)
    }
}

@ -168,21 +159,21 @@ export def run-installer-interactive []: nothing -> record {

    print $"🚀 Launching interactive installer: ($installer_path)"

    try {
        # Run without capturing output (interactive mode)
        ^$installer_path
    # Run interactive installer (no try-catch)
    let result = (do { ^$installer_path } | complete)

    if $result.exit_code == 0 {
        {
            success: true
            mode: "interactive"
            message: "Interactive installer completed"
            timestamp: (date now)
        }
    } catch {|err|
    } else {
        {
            success: false
            mode: "interactive"
            error: $err.msg
            error: $result.stderr
            timestamp: (date now)
        }
    }
@ -281,24 +272,23 @@ export def query-mcp-status [mcp_url: string, deployment_id: string]: nothing ->
        }
    }

    try {
        let response = (
            http post $mcp_url --content-type "application/json" ($request | to json)
        )

        if "error" in ($response | columns) {
            error make {
                msg: $"MCP error: ($response.error.message)"
            }
        }

        $response.result

    } catch {|err|
    # Query MCP status (no try-catch)
    let post_result = (do { http post $mcp_url --content-type "application/json" ($request | to json) } | complete)
    if $post_result.exit_code != 0 {
        error make {
            msg: $"Failed to query MCP status: ($err.msg)"
            msg: $"Failed to query MCP status: ($post_result.stderr)"
        }
    }

    let response = ($post_result.stdout)

    if "error" in ($response | columns) {
        error make {
            msg: $"MCP error: ($response.error.message)"
        }
    }

    $response.result
}

# Register deployment with API
@ -318,30 +308,33 @@ export def register-deployment-with-api [api_url: string, config: record]: nothi
        started_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
    }

    try {
        let response = (
            http post $api_url --content-type "application/json" ($payload | to json)
        )

        if "deployment_id" not-in ($response | columns) {
            error make {msg: "API did not return deployment_id"}
        }

        print $"✅ Deployment registered with API: ($response.deployment_id)"

        {
            success: true
            deployment_id: $response.deployment_id
            api_url: $api_url
        }

    } catch {|err|
        print $"⚠️ Warning: Failed to register with API: ($err.msg)"
        {
    # Register deployment with API (no try-catch)
    let post_result = (do { http post $api_url --content-type "application/json" ($payload | to json) } | complete)
    if $post_result.exit_code != 0 {
        print $"⚠️ Warning: Failed to register with API: ($post_result.stderr)"
        return {
            success: false
            error: $err.msg
            error: $post_result.stderr
        }
    }

    let response = ($post_result.stdout)

    if "deployment_id" not-in ($response | columns) {
        print "⚠️ Warning: API did not return deployment_id"
        return {
            success: false
            error: "API did not return deployment_id"
        }
    }

    print $"✅ Deployment registered with API: ($response.deployment_id)"

    {
        success: true
        deployment_id: $response.deployment_id
        api_url: $api_url
    }
}

# Update deployment status via API
@ -359,15 +352,14 @@ export def update-deployment-status [
]: nothing -> record {
    let update_url = $"($api_url)/($deployment_id)/status"

    try {
        http patch $update_url --content-type "application/json" ($status | to json)

        {success: true}

    } catch {|err|
        print $"⚠️ Warning: Failed to update deployment status: ($err.msg)"
        {success: false, error: $err.msg}
    # Update deployment status (no try-catch, graceful error handling)
    let patch_result = (do { http patch $update_url --content-type "application/json" ($status | to json) } | complete)
    if $patch_result.exit_code != 0 {
        print $"⚠️ Warning: Failed to update deployment status: ($patch_result.stderr)"
        return {success: false, error: $patch_result.stderr}
    }

    {success: true}
}

# Send Slack notification
@ -478,24 +470,23 @@ export def execute-mcp-tool [
        }
    }

    try {
        let response = (
            http post $mcp_url --content-type "application/json" ($request | to json)
        )

        if "error" in ($response | columns) {
            error make {
                msg: $"MCP tool execution error: ($response.error.message)"
            }
        }

        $response.result

    } catch {|err|
    # Execute MCP tool (no try-catch)
    let post_result = (do { http post $mcp_url --content-type "application/json" ($request | to json) } | complete)
    if $post_result.exit_code != 0 {
        error make {
            msg: $"Failed to execute MCP tool: ($err.msg)"
            msg: $"Failed to execute MCP tool: ($post_result.stderr)"
        }
    }

    let response = ($post_result.stdout)

    if "error" in ($response | columns) {
        error make {
            msg: $"MCP tool execution error: ($response.error.message)"
        }
    }

    $response.result
}

# Get installer binary path (helper function)

@ -3,6 +3,9 @@
# Multi-Region HA Workspace Deployment Script
# Orchestrates deployment across US East (DigitalOcean), EU Central (Hetzner), Asia Pacific (AWS)
# Features: Regional health checks, VPN tunnels, global DNS, failover configuration
# Error handling: Result pattern (hybrid, no inline try-catch)

use lib_provisioning/result.nu *

def main [--debug: bool = false, --region: string = "all"] {
    print "🌍 Multi-Region High Availability Deployment"
@ -108,44 +111,52 @@ def validate_environment [] {

    # Validate Nickel configuration
    print " Validating Nickel configuration..."
    try {
        nickel export workspace.ncl | from json | null
        print " ✓ Nickel configuration is valid"
    } catch {|err|
        error make {msg: $"Nickel validation failed: ($err)"}
    let nickel_result = (try-wrap { nickel export workspace.ncl | from json | null })

    if (is-err $nickel_result) {
        error make {msg: $"Nickel validation failed: ($nickel_result.err)"}
    }

    print " ✓ Nickel configuration is valid"

    # Validate config.toml
    print " Validating config.toml..."
    try {
        let config = (open config.toml)
        print " ✓ config.toml is valid"
    } catch {|err|
        error make {msg: $"config.toml validation failed: ($err)"}

    if not ("config.toml" | path exists) {
        error make {msg: "config.toml not found"}
    }

    # Test provider connectivity
    let config_result = (try-wrap { open config.toml })

    if (is-err $config_result) {
        error make {msg: $"config.toml validation failed: ($config_result.err)"}
    }

    print " ✓ config.toml is valid"

    # Test provider connectivity using bash-wrap helper (no inline try-catch)
    print " Testing provider connectivity..."
    try {
        doctl account get | null
        print " ✓ DigitalOcean connectivity verified"
    } catch {|err|
        error make {msg: $"DigitalOcean connectivity failed: ($err)"}
    }

    try {
        hcloud server list | null
        print " ✓ Hetzner connectivity verified"
    } catch {|err|
        error make {msg: $"Hetzner connectivity failed: ($err)"}
    # DigitalOcean connectivity
    let do_result = (bash-wrap "doctl account get")
    if (is-err $do_result) {
        error make {msg: $"DigitalOcean connectivity failed: ($do_result.err)"}
    }
    print " ✓ DigitalOcean connectivity verified"

    try {
        aws sts get-caller-identity | null
        print " ✓ AWS connectivity verified"
    } catch {|err|
        error make {msg: $"AWS connectivity failed: ($err)"}
    # Hetzner connectivity
    let hz_result = (bash-wrap "hcloud server list")
    if (is-err $hz_result) {
        error make {msg: $"Hetzner connectivity failed: ($hz_result.err)"}
    }
    print " ✓ Hetzner connectivity verified"

    # AWS connectivity
    let aws_result = (bash-wrap "aws sts get-caller-identity")
    if (is-err $aws_result) {
        error make {msg: $"AWS connectivity failed: ($aws_result.err)"}
    }
    print " ✓ AWS connectivity verified"
}
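
# The Result helpers used above come from lib_provisioning/result.nu (not part
# of this diff). A minimal sketch of the shapes this script assumes - names
# and bodies are illustrative, not the canonical definitions:
# export def try-wrap [action: closure] {
#     let r = (do $action | complete)
#     if $r.exit_code == 0 { {ok: $r.stdout} } else { {err: $r.stderr} }
# }
# export def bash-wrap [cmd: string] { try-wrap { ^bash -c $cmd } }
# export def is-err [result: record] { "err" in ($result | columns) }
# export def match-result [result: record, on_ok: closure, on_err: closure] {
#     if (is-err $result) { do $on_err $result.err } else { do $on_ok $result.ok }
# }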
|
||||
|
def deploy_us_east_digitalocean [] {
@ -215,19 +226,13 @@ def deploy_us_east_digitalocean [] {

    print " Creating DigitalOcean PostgreSQL database (3-node Multi-AZ)..."

    try {
        doctl databases create \
            --engine pg \
            --version 14 \
            --region "nyc3" \
            --num-nodes 3 \
            --size "db-s-2vcpu-4gb" \
            --name "us-db-primary" | null
    # Create database using bash-wrap helper (no inline try-catch)
    let db_result = (bash-wrap "doctl databases create --engine pg --version 14 --region nyc3 --num-nodes 3 --size db-s-2vcpu-4gb --name us-db-primary")

        print " ✓ Database creation initiated (may take 10-15 minutes)"
    } catch {|err|
        print $" ⚠ Database creation error (may already exist): ($err)"
    }
    (match-result $db_result
        {|_| print " ✓ Database creation initiated (may take 10-15 minutes)"}
        {|err| print $" ⚠ Database creation error \(may already exist\): ($err)"}
    )
}

def deploy_eu_central_hetzner [] {
@ -269,7 +274,7 @@ def deploy_eu_central_hetzner [] {
            --network eu-central-network \
            --format json | from json)

        print $" ✓ Created server: eu-app-($i) (ID: ($response.server.id))"
        print $" ✓ Created server: eu-app-($i) \(ID: ($response.server.id)\)"
        $response.server.id
    }
)
@ -379,7 +384,7 @@ def deploy_asia_pacific_aws [] {
            --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=asia-app-($i)}]" | from json)

        let instance_id = $response.Instances.0.InstanceId
        print $" ✓ Created instance: asia-app-($i) (ID: ($instance_id))"
        print $" ✓ Created instance: asia-app-($i) \(ID: ($instance_id)\)"
        $instance_id
    }
)
@ -412,16 +417,14 @@ def deploy_asia_pacific_aws [] {
    print $" ✓ Created ALB: ($lb.LoadBalancers.0.LoadBalancerArn)"

    print " Creating AWS RDS read replica..."
    try {
        aws rds create-db-instance-read-replica \
            --region ap-southeast-1 \
            --db-instance-identifier "asia-db-replica" \
            --source-db-instance-identifier "us-db-primary" | null

        print " ✓ Read replica creation initiated"
    } catch {|err|
        print $" ⚠ Read replica creation error (may already exist): ($err)"
    }
    # Create read replica using bash-wrap helper (no inline try-catch)
    let replica_result = (bash-wrap "aws rds create-db-instance-read-replica --region ap-southeast-1 --db-instance-identifier asia-db-replica --source-db-instance-identifier us-db-primary")

    (match-result $replica_result
        {|_| print " ✓ Read replica creation initiated"}
        {|err| print $" ⚠ Read replica creation error \(may already exist\): ($err)"}
    )
}

def setup_vpn_tunnels [] {
@ -429,16 +432,14 @@ def setup_vpn_tunnels [] {

    # US to EU VPN
    print " Creating US East → EU Central VPN tunnel..."
    try {
        aws ec2 create-vpn-gateway \
            --region us-east-1 \
            --type ipsec.1 \
            --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=us-eu-vpn-gw}]" | null

        print " ✓ VPN gateway created (manual completion required)"
    } catch {|err|
        print $" ℹ VPN setup note: ($err)"
    }
    # Create VPN gateway using bash-wrap helper (no inline try-catch)
    let vpn_result = (bash-wrap "aws ec2 create-vpn-gateway --region us-east-1 --type ipsec.1 --tag-specifications ResourceType=vpn-gateway,Tags=[{Key=Name,Value=us-eu-vpn-gw}]")

    (match-result $vpn_result
        {|_| print " ✓ VPN gateway created (manual completion required)"}
        {|err| print $" ℹ VPN setup note: ($err)"}
    )

    # EU to APAC VPN
    print " Creating EU Central → Asia Pacific VPN tunnel..."
@ -451,28 +452,35 @@ def setup_vpn_tunnels [] {
def setup_global_dns [] {
    print " Setting up Route53 geolocation routing..."

    try {
        let hosted_zones = (aws route53 list-hosted-zones | from json)
    # List hosted zones using bash-wrap helper (no inline try-catch)
    let zones_result = (bash-wrap "aws route53 list-hosted-zones")

        if (($hosted_zones.HostedZones | length) > 0) {
            let zone_id = $hosted_zones.HostedZones.0.Id
    (match-result $zones_result
        {|output|
            # Parse JSON
            let hosted_zones = ($output | from json)

            print $" ✓ Using hosted zone: ($zone_id)"
            if (($hosted_zones.HostedZones | length) > 0) {
                let zone_id = $hosted_zones.HostedZones.0.Id

            print " Creating regional DNS records with health checks..."
            print " Note: DNS record creation requires actual endpoint IPs"
            print " Run after regional deployment to get endpoint IPs"
                print $" ✓ Using hosted zone: ($zone_id)"

            print " US East endpoint: us.api.example.com"
            print " EU Central endpoint: eu.api.example.com"
            print " Asia Pacific endpoint: asia.api.example.com"
        } else {
            print " ℹ No hosted zones found. Create one with:"
            print " aws route53 create-hosted-zone --name api.example.com --caller-reference $(date +%s)"
                print " Creating regional DNS records with health checks..."
                print " Note: DNS record creation requires actual endpoint IPs"
                print " Run after regional deployment to get endpoint IPs"

                print " US East endpoint: us.api.example.com"
                print " EU Central endpoint: eu.api.example.com"
                print " Asia Pacific endpoint: asia.api.example.com"
            } else {
                print " ℹ No hosted zones found. Create one with:"
                print " aws route53 create-hosted-zone --name api.example.com --caller-reference \$(date +%s)"
            }
        }
    } catch {|err|
        print $" ⚠ Route53 setup note: ($err)"
    }
        {|err|
            print $" ⚠ Route53 setup note: ($err)"
        }
    )
}

def setup_database_replication [] {
@ -486,14 +494,14 @@ def setup_database_replication [] {
    mut attempts = 0

    while $attempts < $max_attempts {
        try {
            let db = (doctl databases get us-db-primary --format Status --no-header)
            if $db == "active" {
        # Guard: Check database status (silently retry on error)
        let db_result = (bash-wrap "doctl databases get us-db-primary --format Status --no-header")
        if (is-ok $db_result) {
            let status = $db_result.ok
            if $status == "active" {
                print " ✓ Primary database is active"
                break
            }
        } catch {
            # Database not ready yet
        }

        sleep 30sec
@ -508,43 +516,85 @@ def setup_database_replication [] {

def verify_multi_region_deployment [] {
    print " Verifying DigitalOcean resources..."
    try {
        let do_droplets = (doctl compute droplet list --format Name,Status --no-header)
        print $" ✓ Found ($do_droplets | split row "\n" | length) droplets"
    # Guard: Verify DigitalOcean droplets
    let do_droplets_result = (bash-wrap "doctl compute droplet list --format Name,Status --no-header")
    (match-result $do_droplets_result
        {|output|
            print $" ✓ Found \(($output | split row \"\\n\" | length)\) droplets"
            ok $output
        }
        {|err|
            print $" ⚠ Error checking DigitalOcean: ($err)"
            err $err
        }
    ) | null

        let do_lbs = (doctl compute load-balancer list --format Name --no-header)
        print $" ✓ Found load balancer"
    } catch {|err|
        print $" ⚠ Error checking DigitalOcean: ($err)"
    }
    # Guard: Verify DigitalOcean load balancer
    let do_lbs_result = (bash-wrap "doctl compute load-balancer list --format Name --no-header")
    (match-result $do_lbs_result
        {|output|
            print $" ✓ Found load balancer"
            ok $output
        }
        {|err|
            print $" ⚠ Error checking DigitalOcean load balancer: ($err)"
            err $err
        }
    ) | null

    print " Verifying Hetzner resources..."
    try {
        let hz_servers = (hcloud server list --format Name,Status)
        print " ✓ Hetzner servers verified"
    # Guard: Verify Hetzner servers
    let hz_servers_result = (bash-wrap "hcloud server list --format Name,Status")
    (match-result $hz_servers_result
        {|output|
            print " ✓ Hetzner servers verified"
            ok $output
        }
        {|err|
            print $" ⚠ Error checking Hetzner: ($err)"
            err $err
        }
    ) | null

        let hz_lbs = (hcloud load-balancer list --format Name)
        print " ✓ Hetzner load balancer verified"
    } catch {|err|
        print $" ⚠ Error checking Hetzner: ($err)"
    }
    # Guard: Verify Hetzner load balancer
    let hz_lbs_result = (bash-wrap "hcloud load-balancer list --format Name")
    (match-result $hz_lbs_result
        {|output|
            print " ✓ Hetzner load balancer verified"
            ok $output
        }
        {|err|
            print $" ⚠ Error checking Hetzner load balancer: ($err)"
            err $err
        }
    ) | null

    print " Verifying AWS resources..."
    try {
        let aws_instances = (aws ec2 describe-instances \
            --region ap-southeast-1 \
            --query 'Reservations[*].Instances[*].InstanceId' \
            --output text | split row " " | length)
        print $" ✓ Found ($aws_instances) EC2 instances"
    # Guard: Verify AWS EC2 instances
    let aws_instances_result = (bash-wrap "aws ec2 describe-instances --region ap-southeast-1 --query 'Reservations[*].Instances[*].InstanceId' --output text | split row \" \" | length")
    (match-result $aws_instances_result
        {|output|
            print $" ✓ Found ($output) EC2 instances"
            ok $output
        }
        {|err|
            print $" ⚠ Error checking AWS: ($err)"
            err $err
        }
    ) | null

        let aws_lbs = (aws elbv2 describe-load-balancers \
            --region ap-southeast-1 \
            --query 'LoadBalancers[*].LoadBalancerName' \
            --output text)
        print " ✓ Application Load Balancer verified"
    } catch {|err|
        print $" ⚠ Error checking AWS: ($err)"
    }
    # Guard: Verify AWS load balancers
    let aws_lbs_result = (bash-wrap "aws elbv2 describe-load-balancers --region ap-southeast-1 --query 'LoadBalancers[*].LoadBalancerName' --output text")
    (match-result $aws_lbs_result
        {|output|
            print " ✓ Application Load Balancer verified"
            ok $output
        }
        {|err|
            print $" ⚠ Error checking AWS load balancers: ($err)"
            err $err
        }
    ) | null

    print ""
    print " Summary:"

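# A note on the `(match-result ...) | null` lines above: piping into
# `null` is not a Nushell command; the conventional way to discard a
# pipeline's output is `| ignore`. A hedged sketch of the intended
# shape, reusing the assumed helpers from the earlier sketch:
let check = (bash-wrap "doctl account get")
(match-result $check
    {|output| ok $output}
    {|err| err $err}
) | ignore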
@ -1,3 +1,7 @@
# Module: Extension Discovery System
# Purpose: Discovers and loads available extensions from filesystem and Gitea (deferred v2.1).
# Dependencies: loader for configuration

# Extension Discovery and Search
# Discovers extensions across OCI registries, Gitea, and local sources


@ -1,3 +1,7 @@
# Module: Extension Loader
# Purpose: Dynamically loads and initializes extensions, manages extension lifecycle.
# Dependencies: discovery, mod

# Extension Loader
# Discovers and loads extensions from multiple sources
use ../config/accessor.nu *

@ -245,13 +245,17 @@ export def fluent-clear-caches [] -> void {
# }
# ```
export def is-fluent-daemon-available [] -> bool {
    try {
    let result = (do {
        let daemon_url = (get-cli-daemon-url)
        let response = (http get $"($daemon_url)/fluent/health" --timeout 500ms)

        ($response | from json | .status == "healthy")
    } catch {
    } | complete)

    if $result.exit_code != 0 {
        false
    } else {
        $result.stdout
    }
}

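# `do { ... } | complete` returns a record with `stdout`, `stderr`, and
# `exit_code` fields instead of letting a failing external command
# throw — that is the pattern this commit swaps in for try-catch.
# A minimal, self-contained illustration:
let probe = (do { ^sh -c "echo healthy" } | complete)
let healthy = ($probe.exit_code == 0 and ($probe.stdout | str trim) == "healthy")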
@ -374,10 +378,14 @@ export def fluent-translate-or [
    --locale (-l): string = "en-US"
    --args (-a): record = {}
] -> string {
    try {
    let result = (do {
        fluent-translate $message_id --locale $locale --args $args
    } catch {
    } | complete)

    if $result.exit_code != 0 {
        $default
    } else {
        $result.stdout
    }
}


@ -1,5 +1,6 @@
# AI Agent Interface
# Provides programmatic interface for automated infrastructure validation and fixing
# Error handling: Guard patterns (no try-catch for field access)

use validator.nu
use report_generator.nu *
@ -300,12 +301,24 @@ def extract_component_from_issue [issue: record] {

def extract_current_version [issue: record] {
    # Extract current version from issue details
    $issue.details | parse --regex 'version (\d+\.\d+\.\d+)' | try { get 0.capture1 } catch { "unknown" }
    let parsed = ($issue.details | parse --regex 'version (\d+\.\d+\.\d+)')
    # Guard: Check if parse result exists and has the capture1 column
    if ($parsed | length) > 0 and ("capture1" in ($parsed | get 0 | columns)) {
        $parsed | get 0.capture1
    } else {
        "unknown"
    }
}

def extract_recommended_version [issue: record] {
    # Extract recommended version from suggested fix
    $issue.suggested_fix | parse --regex 'to (\d+\.\d+\.\d+)' | try { get 0.capture1 } catch { "latest" }
    let parsed = ($issue.suggested_fix | parse --regex 'to (\d+\.\d+\.\d+)')
    # Guard: Check if parse result exists and has the capture1 column
    if ($parsed | length) > 0 and ("capture1" in ($parsed | get 0 | columns)) {
        $parsed | get 0.capture1
    } else {
        "latest"
    }
}

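# `parse --regex` returns a table whose columns are named after the
# capture groups, so guarding on table length (and, if needed, on the
# column name) replaces try-catch here. A small sketch, assuming the
# same `capture1` column name these functions read:
let parsed = ("version 1.2.3" | parse --regex 'version (\d+\.\d+\.\d+)')
let version = if ($parsed | length) > 0 { $parsed | get 0.capture1 } else { "unknown" }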
def extract_security_area [issue: record] {
@ -338,9 +351,10 @@ def extract_resource_type [issue: record] {
export def webhook_validate [
    webhook_data: record
] {
    let infra_path = ($webhook_data | try { get infra_path } catch { "") }
    let auto_fix = ($webhook_data | try { get auto_fix } catch { false) }
    let callback_url = ($webhook_data | try { get callback_url } catch { "") }
    # Guard: Check if webhook_data fields exist
    let infra_path = if ("infra_path" in ($webhook_data | columns)) { $webhook_data | get infra_path } else { "" }
    let auto_fix = if ("auto_fix" in ($webhook_data | columns)) { $webhook_data | get auto_fix } else { false }
    let callback_url = if ("callback_url" in ($webhook_data | columns)) { $webhook_data | get callback_url } else { "" }

    if ($infra_path | is-empty) {
        return {
@ -352,11 +366,14 @@ export def webhook_validate [

    let validation_result = (validate_for_agent $infra_path --auto_fix=$auto_fix)

    # Guard: Check if webhook_id field exists
    let webhook_id = if ("webhook_id" in ($webhook_data | columns)) { $webhook_data | get webhook_id } else { (random uuid) }

    let response = {
        status: "completed"
        validation_result: $validation_result
        timestamp: (date now)
        webhook_id: ($webhook_data | try { get webhook_id } catch { (random uuid)) }
        webhook_id: $webhook_id
    }

    # If callback URL provided, send result

@ -1,5 +1,6 @@
# Configuration Loader for Validation System
# Loads validation rules and settings from TOML configuration files
# Error handling: Guard patterns (no try-catch for field access)

export def load_validation_config [
    config_path?: string
@ -33,7 +34,8 @@ export def load_rules_from_config [
    let base_rules = ($config.rules | default [])

    # Load extension rules if extensions are configured
    let extension_rules = if ($config | try { get extensions } catch { null } | is-not-empty) {
    # Guard: Check if extensions field exists
    let extension_rules = if ("extensions" in ($config | columns)) {
        load_extension_rules $config.extensions
    } else {
        []
@ -91,15 +93,21 @@ export def filter_rules_by_context [
    config: record
    context: record
] {
    let provider = ($context | try { get provider } catch { null })
    let taskserv = ($context | try { get taskserv } catch { null })
    let infra_type = ($context | try { get infra_type } catch { null })
    # Guard: Check if context fields exist
    let provider = if ("provider" in ($context | columns)) { $context | get provider } else { null }
    let taskserv = if ("taskserv" in ($context | columns)) { $context | get taskserv } else { null }
    let infra_type = if ("infra_type" in ($context | columns)) { $context | get infra_type } else { null }

    mut filtered_rules = $rules

    # Filter by provider if specified
    if ($provider | is-not-empty) {
        let provider_config = ($config | try { get $"providers.($provider)" } catch { null })
        # Guard: Check if providers section and provider field exist
        let provider_config = if ("providers" in ($config | columns)) and ($provider in ($config.providers | columns)) {
            $config.providers | get $provider
        } else {
            null
        }
        if ($provider_config | is-not-empty) {
            let enabled_rules = ($provider_config.enabled_rules | default [])
            if ($enabled_rules | length) > 0 {
@ -110,7 +118,12 @@ export def filter_rules_by_context [

    # Filter by taskserv if specified
    if ($taskserv | is-not-empty) {
        let taskserv_config = ($config | try { get $"taskservs.($taskserv)" } catch { null })
        # Guard: Check if taskservs section and taskserv field exist
        let taskserv_config = if ("taskservs" in ($config | columns)) and ($taskserv in ($config.taskservs | columns)) {
            $config.taskservs | get $taskserv
        } else {
            null
        }
        if ($taskserv_config | is-not-empty) {
            let enabled_rules = ($taskserv_config.enabled_rules | default [])
            if ($enabled_rules | length) > 0 {
@ -195,7 +208,8 @@ export def validate_config_structure [
    let required_sections = ["validation_settings", "rules"]

    for section in $required_sections {
        if ($config | try { get $section } catch { null } | is-empty) {
        # Guard: Check if section field exists
        if not ($section in ($config | columns)) {
            error make {
                msg: $"Missing required configuration section: ($section)"
            }
@ -215,7 +229,8 @@ export def validate_rule_structure [
    let required_fields = ["id", "name", "category", "severity", "validator_function"]

    for field in $required_fields {
        if ($rule | try { get $field } catch { null } | is-empty) {
        # Guard: Check if field exists in rule
        if not ($field in ($rule | columns)) {
            error make {
                msg: $"Rule ($rule.id | default 'unknown') missing required field: ($field)"
            }

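# The providers/taskservs guards above repeat the same two-level
# `columns` check. A hypothetical helper that factors the pattern out
# (sketch only, not part of this commit):
def get-section [config: record, section: string, key: string] {
    if ($section in ($config | columns)) and ($key in (($config | get $section) | columns)) {
        $config | get $section | get $key
    } else {
        null
    }
}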
@ -1,5 +1,6 @@
# Validation Rules Engine
# Defines and manages validation rules for infrastructure configurations
# Error handling: Guard patterns (no try-catch for field access)

use config_loader.nu *

@ -241,7 +242,13 @@ export def validate_quoted_variables [file: string] {

    if ($unquoted_vars | length) > 0 {
        let first_issue = ($unquoted_vars | first)
        let variable_name = ($first_issue.item | parse --regex '\s+\w+:\s+(\$\w+)' | try { get 0.capture1 } catch { "unknown") }
        # Guard: Check if parse result exists and has the capture1 column
        let parsed = ($first_issue.item | parse --regex '\s+\w+:\s+(\$\w+)')
        let variable_name = if ($parsed | length) > 0 and ("capture1" in ($parsed | get 0 | columns)) {
            $parsed | get 0.capture1
        } else {
            "unknown"
        }

        {
            passed: false

@ -1,5 +1,6 @@
# Schema Validator
# Handles validation of infrastructure configurations against defined schemas
# Error handling: Guard patterns (no try-catch for field access)

# Server configuration schema validation
export def validate_server_schema [config: record] {
@ -14,7 +15,11 @@ export def validate_server_schema [config: record] {
    ]

    for field in $required_fields {
        if not ($config | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in config using columns
        let field_exists = ($field in ($config | columns))
        let field_value = if $field_exists { $config | get $field } else { null }

        if ($field_value | is-empty) {
            $issues = ($issues | append {
                field: $field
                message: $"Required field '($field)' is missing or empty"
@ -24,7 +29,8 @@ export def validate_server_schema [config: record] {
    }

    # Validate specific field formats
    if ($config | try { get hostname } catch { null } | is-not-empty) {
    # Guard: Check if hostname field exists
    if ("hostname" in ($config | columns)) {
        let hostname = ($config | get hostname)
        if not ($hostname =~ '^[a-z0-9][a-z0-9\-]*[a-z0-9]$') {
            $issues = ($issues | append {
@ -37,14 +43,16 @@ export def validate_server_schema [config: record] {
    }

    # Validate provider-specific requirements
    if ($config | try { get provider } catch { null } | is-not-empty) {
    # Guard: Check if provider field exists
    if ("provider" in ($config | columns)) {
        let provider = ($config | get provider)
        let provider_validation = (validate_provider_config $provider $config)
        $issues = ($issues | append $provider_validation.issues)
    }

    # Validate network configuration
    if ($config | try { get network_private_ip } catch { null } | is-not-empty) {
    # Guard: Check if network_private_ip field exists
    if ("network_private_ip" in ($config | columns)) {
        let ip = ($config | get network_private_ip)
        let ip_validation = (validate_ip_address $ip)
        if not $ip_validation.valid {
@ -72,7 +80,8 @@ export def validate_provider_config [provider: string, config: record] {
    # UpCloud specific validations
    let required_upcloud_fields = ["ssh_key_path", "storage_os"]
    for field in $required_upcloud_fields {
        if not ($config | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in config
        if not ($field in ($config | columns)) {
            $issues = ($issues | append {
                field: $field
                message: $"UpCloud provider requires '($field)' field"
@ -83,7 +92,8 @@ export def validate_provider_config [provider: string, config: record] {

    # Validate UpCloud zones
    let valid_zones = ["es-mad1", "fi-hel1", "fi-hel2", "nl-ams1", "sg-sin1", "uk-lon1", "us-chi1", "us-nyc1", "de-fra1"]
    let zone = ($config | try { get zone } catch { null })
    # Guard: Check if zone field exists
    let zone = if ("zone" in ($config | columns)) { $config | get zone } else { null }
    if ($zone | is-not-empty) and ($zone not-in $valid_zones) {
        $issues = ($issues | append {
            field: "zone"
@ -98,7 +108,8 @@ export def validate_provider_config [provider: string, config: record] {
    # AWS specific validations
    let required_aws_fields = ["instance_type", "ami_id"]
    for field in $required_aws_fields {
        if not ($config | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in config
        if not ($field in ($config | columns)) {
            $issues = ($issues | append {
                field: $field
                message: $"AWS provider requires '($field)' field"
@ -130,7 +141,8 @@ export def validate_network_config [config: record] {
    mut issues = []

    # Validate CIDR blocks
    if ($config | try { get priv_cidr_block } catch { null } | is-not-empty) {
    # Guard: Check if priv_cidr_block field exists
    if ("priv_cidr_block" in ($config | columns)) {
        let cidr = ($config | get priv_cidr_block)
        let cidr_validation = (validate_cidr_block $cidr)
        if not $cidr_validation.valid {
@ -144,7 +156,8 @@ export def validate_network_config [config: record] {
    }

    # Check for IP conflicts
    if ($config | try { get network_private_ip } catch { null } | is-not-empty) and ($config | try { get priv_cidr_block } catch { null } | is-not-empty) {
    # Guard: Check if both fields exist in config
    if ("network_private_ip" in ($config | columns)) and ("priv_cidr_block" in ($config | columns)) {
        let ip = ($config | get network_private_ip)
        let cidr = ($config | get priv_cidr_block)

@ -170,7 +183,8 @@ export def validate_taskserv_schema [taskserv: record] {
    let required_fields = ["name", "install_mode"]

    for field in $required_fields {
        if not ($taskserv | try { get $field } catch { null } | is-not-empty) {
        # Guard: Check if field exists in taskserv
        if not ($field in ($taskserv | columns)) {
            $issues = ($issues | append {
                field: $field
                message: $"Required taskserv field '($field)' is missing"
@ -181,7 +195,8 @@ export def validate_taskserv_schema [taskserv: record] {

    # Validate install mode
    let valid_install_modes = ["library", "container", "binary"]
    let install_mode = ($taskserv | try { get install_mode } catch { null })
    # Guard: Check if install_mode field exists
    let install_mode = if ("install_mode" in ($taskserv | columns)) { $taskserv | get install_mode } else { null }
    if ($install_mode | is-not-empty) and ($install_mode not-in $valid_install_modes) {
        $issues = ($issues | append {
            field: "install_mode"
@ -193,7 +208,8 @@ export def validate_taskserv_schema [taskserv: record] {
    }

    # Validate taskserv name exists
    let taskserv_name = ($taskserv | try { get name } catch { null })
    # Guard: Check if name field exists
    let taskserv_name = if ("name" in ($taskserv | columns)) { $taskserv | get name } else { null }
    if ($taskserv_name | is-not-empty) {
        let taskserv_exists = (taskserv_definition_exists $taskserv_name)
        if not $taskserv_exists {

@ -110,11 +110,15 @@ export def runtime-info [] {
        command: $rt.command
        available: true
        version: (
            try {
            let result = (do {
                let ver_output = (^sh -c $"($rt.command) --version" 2>&1)
                $ver_output | str trim | str substring [0..<40]
            } catch {
            } | complete)

            if $result.exit_code != 0 {
                "unknown"
            } else {
                $result.stdout
            }
        )
    }
@ -149,14 +153,16 @@ export def runtime-list [] {
# Tests for runtime module
def test-runtime-detect [] {
    # Note: Tests require runtime to be installed
    let rt = (try { runtime-detect } catch { null })
    let result = (do { runtime-detect } | complete)
    let rt = if $result.exit_code != 0 { null } else { $result.stdout }
    if ($rt != null) {
        assert ($rt.name != "")
    }
}

def test-runtime-info [] {
    let info = (try { runtime-info } catch { null })
    let result = (do { runtime-info } | complete)
    let info = if $result.exit_code != 0 { null } else { $result.stdout }
    if ($info != null) {
        assert ($info.name != "")
    }

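# `assert` is not a builtin; in current Nushell it ships in the standard
# library and needs an explicit import. A hedged sketch of how these
# tests would run standalone:
use std/assert
let result = (do { runtime-detect } | complete)
if $result.exit_code == 0 {
    assert ($result.stdout | is-not-empty)
}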
@ -10,13 +10,13 @@ export def iac-to-workflow [
    --mode: string = "sequential" # sequential or parallel
] {
    # Extract detected technologies and inferred requirements
    let detected = if (try { $detection.detections | is-not-empty } catch { false }) {
    let detected = if ($detection.detections? != null and ($detection.detections | is-not-empty)) {
        $detection.detections | each {|d| $d.technology}
    } else {
        []
    }

    let inferred = if (try { $completion.additional_requirements | is-not-empty } catch { false }) {
    let inferred = if ($completion.additional_requirements? != null and ($completion.additional_requirements | is-not-empty)) {
        $completion.additional_requirements
    } else {
        []
@ -143,7 +143,7 @@ def generate-workflow-phases [
    # Phase 2: Deploy inferred services
    let phase2_tasks = ($inferred | each {|req|
        let service = $req.taskserv
        let deps = if (try { ($dependencies | get $service).depends_on | is-not-empty } catch { false }) {
        let deps = if (($dependencies | get $service)?.depends_on? != null and ((($dependencies | get $service).depends_on) | is-not-empty)) {
            (($dependencies | get $service).depends_on | each {|d| $"setup-\($d)"})
        } else {
            []
@ -195,9 +195,7 @@ def generate-workflow-phases [
# Export workflow to Nickel format for orchestrator
export def export-workflow-nickel [workflow] {
    # Handle both direct workflow and nested structure
    let w = (
        try { $workflow.workflow } catch { $workflow }
    )
    let w = ($workflow.workflow? | default $workflow)

    # Build header
    let header = (
@ -229,16 +227,13 @@ export def export-workflow-nickel [workflow] {
    )

    let with_deps = (
        try {
            if (($task | try { get depends_on } catch { null }) | is-not-empty) {
                (
                    $task_body +
                    " depends_on = [\"" + ($task.depends_on | str join "\", \"") + "\"]\n"
                )
            } else {
                $task_body
            }
        } catch {
        let depends_on_val = ($task.depends_on? | default null)
        if ($depends_on_val != null and ($depends_on_val | is-not-empty)) {
            (
                $task_body +
                " depends_on = [\"" + ($task.depends_on | str join "\", \"") + "\"]\n"
            )
        } else {
            $task_body
        }
    )
@ -289,20 +284,21 @@ export def submit-to-orchestrator [
        submitted: false
    }
} else {
    try {
        let response = ($result | from json)
    let json_result = (do { $result | from json } | complete)
    if $json_result.exit_code != 0 {
        {
            status: "error"
            message: $result
            submitted: false
        }
    } else {
        let response = ($json_result.stdout)
        {
            status: "success"
            submitted: true
            workflow_id: ($response.id | default "")
            message: "Workflow submitted successfully"
        }
    } catch {
        {
            status: "error"
            message: $result
            submitted: false
        }
    }
}
}

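# The rewrites in this file lean on the optional-access operator —
# `$record.field?` yields null for a missing field instead of erroring —
# combined with `default`. A minimal illustration:
let workflow = {name: "demo"}
let w = ($workflow.workflow? | default $workflow)   # absent -> the record itself
let deps = ($workflow.depends_on? | default [])     # absent -> []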
@ -80,8 +80,7 @@ export def run_cmd_kms [
    }
}

    let kms_cmd = build_kms_command $cmd $source_path $kms_config
    let res = (^bash -c $kms_cmd | complete)
    let res = (run_kms_curl $cmd $source_path $kms_config | complete)

    if $res.exit_code != 0 {
        if $error_exit {
@ -95,6 +94,80 @@ export def run_cmd_kms [
    return $res.stdout
}

def run_kms_curl [
    operation: string
    file_path: string
    config: record
] {
    # Validate file path exists to prevent injection
    if not ($file_path | path exists) {
        error make {msg: $"File does not exist: ($file_path)"}
    }

    mut curl_args = []

    # SSL verification
    if not $config.verify_ssl {
        $curl_args = ($curl_args | append "-k")
    }

    # Timeout
    $curl_args = ($curl_args | append "--connect-timeout")
    $curl_args = ($curl_args | append ($config.timeout | into string))

    # Authentication
    match $config.auth_method {
        "certificate" => {
            if ($config.client_cert | is-not-empty) and ($config.client_key | is-not-empty) {
                $curl_args = ($curl_args | append "--cert")
                $curl_args = ($curl_args | append $config.client_cert)
                $curl_args = ($curl_args | append "--key")
                $curl_args = ($curl_args | append $config.client_key)
            }
            if ($config.ca_cert | is-not-empty) {
                $curl_args = ($curl_args | append "--cacert")
                $curl_args = ($curl_args | append $config.ca_cert)
            }
        },
        "token" => {
            if ($config.api_token | is-not-empty) {
                $curl_args = ($curl_args | append "-H")
                $curl_args = ($curl_args | append $"Authorization: Bearer ($config.api_token)")
            }
        },
        "basic" => {
            if ($config.username | is-not-empty) and ($config.password | is-not-empty) {
                $curl_args = ($curl_args | append "--user")
                $curl_args = ($curl_args | append $"($config.username):($config.password)")
            }
        }
    }

    # Operation specific parameters
    match $operation {
        "encrypt" => {
            $curl_args = ($curl_args | append "-X")
            $curl_args = ($curl_args | append "POST")
            $curl_args = ($curl_args | append "-H")
            $curl_args = ($curl_args | append "Content-Type: application/octet-stream")
            $curl_args = ($curl_args | append "--data-binary")
            $curl_args = ($curl_args | append $"@($file_path)")
            $curl_args = ($curl_args | append $"($config.server_url)/encrypt")
        },
        "decrypt" => {
            $curl_args = ($curl_args | append "-X")
            $curl_args = ($curl_args | append "POST")
            $curl_args = ($curl_args | append "-H")
            $curl_args = ($curl_args | append "Content-Type: application/octet-stream")
            $curl_args = ($curl_args | append "--data-binary")
            $curl_args = ($curl_args | append $"@($file_path)")
            $curl_args = ($curl_args | append $"($config.server_url)/decrypt")
        }
    }

    ^curl ...$curl_args
}

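# A hedged usage sketch for run_kms_curl with a hypothetical config
# record — the field names mirror the ones the function reads, but the
# values are illustrative only:
let kms_config = {
    server_url: "https://kms.example.com:9998"
    auth_method: "token"
    api_token: "changeme"
    client_cert: ""
    client_key: ""
    ca_cert: ""
    username: ""
    password: ""
    verify_ssl: true
    timeout: 10
}
let res = (run_kms_curl "encrypt" "./secrets.yaml" $kms_config | complete)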
export def on_kms [
    task: string
    source_path: string
@ -196,65 +269,6 @@ def get_kms_config [] {
    }
}

def build_kms_command [
    operation: string
    file_path: string
    config: record
] {
    mut cmd_parts = []

    # Base command - using curl to interact with Cosmian KMS REST API
    $cmd_parts = ($cmd_parts | append "curl")

    # SSL verification
    if not $config.verify_ssl {
        $cmd_parts = ($cmd_parts | append "-k")
    }

    # Timeout
    $cmd_parts = ($cmd_parts | append $"--connect-timeout ($config.timeout)")

    # Authentication
    match $config.auth_method {
        "certificate" => {
            if ($config.client_cert | is-not-empty) and ($config.client_key | is-not-empty) {
                $cmd_parts = ($cmd_parts | append $"--cert ($config.client_cert)")
                $cmd_parts = ($cmd_parts | append $"--key ($config.client_key)")
            }
            if ($config.ca_cert | is-not-empty) {
                $cmd_parts = ($cmd_parts | append $"--cacert ($config.ca_cert)")
            }
        },
        "token" => {
            if ($config.api_token | is-not-empty) {
                $cmd_parts = ($cmd_parts | append $"-H 'Authorization: Bearer ($config.api_token)'")
            }
        },
        "basic" => {
            if ($config.username | is-not-empty) and ($config.password | is-not-empty) {
                $cmd_parts = ($cmd_parts | append $"--user ($config.username):($config.password)")
            }
        }
    }

    # Operation specific parameters
    match $operation {
        "encrypt" => {
            $cmd_parts = ($cmd_parts | append "-X POST")
            $cmd_parts = ($cmd_parts | append $"-H 'Content-Type: application/octet-stream'")
            $cmd_parts = ($cmd_parts | append $"--data-binary @($file_path)")
            $cmd_parts = ($cmd_parts | append $"($config.server_url)/encrypt")
        },
        "decrypt" => {
            $cmd_parts = ($cmd_parts | append "-X POST")
            $cmd_parts = ($cmd_parts | append $"-H 'Content-Type: application/octet-stream'")
            $cmd_parts = ($cmd_parts | append $"--data-binary @($file_path)")
            $cmd_parts = ($cmd_parts | append $"($config.server_url)/decrypt")
        }
    }

    ($cmd_parts | str join " ")
}

export def get_def_kms_config [
    current_path: string

@ -18,7 +18,7 @@ export def "detect-inheritance" [decl_file: path] -> bool {
export def "detect-exports" [decl_file: path] -> list {
    let content = open $decl_file | into string
    $content
    | split row "\n"
    | lines
    | filter { |line| ($line | str contains ": ") and not ($line | str contains "schema") }
    | filter { |line| ($line | str contains " = ") }
    | map { |line| $line | str trim }
@ -225,12 +225,9 @@ export def "batch-migrate" [

# Validate Nickel file syntax
export def "validate-nickel" [nickel_file: path] -> bool {
    try {
        nickel export $nickel_file | null
        true
    } catch {
        false
    }
    # Validate Nickel syntax (no try-catch)
    let result = (do { nickel export $nickel_file | null } | complete)
    ($result.exit_code == 0)
}
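# Usage sketch: the rewritten validate-nickel maps the exit code of
# `nickel export` straight to a bool, so callers can branch without
# try-catch (assumes the `nickel` CLI is on PATH):
if (validate-nickel "./schema.ncl") {
    print "Nickel syntax OK"
} else {
    print "Nickel syntax errors found"
}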

# Full migration validation for a file pair

@ -50,14 +50,14 @@ def download-oci-layers [

        log-debug $"Downloading layer: ($layer.digest)"

        # Download blob
        let download_cmd = if ($auth_token | is-not-empty) {
            $"curl -H 'Authorization: Bearer ($auth_token)' -L -o ($layer_file) ($blob_url)"
        } else {
            $"curl -L -o ($layer_file) ($blob_url)"
        # Download blob using run-external
        mut curl_args = ["-L" "-o" $layer_file $blob_url]

        if ($auth_token | is-not-empty) {
            $curl_args = (["-H" $"Authorization: Bearer ($auth_token)"] | append $curl_args)
        }

        let result = (do { ^bash -c $download_cmd } | complete)
        let result = (do { ^curl ...$curl_args } | complete)

        if $result.exit_code != 0 {
            log-error $"Failed to download layer: ($layer.digest)"
@ -159,15 +159,15 @@ export def oci-push-artifact [

    log-debug $"Uploading blob to ($blob_url)"

    # Start upload
    let auth_header = if ($auth_token | is-not-empty) {
        $"-H 'Authorization: Bearer ($auth_token)'"
    } else {
        ""
    # Start upload using run-external
    mut upload_start_args = ["-X" "POST" $blob_url]

    if ($auth_token | is-not-empty) {
        $upload_start_args = (["-H" $"Authorization: Bearer ($auth_token)"] | append $upload_start_args)
    }

    let start_upload = (do {
        ^bash -c $"curl -X POST ($auth_header) ($blob_url)"
        ^curl ...$upload_start_args
    } | complete)

    if $start_upload.exit_code != 0 {
@ -179,10 +179,21 @@ export def oci-push-artifact [
    # Extract upload URL from Location header
    let upload_url = ($start_upload.stdout | str trim)

    # Upload blob
    let upload_cmd = $"curl -X PUT ($auth_header) -H 'Content-Type: application/octet-stream' --data-binary @($temp_tarball) '($upload_url)?digest=($blob_digest)'"
    # Upload blob using run-external
    mut upload_args = ["-X" "PUT"]

    let upload_result = (do { ^bash -c $upload_cmd } | complete)
    if ($auth_token | is-not-empty) {
        $upload_args = ($upload_args | append "-H")
        $upload_args = ($upload_args | append $"Authorization: Bearer ($auth_token)")
    }

    $upload_args = ($upload_args | append "-H")
    $upload_args = ($upload_args | append "Content-Type: application/octet-stream")
    $upload_args = ($upload_args | append "--data-binary")
    $upload_args = ($upload_args | append $"@($temp_tarball)")
    $upload_args = ($upload_args | append $"($upload_url)?digest=($blob_digest)")

    let upload_result = (do { ^curl ...$upload_args } | complete)

    if $upload_result.exit_code != 0 {
        log-error "Failed to upload blob"
@ -224,9 +235,21 @@ export def oci-push-artifact [

    log-debug $"Uploading manifest to ($manifest_url)"

    let manifest_cmd = $"curl -X PUT ($auth_header) -H 'Content-Type: application/vnd.oci.image.manifest.v1+json' -d '($manifest_json)' ($manifest_url)"
    # Upload manifest using run-external
    mut manifest_args = ["-X" "PUT"]

    let manifest_result = (do { ^bash -c $manifest_cmd } | complete)
    if ($auth_token | is-not-empty) {
        $manifest_args = ($manifest_args | append "-H")
        $manifest_args = ($manifest_args | append $"Authorization: Bearer ($auth_token)")
    }

    $manifest_args = ($manifest_args | append "-H")
    $manifest_args = ($manifest_args | append "Content-Type: application/vnd.oci.image.manifest.v1+json")
    $manifest_args = ($manifest_args | append "-d")
    $manifest_args = ($manifest_args | append $manifest_json)
    $manifest_args = ($manifest_args | append $manifest_url)

    let manifest_result = (do { ^curl ...$manifest_args } | complete)

    if $manifest_result.exit_code != 0 {
        log-error "Failed to upload manifest"
@ -403,15 +426,17 @@ export def oci-delete-artifact [
    # Delete manifest
    let manifest_url = $"http://($registry)/v2/($namespace)/($name)/manifests/($digest)"

    let auth_header = if ($auth_token | is-not-empty) {
        $"-H 'Authorization: Bearer ($auth_token)'"
    } else {
        ""
    # Delete using run-external
    mut delete_args = ["-X" "DELETE"]

    if ($auth_token | is-not-empty) {
        $delete_args = ($delete_args | append "-H")
        $delete_args = ($delete_args | append $"Authorization: Bearer ($auth_token)")
    }

    let delete_cmd = $"curl -X DELETE ($auth_header) ($manifest_url)"
    $delete_args = ($delete_args | append $manifest_url)

    let delete_result = (do { ^bash -c $delete_cmd } | complete)
    let delete_result = (do { ^curl ...$delete_args } | complete)

    if $delete_result.exit_code == 0 {
        log-info $"Successfully deleted ($name):($version)"

File diff suppressed because it is too large Load Diff

454	nulib/lib_provisioning/plugins/auth_core.nu	Normal file
@ -0,0 +1,454 @@
#!/usr/bin/env nu
# [command]
# name = "auth login"
# group = "authentication"
# tags = ["authentication", "jwt", "interactive", "login"]
# version = "3.0.0"
# requires = ["nushell:0.109.0"]

# Authentication Plugin Wrapper with HTTP Fallback
# Provides graceful degradation to HTTP API when nu_plugin_auth is unavailable

use ../config/accessor.nu *
use ../commands/traits.nu *

# Check if auth plugin is available

# Import implementation module
use ./auth_impl.nu *

def is-plugin-available [] {
    (which auth | length) > 0
}

# Check if auth plugin is enabled in config
def is-plugin-enabled [] {
    config-get "plugins.auth_enabled" true
}

# Get control center base URL
def get-control-center-url [] {
    config-get "platform.control_center.url" "http://localhost:3000"
}

# Store token in OS keyring (requires plugin)
def store-token-keyring [
    token: string
] {
    if (is-plugin-available) {
        auth store-token $token
    } else {
        print "⚠️ Keyring storage unavailable (plugin not loaded)"
    }
}

# Retrieve token from OS keyring (requires plugin)
def get-token-keyring [] {
    if (is-plugin-available) {
        auth get-token
    } else {
        ""
    }
}

# Helper to safely execute a closure and return null on error
def try-plugin [callback: closure] {
    do -i $callback
}

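# `do -i` (--ignore-errors) runs the closure and yields null when it
# throws, which is what lets try-plugin callers test `== null` instead
# of wrapping every plugin call in try-catch. A minimal illustration:
let value = (do -i { error make {msg: "boom"} })
if $value == null {
    print "closure failed, fell through to null"
}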
# Login with username and password
export def plugin-login [
    username: string
    password: string
    --mfa-code: string = "" # Optional MFA code
] {
    let enabled = is-plugin-enabled
    let available = is-plugin-available

    if $enabled and $available {
        let plugin_result = (try-plugin {
            # Note: Plugin login command may not support MFA code directly
            # If MFA is required, it should be handled separately via mfa-verify
            let result = (auth login $username $password)
            store-token-keyring $result.access_token

            # If MFA code provided, verify it after login
            if not ($mfa_code | is-empty) {
                let mfa_result = (try-plugin {
                    auth mfa-verify $mfa_code
                })
                if $mfa_result == null {
                    print "⚠️ MFA verification failed, but login succeeded"
                }
            }

            $result
        })

        if $plugin_result != null {
            return $plugin_result
        }

        print "⚠️ Plugin login failed, falling back to HTTP"
    }

    # HTTP fallback
    print "⚠️ Using HTTP fallback (plugin not available)"
    let url = $"(get-control-center-url)/api/auth/login"

    let body = if ($mfa_code | is-empty) {
        {username: $username, password: $password}
    } else {
        {username: $username, password: $password, mfa_code: $mfa_code}
    }

    let result = (do -i {
        http post $url $body
    })

    if $result != null {
        return $result
    }

    error make {
        msg: "Login failed"
        label: {
            text: "HTTP request failed"
            span: (metadata $username).span
        }
    }
}

# Logout and revoke tokens
export def plugin-logout [] {
    let enabled = is-plugin-enabled
    let available = is-plugin-available

    let token = get-token-keyring

    if $enabled and $available {
        let plugin_result = (try-plugin {
            auth logout
        })

        if $plugin_result != null {
            return $plugin_result
        }

        print "⚠️ Plugin logout failed, falling back to HTTP"
    }

    # HTTP fallback
    print "⚠️ Using HTTP fallback (plugin not available)"
    let url = $"(get-control-center-url)/api/auth/logout"

    let result = (do -i {
        if ($token | is-empty) {
            http post $url
        } else {
            http post $url --headers {Authorization: $"Bearer ($token)"}
        }
    })

    if $result != null {
        return {success: true, message: "Logged out successfully"}
    }

    {success: false, message: "Logout failed"}

}

# Verify current authentication token
export def plugin-verify [] {
    let enabled = is-plugin-enabled
    let available = is-plugin-available

    if $enabled and $available {
        let plugin_result = (try-plugin {
            auth verify
        })

        if $plugin_result != null {
            return $plugin_result
        }

        print "⚠️ Plugin verify failed, falling back to HTTP"
    }

    # HTTP fallback
    print "⚠️ Using HTTP fallback (plugin not available)"
    let token = get-token-keyring

    if ($token | is-empty) {
        return {valid: false, message: "No token found"}
    }

    let url = $"(get-control-center-url)/api/auth/verify"

    let result = (do -i {
        http get $url --headers {Authorization: $"Bearer ($token)"}
    })

    if $result != null {
        return $result
    }

    {valid: false, message: "Token verification failed"}

}

# List active sessions
export def plugin-sessions [] {
    let enabled = is-plugin-enabled
    let available = is-plugin-available

    if $enabled and $available {
        let plugin_result = (try-plugin {
            auth sessions
        })

        if $plugin_result != null {
            return $plugin_result
        }

        print "⚠️ Plugin sessions failed, falling back to HTTP"
    }

    # HTTP fallback
    print "⚠️ Using HTTP fallback (plugin not available)"
    let token = get-token-keyring

    if ($token | is-empty) {
        return []
    }

    let url = $"(get-control-center-url)/api/auth/sessions"

    let response = (do -i {
        http get $url --headers {Authorization: $"Bearer ($token)"}
    })

    if $response != null {
        return ($response | get sessions? | default [])
    }

    []

}

# Enroll MFA device (TOTP)
export def plugin-mfa-enroll [
    --type: string = "totp" # totp or webauthn
] {
    let enabled = is-plugin-enabled
    let available = is-plugin-available

    if $enabled and $available {
        let plugin_result = (try-plugin {
            auth mfa-enroll --type $type
        })

        if $plugin_result != null {
            return $plugin_result
        }

        print "⚠️ Plugin MFA enroll failed, falling back to HTTP"
    }

    # HTTP fallback
    print "⚠️ Using HTTP fallback (plugin not available)"
    let token = get-token-keyring

    if ($token | is-empty) {
        error make {
            msg: "Authentication required"
            label: {text: "No valid token found"}
        }
    }

    let url = $"(get-control-center-url)/api/mfa/enroll"

    let result = (do -i {
        http post $url {type: $type} --headers {Authorization: $"Bearer ($token)"}
    })

    if $result != null {
        return $result
    }

    error make {
        msg: "MFA enrollment failed"
        label: {text: "HTTP request failed"}
    }
}

# Verify MFA code
export def plugin-mfa-verify [
    code: string
    --type: string = "totp" # totp or webauthn
] {
    let enabled = is-plugin-enabled
    let available = is-plugin-available

    if $enabled and $available {
        let plugin_result = (try-plugin {
            auth mfa-verify $code --type $type
        })

        if $plugin_result != null {
            return $plugin_result
        }

        print "⚠️ Plugin MFA verify failed, falling back to HTTP"
    }

    # HTTP fallback
    print "⚠️ Using HTTP fallback (plugin not available)"
    let token = get-token-keyring

    if ($token | is-empty) {
        error make {
            msg: "Authentication required"
            label: {text: "No valid token found"}
        }
    }

    let url = $"(get-control-center-url)/api/mfa/verify"

    let result = (do -i {
        http post $url {code: $code, type: $type} --headers {Authorization: $"Bearer ($token)"}
    })

    if $result != null {
        return $result
    }

    error make {
        msg: "MFA verification failed"
        label: {
            text: "HTTP request failed"
            span: (metadata $code).span
        }
    }
}

# Get current authentication status
export def plugin-auth-status [] {
    let plugin_available = is-plugin-available
    let plugin_enabled = is-plugin-enabled
    let token = get-token-keyring
    let has_token = not ($token | is-empty)

    {
        plugin_available: $plugin_available
        plugin_enabled: $plugin_enabled
        has_token: $has_token
        mode: (if ($plugin_enabled and $plugin_available) { "plugin" } else { "http" })
    }
}

# ============================================================================
# Metadata-Driven Authentication Helpers
# ============================================================================

# Get auth requirements from metadata for a specific command
def get-metadata-auth-requirements [
    command_name: string # Command to check (e.g., "server create", "cluster delete")
] {
    let metadata = (get-command-metadata $command_name)

    if ($metadata | type) == "record" {
        let requirements = ($metadata | get requirements? | default {})
        {
            requires_auth: ($requirements | get requires_auth? | default false)
            auth_type: ($requirements | get auth_type? | default "none")
            requires_confirmation: ($requirements | get requires_confirmation? | default false)
            min_permission: ($requirements | get min_permission? | default "read")
            side_effect_type: ($requirements | get side_effect_type? | default "none")
        }
    } else {
        {
            requires_auth: false
            auth_type: "none"
            requires_confirmation: false
            min_permission: "read"
            side_effect_type: "none"
        }
    }
}

# Determine if MFA is required based on metadata auth_type
def requires-mfa-from-metadata [
    command_name: string # Command to check
] {
    let auth_reqs = (get-metadata-auth-requirements $command_name)
    $auth_reqs.auth_type == "mfa" or $auth_reqs.auth_type == "cedar"
}

# Determine if operation is destructive based on metadata
def is-destructive-from-metadata [
    command_name: string # Command to check
] {
    let auth_reqs = (get-metadata-auth-requirements $command_name)
    $auth_reqs.side_effect_type == "delete"
}

# Check if metadata indicates this is a production operation
def is-production-from-metadata [
    command_name: string # Command to check
] {
    let metadata = (get-command-metadata $command_name)

    if ($metadata | type) == "record" {
        let tags = ($metadata | get tags? | default [])
        ($tags | any { |tag| $tag == "production" or $tag == "deploy" })
    } else {
        false
    }
}

# Validate minimum permission level required by metadata
def validate-permission-level [
    command_name: string # Command to check
    user_level: string # User's permission level (read, write, admin, superadmin)
] {
    let auth_reqs = (get-metadata-auth-requirements $command_name)
    let required_level = $auth_reqs.min_permission

    # Permission level hierarchy (lower index = lower permission)
    let level_map = {
        read: 0
        write: 1
        admin: 2
        superadmin: 3
    }

    # Get required permission level index
    let req_level = (
        if $required_level == "read" { 0 }
        else if $required_level == "write" { 1 }
        else if $required_level == "admin" { 2 }
        else if $required_level == "superadmin" { 3 }
        else { -1 }
    )

    # Get user permission level index
    let usr_level = (
        if $user_level == "read" { 0 }
        else if $user_level == "write" { 1 }
        else if $user_level == "admin" { 2 }
        else if $user_level == "superadmin" { 3 }
        else { -1 }
    )

    # User must have equal or higher permission level
    if $req_level < 0 or $usr_level < 0 {
        return false
    }

    $usr_level >= $req_level
}

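# The two if-else ladders above re-derive what `$level_map` already
# encodes. A sketch of the record-lookup equivalent (assumption:
# unknown levels map to -1, matching the ladder):
def level-index [level: string] {
    let level_map = {read: 0, write: 1, admin: 2, superadmin: 3}
    if ($level in ($level_map | columns)) { $level_map | get $level } else { -1 }
}
# validate-permission-level then reduces to checking
# (level-index $user_level) >= (level-index $required_level), with both >= 0.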
# Determine auth enforcement based on metadata
export def should-enforce-auth-from-metadata [
    command_name: string # Command to check
616	nulib/lib_provisioning/plugins/auth_impl.nu	Normal file
@ -0,0 +1,616 @@
|
||||
] {
|
||||
let auth_reqs = (get-metadata-auth-requirements $command_name)
|
||||
|
||||
# If metadata explicitly requires auth, enforce it
|
||||
if $auth_reqs.requires_auth {
|
||||
return true
|
||||
}
|
||||
|
||||
# If side effects, enforce auth
|
||||
if $auth_reqs.side_effect_type != "none" {
|
||||
return true
|
||||
}
|
||||
|
||||
# Otherwise check configuration
|
||||
(should-require-auth)
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# Security Policy Enforcement Functions
|
||||
# ============================================================================
|
||||
|
||||
# Check if authentication is required based on configuration
|
||||
export def should-require-auth [] {
|
||||
let config_required = (config-get "security.require_auth" false)
|
||||
let env_bypass = ($env.PROVISIONING_SKIP_AUTH? | default "false") == "true"
|
||||
let allow_bypass = (config-get "security.bypass.allow_skip_auth" false)
|
||||
|
||||
$config_required and not ($env_bypass and $allow_bypass)
|
||||
}

# Check if MFA is required for production operations
export def should-require-mfa-prod [] {
    let environment = (config-get "environment" "dev")
    let require_mfa = (config-get "security.require_mfa_for_production" true)

    ($environment == "prod") and $require_mfa
}

# Check if MFA is required for destructive operations
export def should-require-mfa-destructive [] {
    (config-get "security.require_mfa_for_destructive" true)
}

# Check if user is authenticated
export def is-authenticated [] {
    let result = (plugin-verify)
    ($result | get valid? | default false)
}

# Check if MFA is verified
export def is-mfa-verified [] {
    let result = (plugin-verify)
    ($result | get mfa_verified? | default false)
}

# Get current authenticated user
export def get-authenticated-user [] {
    let result = (plugin-verify)
    ($result | get username? | default "")
}

# Require authentication with clear error messages
export def require-auth [
    operation: string # Operation name for error messages
    --allow-skip # Allow skip-auth flag bypass
] {
    # Check if authentication is required
    if not (should-require-auth) {
        return true
    }

    # Check if skip is allowed
    if $allow_skip and (($env.PROVISIONING_SKIP_AUTH? | default "false") == "true") {
        print $"⚠️ Authentication bypassed with PROVISIONING_SKIP_AUTH flag"
        print $" (ansi yellow_bold)WARNING: This should only be used in development/testing!(ansi reset)"
        return true
    }

    # Verify authentication
    let auth_status = (plugin-verify)

    if not ($auth_status | get valid? | default false) {
        print $"(ansi red_bold)❌ Authentication Required(ansi reset)"
        print ""
        print $"Operation: (ansi cyan_bold)($operation)(ansi reset)"
        print $"You must be logged in to perform this operation."
        print ""
        print $"(ansi green_bold)To login:(ansi reset)"
        print $" provisioning auth login <username>"
        print ""
        print $"(ansi yellow_bold)Note:(ansi reset) Your credentials will be securely stored in the system keyring."

        if ($auth_status | get message? | default null | is-not-empty) {
            print ""
            print $"(ansi red)Error:(ansi reset) ($auth_status.message)"
        }

        exit 1
    }

    let username = ($auth_status | get username? | default "unknown")
    print $"(ansi green)✓(ansi reset) Authenticated as: (ansi cyan_bold)($username)(ansi reset)"
    true
}

# Require MFA verification with clear error messages
export def require-mfa [
    operation: string # Operation name for error messages
    reason: string # Reason MFA is required
] {
    let auth_status = (plugin-verify)

    if not ($auth_status | get mfa_verified? | default false) {
        print $"(ansi red_bold)❌ MFA Verification Required(ansi reset)"
        print ""
        print $"Operation: (ansi cyan_bold)($operation)(ansi reset)"
        print $"Reason: (ansi yellow)($reason)(ansi reset)"
        print ""
        print $"(ansi green_bold)To verify MFA:(ansi reset)"
        print $" 1. Get code from your authenticator app"
        print $" 2. Run: provisioning auth mfa verify --code <6-digit-code>"
        print ""
        print $"(ansi yellow_bold)Don't have MFA set up?(ansi reset)"
        print $" Run: provisioning auth mfa enroll totp"

        exit 1
    }

    print $"(ansi green)✓(ansi reset) MFA verified"
    true
}

# Check authentication and MFA for production operations (enhanced with metadata)
export def check-auth-for-production [
    operation: string # Operation name
    --allow-skip # Allow skip-auth flag bypass
] {
    # First check if this command is actually production-related via metadata
    if (is-production-from-metadata $operation) {
        # Require authentication first
        require-auth $operation --allow-skip=$allow_skip

        # Check if MFA is required based on metadata or config
        let requires_mfa_metadata = (requires-mfa-from-metadata $operation)
        if $requires_mfa_metadata or (should-require-mfa-prod) {
            require-mfa $operation "production environment operation"
        }

        return true
    }

    # Fallback to configuration-based check if not in metadata
    if (should-require-mfa-prod) {
        require-auth $operation --allow-skip=$allow_skip
        require-mfa $operation "production environment operation"
    }

    true
}

# Check authentication and MFA for destructive operations (enhanced with metadata)
export def check-auth-for-destructive [
    operation: string # Operation name
    --allow-skip # Allow skip-auth flag bypass
] {
    # Check if this is a destructive operation via metadata
    if (is-destructive-from-metadata $operation) {
        # Always require authentication for destructive ops
        require-auth $operation --allow-skip=$allow_skip

        # Check if MFA is required based on metadata or config
        let requires_mfa_metadata = (requires-mfa-from-metadata $operation)
        if $requires_mfa_metadata or (should-require-mfa-destructive) {
            require-mfa $operation "destructive operation (delete/destroy)"
        }

        return true
    }

    # Fallback to configuration-based check
    if (should-require-mfa-destructive) {
        require-auth $operation --allow-skip=$allow_skip
        require-mfa $operation "destructive operation (delete/destroy)"
    }

    true
}

# Helper: Check if operation is in check mode (should skip auth)
export def is-check-mode [flags: record] {
    (($flags | get check? | default false) or
     ($flags | get check_mode? | default false) or
     ($flags | get c? | default false))
}

# Helper: Determine if operation is destructive
export def is-destructive-operation [operation_type: string] {
    $operation_type in ["delete" "destroy" "remove"]
}

# Main authentication check for any operation (enhanced with metadata)
export def check-operation-auth [
    operation_name: string # Name of operation
    operation_type: string # Type: create, delete, modify, read
    flags?: record # Command flags
] {
    # Skip in check mode
    if ($flags | is-not-empty) and (is-check-mode $flags) {
        print $"(ansi dim)Skipping authentication check \(check mode\)(ansi reset)"
        return true
    }

    # Check metadata-driven auth enforcement first
    if (should-enforce-auth-from-metadata $operation_name) {
        let auth_reqs = (get-metadata-auth-requirements $operation_name)

        # Require authentication
        let allow_skip = (config-get "security.bypass.allow_skip_auth" false)
        require-auth $operation_name --allow-skip=$allow_skip

        # Check MFA based on auth_type from metadata
        if $auth_reqs.auth_type == "mfa" {
            require-mfa $operation_name $"MFA required for ($operation_name)"
        } else if $auth_reqs.auth_type == "cedar" {
            # Cedar policy evaluation would go here
            require-mfa $operation_name "Cedar policy verification required"
        }

        # Validate permission level if set
        let user_level = (config-get "security.user_permission_level" "read")
        if not (validate-permission-level $operation_name $user_level) {
            print $"(ansi red_bold)❌ Insufficient Permissions(ansi reset)"
            print $"Operation: (ansi cyan)($operation_name)(ansi reset)"
            print $"Required: (ansi yellow)($auth_reqs.min_permission)(ansi reset)"
            print $"Your level: (ansi yellow)($user_level)(ansi reset)"
            exit 1
        }

        return true
    }

    # Skip if auth not required by configuration
    if not (should-require-auth) {
        return true
    }

    # Fallback to configuration-based checks
    let allow_skip = (config-get "security.bypass.allow_skip_auth" false)
    require-auth $operation_name --allow-skip=$allow_skip

    # Get environment
    let environment = (config-get "environment" "dev")

    # Check MFA requirements based on environment and operation type
    if $environment == "prod" and (should-require-mfa-prod) {
        require-mfa $operation_name "production environment"
    } else if (is-destructive-operation $operation_type) and (should-require-mfa-destructive) {
        require-mfa $operation_name "destructive operation"
    }

    true
}

# Get authentication metadata for audit logging
export def get-auth-metadata [] {
    let auth_status = (plugin-verify)

    {
        authenticated: ($auth_status | get valid? | default false)
        mfa_verified: ($auth_status | get mfa_verified? | default false)
        username: ($auth_status | get username? | default "anonymous")
        timestamp: (date now | format date "%Y-%m-%d %H:%M:%S")
    }
}
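
For reference, the record this returns has the following shape (values are illustrative, not output of a real session):

# get-auth-metadata => {
#     authenticated: true
#     mfa_verified: false
#     username: "ops-admin"             # "anonymous" when no session exists
#     timestamp: "2025-01-15 10:30:00"
# }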

# Log authenticated operation for audit trail
export def log-authenticated-operation [
    operation: string # Operation performed
    details: record # Operation details
] {
    let auth_metadata = (get-auth-metadata)

    let log_entry = {
        timestamp: $auth_metadata.timestamp
        user: $auth_metadata.username
        operation: $operation
        details: $details
        mfa_verified: $auth_metadata.mfa_verified
    }

    # Log to file if configured
    let log_path = (config-get "security.audit_log_path" "")
    if ($log_path | is-not-empty) {
        let log_dir = ($log_path | path dirname)
        if ($log_dir | path exists) {
            $log_entry | to json | save --append $log_path
        }
    }
}
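
A minimal call-site sketch (the operation name and details record are hypothetical):

# Record a destructive action in the audit trail after it succeeds
log-authenticated-operation "server delete" {server: "web-01", provider: "local"}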

# Print current authentication status (user-friendly)
export def print-auth-status [] {
    let auth_status = (plugin-verify)
    let is_valid = ($auth_status | get valid? | default false)

    print $"(ansi blue_bold)Authentication Status(ansi reset)"
    print $"━━━━━━━━━━━━━━━━━━━━━━━━"

    if $is_valid {
        let username = ($auth_status | get username? | default "unknown")
        let mfa_verified = ($auth_status | get mfa_verified? | default false)

        print $"Status: (ansi green_bold)✓ Authenticated(ansi reset)"
        print $"User: (ansi cyan)($username)(ansi reset)"

        if $mfa_verified {
            print $"MFA: (ansi green_bold)✓ Verified(ansi reset)"
        } else {
            print $"MFA: (ansi yellow)Not verified(ansi reset)"
        }
    } else {
        print $"Status: (ansi red)✗ Not authenticated(ansi reset)"
        print ""
        print $"Run: (ansi green)provisioning auth login <username>(ansi reset)"
    }

    print ""
    print $"(ansi dim)Authentication required:(ansi reset) (should-require-auth)"
    print $"(ansi dim)MFA for production:(ansi reset) (should-require-mfa-prod)"
    print $"(ansi dim)MFA for destructive:(ansi reset) (should-require-mfa-destructive)"
}

# ============================================================================
# TYPEDIALOG HELPER FUNCTIONS
# ============================================================================

# Run TypeDialog form via bash wrapper for authentication
# This pattern avoids TTY/input issues in Nushell's execution stack
export def run-typedialog-auth-form [
    wrapper_script: string
    --backend: string = "tui"
] {
    # Check if the wrapper script exists
    if not ($wrapper_script | path exists) {
        return {
            success: false
            error: "TypeDialog wrapper not available"
            use_fallback: true
        }
    }

    # Set backend environment variable
    $env.TYPEDIALOG_BACKEND = $backend

    # Run bash wrapper (handles TTY input properly)
    let result = (do { bash $wrapper_script } | complete)

    if $result.exit_code != 0 {
        return {
            success: false
            error: $result.stderr
            use_fallback: true
        }
    }

    # Read the generated JSON file
    let json_output = ($wrapper_script | path dirname | path join "generated" | path join ($wrapper_script | path basename | str replace ".sh" "-result.json"))

    if not ($json_output | path exists) {
        return {
            success: false
            error: "Output file not found"
            use_fallback: true
        }
    }

    # Parse JSON output
    let result = do {
        open $json_output | from json
    } | complete

    if $result.exit_code == 0 {
        let values = $result.stdout
        {
            success: true
            values: $values
            use_fallback: false
        }
    } else {
        return {
            success: false
            error: "Failed to parse TypeDialog output"
            use_fallback: true
        }
    }
}
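
A hedged usage sketch of this helper (the wrapper path here is hypothetical; the real callers below pass their own scripts):

let form = (run-typedialog-auth-form "provisioning/core/shlib/example-tty.sh" --backend tui)
if $form.use_fallback {
    # TypeDialog missing or failed: fall back to plain input prompts
}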

# ============================================================================
# INTERACTIVE FORM HANDLERS (TypeDialog Integration)
# ============================================================================

# Interactive login with form
export def login-interactive [
    --backend: string = "tui"
] : nothing -> record {
    print "🔐 Interactive Authentication"
    print ""

    # Run the login form via bash wrapper
    let wrapper_script = "provisioning/core/shlib/auth-login-tty.sh"
    let form_result = (run-typedialog-auth-form $wrapper_script --backend $backend)

    # Fallback to basic prompts if TypeDialog not available
    if not $form_result.success or $form_result.use_fallback {
        print "ℹ️ TypeDialog not available. Using basic prompts..."
        print ""

        print "Username: "
        let username = (input)
        print "Password: "
        let password = (input --suppress-output)

        print "Do you have MFA enabled? (y/n): "
        let has_mfa_input = (input)
        let has_mfa = ($has_mfa_input == "y" or $has_mfa_input == "Y")

        let mfa_code = if $has_mfa {
            print "MFA Code (6 digits): "
            input
        } else {
            ""
        }

        if ($username | is-empty) or ($password | is-empty) {
            return {
                success: false
                error: "Username and password are required"
            }
        }

        let login_result = (plugin-login $username $password --mfa-code $mfa_code)

        return {
            success: true
            result: $login_result
            username: $username
            mfa_enabled: $has_mfa
        }
    }

    let form_values = $form_result.values

    # Check if user cancelled or didn't confirm
    if not ($form_values.auth?.confirm_login? | default false) {
        return {
            success: false
            error: "Login cancelled by user"
        }
    }

    # Perform login with provided credentials
    let username = ($form_values.auth?.username? | default "")
    let password = ($form_values.auth?.password? | default "")
    let has_mfa = ($form_values.auth?.has_mfa? | default false)
    let mfa_code = if $has_mfa {
        $form_values.auth?.mfa_code? | default ""
    } else {
        ""
    }

    if ($username | is-empty) or ($password | is-empty) {
        return {
            success: false
            error: "Username and password are required"
        }
    }

    # Call the plugin login function
    let login_result = (plugin-login $username $password --mfa-code $mfa_code)

    {
        success: true
        result: $login_result
        username: $username
        mfa_enabled: $has_mfa
    }
}

# Interactive MFA enrollment with form
export def mfa-enroll-interactive [
    --backend: string = "tui"
] : nothing -> record {
    print "🔐 Multi-Factor Authentication Setup"
    print ""

    # Check if user is already authenticated
    let auth_status = (plugin-verify)
    let is_authenticated = ($auth_status | get valid? | default false)

    if not $is_authenticated {
        return {
            success: false
            error: "Must be authenticated to enroll in MFA. Please login first."
        }
    }

    # Run the MFA enrollment form via bash wrapper
    let wrapper_script = "provisioning/core/shlib/mfa-enroll-tty.sh"
    let form_result = (run-typedialog-auth-form $wrapper_script --backend $backend)

    # Fallback to basic prompts if TypeDialog not available
    if not $form_result.success or $form_result.use_fallback {
        print "ℹ️ TypeDialog not available. Using basic prompts..."
        print ""

        print "MFA Type (totp/webauthn/sms): "
        let mfa_type = (input)

        let device_name = if ($mfa_type == "totp" or $mfa_type == "webauthn") {
            print "Device name: "
            input
        } else if $mfa_type == "sms" {
            ""
        } else {
            ""
        }

        let phone_number = if $mfa_type == "sms" {
            print "Phone number (international format, e.g., +1234567890): "
            input
        } else {
            ""
        }

        let verification_code = if ($mfa_type == "totp" or $mfa_type == "sms") {
            print "Verification code (6 digits): "
            input
        } else {
            ""
        }

        print "Generate backup codes? (y/n): "
        let generate_backup_input = (input)
        let generate_backup = ($generate_backup_input == "y" or $generate_backup_input == "Y")

        let backup_count = if $generate_backup {
            print "Number of backup codes (5-20): "
            let count_str = (input)
            $count_str | into int | default 10
        } else {
            0
        }

        return {
            success: true
            mfa_type: $mfa_type
            device_name: $device_name
            phone_number: $phone_number
            verification_code: $verification_code
            generate_backup_codes: $generate_backup
            backup_codes_count: $backup_count
        }
    }

    let form_values = $form_result.values

    # Check if user confirmed
    if not ($form_values.mfa?.confirm_enroll? | default false) {
        return {
            success: false
            error: "MFA enrollment cancelled by user"
        }
    }

    # Extract MFA type and parameters from form values
    let mfa_type = ($form_values.mfa?.type? | default "totp")
    let device_name = if $mfa_type == "totp" {
        $form_values.mfa?.totp?.device_name? | default "Authenticator App"
    } else if $mfa_type == "webauthn" {
        $form_values.mfa?.webauthn?.device_name? | default "Security Key"
    } else if $mfa_type == "sms" {
        ""
    } else {
        ""
    }

    let phone_number = if $mfa_type == "sms" {
        $form_values.mfa?.sms?.phone_number? | default ""
    } else {
        ""
    }

    let verification_code = if $mfa_type == "totp" {
        $form_values.mfa?.totp?.verification_code? | default ""
    } else if $mfa_type == "sms" {
        $form_values.mfa?.sms?.verification_code? | default ""
    } else {
        ""
    }

    let generate_backup = ($form_values.mfa?.generate_backup_codes? | default true)
    let backup_count = ($form_values.mfa?.backup_codes_count? | default 10)

    # Call the plugin MFA enrollment function
    let enroll_result = (plugin-mfa-enroll --type $mfa_type)

    {
        success: true
        result: $enroll_result
        mfa_type: $mfa_type
        device_name: $device_name
        phone_number: $phone_number
        verification_code: $verification_code
        generate_backup_codes: $generate_backup
        backup_codes_count: $backup_count
    }
}
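
A minimal sketch of driving the interactive flow from an entry point (assuming the record fields shown above):

let login = (login-interactive --backend tui)
if not $login.success {
    print $"Login failed: ($login.error? | default 'cancelled')"
    exit 1
}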

@ -269,7 +269,7 @@ export def test_file_encryption [] {
    let test_file = "/tmp/kms_test_file.txt"
    let test_content = "This is test file content for KMS encryption"

    try {
    let file_result = (do {
        $test_content | save -f $test_file

        # Try to encrypt file
@ -286,7 +286,9 @@ export def test_file_encryption [] {
        } else {
            print " ⚠️ File encryption not available"
        }
    } catch { |err|
    } | complete)

    if $file_result.exit_code != 0 {
        print " ⚠️ Could not create test file"
    }
}

@ -1,3 +1,7 @@
# Module: Plugins Module Exports
# Purpose: Central export point for all plugin system components (auth, kms, etc.).
# Dependencies: auth, kms, and other plugin modules

# Plugin Wrapper Modules
# Exports all plugin wrappers with HTTP fallback support

@ -161,19 +161,23 @@ export def save-pipeline-state [
    state: record
    output_path: string
] {
    try {
    let result = (do {
        $state | to json | save $output_path
        {
            success: true
            message: $"Pipeline state saved to ($output_path)"
            path: $output_path
        }
    } catch {|err|
    } | complete)

    if $result.exit_code != 0 {
        {
            success: false
            error: $err.msg
            error: $result.stderr
            path: $output_path
        }
    } else {
        $result.stdout
    }
}

@ -181,17 +185,21 @@ export def save-pipeline-state [
export def resume-pipeline [
    state_path: string
] {
    try {
    let result = (do {
        let state = (open $state_path | from json)
        {
            success: true
            state: $state
        }
    } catch {|err|
    } | complete)

    if $result.exit_code != 0 {
        {
            success: false
            error: $err.msg
            error: $result.stderr
        }
    } else {
        $result.stdout
    }
}
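
A hedged round-trip sketch for these two helpers (path illustrative; assumes the do/complete wrapper surfaces the success record on stdout as written):

let saved = (save-pipeline-state {step: 3, status: "running"} "/tmp/pipeline-state.json")
if $saved.success {
    let resumed = (resume-pipeline "/tmp/pipeline-state.json")
    if $resumed.success { print $resumed.state.step }
}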

@ -34,19 +34,21 @@ export def detect-project [
        $args = ($args | append "--pretty")
    }

    try {
        let output = (^$detector_bin ...$args 2>&1)
        if $format == "json" {
            $output | from json
        } else {
            { output: $output }
        }
    } catch {|err|
        {
    # Execute detector binary (no try-catch)
    let exec_result = (do { ^$detector_bin ...$args 2>&1 } | complete)
    if $exec_result.exit_code != 0 {
        return {
            error: "Detection failed"
            message: $err.msg
            message: $exec_result.stderr
        }
    }

    let output = $exec_result.stdout
    if $format == "json" {
        $output | from json
    } else {
        { output: $output }
    }
}

# Analyze gaps in infrastructure declaration
@ -80,19 +82,21 @@ export def complete-project [
        $args = ($args | append "--pretty")
    }

    try {
        let output = (^$detector_bin ...$args 2>&1)
        if $format == "json" {
            $output | from json
        } else {
            { output: $output }
        }
    } catch {|err|
        {
    # Execute detector binary (no try-catch)
    let exec_result = (do { ^$detector_bin ...$args 2>&1 } | complete)
    if $exec_result.exit_code != 0 {
        return {
            error: "Completion failed"
            message: $err.msg
            message: $exec_result.stderr
        }
    }

    let output = $exec_result.stdout
    if $format == "json" {
        $output | from json
    } else {
        { output: $output }
    }
}

# Find provisioning-detector binary in standard locations

@ -11,7 +11,7 @@ export def load-inference-rules [
    if ($config_path | path exists) {
        # Load the YAML file (open automatically parses YAML)
        let rules = (open $config_path)
        if (try { $rules.rules | is-not-empty } catch { false }) {
        if ($rules.rules? != null and ($rules.rules | is-not-empty)) {
            $rules
        } else {
            get-default-inference-rules
@ -85,14 +85,14 @@ export def validate-inference-rule [
] {
    let required_fields = ["name" "technology" "infers" "confidence" "reason"]
    let has_all = ($required_fields | all {|f|
        try { ($rule | get $f) | is-not-empty } catch { false }
        (($rule | get -o $f) != null) and (($rule | get -o $f) | is-not-empty)
    })

    {
        valid: $has_all
        errors: (if not $has_all {
            $required_fields | where {|f|
                try { ($rule | get $f) | is-empty } catch { true }
                (($rule | get -o $f) == null) or (($rule | get -o $f) | is-empty)
            }
        } else {
            []
@ -133,19 +133,23 @@ export def save-inference-rules [

    let config_path = ($config_dir | path join $"($org_name).yaml")

    try {
    let result = (do {
        $rules | to yaml | save $config_path
        {
            success: true
            message: $"Rules saved to ($config_path)"
            path: $config_path
        }
    } catch {|err|
    } | complete)

    if $result.exit_code != 0 {
        {
            success: false
            error: $err.msg
            error: $result.stderr
            path: $config_path
        }
    } else {
        $result.stdout
    }
}

@ -284,9 +284,9 @@ export def get-interface-version [] {
#
# # Proceed with AWS-specific implementation
# # AWS credentials are loaded from AWS config/env (separate from platform auth)
# try {
#     # ... create EC2 instance ...
# } catch {
# # Refactored from try-catch to do/complete for explicit error handling
# let result = (do { ... create EC2 instance ... } | complete)
# if $result.exit_code != 0 {
#     error make {
#         msg: "AWS API error"
#         label: {text: "Check AWS credentials in ~/.aws/credentials"}

208
nulib/lib_provisioning/result.nu
Normal file
@ -0,0 +1,208 @@
#!/usr/bin/env nu
# Result Type Pattern - Hybrid error handling without try-catch
# Combines preconditions (fail-fast), Result pattern, and functional composition
# Version: 1.0
#
# Usage:
#   use lib_provisioning/result.nu *
#
#   def my-operation []: record {
#       if (precondition-fails) { return (err "message") }
#       ok {result: "value"}
#   }

# Construct success result with value
# Type: any -> {ok: any, err: null}
export def ok [value: any] {
    {ok: $value, err: null}
}

# Construct error result with message
# Type: string -> {ok: null, err: string}
export def err [message: string] {
    {ok: null, err: $message}
}

# Check if result is successful
# Type: record -> bool
export def is-ok [result: record] {
    $result.err == null
}

# Check if result is error
# Type: record -> bool
export def is-err [result: record] {
    $result.err != null
}

# Monadic bind: chain operations on Results
# Type: record, closure -> record
# Stops propagation on error
export def and-then [result: record, fn: closure] {
    if (is-ok $result) {
        do $fn $result.ok
    } else {
        $result # Propagate error
    }
}

# Map a function over the Ok value; errors pass through unchanged
# Type: record, closure -> record
export def map [result: record, fn: closure] {
    if (is-ok $result) {
        ok (do $fn $result.ok)
    } else {
        $result
    }
}

# Map over Result error
# Type: record, closure -> record
export def map-err [result: record, fn: closure] {
    if (is-err $result) {
        err (do $fn $result.err)
    } else {
        $result
    }
}

# Unwrap Result or return default
# Type: record, any -> any
export def unwrap-or [result: record, default: any] {
    if (is-ok $result) {
        $result.ok
    } else {
        $default
    }
}

# Unwrap Result or throw error
# Type: record -> any (throws if error)
export def unwrap! [result: record] {
    if (is-ok $result) {
        $result.ok
    } else {
        error make {msg: $result.err}
    }
}

# Combine two Results (stops on first error)
# Type: record, record -> record
export def combine [result1: record, result2: record] {
    if (is-err $result1) {
        return $result1
    }
    if (is-err $result2) {
        return $result2
    }
    ok {first: $result1.ok, second: $result2.ok}
}

# Combine list of Results (stops on first error)
# Type: list -> record
export def combine-all [results: list] {
    mut accumulated = (ok [])

    for result in $results {
        if (is-err $accumulated) {
            break
        }
        $accumulated = (and-then $accumulated {|acc|
            if (is-ok $result) {
                ok ($acc | append $result.ok)
            } else {
                err $result.err
            }
        })
    }

    $accumulated
}
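
A small usage sketch of the combinators defined so far:

let results = [(ok 1) (ok 2) (ok 3)]
combine-all $results               # => {ok: [1 2 3], err: null}
combine-all [(ok 1) (err "boom")]  # => {ok: null, err: "boom"}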

# Try operation with automatic error wrapping
# Type: closure -> record
# Catches Nushell errors and wraps them (no try-catch)
export def try-wrap [fn: closure] {
    let result = (do { do $fn } | complete)
    if $result.exit_code == 0 {
        ok ($result.stdout)
    } else {
        err $result.stderr
    }
}

# Match on Result (like Rust's match)
# Type: record, closure, closure -> any
export def match-result [result: record, on-ok: closure, on-err: closure] {
    if (is-ok $result) {
        do $on-ok $result.ok
    } else {
        do $on-err $result.err
    }
}
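
A usage sketch, branching on a Result without try-catch:

let r = (err "disk offline")
match-result $r {|value| $value } {|msg| print $"warn: ($msg)"; null }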

# Execute bash command and wrap result
# Type: string -> record
# Returns: {ok: output, err: null} on success; {ok: null, err: message} on error (no try-catch)
export def bash-wrap [cmd: string] {
    let result = (do { bash -c $cmd } | complete)
    if $result.exit_code == 0 {
        ok ($result.stdout | str trim)
    } else {
        err $"Command failed: ($result.stderr)"
    }
}

# Execute bash command, check exit code
# Type: string -> record
# Returns: {ok: {exit_code: int, stdout: string}, err: null} or {ok: null, err: message} (no try-catch)
export def bash-check [cmd: string] {
    let result = (do { bash -c $cmd | complete } | complete)
    if $result.exit_code == 0 {
        let bash_result = ($result.stdout)
        if ($bash_result.exit_code == 0) {
            ok $bash_result
        } else {
            err ($bash_result.stderr)
        }
    } else {
        err $"Command failed: ($result.stderr)"
    }
}
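
A usage sketch (the virsh command is illustrative); note the Ok branch carries the full record produced by the inner complete:

let r = (bash-check "virsh list --all --name")
if (is-ok $r) { print $r.ok.stdout } else { print $"failed: ($r.err)" }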

# Try bash command with fallback value
# Type: string, any -> any
# Returns value on success, fallback on error (no try-catch)
export def bash-or [cmd: string, fallback: any] {
    let result = (do { bash -c $cmd } | complete)
    if $result.exit_code == 0 {
        ($result.stdout | str trim)
    } else {
        $fallback
    }
}

# Read JSON file safely
# Type: string -> record
# Returns: {ok: parsed_json, err: null} or {ok: null, err: message} (no try-catch)
export def json-read [file_path: string] {
    let read_result = (do { open $file_path | from json } | complete)
    if $read_result.exit_code == 0 {
        ok ($read_result.stdout)
    } else {
        err $"Failed to read JSON from ($file_path): ($read_result.stderr)"
    }
}

# Write JSON to file safely
# Type: string, any -> record
# Returns: {ok: true, err: null} or {ok: false, err: message} (no try-catch)
export def json-write [file_path: string, data: any] {
    let json_str = ($data | to json)
    let write_result = (do { bash -c $"cat > ($file_path) << 'EOF'\n($json_str)\nEOF" } | complete)
    if $write_result.exit_code == 0 {
        ok true
    } else {
        err $"Failed to write JSON to ($file_path): ($write_result.stderr)"
    }
}
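
A composition sketch tying the JSON helpers to the combinators above (path illustrative):

let updated = (and-then (json-read "/tmp/state.json") {|state|
    json-write "/tmp/state.json" ($state | upsert resumed true)
})
unwrap-or $updated false   # => true on success, false if either step failed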

@ -57,8 +57,8 @@ export def install_config [
    } else {
        mkdir ($provisioning_context_path | path dirname)
        let data_context = (open -r $context_template)
        $data_context | str replace "HOME" $nu.home-path | save $provisioning_context_path
        #$use_context | update infra_path ($context.infra_path | str replace "HOME" $nu.home-path) | save $provisioning_context_path
        $data_context | str replace "HOME" $nu.home-dir | save $provisioning_context_path
        #$use_context | update infra_path ($context.infra_path | str replace "HOME" $nu.home-dir) | save $provisioning_context_path
        _print $"Installation on (_ansi yellow)($provisioning_context_path) (_ansi green_bold)completed(_ansi reset)"
        _print $"use (_ansi purple_bold)provisioning context(_ansi reset) to manage context \(create, default, set, etc\)"
    }

@ -33,7 +33,7 @@ export def get-available-providers [
    } | complete)

    if ($result.exit_code == 0) {
        $result.stdout | split row "\n" | where { |x| ($x | str length) > 0 }
        $result.stdout | lines | where { |x| ($x | str length) > 0 }
    } else {
        []
    }

@ -81,8 +81,9 @@ export def validate-settings [
    settings: record
    required_fields: list
] {
    # Guard: Check for missing required fields (no try-catch)
    let missing_fields = ($required_fields | where {|field|
        ($settings | try { get $field } catch { null } | is-empty)
        not ($field in $settings) or (($settings | get $field) | is-empty)
    })

    if ($missing_fields | length) > 0 {

@ -20,15 +20,11 @@ use ./validation.nu *
# Reads directly from /dev/tty for TTY mode, handles piped input gracefully
def read-input-line [] {
    # Try to read from /dev/tty first (TTY/interactive mode)
    let tty_result = (try {
        open /dev/tty | lines | first | str trim
    } catch {
        null
    })
    let read_result = (do { open /dev/tty | lines | first | str trim } | complete)

    # If /dev/tty worked, return the line
    if $tty_result != null {
        $tty_result
    if $read_result.exit_code == 0 {
        ($read_result.stdout)
    } else {
        # No /dev/tty (Windows, containers, or piped mode)
        # Return empty string - this will use defaults in calling code
@ -359,12 +355,8 @@ export def run-setup-wizard [
    --verbose = false
] {
    # Check if running in TTY or piped mode
    let is_interactive = (try {
        open /dev/tty | null
        true
    } catch {
        false
    })
    let tty_check = (do { open /dev/tty | ignore } | complete)
    let is_interactive = ($tty_check.exit_code == 0)

    if not $is_interactive {
        # In non-TTY mode, switch to defaults automatically
@ -608,16 +600,17 @@ def run-typedialog-form [
        }
    }

    # Parse JSON output
    let values = (try {
        open $json_output | from json
    } catch {
    # Parse JSON output (no try-catch)
    let parse_result = (do { open $json_output | from json } | complete)
    if $parse_result.exit_code != 0 {
        return {
            success: false
            error: "Failed to parse TypeDialog output"
            use_fallback: true
        }
    })
    }

    let values = ($parse_result.stdout)

    {
        success: true

@ -98,14 +98,18 @@ export def tera-daemon-reset-stats [] -> void {
# # Returns
# `true` if daemon is running with Tera support, `false` otherwise
export def is-tera-daemon-available [] -> bool {
    try {
    let result = (do {
        let daemon_url = (get-cli-daemon-url)
        let response = (http get $"($daemon_url)/info" --timeout 500ms)

        # Check if tera-rendering is in features list
        ($response | from json | get features | str contains "tera-rendering")
    } catch {
    } | complete)

    if $result.exit_code != 0 {
        false
    } else {
        $result.stdout
    }
}

@ -1,3 +1,7 @@
# Module: Error Handling Utilities
# Purpose: Centralized error handling, error messages, and exception management.
# Dependencies: None (core utility)

use ../config/accessor.nu *

export def throw-error [

@ -49,17 +49,19 @@ export def safe-execute [
    context: string
    --fallback: closure
]: any {
    try {
        do $command
    } catch {|err|
        print $"⚠️ Warning: Error in ($context): ($err.msg)"
    # Execute command with error handling (no try-catch)
    let exec_result = (do { do $command } | complete)
    if $exec_result.exit_code != 0 {
        print $"⚠️ Warning: Error in ($context): ($exec_result.stderr)"
        if ($fallback | is-not-empty) {
            print "🔄 Executing fallback..."
            do $fallback
        } else {
            print $"🛑 Execution failed in ($context)"
            print $" Error: ($err.msg)"
            print $" Error: ($exec_result.stderr)"
        }
    } else {
        $exec_result.stdout
    }
}

@ -48,17 +48,19 @@ export def safe-execute [
    context: string
    --fallback: closure
] {
    try {
        do $command
    } catch {|err|
        print $"⚠️ Warning: Error in ($context): ($err.msg)"
    # Execute command with error handling (no try-catch)
    let result = (do { do $command } | complete)
    if $result.exit_code != 0 {
        print $"⚠️ Warning: Error in ($context): ($result.stderr)"
        if ($fallback | is-not-empty) {
            print "🔄 Executing fallback..."
            do $fallback
        } else {
            print $"🛑 Execution failed in ($context)"
            print $" Error: ($err.msg)"
            print $" Error: ($result.stderr)"
        }
    } else {
        $result.stdout
    }
}

@ -49,17 +49,19 @@ export def safe-execute [
    context: string
    --fallback: closure
]: any {
    try {
        do $command
    } catch {|err|
        print $"⚠️ Warning: Error in ($context): ($err.msg)"
    # Execute command with error handling (no try-catch)
    let result = (do { do $command } | complete)
    if $result.exit_code != 0 {
        print $"⚠️ Warning: Error in ($context): ($result.stderr)"
        if ($fallback | is-not-empty) {
            print "🔄 Executing fallback..."
            do $fallback
        } else {
            print $"🛑 Execution failed in ($context)"
            print $" Error: ($err.msg)"
            print $" Error: ($result.stderr)"
        }
    } else {
        $result.stdout
    }
}

@ -1,3 +1,7 @@
# Module: System Initialization
# Purpose: Handles system initialization, environment setup, and workspace initialization.
# Dependencies: error, interface, config/accessor

use ../config/accessor.nu *

@ -35,19 +39,22 @@ export def provisioning_init [
        str replace "-h" "" | str replace $module "" | str trim | split row " "
    )
    if ($cmd_args | length) > 0 {
        # _print $"---($module)-- ($env.PROVISIONING_NAME) -mod '($module)' ($cmd_args) help"
        ^$"((get-provisioning-name))" "-mod" $"($module | str replace ' ' '|')" ...$cmd_args help
        # let str_mod_0 = ($cmd_args | try { get 0 } catch { "") }
        # let str_mod_1 = ($cmd_args | try { get 1 } catch { "") }
        # if $str_mod_1 != "" {
        #     let final_args = ($cmd_args | drop nth 0 1)
        #     _print $"---($module)-- ($env.PROVISIONING_NAME) -mod '($str_mod_0) ($str_mod_1)' ($cmd_args | drop nth 0) help"
        #     ^$"($env.PROVISIONING_NAME)" "-mod" $"'($str_mod_0) ($str_mod_1)'" ...$final_args help
        # } else {
        #     let final_args = ($cmd_args | drop nth 0)
        #     _print $"---($module)-- ($env.PROVISIONING_NAME) -mod ($str_mod_0) ($cmd_args | drop nth 0) help"
        #     ^$"($env.PROVISIONING_NAME)" "-mod" ($str_mod_0) ...$final_args help
        # }
        # Refactored from try-catch to do/complete for explicit error handling
        let str_mod_0_result = (do { $cmd_args | get 0 } | complete)
        let str_mod_0 = if $str_mod_0_result.exit_code == 0 { ($str_mod_0_result.stdout | str trim) } else { "" }

        let str_mod_1_result = (do { $cmd_args | get 1 } | complete)
        let str_mod_1 = if $str_mod_1_result.exit_code == 0 { ($str_mod_1_result.stdout | str trim) } else { "" }

        if $str_mod_1 != "" {
            let final_args = ($cmd_args | drop nth 0 1)
            ^$"((get-provisioning-name))" "-mod" $"'($str_mod_0) ($str_mod_1)'" ...$final_args help
        } else if $str_mod_0 != "" {
            let final_args = ($cmd_args | drop nth 0)
            ^$"((get-provisioning-name))" "-mod" ($str_mod_0) ...$final_args help
        } else {
            ^$"((get-provisioning-name))" "-mod" $"($module | str replace ' ' '|')" ...$cmd_args help
        }
    } else {
        ^$"((get-provisioning-name))" help
    }

@ -1,3 +1,7 @@
# Module: User Interface Utilities
# Purpose: Provides terminal UI utilities: output formatting, prompts, spinners, and status displays.
# Dependencies: error for error handling

use ../config/accessor.nu *

export def _ansi [

@ -6,7 +6,7 @@ for command_is_simple in [Yes, No] {
    for multi_command in [Yes, No] {
        print ($"Testing with command_is_simple=($command_is_simple), " ++
            $"multi_command=($multi_command)")
        try {
        let result = (do {
            do --capture-errors {
                cd $tempdir
                (
@ -23,11 +23,13 @@ for command_is_simple in [Yes, No] {
                do { cd nu_plugin_test_plugin; ^cargo test }
                rm -r nu_plugin_test_plugin
            }
        } catch { |err|
        } | complete)

        if $result.exit_code != 0 {
            print -e ($"Failed with command_is_simple=($command_is_simple), " ++
                $"multi_command=($multi_command)")
            rm -rf $tempdir
            $err.raw
            error make { msg: $result.stderr }
        }
    }
}

@ -81,8 +81,9 @@ export def validate-settings [
    settings: record
    required_fields: list
] {
    # Guard: Check for missing required fields (no try-catch)
    let missing_fields = ($required_fields | where {|field|
        ($settings | try { get $field } catch { null } | is-empty)
        not ($field in $settings) or (($settings | get $field) | is-empty)
    })

    if ($missing_fields | length) > 0 {

@ -106,7 +106,7 @@ export def validate-settings [
    context?: string
]: bool {
    let missing_fields = ($required_fields | where {|field|
        ($settings | try { get $field } catch { null } | is-empty)
        not ($field in $settings) or (($settings | get $field) | is-empty)
    })

    if ($missing_fields | length) > 0 {
5
nulib/lib_provisioning/utils/version.nu
Normal file
@ -0,0 +1,5 @@
# Module: Version Management Orchestrator (v2)
# Purpose: Re-exports modular version components using folder structure
# Dependencies: version/ folder with core, formatter, loader, manager, registry, taskserv modules

export use ./version/mod.nu *

@ -2,7 +2,7 @@
# Dynamic configuration loader for version management
# Discovers and loads version configurations from the filesystem

use version_core.nu *
use ./core.nu *

# Discover version configurations
export def discover-configurations [

@ -2,10 +2,10 @@
# Main version management interface
# Completely configuration-driven, no hardcoded components

use version_core.nu *
use version_loader.nu *
use version_formatter.nu *
use interface.nu *
use ./core.nu *
use ./loader.nu *
use ./formatter.nu *
use ../interface.nu *

# Check versions for discovered components
export def check-versions [
21
nulib/lib_provisioning/utils/version/mod.nu
Normal file
@ -0,0 +1,21 @@
# Module: Version Management System
# Purpose: Centralizes version operations for core, formatting, loading, management, registry, and taskserv-specific versioning
# Dependencies: core, formatter, loader, manager, registry, taskserv

# Core version functionality
export use ./core.nu *

# Version formatting
export use ./formatter.nu *

# Version loading and caching
export use ./loader.nu *

# Version management operations
export use ./manager.nu *

# Version registry
export use ./registry.nu *

# TaskServ-specific versioning
export use ./taskserv.nu *

@ -2,9 +2,9 @@
# Version registry management for taskservs
# Handles the central version registry and integrates with taskserv configurations

use version_core.nu *
use version_taskserv.nu *
use interface.nu *
use ./core.nu *
use ./taskserv.nu *
use ../interface.nu *

# Load the version registry
export def load-version-registry [

@ -2,10 +2,9 @@
# Taskserv version extraction and management utilities
# Handles Nickel taskserv files and version configuration

use ../config/accessor.nu *
use version_core.nu *
use version_loader.nu *
use interface.nu *
use ./core.nu *
use ./loader.nu *
use ../interface.nu *

# Extract version field from Nickel taskserv files
export def extract-nickel-version [

@ -2,6 +2,9 @@
#
# Low-level libvirt operations using virsh CLI.
# Rule 1: Single purpose, Rule 2: Explicit types, Rule 3: Early return
# Error handling: Result pattern (hybrid, no inline try-catch)

use lib_provisioning/result.nu *

export def "libvirt-create-vm" [
    config: record # VM configuration
@ -24,35 +27,23 @@ export def "libvirt-create-vm" [
    let temp_file = $"/tmp/vm-($config.name)-($env.RANDOM).xml"
    bash -c $"cat > ($temp_file) << 'EOF'\n($xml)\nEOF"

    # Define domain in libvirt
    let define_result = (
        try {
            bash -c $"virsh define ($temp_file)" | complete
        } catch {|err|
            {exit_code: 1, stderr: $err}
        }
    )
    # Define domain in libvirt using bash-check helper
    let define_result = (bash-check $"virsh define ($temp_file)")

    # Cleanup temp file
    bash -c $"rm -f ($temp_file)"
    # Cleanup temp file (use bash-or for safe execution)
    bash-or $"rm -f ($temp_file)" null

    # Check result
    if $define_result.exit_code != 0 {
    # Guard: Check define result
    if (is-err $define_result) {
        return {
            success: false
            error: $define_result.stderr
            error: $define_result.err
            vm_id: null
        }
    }

    # Get domain ID
    let domain_id = (
        try {
            bash -c $"virsh domid ($config.name)" | str trim
        } catch {
            null
        }
    )
    # Get domain ID using bash-or with null fallback
    let domain_id = (bash-or $"virsh domid ($config.name) | tr -d '\n'" null)

    {
        success: true
@ -102,31 +93,20 @@ export def "libvirt-start-vm" [
]: record {
    """Start a virtual machine"""

    # Guard: Input validation
    if ($vm_name | is-empty) {
        return {success: false, error: "VM name required"}
    }

    let result = (
        try {
            bash -c $"virsh start ($vm_name)" | complete
        } catch {|err|
            {exit_code: 1, stderr: $err}
        }
    )
    # Execute using bash-check helper (no inline try-catch)
    let result = (bash-check $"virsh start ($vm_name)")

    if $result.exit_code != 0 {
        return {
            success: false
            error: $result.stderr
            vm_name: $vm_name
        }
    # Guard: Check result
    if (is-err $result) {
        return {success: false, error: $result.err, vm_name: $vm_name}
    }

    {
        success: true
        vm_name: $vm_name
        message: $"VM ($vm_name) started"
    }
    {success: true, vm_name: $vm_name, message: $"VM ($vm_name) started"}
}

export def "libvirt-stop-vm" [
@ -135,39 +115,23 @@ export def "libvirt-stop-vm" [
]: record {
    """Stop a virtual machine"""

    # Guard: Input validation
    if ($vm_name | is-empty) {
        return {success: false, error: "VM name required"}
    }

    let cmd = (
        if $force {
            $"virsh destroy ($vm_name)"
        } else {
            $"virsh shutdown ($vm_name)"
        }
    )
    # Guard: Build command based on flags
    let cmd = (if $force { $"virsh destroy ($vm_name)" } else { $"virsh shutdown ($vm_name)" })

    let result = (
        try {
            bash -c $cmd | complete
        } catch {|err|
            {exit_code: 1, stderr: $err}
        }
    )
    # Execute using bash-check helper (no inline try-catch)
    let result = (bash-check $cmd)

    if $result.exit_code != 0 {
        return {
            success: false
            error: $result.stderr
            vm_name: $vm_name
        }
    # Guard: Check result
    if (is-err $result) {
        return {success: false, error: $result.err, vm_name: $vm_name}
    }

    {
        success: true
        vm_name: $vm_name
        message: $"VM ($vm_name) stopped"
    }
    {success: true, vm_name: $vm_name, message: $"VM ($vm_name) stopped"}
}

export def "libvirt-delete-vm" [
@ -175,80 +139,63 @@ export def "libvirt-delete-vm" [
]: record {
    """Delete a virtual machine and its disk"""

    # Guard: Input validation
    if ($vm_name | is-empty) {
        return {success: false, error: "VM name required"}
    }

    # Stop VM first if running
    # Guard: Check if running using bash-or helper (no inline try-catch)
    let is_running = (
        try {
            bash -c $"virsh domstate ($vm_name)" | str trim | grep -q "running"
            true
        } catch {
            false
        }
        ((bash-or $"virsh domstate ($vm_name) | grep -q running; echo $?" "1") | str trim) == "0"
    )

    # Stop VM if running
    if $is_running {
        libvirt-stop-vm $vm_name --force | if not $in.success {
            return $in
        let stop_result = (libvirt-stop-vm $vm_name --force)
        if not $stop_result.success {
            return $stop_result
        }
    }

    # Undefine domain
    let undefine_result = (
        try {
            bash -c $"virsh undefine ($vm_name)" | complete
        } catch {|err|
            {exit_code: 1, stderr: $err}
        }
    )
    # Undefine domain using bash-check helper
    let undefine_result = (bash-check $"virsh undefine ($vm_name)")

    if $undefine_result.exit_code != 0 {
        return {
            success: false
            error: $undefine_result.stderr
            vm_name: $vm_name
        }
    # Guard: Check undefine result
    if (is-err $undefine_result) {
        return {success: false, error: $undefine_result.err, vm_name: $vm_name}
    }

    # Delete disk
    # Delete disk using bash-or helper (safe, ignores errors)
    let disk_path = (get-vm-disk-path $vm_name)
    try {
        bash -c $"rm -f ($disk_path)"
    } catch { }
    bash-or $"rm -f ($disk_path)" null

    {
        success: true
        vm_name: $vm_name
        message: $"VM ($vm_name) deleted"
    }
    {success: true, vm_name: $vm_name, message: $"VM ($vm_name) deleted"}
}

export def "libvirt-list-vms" []: table {
    """List all libvirt VMs"""

    try {
        bash -c "virsh list --all --name"
        | lines
        | where {|x| ($x | length) > 0}
        | each {|vm_name|
            let state = (
                try {
                    bash -c $"virsh domstate ($vm_name)" | str trim
                } catch {
                    "unknown"
                }
            )
    # Guard: List VMs using bash-wrap helper
    let list_result = (bash-wrap "virsh list --all --name")

            {
                name: $vm_name
                state: $state
                backend: "libvirt"
            }
    # Guard: Check if listing succeeded
    if (is-err $list_result) {
        return [] # Return empty list on error
    }

    # Process VM list
    $list_result.ok
    | lines
    | where {|x| ($x | str length) > 0}
    | each {|vm_name|
        # Get state using bash-or helper with fallback
        let state = (bash-or $"virsh domstate ($vm_name) | tr -d '\n'" "unknown")

        {
            name: $vm_name
            state: $state
            backend: "libvirt"
        }
    } catch {
        []
    }
}
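
A hedged call sketch (assumes virsh is installed and this module plus the Result helpers are loaded):

libvirt-list-vms | where state == "running" | get name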

@ -257,42 +204,35 @@ export def "libvirt-get-vm-info" [
]: record {
    """Get detailed VM information from libvirt"""

    # Guard: Input validation
    if ($vm_name | is-empty) {
        return {error: "VM name required"}
    }

    let state = (
        try {
            bash -c $"virsh domstate ($vm_name)" | str trim
        } catch {
            "unknown"
        }
    )
    # Get state using bash-or helper
    let state = (bash-or $"virsh domstate ($vm_name) | tr -d '\n'" "unknown")

    let domain_id = (
        try {
            bash -c $"virsh domid ($vm_name)" | str trim
        } catch {
            null
        }
    )
    # Get domain ID using bash-or helper
    let domain_id = (bash-or $"virsh domid ($vm_name) | tr -d '\n'" null)

    # Get detailed info using bash-wrap helper
    let info = (
        try {
            bash -c $"virsh dominfo ($vm_name)" | lines
            | reduce fold {|line, acc|
                let parts = ($line | split row " " | where {|x| ($x | length) > 0})
                if ($parts | length) >= 2 {
                    let key = ($parts | get 0)
                    let value = ($parts | skip 1 | str join " ")
                    {($key): $value} | merge $acc
                } else {
                    $acc
                }
            } {}
        } catch {
            {}
        }
        match-result (bash-wrap $"virsh dominfo ($vm_name)")
            {|output|
                $output | lines
                | reduce --fold {} {|line, acc|
                    let parts = ($line | split row " " | where {|x| ($x | str length) > 0})
                    if ($parts | length) >= 2 {
                        let key = ($parts | get 0)
                        let value = ($parts | skip 1 | str join " ")
                        {($key): $value} | merge $acc
                    } else {
                        $acc
                    }
                }
            }
            {|_err| {}} # Return empty record on error
    )

    {
@ -309,20 +249,27 @@ export def "libvirt-get-vm-ip" [
]: string {
    """Get VM IP address from libvirt"""

    try {
        bash -c $"virsh domifaddr ($vm_name)"
        | lines
        | skip 2 # Skip header
        | where {|x| ($x | length) > 0}
        | get 0
        | split row " "
        | where {|x| ($x | length) > 0}
        | get 2
        | split row "/"
        | get 0
    } catch {
        ""
    # Guard: Input validation
    if ($vm_name | is-empty) {
        return ""
    }

    # Get IP using bash-wrap helper
    (match-result (bash-wrap $"virsh domifaddr ($vm_name)")
        {|output|
            $output
            | lines
            | skip 2 # Skip header
            | where {|x| ($x | str length) > 0}
            | get 0? # Optional access
            | split row " "
            | where {|x| ($x | str length) > 0}
            | get 2? # Optional access
            | split row "/"
            | get 0
        }
        {|_err| ""} # Return empty string on error
    )
}

def get-vm-disk-path [vm_name: string]: string {
@ -342,33 +289,27 @@ export def "libvirt-create-disk" [
]: record {
    """Create QCOW2 disk for VM"""

    # Guard: Input validation
    if ($vm_name | is-empty) {
        return {success: false, error: "VM name required", path: null}
    }
    if $size_gb <= 0 {
        return {success: false, error: "Size must be positive", path: null}
    }

    let disk_path = (get-vm-disk-path $vm_name)
    let disk_dir = ($disk_path | path dirname)

    # Create directory
    bash -c $"mkdir -p ($disk_dir)"
    # Create directory (safe to ignore errors)
    bash-or $"mkdir -p ($disk_dir)" null
|
||||
|
||||
# Create QCOW2 disk
|
||||
let result = (
|
||||
try {
|
||||
bash -c $"qemu-img create -f qcow2 ($disk_path) ($size_gb)G" | complete
|
||||
} catch {|err|
|
||||
{exit_code: 1, stderr: $err}
|
||||
}
|
||||
)
|
||||
# Create QCOW2 disk using bash-check helper
|
||||
let result = (bash-check $"qemu-img create -f qcow2 ($disk_path) ($size_gb)G")
|
||||
|
||||
if $result.exit_code != 0 {
|
||||
return {
|
||||
success: false
|
||||
error: $result.stderr
|
||||
path: null
|
||||
}
|
||||
# Guard: Check result
|
||||
if (is-err $result) {
|
||||
return {success: false, error: $result.err, path: null}
|
||||
}
|
||||
|
||||
{
|
||||
success: true
|
||||
path: $disk_path
|
||||
size_gb: $size_gb
|
||||
format: "qcow2"
|
||||
}
|
||||
{success: true, path: $disk_path, size_gb: $size_gb, format: "qcow2"}
|
||||
}

@ -35,26 +35,39 @@ def start-scheduler-background [interval_minutes: int]: record {
# Create scheduler script
create-scheduler-script $interval_minutes $scheduler_script

# Start in background
try {
bash -c $"nohup nu ($scheduler_script) > /tmp/vm-cleanup-scheduler.log 2>&1 &"

let pid = (bash -c "echo $!" | str trim)

# Save PID
bash -c $"echo ($pid) > ($scheduler_file)"

{
success: true
pid: $pid
message: "Cleanup scheduler started in background"
}
} catch {|err|
{
# Start in background (no try-catch)
let start_result = (do { bash -c $"nohup nu ($scheduler_script) > /tmp/vm-cleanup-scheduler.log 2>&1 &" } | complete)
if $start_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to start scheduler: ($start_result.stderr)"
}
}

let pid_result = (do { bash -c "echo $!" } | complete)
if $pid_result.exit_code != 0 {
return {
success: false
error: $"Failed to get scheduler PID: ($pid_result.stderr)"
}
}

let pid = ($pid_result.stdout | str trim)

# Save PID (no try-catch)
let save_pid_result = (do { bash -c $"echo ($pid) > ($scheduler_file)" } | complete)
if $save_pid_result.exit_code != 0 {
return {
success: false
error: $"Failed to save scheduler PID: ($save_pid_result.stderr)"
}
}

{
success: true
pid: $pid
message: "Cleanup scheduler started in background"
}
}
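# Quick reference for the do/complete replacement used throughout this commit:
# `complete` captures the exit code and both output streams instead of throwing,
# so failures become branchable data. A generic sketch (command name is a placeholder):
let result = (do { ^some-external-command --flag } | complete)
if $result.exit_code != 0 {
    # $result.stderr holds the captured error output
    return {success: false, error: $result.stderr}
}
let output = ($result.stdout | str trim)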

export def "stop-cleanup-scheduler" []: record {
@ -69,24 +82,40 @@ export def "stop-cleanup-scheduler" []: record {
}
}

try {
let pid = (open $scheduler_file | str trim)

bash -c $"kill ($pid) 2>/dev/null || true"

bash -c $"rm -f ($scheduler_file)"

{
success: true
pid: $pid
message: "Scheduler stopped"
}
} catch {|err|
{
# Load scheduler PID (no try-catch)
let pid_result = (do { open $scheduler_file | str trim } | complete)
if $pid_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to read scheduler PID: ($pid_result.stderr)"
}
}

let pid = ($pid_result.stdout)

# Kill scheduler process (no try-catch)
let kill_result = (do { bash -c $"kill ($pid) 2>/dev/null || true" } | complete)
if $kill_result.exit_code != 0 {
return {
success: false
error: $"Failed to kill scheduler: ($kill_result.stderr)"
}
}

# Remove PID file (no try-catch)
let rm_result = (do { bash -c $"rm -f ($scheduler_file)" } | complete)
if $rm_result.exit_code != 0 {
return {
success: false
error: $"Failed to remove PID file: ($rm_result.stderr)"
}
}

{
success: true
pid: $pid
message: "Scheduler stopped"
}
}

export def "get-cleanup-scheduler-status" []: record {
@ -102,43 +131,48 @@ export def "get-cleanup-scheduler-status" []: record {
}
}

try {
let pid = (open $scheduler_file | str trim)
# Load scheduler PID (no try-catch)
let pid_result = (do { open $scheduler_file | str trim } | complete)
if $pid_result.exit_code != 0 {
return {
running: false
error: $"Failed to read scheduler PID: ($pid_result.stderr)"
}
}

# Check if process exists
let is_running = (
try {
bash -c $"kill -0 ($pid) 2>/dev/null && echo 'true' || echo 'false'" | str trim
} catch {
"false"
}
)
let pid = ($pid_result.stdout)

let log_exists = ($log_file | path exists)
let last_log_lines = (
if $log_exists {
try {
bash -c $"tail -5 ($log_file)"
| lines
} catch {
[]
}
# Check if process exists (no try-catch)
let check_result = (do { bash -c $"kill -0 ($pid) 2>/dev/null && echo 'true' || echo 'false'" } | complete)
let is_running = (
if $check_result.exit_code == 0 {
($check_result.stdout | str trim)
} else {
"false"
}
)

let log_exists = ($log_file | path exists)

# Read log file if it exists (no try-catch)
let last_log_lines = (
if $log_exists {
let log_result = (do { bash -c $"tail -5 ($log_file)" } | complete)
if $log_result.exit_code == 0 {
($log_result.stdout | lines)
} else {
[]
}
)
} else {
[]
}
)

{
running: ($is_running == "true")
pid: $pid
log_file: $log_file
recent_logs: $last_log_lines
}
} catch {|err|
{
running: false
error: $err
}
{
running: ($is_running == "true")
pid: $pid
log_file: $log_file
recent_logs: $last_log_lines
}
}

@ -220,21 +254,21 @@ export def "schedule-vm-cleanup" [

let persist_file = (get-persistence-file $vm_name)

try {
bash -c $"cat > ($persist_file) << 'EOF'\n($updated | to json)\nEOF"

{
success: true
vm_name: $vm_name
scheduled_cleanup_at: $cleanup_time
message: $"Cleanup scheduled for ($vm_name)"
}
} catch {|err|
{
# Schedule cleanup (no try-catch)
let schedule_result = (do { bash -c $"cat > ($persist_file) << 'EOF'\n($updated | to json)\nEOF" } | complete)
if $schedule_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to schedule cleanup: ($schedule_result.stderr)"
}
}

{
success: true
vm_name: $vm_name
scheduled_cleanup_at: $cleanup_time
message: $"Cleanup scheduled for ($vm_name)"
}
}

export def "cancel-vm-cleanup" [
@ -264,20 +298,20 @@ export def "cancel-vm-cleanup" [

let persist_file = (get-persistence-file $vm_name)

try {
bash -c $"cat > ($persist_file) << 'EOF'\n($updated | to json)\nEOF"

{
success: true
vm_name: $vm_name
message: "Cleanup cancelled for VM"
}
} catch {|err|
{
# Cancel cleanup (no try-catch)
let cancel_result = (do { bash -c $"cat > ($persist_file) << 'EOF'\n($updated | to json)\nEOF" } | complete)
if $cancel_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to cancel cleanup: ($cancel_result.stderr)"
}
}

{
success: true
vm_name: $vm_name
message: "Cleanup cancelled for VM"
}
}

export def "get-cleanup-queue" []: table {

@ -2,6 +2,7 @@
#
# Detects available hypervisor capabilities on host system.
# Follows Rule 1 (single purpose) and Rule 2 (explicit types).
# Error handling: do/complete pattern (no try-catch)

export def "detect-hypervisors" []: table {
"""Detect all available hypervisors on the system"""
@ -56,27 +57,20 @@ def detect-kvm []: record {
def detect-libvirt []: record {
"""Detect libvirt daemon"""

# Check if package is installed
# Check if package is installed (no try-catch)
let installed = (
try {
virsh --version -q | length > 0
} catch {
false
}
let result = (do { virsh --version -q } | complete)
$result.exit_code == 0 and (($result.stdout | str length) > 0)
)

if not $installed {
return null
}

# Check if service is running
# Check if service is running (no try-catch)
let running = (
try {
systemctl is-active --quiet libvirtd
true
} catch {
false
}
let result = (do { systemctl is-active --quiet libvirtd } | complete)
$result.exit_code == 0
)

# Check libvirt socket
@ -95,13 +89,10 @@ def detect-libvirt []: record {
def detect-qemu []: record {
"""Detect QEMU emulator"""

# Check if QEMU is installed
# Check if QEMU is installed (no try-catch)
let installed = (
try {
qemu-system-x86_64 --version | length > 0
} catch {
false
}
let result = (do { qemu-system-x86_64 --version } | complete)
$result.exit_code == 0 and (($result.stdout | str length) > 0)
)

if not $installed {
@ -128,26 +119,20 @@ def detect-qemu []: record {
def detect-docker []: record {
"""Detect Docker Desktop VM support (macOS/Windows)"""

# Check if Docker is installed
# Check if Docker is installed (no try-catch)
let docker_installed = (
try {
docker --version | length > 0
} catch {
false
}
let result = (do { docker --version } | complete)
$result.exit_code == 0 and (($result.stdout | str length) > 0)
)

if not $docker_installed {
return null
}

# Check Docker Desktop (via context)
# Check Docker Desktop (via context) (no try-catch)
let is_desktop = (
try {
docker context ls | grep "desktop" | length > 0
} catch {
false
}
let result = (do { docker context ls } | complete)
$result.exit_code == 0 and (($result.stdout | lines | where {|l| $l =~ "desktop"} | length) > 0)
)

{
@ -212,9 +197,10 @@ export def "check-vm-capability" [host: string]: record {
can_run_vms: (($hypervisors | length) > 0)
available_hypervisors: $hypervisors
primary_backend: (
try {
# Guard: Ensure at least one hypervisor detected before calling get-primary-hypervisor
if ($hypervisors | length) > 0 {
get-primary-hypervisor
} catch {
} else {
"none"
}
)
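# Note on the installed/is_desktop checks above: nushell's `length` counts list
# items, while strings need `str length`; matching lines are counted via `lines`
# first. Two quick examples of the distinction:
"abc" | str length      # => 3 (characters in a string)
["a" "b" "c"] | length  # => 3 (items in a list)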

@ -247,20 +247,17 @@ export def "delete-golden-image" [
}
}

# Delete image and cache
try {
bash -c $"rm -f ($image_path)"
remove-image-cache $name
# Delete image and cache (no try-catch)
let rm_result = (do { bash -c $"rm -f ($image_path)" } | complete)
if $rm_result.exit_code != 0 {
return {success: false, error: $"Failed to delete image: ($rm_result.stderr)"}
}

{
success: true
message: $"Image '($name)' deleted"
}
} catch {|err|
{
success: false
error: $err
}
remove-image-cache $name

{
success: true
message: $"Image '($name)' deleted"
}
}

@ -328,16 +325,19 @@ def create-base-disk [
let image_path = (get-image-path $name)
let image_dir = ($image_path | path dirname)

# Ensure directory exists
bash -c $"mkdir -p ($image_dir)" | complete

try {
bash -c $"qemu-img create -f qcow2 ($image_path) ($size_gb)G" | complete

{success: true}
} catch {|err|
{success: false, error: $err}
# Ensure directory exists (no try-catch)
let mkdir_result = (do { bash -c $"mkdir -p ($image_dir)" } | complete)
if $mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create directory: ($mkdir_result.stderr)"}
}

# Create QCOW2 image (no try-catch)
let create_result = (do { bash -c $"qemu-img create -f qcow2 ($image_path) ($size_gb)G" } | complete)
if $create_result.exit_code != 0 {
return {success: false, error: $"Failed to create disk: ($create_result.stderr)"}
}

{success: true}
}

def install-base-os [
@ -349,14 +349,13 @@

let image_path = (get-image-path $name)

# Use cloud-init image as base
try {
bash -c $"qemu-img create -b /var/lib/libvirt/images/($base_os)-($os_version).qcow2 -f qcow2 ($image_path)" | complete

{success: true}
} catch {|err|
{success: false, error: $err}
# Use cloud-init image as base (no try-catch)
let os_result = (do { bash -c $"qemu-img create -b /var/lib/libvirt/images/($base_os)-($os_version).qcow2 -f qcow2 ($image_path)" } | complete)
if $os_result.exit_code != 0 {
return {success: false, error: $"Failed to create base OS: ($os_result.stderr)"}
}

{success: true}
}

def install-taskservs-in-image [
@ -373,16 +372,15 @@
let cloud_init = (generate-taskserv-cloud-init $taskservs)
let image_path = (get-image-path $name)

try {
# Write cloud-init data to image
bash -c $"virt-copy-in -a ($image_path) /dev/stdin /var/lib/cloud/instance/user-data.txt << 'EOF'
# Write cloud-init data to image (no try-catch)
let copy_result = (do { bash -c $"virt-copy-in -a ($image_path) /dev/stdin /var/lib/cloud/instance/user-data.txt << 'EOF'
($cloud_init)
EOF" | complete

{success: true}
} catch {|err|
{success: false, error: $err}
EOF" } | complete)
if $copy_result.exit_code != 0 {
return {success: false, error: $"Failed to install taskservs: ($copy_result.stderr)"}
}

{success: true}
}

def optimize-image [
@ -392,17 +390,19 @@

let image_path = (get-image-path $name)

try {
# Compress image
bash -c $"qemu-img convert -f qcow2 -O qcow2 -c ($image_path) ($image_path).tmp && mv ($image_path).tmp ($image_path)" | complete

# Shrink image
bash -c $"virt-sparsify --compress ($image_path) ($image_path).tmp && mv ($image_path).tmp ($image_path)" | complete

{success: true}
} catch {|err|
{success: false, error: $err}
# Compress image (no try-catch)
let compress_result = (do { bash -c $"qemu-img convert -f qcow2 -O qcow2 -c ($image_path) ($image_path).tmp && mv ($image_path).tmp ($image_path)" } | complete)
if $compress_result.exit_code != 0 {
return {success: false, error: $"Failed to compress image: ($compress_result.stderr)"}
}

# Shrink image (no try-catch)
let shrink_result = (do { bash -c $"virt-sparsify --compress ($image_path) ($image_path).tmp && mv ($image_path).tmp ($image_path)" } | complete)
if $shrink_result.exit_code != 0 {
return {success: false, error: $"Failed to shrink image: ($shrink_result.stderr)"}
}

{success: true}
}

def calculate-image-checksum [
@ -437,27 +437,31 @@ def cache-image [
let cache_dir = (get-cache-directory)
let cache_path = $"($cache_dir)/($name).qcow2"

bash -c $"mkdir -p ($cache_dir)" | complete

try {
bash -c $"cp -p ($image_path) ($cache_path)" | complete

# Save cache metadata
let cache_meta = {
image_name: $name
cache_path: $cache_path
checksum: $checksum
cached_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
accessed_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
access_count: 0
}

save-cache-metadata $name $cache_meta

{success: true}
} catch {|err|
{success: false, error: $err}
# Ensure cache directory exists (no try-catch)
let mkdir_result = (do { bash -c $"mkdir -p ($cache_dir)" } | complete)
if $mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create cache directory: ($mkdir_result.stderr)"}
}

# Copy image to cache (no try-catch)
let cp_result = (do { bash -c $"cp -p ($image_path) ($cache_path)" } | complete)
if $cp_result.exit_code != 0 {
return {success: false, error: $"Failed to cache image: ($cp_result.stderr)"}
}

# Save cache metadata
let cache_meta = {
image_name: $name
cache_path: $cache_path
checksum: $checksum
cached_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
accessed_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
access_count: 0
}

save-cache-metadata $name $cache_meta

{success: true}
}

export def "build-image-from-vm" [
@ -486,26 +490,25 @@ export def "build-image-from-vm" [
# Get VM disk path
let disk_path = $vm_info.disk_path

try {
# Copy VM disk to image directory
let image_path = (get-image-path $image_name)
bash -c $"cp ($disk_path) ($image_path)" | complete
# Copy VM disk to image directory (no try-catch)
let image_path = (get-image-path $image_name)
let cp_result = (do { bash -c $"cp ($disk_path) ($image_path)" } | complete)
if $cp_result.exit_code != 0 {
return {success: false, error: $"Failed to copy VM disk: ($cp_result.stderr)"}
}

# Calculate checksum
let checksum = (calculate-image-checksum $image_path)
# Calculate checksum
let checksum = (calculate-image-checksum $image_path)

# Create version entry
create-image-version $image_name "1.0.0" $image_path $checksum $description
# Create version entry
create-image-version $image_name "1.0.0" $image_path $checksum $description

{
success: true
image_name: $image_name
image_path: $image_path
source_vm: $vm_name
checksum: $checksum
}
} catch {|err|
{success: false, error: $err}
{
success: true
image_name: $image_name
image_path: $image_path
source_vm: $vm_name
checksum: $checksum
}
}


@ -18,23 +18,28 @@ export def "cache-initialize" []: record {
"{{paths.workspace}}/vms/image-usage"
]

try {
# Initialize cache directories (no try-catch)
let init_results = (
$cache_dirs
| each {|dir|
bash -c $"mkdir -p ($dir)" | complete
| each {|dir|
do { bash -c $"mkdir -p ($dir)" } | complete
}
)

{
success: true
message: "Cache system initialized"
cache_dirs: $cache_dirs
}
} catch {|err|
{
# Guard: Check if all directories created successfully
let failed = ($init_results | where exit_code != 0)
if ($failed | length) > 0 {
return {
success: false
error: $err
error: $"Failed to create cache directories: ($failed | get 0 | get stderr)"
}
}

{
success: true
message: "Cache system initialized"
cache_dirs: $cache_dirs
}
}

export def "cache-add" [
@ -56,51 +61,59 @@ export def "cache-add" [
let cache_meta_dir = "{{paths.workspace}}/vms/cache-meta"
let cache_path = $"($cache_dir)/($image_name).qcow2"

try {
# Copy to cache
bash -c $"cp -p ($image_path) ($cache_path)" | complete
# Copy to cache (no try-catch)
let copy_result = (do { bash -c $"cp -p ($image_path) ($cache_path)" } | complete)
if $copy_result.exit_code != 0 {
return {success: false, error: $"Failed to copy image to cache: ($copy_result.stderr)"}
}

# Calculate checksum
let checksum = (bash -c $"sha256sum ($cache_path) | cut -d' ' -f1" | str trim)
# Calculate checksum (no try-catch)
let checksum_result = (do { bash -c $"sha256sum ($cache_path) | cut -d' ' -f1" } | complete)
if $checksum_result.exit_code != 0 {
return {success: false, error: $"Failed to calculate checksum: ($checksum_result.stderr)"}
}

# Calculate expiration
let expires_at = (
(date now) + (($ttl_days * 24 * 60 * 60) * 1_000_000_000ns)
| format date "%Y-%m-%dT%H:%M:%SZ"
)
let checksum = ($checksum_result.stdout | str trim)

# Save metadata
let meta = {
cache_id: (generate-cache-id)
image_name: $image_name
storage_path: $cache_path
disk_size_gb: (get-file-size-gb $cache_path)
cached_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
accessed_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
expires_at: $expires_at
ttl_days: $ttl_days
is_valid: true
checksum: $checksum
access_count: 0
hit_count: 0
}
# Calculate expiration
let expires_at = (
(date now) + (($ttl_days * 24 * 60 * 60) * 1_000_000_000ns)
| format date "%Y-%m-%dT%H:%M:%SZ"
)

bash -c $"mkdir -p ($cache_meta_dir)" | complete
bash -c $"cat > ($cache_meta_dir)/($image_name).json << 'EOF'\n($meta | to json)\nEOF" | complete
# Save metadata (no try-catch)
let meta = {
cache_id: (generate-cache-id)
image_name: $image_name
storage_path: $cache_path
disk_size_gb: (get-file-size-gb $cache_path)
cached_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
accessed_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
expires_at: $expires_at
ttl_days: $ttl_days
is_valid: true
checksum: $checksum
access_count: 0
hit_count: 0
}

{
success: true
cache_id: $meta.cache_id
image_name: $image_name
cache_path: $cache_path
disk_size_gb: $meta.disk_size_gb
expires_at: $expires_at
}
} catch {|err|
{
success: false
error: $err
}
let mkdir_result = (do { bash -c $"mkdir -p ($cache_meta_dir)" } | complete)
if $mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create metadata directory: ($mkdir_result.stderr)"}
}

let save_result = (do { bash -c $"cat > ($cache_meta_dir)/($image_name).json << 'EOF'\n($meta | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save metadata: ($save_result.stderr)"}
}

{
success: true
cache_id: $meta.cache_id
image_name: $image_name
cache_path: $cache_path
disk_size_gb: $meta.disk_size_gb
expires_at: $expires_at
}
}
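# Design note: the metadata writes above round-trip through a bash heredoc.
# Nushell can also write files natively; a sketch of the equivalent using the
# same record, which avoids shell quoting of the JSON payload entirely:
mkdir $cache_meta_dir
$meta | to json | save --force $"($cache_meta_dir)/($image_name).json"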

@ -124,67 +137,85 @@ export def "cache-get" [
}
}

try {
let meta = (open $meta_file | from json)

# Check if expired
let now = (date now | format date "%Y-%m-%dT%H:%M:%SZ")
if $meta.expires_at < $now {
return {
success: false
error: "Cache expired"
hit: false
expired: true
}
}

# Check if file still exists
if (not ($meta.storage_path | path exists)) {
return {
success: false
error: "Cached file not found"
hit: false
}
}

# Verify checksum
let current_checksum = (bash -c $"sha256sum ($meta.storage_path) | cut -d' ' -f1" | str trim)
if $current_checksum != $meta.checksum {
return {
success: false
error: "Cache checksum mismatch"
hit: false
}
}

# Update access stats
let updated_meta = (
$meta
| upsert accessed_at (date now | format date "%Y-%m-%dT%H:%M:%SZ")
| upsert access_count ($meta.access_count + 1)
| upsert hit_count ($meta.hit_count + 1)
)

bash -c $"cat > ($meta_file) << 'EOF'\n($updated_meta | to json)\nEOF" | complete

{
success: true
hit: true
image_name: $image_name
cache_path: $meta.storage_path
disk_size_gb: $meta.disk_size_gb
checksum: $meta.checksum
created_at: $meta.cached_at
expires_at: $meta.expires_at
access_count: ($meta.access_count + 1)
}
} catch {|err|
{
# Load cache metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to load cache metadata: ($meta_result.stderr)"
hit: false
}
}

let meta = ($meta_result.stdout)

# Check if expired
let now = (date now | format date "%Y-%m-%dT%H:%M:%SZ")
if $meta.expires_at < $now {
return {
success: false
error: "Cache expired"
hit: false
expired: true
}
}

# Check if file still exists
if (not ($meta.storage_path | path exists)) {
return {
success: false
error: "Cached file not found"
hit: false
}
}

# Verify checksum (no try-catch)
let checksum_result = (do { bash -c $"sha256sum ($meta.storage_path) | cut -d' ' -f1" } | complete)
if $checksum_result.exit_code != 0 {
return {
success: false
error: $"Failed to verify checksum: ($checksum_result.stderr)"
hit: false
}
}

let current_checksum = ($checksum_result.stdout | str trim)
if $current_checksum != $meta.checksum {
return {
success: false
error: "Cache checksum mismatch"
hit: false
}
}

# Update access stats (no try-catch)
let updated_meta = (
$meta
| upsert accessed_at (date now | format date "%Y-%m-%dT%H:%M:%SZ")
| upsert access_count ($meta.access_count + 1)
| upsert hit_count ($meta.hit_count + 1)
)

let update_result = (do { bash -c $"cat > ($meta_file) << 'EOF'\n($updated_meta | to json)\nEOF" } | complete)
if $update_result.exit_code != 0 {
return {
success: false
error: $"Failed to update cache metadata: ($update_result.stderr)"
hit: false
}
}

{
success: true
hit: true
image_name: $image_name
cache_path: $meta.storage_path
disk_size_gb: $meta.disk_size_gb
checksum: $meta.checksum
created_at: $meta.cached_at
expires_at: $meta.expires_at
access_count: ($meta.access_count + 1)
}
}
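# Note: the expiry check above compares ISO-8601 timestamp strings with `<`.
# That is sound only because the %Y-%m-%dT%H:%M:%SZ format is fixed-width, so
# lexicographic order matches chronological order:
"2024-01-02T00:00:00Z" < "2024-10-01T00:00:00Z"  # => true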

export def "cache-list" [
@ -203,8 +234,10 @@ export def "cache-list" [
bash -c $"ls -1 ($cache_meta_dir)/*.json 2>/dev/null"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)
let now = (date now | format date "%Y-%m-%dT%H:%M:%SZ")
let is_expired = $meta.expires_at < $now

@ -222,7 +255,7 @@ export def "cache-list" [
status: (if $is_expired {"expired"} else {"valid"})
}
}
} catch {
} else {
null
}
}
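# Note: the guard above returns null for unparsable files; nushell's `each`
# omits nothing-results from its output list, so broken metadata files simply
# drop out of the listing without explicit filtering:
[1 2 3] | each {|x| if $x != 2 { $x } }  # => [1, 3]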
@ -255,20 +288,24 @@ export def "cache-cleanup" [
bash -c $"ls -1 ($cache_meta_dir)/*.json 2>/dev/null"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Load metadata (no try-catch)
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)
let now = (date now | format date "%Y-%m-%dT%H:%M:%SZ")

if $meta.expires_at < $now {
# Delete cache file
bash -c $"rm -f ($meta.storage_path)" | complete
# Delete metadata
bash -c $"rm -f ($file)" | complete
# Delete cache file (no try-catch)
let rm_cache_result = (do { bash -c $"rm -f ($meta.storage_path)" } | complete)
# Delete metadata (no try-catch)
let rm_meta_result = (do { bash -c $"rm -f ($file)" } | complete)

$cleaned_count += 1
$cleaned_size_gb += $meta.disk_size_gb
if ($rm_cache_result.exit_code == 0) and ($rm_meta_result.exit_code == 0) {
$cleaned_count += 1
$cleaned_size_gb += $meta.disk_size_gb
}
}
} catch {}
}
}
}

@ -382,23 +419,23 @@ export def "version-create" [

let version_file = $"($version_dir)/($version).json"

try {
bash -c $"cat > ($version_file) << 'EOF'\n($version_meta | to json)\nEOF" | complete

{
success: true
image_name: $image_name
version: $version
version_file: $version_file
checksum: $checksum
disk_size_gb: $disk_size
}
} catch {|err|
{
# Save version metadata (no try-catch)
let save_result = (do { bash -c $"cat > ($version_file) << 'EOF'\n($version_meta | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to save version metadata: ($save_result.stderr)"
}
}

{
success: true
image_name: $image_name
version: $version
version_file: $version_file
checksum: $checksum
disk_size_gb: $disk_size
}
}

export def "version-list" [
@ -417,8 +454,10 @@ export def "version-list" [
bash -c $"ls -1 ($version_dir)/*.json 2>/dev/null | sort -V -r"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)
{
version: $meta.version
created_at: $meta.created_at
@ -427,7 +466,7 @@ export def "version-list" [
deprecated: $meta.deprecated
description: (if ($meta.description | is-empty) {"-"} else {$meta.description})
}
} catch {
} else {
null
}
}
@ -448,12 +487,14 @@ export def "version-get" [
return {success: false, error: "Version not found"}
}

try {
let meta = (open $version_file | from json)
{success: true} | merge $meta
} catch {|err|
{success: false, error: $err}
# Load version metadata (no try-catch)
let meta_result = (do { open $version_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load version: ($meta_result.stderr)"}
}

let meta = ($meta_result.stdout)
{success: true} | merge $meta
}

export def "version-deprecate" [
@ -473,25 +514,31 @@ export def "version-deprecate" [
return {success: false, error: "Version not found"}
}

try {
let meta = (open $version_file | from json)
let updated = (
$meta
| upsert deprecated true
| upsert replacement_version $replacement
)
# Load version metadata (no try-catch)
let meta_result = (do { open $version_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load version: ($meta_result.stderr)"}
}

bash -c $"cat > ($version_file) << 'EOF'\n($updated | to json)\nEOF" | complete
let meta = ($meta_result.stdout)
let updated = (
$meta
| upsert deprecated true
| upsert replacement_version $replacement
)

{
success: true
image_name: $image_name
version: $version
deprecated: true
replacement: $replacement
}
} catch {|err|
{success: false, error: $err}
# Save updated metadata (no try-catch)
let save_result = (do { bash -c $"cat > ($version_file) << 'EOF'\n($updated | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save deprecation: ($save_result.stderr)"}
}

{
success: true
image_name: $image_name
version: $version
deprecated: true
replacement: $replacement
}
}

@ -512,30 +559,39 @@ export def "version-delete" [
return {success: false, error: "Version not found"}
}

try {
let meta = (open $version_file | from json)
# Load version metadata (no try-catch)
let meta_result = (do { open $version_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load version: ($meta_result.stderr)"}
}

if (($meta.usage_count // 0) > 0) and (not $force) {
return {
success: false
error: $"Version in use by ($meta.usage_count) VMs"
vms_using: ($meta.vm_instances // [])
}
let meta = ($meta_result.stdout)

if (($meta.usage_count // 0) > 0) and (not $force) {
return {
success: false
error: $"Version in use by ($meta.usage_count) VMs"
vms_using: ($meta.vm_instances // [])
}
}

# Delete image file
bash -c $"rm -f ($meta.image_path)" | complete
# Delete metadata
bash -c $"rm -f ($version_file)" | complete
# Delete image file (no try-catch)
let rm_img_result = (do { bash -c $"rm -f ($meta.image_path)" } | complete)
if $rm_img_result.exit_code != 0 {
return {success: false, error: $"Failed to delete image file: ($rm_img_result.stderr)"}
}

{
success: true
image_name: $image_name
version: $version
message: "Version deleted"
}
} catch {|err|
{success: false, error: $err}
# Delete metadata (no try-catch)
let rm_meta_result = (do { bash -c $"rm -f ($version_file)" } | complete)
if $rm_meta_result.exit_code != 0 {
return {success: false, error: $"Failed to delete metadata: ($rm_meta_result.stderr)"}
}

{
success: true
image_name: $image_name
version: $version
message: "Version deleted"
}
}

@ -557,22 +613,27 @@ export def "version-rollback" [
return {success: false, error: "Target version not found"}
}

try {
let target_meta = (open $to_file | from json)
# Load target version metadata (no try-catch)
let target_result = (do { open $to_file | from json } | complete)
if $target_result.exit_code != 0 {
return {success: false, error: $"Failed to load target version: ($target_result.stderr)"}
}

# Update default version pointer
let version_meta_dir = $"{{paths.workspace}}/vms/versions/($image_name)"
bash -c $"echo ($to_version) > ($version_meta_dir)/.default" | complete
let target_meta = ($target_result.stdout)

{
success: true
image_name: $image_name
previous_version: $from_version
current_version: $to_version
message: $"Rolled back to version ($to_version)"
}
} catch {|err|
{success: false, error: $err}
# Update default version pointer (no try-catch)
let version_meta_dir = $"{{paths.workspace}}/vms/versions/($image_name)"
let rollback_result = (do { bash -c $"echo ($to_version) > ($version_meta_dir)/.default" } | complete)
if $rollback_result.exit_code != 0 {
return {success: false, error: $"Failed to update version pointer: ($rollback_result.stderr)"}
}

{
success: true
image_name: $image_name
previous_version: $from_version
current_version: $to_version
message: $"Rolled back to version ($to_version)"
}
}


@ -48,19 +48,19 @@ export def "deployment-create" [
instances: []
}

try {
bash -c $"cat > ($deployment_dir)/($name).json << 'EOF'\n($deployment | to json)\nEOF" | complete
# Save deployment metadata (no try-catch)
let save_result = (do { bash -c $"cat > ($deployment_dir)/($name).json << 'EOF'\n($deployment | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save deployment: ($save_result.stderr)"}
}

{
success: true
deployment: $name
version: $version
tiers: $tiers
replicas: $replicas
networks: ($networks | length)
}
} catch {|err|
{success: false, error: $err}
{
success: true
deployment: $name
version: $version
tiers: $tiers
replicas: $replicas
networks: ($networks | length)
}
}

@ -95,69 +95,74 @@ export def "deployment-deploy" [
}
}

try {
let meta = (open $deployment_file | from json)
# Load deployment metadata (no try-catch)
let meta_result = (do { open $deployment_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load deployment: ($meta_result.stderr)"}
}

# Deploy each tier
let instances = (
$meta.tiers
| enumerate
| each {|tier_info|
let tier_num = $tier_info.index + 1
let tier_name = $tier_info.item
let meta = ($meta_result.stdout)

# Deploy replicas for this tier
(0..$meta.replicas - 1)
| each {|replica|
let instance_name = $"($name)-($tier_name)-($replica + 1)"
# Deploy each tier (no try-catch)
let instances = (
$meta.tiers
| enumerate
| each {|tier_info|
let tier_num = $tier_info.index + 1
let tier_name = $tier_info.item

# Create instance
let result = (
nested-vm-create $instance_name "host-vm" \
--cpu 2 \
--memory 2048 \
--disk 20 \
--networks [$"($name)-($tier_name)"] \
--auto-start
)
# Deploy replicas for this tier
(0..$meta.replicas - 1)
| each {|replica|
let instance_name = $"($name)-($tier_name)-($replica + 1)"

if $result.success {
{
tier: $tier_name
instance: $instance_name
status: "deployed"
}
} else {
{
tier: $tier_name
instance: $instance_name
status: "failed"
error: $result.error
}
# Create instance
let result = (
nested-vm-create $instance_name "host-vm" \
--cpu 2 \
--memory 2048 \
--disk 20 \
--networks [$"($name)-($tier_name)"] \
--auto-start
)

if $result.success {
{
tier: $tier_name
instance: $instance_name
status: "deployed"
}
} else {
{
tier: $tier_name
instance: $instance_name
status: "failed"
error: $result.error
}
}
}
}
| flatten
)

# Update deployment with instances
let updated = (
$meta
| upsert status "deployed"
| upsert instances $instances
| upsert deployed_at (date now | format date "%Y-%m-%dT%H:%M:%SZ")
)

bash -c $"cat > ($deployment_file) << 'EOF'\n($updated | to json)\nEOF" | complete

{
success: true
deployment: $name
instances_deployed: ($instances | length)
instances: $instances
}
} catch {|err|
{success: false, error: $err}
| flatten
)

# Update deployment with instances (no try-catch)
let updated = (
$meta
| upsert status "deployed"
| upsert instances $instances
| upsert deployed_at (date now | format date "%Y-%m-%dT%H:%M:%SZ")
)

let update_result = (do { bash -c $"cat > ($deployment_file) << 'EOF'\n($updated | to json)\nEOF" } | complete)
if $update_result.exit_code != 0 {
return {success: false, error: $"Failed to update deployment: ($update_result.stderr)"}
}

{
success: true
deployment: $name
instances_deployed: ($instances | length)
instances: $instances
}
}

@ -175,8 +180,10 @@ export def "deployment-list" []: table {
bash -c $"ls -1 ($deployment_dir)/*.json 2>/dev/null"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)
{
name: $meta.name
version: $meta.version
@ -186,7 +193,7 @@ export def "deployment-list" []: table {
total_instances: (($meta.instances // []) | length)
created: $meta.created_at
}
} catch {
} else {
null
}
}
@ -207,25 +214,27 @@ export def "deployment-info" [
return {success: false, error: "Deployment not found"}
}

try {
let meta = (open $deployment_file | from json)
# Load deployment metadata (no try-catch)
let meta_result = (do { open $deployment_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load deployment: ($meta_result.stderr)"}
}

{
success: true
name: $meta.name
version: $meta.version
tiers: $meta.tiers
replicas: $meta.replicas
strategy: $meta.strategy
status: $meta.status
networks: ($meta.networks // [])
instances: ($meta.instances // [])
total_instances: (($meta.instances // []) | length)
created: $meta.created_at
deployed: ($meta.deployed_at // "not deployed")
}
} catch {|err|
{success: false, error: $err}
let meta = ($meta_result.stdout)

{
success: true
name: $meta.name
version: $meta.version
tiers: $meta.tiers
replicas: $meta.replicas
strategy: $meta.strategy
status: $meta.status
networks: ($meta.networks // [])
instances: ($meta.instances // [])
total_instances: (($meta.instances // []) | length)
created: $meta.created_at
deployed: ($meta.deployed_at // "not deployed")
}
}

@ -246,29 +255,37 @@ export def "deployment-delete" [
return {success: false, error: "Deployment not found"}
}

try {
let meta = (open $deployment_file | from json)
# Load deployment metadata (no try-catch)
let meta_result = (do { open $deployment_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load deployment: ($meta_result.stderr)"}
}

# Delete instances
$meta.instances | each {|instance|
nested-vm-delete $instance.instance --force=$force
let meta = ($meta_result.stdout)

# Delete instances (no try-catch)
$meta.instances | each {|instance|
nested-vm-delete $instance.instance --force=$force
}

# Delete networks (no try-catch)
$meta.networks | each {|network|
let del_result = (do { bash -c $"ip link delete ($network) 2>/dev/null || true" } | complete)
if $del_result.exit_code != 0 {
null # Ignore network deletion errors
}
}

# Delete networks
$meta.networks | each {|network|
bash -c $"ip link delete ($network) 2>/dev/null || true" | complete
}
# Delete metadata (no try-catch)
let rm_result = (do { bash -c $"rm -f ($deployment_file)" } | complete)
if $rm_result.exit_code != 0 {
return {success: false, error: $"Failed to delete deployment metadata: ($rm_result.stderr)"}
}

# Delete metadata
bash -c $"rm -f ($deployment_file)" | complete

{
success: true
message: "Deployment deleted"
instances_deleted: ($meta.instances | length)
}
} catch {|err|
{success: false, error: $err}
{
success: true
message: "Deployment deleted"
instances_deleted: ($meta.instances | length)
}
}

@ -290,53 +307,55 @@ export def "deployment-scale" [
return {success: false, error: "Deployment not found"}
}

try {
let meta = (open $deployment_file | from json)
# Load deployment metadata (no try-catch)
let meta_result = (do { open $deployment_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load deployment: ($meta_result.stderr)"}
}

# Get current instances for this tier
let tier_instances = (
$meta.instances
| where {|i| ($i.tier == $tier)}
)
let meta = ($meta_result.stdout)

let current_count = ($tier_instances | length)
# Get current instances for this tier (no try-catch)
let tier_instances = (
$meta.instances
| where {|i| ($i.tier == $tier)}
)

if $replicas == $current_count {
return {
success: true
message: "No scaling needed"
tier: $tier
current_replicas: $current_count
}
}
let current_count = ($tier_instances | length)

if $replicas > $current_count {
# Scale up
let new_replicas = $replicas - $current_count
(0..$new_replicas - 1)
| each {|i|
let instance_name = $"($name)-($tier)-($current_count + $i + 1)"
nested-vm-create $instance_name "host-vm" \
--networks [$"($name)-($tier)"] \
--auto-start
}
} else {
# Scale down
let to_delete = ($tier_instances | last ($current_count - $replicas))
$to_delete | each {|instance|
nested-vm-delete $instance.instance
}
}

{
if $replicas == $current_count {
return {
success: true
message: "No scaling needed"
tier: $tier
previous_replicas: $current_count
new_replicas: $replicas
message: $"Scaled ($tier) to ($replicas) replicas"
current_replicas: $current_count
}
} catch {|err|
{success: false, error: $err}
}

if $replicas > $current_count {
# Scale up (no try-catch)
let new_replicas = $replicas - $current_count
(0..$new_replicas - 1)
| each {|i|
let instance_name = $"($name)-($tier)-($current_count + $i + 1)"
nested-vm-create $instance_name "host-vm" \
--networks [$"($name)-($tier)"] \
--auto-start
}
} else {
# Scale down (no try-catch)
let to_delete = ($tier_instances | last ($current_count - $replicas))
$to_delete | each {|instance|
nested-vm-delete $instance.instance
}
}

{
success: true
tier: $tier
previous_replicas: $current_count
new_replicas: $replicas
message: $"Scaled ($tier) to ($replicas) replicas"
}
}
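# Note: the scale-up loop relies on nushell ranges being inclusive on both
# ends, which is why the bound is `$new_replicas - 1`:
let n = 3
0..($n - 1) | each {|i| $i }  # => [0, 1, 2]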

@ -356,34 +375,36 @@ export def "deployment-health" [
return {success: false, error: "Deployment not found"}
}

try {
let meta = (open $deployment_file | from json)
# Load deployment metadata (no try-catch)
let meta_result = (do { open $deployment_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load deployment: ($meta_result.stderr)"}
}

let instance_health = (
$meta.instances
| each {|instance|
{
instance: $instance.instance
tier: $instance.tier
status: $instance.status
}
let meta = ($meta_result.stdout)

let instance_health = (
$meta.instances
| each {|instance|
{
instance: $instance.instance
tier: $instance.tier
status: $instance.status
}
)

let healthy = ($instance_health | where status == "deployed" | length)
let unhealthy = ($instance_health | where status == "failed" | length)

{
success: true
deployment: $name
total_instances: ($instance_health | length)
healthy: $healthy
unhealthy: $unhealthy
health_percent: (($healthy / ($instance_health | length) * 100) | math round -p 1)
instances: $instance_health
}
} catch {|err|
{success: false, error: $err}
)

let healthy = ($instance_health | where status == "deployed" | length)
let unhealthy = ($instance_health | where status == "failed" | length)

{
success: true
deployment: $name
total_instances: ($instance_health | length)
healthy: $healthy
unhealthy: $unhealthy
health_percent: (($healthy / ($instance_health | length) * 100) | math round -p 1)
instances: $instance_health
}
}


@ -70,34 +70,36 @@ export def "nested-vm-create" [
status: "created"
}

try {
# Create VM disk
bash -c $"qemu-img create -f qcow2 ($nested_dir)/($name).qcow2 ($disk)G" | complete
# Create VM disk (no try-catch)
let create_result = (do { bash -c $"qemu-img create -f qcow2 ($nested_dir)/($name).qcow2 ($disk)G" } | complete)
if $create_result.exit_code != 0 {
return {success: false, error: $"Failed to create VM disk: ($create_result.stderr)"}
}

# Save metadata
bash -c $"cat > ($nested_dir)/($name).json << 'EOF'\n($nested_meta | to json)\nEOF" | complete
# Save metadata (no try-catch)
let save_result = (do { bash -c $"cat > ($nested_dir)/($name).json << 'EOF'\n($nested_meta | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save metadata: ($save_result.stderr)"}
}

# Connect to networks
$networks | each {|network|
network-connect $network $name
}
# Connect to networks (no try-catch)
$networks | each {|network|
network-connect $network $name
}

# Attach volumes
$volumes | each {|volume|
volume-attach $volume $name
}
# Attach volumes (no try-catch)
$volumes | each {|volume|
volume-attach $volume $name
}

{
success: true
nested_vm: $name
parent_vm: $parent_vm
cpu: $cpu
memory_mb: $memory
disk_gb: $disk
nesting_depth: ($nesting_depth + 1)
}
} catch {|err|
{success: false, error: $err}
{
success: true
nested_vm: $name
parent_vm: $parent_vm
cpu: $cpu
memory_mb: $memory
disk_gb: $disk
nesting_depth: ($nesting_depth + 1)
}
}

@ -117,8 +119,10 @@ export def "nested-vm-list" [
bash -c $"ls -1 ($nested_dir)/*.json 2>/dev/null"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)

if ($parent_vm | is-empty) or ($meta.parent_vm == $parent_vm) {
{
@ -132,7 +136,7 @@ export def "nested-vm-list" [
created: $meta.created_at
}
}
} catch {
} else {
null
}
}
@ -153,26 +157,28 @@ export def "nested-vm-info" [
return {success: false, error: "Nested VM not found"}
}

try {
let meta = (open $meta_file | from json)
# Load metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load metadata: ($meta_result.stderr)"}
}

{
success: true
name: $meta.name
parent_vm: $meta.parent_vm
nesting_depth: $meta.nesting_depth
cpu: $meta.cpu
memory_mb: $meta.memory_mb
disk_gb: $meta.disk_gb
networks: $meta.networks
volumes: $meta.volumes
auto_start: $meta.auto_start
nested_virt: $meta.nested_virt
created: $meta.created_at
status: $meta.status
}
} catch {|err|
{success: false, error: $err}
let meta = ($meta_result.stdout)

{
success: true
name: $meta.name
parent_vm: $meta.parent_vm
nesting_depth: $meta.nesting_depth
cpu: $meta.cpu
memory_mb: $meta.memory_mb
disk_gb: $meta.disk_gb
networks: $meta.networks
volumes: $meta.volumes
auto_start: $meta.auto_start
nested_virt: $meta.nested_virt
created: $meta.created_at
status: $meta.status
}
}

@ -191,28 +197,37 @@ export def "nested-vm-delete" [
return {success: false, error: "Nested VM not found"}
}

try {
let meta = (open $meta_file | from json)
# Load metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load metadata: ($meta_result.stderr)"}
}

# Detach volumes and networks
$meta.volumes | each {|volume|
volume-detach $volume $name
}
let meta = ($meta_result.stdout)

$meta.networks | each {|network|
network-disconnect $network $name
}
# Detach volumes and networks (no try-catch)
$meta.volumes | each {|volume|
volume-detach $volume $name
}

# Delete VM disk and metadata
bash -c $"rm -f ($nested_dir)/($name).qcow2" | complete
bash -c $"rm -f ($meta_file)" | complete
$meta.networks | each {|network|
network-disconnect $network $name
}

{
success: true
message: "Nested VM deleted"
}
} catch {|err|
{success: false, error: $err}
# Delete VM disk and metadata (no try-catch)
let rm_disk_result = (do { bash -c $"rm -f ($nested_dir)/($name).qcow2" } | complete)
if $rm_disk_result.exit_code != 0 {
return {success: false, error: $"Failed to delete VM disk: ($rm_disk_result.stderr)"}
}

let rm_meta_result = (do { bash -c $"rm -f ($meta_file)" } | complete)
if $rm_meta_result.exit_code != 0 {
return {success: false, error: $"Failed to delete metadata: ($rm_meta_result.stderr)"}
}

{
success: true
message: "Nested VM deleted"
}
}

@ -261,19 +276,19 @@ export def "container-create" [
status: "created"
}

try {
bash -c $"cat > ($containers_dir)/($name).json << 'EOF'\n($container_meta | to json)\nEOF" | complete
# Save container metadata (no try-catch)
let save_result = (do { bash -c $"cat > ($containers_dir)/($name).json << 'EOF'\n($container_meta | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save container metadata: ($save_result.stderr)"}
}

{
success: true
container: $name
image: $container_meta.image
parent_vm: $parent_vm
cpu_millicores: $cpu_millicores
memory_mb: $memory_mb
}
} catch {|err|
{success: false, error: $err}
{
success: true
container: $name
image: $container_meta.image
parent_vm: $parent_vm
cpu_millicores: $cpu_millicores
memory_mb: $memory_mb
}
}

@ -293,8 +308,10 @@ export def "container-list" [
bash -c $"ls -1 ($containers_dir)/*.json 2>/dev/null"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)

if ($parent_vm | is-empty) or ($meta.parent_vm == $parent_vm) {
{
@ -307,7 +324,7 @@ export def "container-list" [
created: $meta.created_at
}
}
} catch {
} else {
null
}
}
@ -328,15 +345,15 @@ export def "container-delete" [
return {success: false, error: "Container not found"}
}

try {
bash -c $"rm -f ($meta_file)" | complete
# Delete container metadata (no try-catch)
let rm_result = (do { bash -c $"rm -f ($meta_file)" } | complete)
if $rm_result.exit_code != 0 {
return {success: false, error: $"Failed to delete container: ($rm_result.stderr)"}
}

{
success: true
message: "Container deleted"
}
} catch {|err|
{success: false, error: $err}
{
success: true
message: "Container deleted"
}
}
|
||||
|
||||
|
||||
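Every hunk in this commit applies the same mechanical conversion: instead of wrapping an external command in try-catch, the command runs inside a `do` closure piped to `complete`, which captures stdout, stderr, and exit_code in a record, and the caller branches on the exit code. A minimal sketch of the pattern (the command and message here are illustrative, not taken from the diff):

let result = (do { bash -c "some-command --flag" } | complete)
if $result.exit_code != 0 {
    # a non-zero exit becomes data to inspect, not an exception to catch
    return {success: false, error: $"command failed: ($result.stderr)"}
}
$result.stdout
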
@ -2,6 +2,7 @@
#
# Manages virtual networks, VLANs, and network policies.
# Rule 1: Single purpose, Rule 5: Atomic operations
# Error handling: do/complete pattern for bash commands (no try-catch)

export def "network-create" [
name: string # Network name
@ -39,26 +40,44 @@ export def "network-create" [
status: "created"
}

try {
# Create network bridge or overlay
if $type == "bridge" {
bash -c $"ip link add ($name) type bridge" | complete
bash -c $"ip addr add ($network_meta.gateway)/24 dev ($name)" | complete
bash -c $"ip link set ($name) up" | complete
# Create network bridge or overlay (no try-catch)
if $type == "bridge" {
let link_result = (do {
bash -c $"ip link add ($name) type bridge"
} | complete)
if $link_result.exit_code != 0 {
return {success: false, error: $"Failed to create bridge: ($link_result.stderr)"}
}

# Save metadata
bash -c $"cat > ($network_dir)/($name).json << 'EOF'\n($network_meta | to json)\nEOF" | complete

{
success: true
network: $name
subnet: $subnet
gateway: $network_meta.gateway
vlan_id: $vlan_id
let addr_result = (do {
bash -c $"ip addr add ($network_meta.gateway)/24 dev ($name)"
} | complete)
if $addr_result.exit_code != 0 {
return {success: false, error: $"Failed to add address: ($addr_result.stderr)"}
}
} catch {|err|
{success: false, error: $err}

let up_result = (do {
bash -c $"ip link set ($name) up"
} | complete)
if $up_result.exit_code != 0 {
return {success: false, error: $"Failed to bring up network: ($up_result.stderr)"}
}
}

# Save metadata
let save_result = (do {
bash -c $"cat > ($network_dir)/($name).json << 'EOF'\n($network_meta | to json)\nEOF"
} | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save network metadata: ($save_result.stderr)"}
}

{
success: true
network: $name
subnet: $subnet
gateway: $network_meta.gateway
vlan_id: $vlan_id
}
}

@ -76,8 +95,10 @@ export def "network-list" []: table {
bash -c $"ls -1 ($network_dir)/*.json 2>/dev/null"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)
{
name: $meta.name
type: $meta.type
@ -87,7 +108,7 @@ export def "network-list" []: table {
dhcp: $meta.dhcp_enabled
created: $meta.created_at
}
} catch {
} else {
null
}
}
@ -108,24 +129,26 @@ export def "network-info" [
return {success: false, error: "Network not found"}
}

try {
let meta = (open $meta_file | from json)
let connected = (get-network-connections $name)
# Load network metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load network metadata: ($meta_result.stderr)"}
}

{
success: true
name: $meta.name
type: $meta.type
subnet: $meta.subnet
gateway: $meta.gateway
vlan_id: $meta.vlan_id
dhcp_enabled: $meta.dhcp_enabled
created: $meta.created_at
connected_vms: ($connected | length)
vm_list: $connected
}
} catch {|err|
{success: false, error: $err}
let meta = ($meta_result.stdout)
let connected = (get-network-connections $name)

{
success: true
name: $meta.name
type: $meta.type
subnet: $meta.subnet
gateway: $meta.gateway
vlan_id: $meta.vlan_id
dhcp_enabled: $meta.dhcp_enabled
created: $meta.created_at
connected_vms: ($connected | length)
vm_list: $connected
}
}

@ -145,28 +168,41 @@ export def "network-connect" [
return {success: false, error: "Network not found"}
}

try {
let meta = (open $meta_file | from json)
let ip = (if ($static_ip | is-empty) {allocate-dhcp-ip $network_name} else {$static_ip})
# Load metadata and connect VM (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load network metadata: ($meta_result.stderr)"}
}

# Record connection
let connection = {
vm_name: $vm_name
ip_address: $ip
connected_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
}
let meta = ($meta_result.stdout)
let ip = (if ($static_ip | is-empty) {allocate-dhcp-ip $network_name} else {$static_ip})

bash -c $"mkdir -p ($network_dir)/connections" | complete
bash -c $"cat >> ($network_dir)/connections/($network_name).txt << 'EOF'\n($vm_name)|($ip)\nEOF" | complete
# Record connection
let connection = {
vm_name: $vm_name
ip_address: $ip
connected_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
}

{
success: true
network: $network_name
vm: $vm_name
ip_address: $ip
}
} catch {|err|
{success: false, error: $err}
let mkdir_result = (do {
bash -c $"mkdir -p ($network_dir)/connections"
} | complete)
if $mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create connections directory: ($mkdir_result.stderr)"}
}

let append_result = (do {
bash -c $"cat >> ($network_dir)/connections/($network_name).txt << 'EOF'\n($vm_name)|($ip)\nEOF"
} | complete)
if $append_result.exit_code != 0 {
return {success: false, error: $"Failed to record connection: ($append_result.stderr)"}
}

{
success: true
network: $network_name
vm: $vm_name
ip_address: $ip
}
}

@ -185,15 +221,18 @@ export def "network-disconnect" [
return {success: false, error: "No connections found"}
}

try {
bash -c $"grep -v ($vm_name) ($connections_file) > ($connections_file).tmp && mv ($connections_file).tmp ($connections_file)" | complete
# Disconnect VM from network (no try-catch)
let disconnect_result = (do {
bash -c $"grep -v ($vm_name) ($connections_file) > ($connections_file).tmp && mv ($connections_file).tmp ($connections_file)"
} | complete)

{
success: true
message: "VM disconnected from network"
}
} catch {|err|
{success: false, error: $err}
if $disconnect_result.exit_code != 0 {
return {success: false, error: $"Failed to disconnect VM: ($disconnect_result.stderr)"}
}

{
success: true
message: "VM disconnected from network"
}
}

@ -228,18 +267,21 @@ export def "network-policy-create" [
created_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
}

try {
bash -c $"cat > ($policy_dir)/($name).json << 'EOF'\n($policy | to json)\nEOF" | complete
# Save network policy (no try-catch)
let save_result = (do {
bash -c $"cat > ($policy_dir)/($name).json << 'EOF'\n($policy | to json)\nEOF"
} | complete)

{
success: true
policy: $name
direction: $direction
protocol: $protocol
action: $action
}
} catch {|err|
{success: false, error: $err}
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save policy: ($save_result.stderr)"}
}

{
success: true
policy: $name
direction: $direction
protocol: $protocol
action: $action
}
}

@ -257,8 +299,10 @@ export def "network-policy-list" []: table {
bash -c $"ls -1 ($policy_dir)/*.json 2>/dev/null"
| lines
| each {|file|
try {
let policy = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let policy = ($json_result.stdout)
{
name: $policy.name
direction: $policy.direction
@ -268,7 +312,7 @@ export def "network-policy-list" []: table {
action: $policy.action
created: $policy.created_at
}
} catch {
} else {
null
}
}

@ -31,12 +31,13 @@ export def "record-vm-creation" [
mac_address: ""
}

try {
bash -c $"cat > ($state_file) << 'EOF'\n($state | to json)\nEOF"
{success: true}
} catch {|err|
{success: false, error: $err}
# Save state (no try-catch)
let save_result = (do { bash -c $"cat > ($state_file) << 'EOF'\n($state | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to record VM creation: ($save_result.stderr)"}
}

{success: true}
}

export def "get-vm-state" [
@ -47,9 +48,11 @@ export def "get-vm-state" [
let state_dir = (get-vm-state-dir)
let state_file = $"($state_dir)/($vm_name).json"

try {
open $state_file | from json
} catch {
# Guard: Check if state file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $state_file | from json } | complete)
if $json_result.exit_code == 0 {
$json_result.stdout
} else {
{}
}
}
@ -75,12 +78,13 @@ export def "update-vm-state" [
let state_dir = (get-vm-state-dir)
let state_file = $"($state_dir)/($vm_name).json"

try {
bash -c $"cat > ($state_file) << 'EOF'\n($updated | to json)\nEOF"
{success: true}
} catch {|err|
{success: false, error: $err}
# Update state (no try-catch)
let update_result = (do { bash -c $"cat > ($state_file) << 'EOF'\n($updated | to json)\nEOF" } | complete)
if $update_result.exit_code != 0 {
return {success: false, error: $"Failed to update VM state: ($update_result.stderr)"}
}

{success: true}
}

export def "remove-vm-state" [
@ -91,12 +95,13 @@ export def "remove-vm-state" [
let state_dir = (get-vm-state-dir)
let state_file = $"($state_dir)/($vm_name).json"

try {
bash -c $"rm -f ($state_file)"
{success: true}
} catch {|err|
{success: false, error: $err}
# Remove state file (no try-catch)
let rm_result = (do { bash -c $"rm -f ($state_file)" } | complete)
if $rm_result.exit_code != 0 {
return {success: false, error: $"Failed to remove VM state: ($rm_result.stderr)"}
}

{success: true}
}

export def "list-all-vms" []: table {
@ -108,21 +113,26 @@ export def "list-all-vms" []: table {
return []
}

try {
bash -c $"ls -1 ($state_dir)/*.json 2>/dev/null"
| lines
| where {|f| ($f | length) > 0}
| map {|f|
try {
open $f | from json
} catch {
{}
}
}
| where {|v| ($v | length) > 0}
} catch {
[]
# List state files (no try-catch)
let ls_result = (do { bash -c $"ls -1 ($state_dir)/*.json 2>/dev/null" } | complete)
if $ls_result.exit_code != 0 {
return []
}

$ls_result.stdout
| lines
| where {|f| ($f | length) > 0}
| each {|f|
# Guard: Check if file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $f | from json } | complete)
if $json_result.exit_code == 0 {
$json_result.stdout
} else {
null
}
}
| compact
| where {|v| ($v | length) > 0}
}

def get-vm-state-dir []: string {

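Reads use the same capture but flip the branch: success yields the parsed value, failure degrades to a default instead of returning an error record. The guard used for the state files above, sketched with an illustrative $state_file:

let json_result = (do { open $state_file | from json } | complete)
if $json_result.exit_code == 0 {
    $json_result.stdout
} else {
    {}  # a missing or unparseable state file degrades to an empty record
}
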
@ -105,14 +105,16 @@ def install-hypervisor-taskserv [host: string, taskserv: string]: record {
}
)

# Execute command (no try-catch)
let exec_result = (do { shell-exec-safe $cmd } | complete)
let result = (
try {
(shell-exec-safe $cmd)
} catch {|err|
if $exec_result.exit_code == 0 {
$exec_result.stdout
} else {
{
taskserv: $taskserv
success: false
error: $err
error: $exec_result.stderr
}
}
)
@ -131,19 +133,14 @@ def install-hypervisor-taskserv [host: string, taskserv: string]: record {
def shell-exec-safe [cmd: string]: record {
"""Execute shell command safely"""

let result = (
try {
(bash -c $cmd | complete)
} catch {|err|
error make {msg: $err}
}
)
# Execute command (no try-catch)
let result = (do { bash -c $cmd } | complete)

if $result.exit_code != 0 {
error make {msg: $result.stderr}
return {success: false, error: $result.stderr}
}

$result
{success: true, stdout: $result.stdout}
}

export def "get-host-hypervisor-status" [host: string]: table {

@ -47,16 +47,12 @@ export def "vm-ssh" [
bash -c $"ssh -o StrictHostKeyChecking=no root@($ip)"
{success: true}
} else {
# Execute command
try {
let output = (bash -c $"ssh -o StrictHostKeyChecking=no root@($ip) '($command)'" | complete)
{
success: ($output.exit_code == 0)
output: $output.stdout
error: $output.stderr
}
} catch {|err|
{success: false, error: $err}
# Execute command (no try-catch)
let output = (do { bash -c $"ssh -o StrictHostKeyChecking=no root@($ip) '($command)'" } | complete)
{
success: ($output.exit_code == 0)
output: $output.stdout
error: $output.stderr
}
}
}
@ -78,17 +74,13 @@ export def "vm-scp-to" [
return {success: false, error: $"SSH not ready on ($ip)"}
}

try {
let result = (
bash -c $"scp -r -o StrictHostKeyChecking=no ($local_path) root@($ip):($remote_path)" | complete
)
# Copy file via SCP (no try-catch)
let result = (do { bash -c $"scp -r -o StrictHostKeyChecking=no ($local_path) root@($ip):($remote_path)" } | complete)

{
success: ($result.exit_code == 0)
message: $"Copied ($local_path) to ($ip):($remote_path)"
}
} catch {|err|
{success: false, error: $err}
{
success: ($result.exit_code == 0)
message: $"Copied ($local_path) to ($ip):($remote_path)"
error: (if $result.exit_code != 0 { $result.stderr } else { "" })
}
}

@ -109,17 +101,13 @@ export def "vm-scp-from" [
return {success: false, error: $"SSH not ready on ($ip)"}
}

try {
let result = (
bash -c $"scp -r -o StrictHostKeyChecking=no root@($ip):($remote_path) ($local_path)" | complete
)
# Copy file via SCP (no try-catch)
let result = (do { bash -c $"scp -r -o StrictHostKeyChecking=no root@($ip):($remote_path) ($local_path)" } | complete)

{
success: ($result.exit_code == 0)
message: $"Copied ($ip):($remote_path) to ($local_path)"
}
} catch {|err|
{success: false, error: $err}
{
success: ($result.exit_code == 0)
message: $"Copied ($ip):($remote_path) to ($local_path)"
error: (if $result.exit_code != 0 { $result.stderr } else { "" })
}
}

@ -165,13 +153,8 @@ def wait-for-ssh [ip: string, --timeout: int = 300]: bool {
return false
}

let ssh_check = (
try {
bash -c $"ssh-keyscan -t rsa ($ip) 2>/dev/null" | complete
} catch {
{exit_code: 1}
}
)
# Check SSH availability (no try-catch)
let ssh_check = (do { bash -c $"ssh-keyscan -t rsa ($ip) 2>/dev/null" } | complete)

if $ssh_check.exit_code == 0 {
return true
@ -198,10 +181,10 @@ export def "vm-provision" [
# Write script to temp file
let temp_script = $"/tmp/provision-($vm_name)-($env.RANDOM).sh"

try {
bash -c $"cat > ($temp_script) << 'SCRIPT'\n($script)\nSCRIPT"
} catch {|err|
return {success: false, error: $"Failed to create script: ($err)"}
# Create script file (no try-catch)
let create_result = (do { bash -c $"cat > ($temp_script) << 'SCRIPT'\n($script)\nSCRIPT" } | complete)
if $create_result.exit_code != 0 {
return {success: false, error: $"Failed to create script: ($create_result.stderr)"}
}

# SCP script to VM

@ -76,13 +76,8 @@ def start-permanent-vm-on-boot [vm_info: record]: record {
return $result_so_far
}

let try_result = (
try {
vm-start $vm_name
} catch {|err|
{success: false, error: $err}
}
)
# Attempt to start VM (no try-catch, guard pattern)
let try_result = (vm-start $vm_name)

if $try_result.success {
{success: true, attempt: ($attempt + 1)}
@ -139,20 +134,20 @@ export def "save-vm-state-snapshot" [

let snapshot_file = (get-snapshot-file $vm_name)

try {
bash -c $"cat > ($snapshot_file) << 'EOF'\n($snapshot | to json)\nEOF"

{
success: true
vm_name: $vm_name
message: "State snapshot saved"
}
} catch {|err|
{
# Save snapshot (no try-catch)
let save_result = (do { bash -c $"cat > ($snapshot_file) << 'EOF'\n($snapshot | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to save state snapshot: ($save_result.stderr)"
}
}

{
success: true
vm_name: $vm_name
message: "State snapshot saved"
}
}

export def "restore-vm-state-snapshot" [
@ -169,26 +164,27 @@ export def "restore-vm-state-snapshot" [
}
}

try {
let snapshot = (open $snapshot_file | from json)

# Only restore if it was running
if $snapshot.vm_state != "running" {
return {
success: true
message: "VM was not running at snapshot time"
}
}

# Start the VM
vm-start $vm_name

} catch {|err|
{
# Load snapshot (no try-catch)
let snap_result = (do { open $snapshot_file | from json } | complete)
if $snap_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to load snapshot: ($snap_result.stderr)"
}
}

let snapshot = ($snap_result.stdout)

# Only restore if it was running
if $snapshot.vm_state != "running" {
return {
success: true
message: "VM was not running at snapshot time"
}
}

# Start the VM (no try-catch)
vm-start $vm_name
}

export def "register-vm-autostart" [
@ -220,21 +216,21 @@ export def "register-vm-autostart" [

let persist_file = (get-persistence-file $vm_name)

try {
bash -c $"cat > ($persist_file) << 'EOF'\n($updated | to json)\nEOF"

{
success: true
vm_name: $vm_name
start_order: $start_order
message: "VM registered for autostart"
}
} catch {|err|
{
# Save autostart configuration (no try-catch)
let save_result = (do { bash -c $"cat > ($persist_file) << 'EOF'\n($updated | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {
success: false
error: $err
error: $"Failed to save autostart configuration: ($save_result.stderr)"
}
}

{
success: true
vm_name: $vm_name
start_order: $start_order
message: "VM registered for autostart"
}
}

export def "get-vms-pending-recovery" []: table {
@ -278,13 +274,8 @@ export def "wait-for-vm-ssh" [
}
}

let ssh_check = (
try {
vm-ssh $vm_name --command "echo ok" | complete
} catch {
{exit_code: 1}
}
)
# Check SSH availability (no try-catch)
let ssh_check = (do { vm-ssh $vm_name --command "echo ok" } | complete)

if $ssh_check.exit_code == 0 {
return {
@ -316,13 +307,20 @@ nu -c "use lib_provisioning/vm/state_recovery.nu *; recover-vms-on-boot"
echo "VM recovery complete"
'

try {
bash -c $"cat > ($script_path) << 'SCRIPT'\n($script_content)\nSCRIPT"
bash -c $"chmod +x ($script_path)"
} catch {|err|
# Create recovery script (no try-catch)
let create_result = (do { bash -c $"cat > ($script_path) << 'SCRIPT'\n($script_content)\nSCRIPT" } | complete)
if $create_result.exit_code != 0 {
return {
success: false
error: $"Failed to create recovery script: ($err)"
error: $"Failed to create recovery script: ($create_result.stderr)"
}
}

let chmod_result = (do { bash -c $"chmod +x ($script_path)" } | complete)
if $chmod_result.exit_code != 0 {
return {
success: false
error: $"Failed to set script permissions: ($chmod_result.stderr)"
}
}

@ -343,14 +341,28 @@ StandardError=journal
WantedBy=multi-user.target
'

try {
bash -c $"cat > ($service_path) << 'SERVICE'\n($service_content)\nSERVICE"
bash -c "systemctl daemon-reload || true"
bash -c "systemctl enable vm-recovery.service || true"
} catch {|err|
# Create systemd service (no try-catch)
let service_write_result = (do { bash -c $"cat > ($service_path) << 'SERVICE'\n($service_content)\nSERVICE" } | complete)
if $service_write_result.exit_code != 0 {
return {
success: false
error: $"Failed to create systemd service: ($err)"
error: $"Failed to write systemd service file: ($service_write_result.stderr)"
}
}

let daemon_reload_result = (do { bash -c "systemctl daemon-reload || true" } | complete)
if $daemon_reload_result.exit_code != 0 {
return {
success: false
error: $"Failed to reload systemd: ($daemon_reload_result.stderr)"
}
}

let enable_result = (do { bash -c "systemctl enable vm-recovery.service || true" } | complete)
if $enable_result.exit_code != 0 {
return {
success: false
error: $"Failed to enable systemd service: ($enable_result.stderr)"
}
}

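Multi-step operations such as the recovery-script and systemd-service setup above turn into chains of guards, one do/complete per step, each returning early with a step-specific error. Condensed shape of such a chain, where $path and $content are illustrative placeholders:

# $path and $content stand in for the real script path and body
let write = (do { bash -c $"cat > ($path) << 'EOF'\n($content)\nEOF" } | complete)
if $write.exit_code != 0 { return {success: false, error: $write.stderr} }

let chmod = (do { bash -c $"chmod +x ($path)" } | complete)
if $chmod.exit_code != 0 { return {success: false, error: $chmod.stderr} }

{success: true}
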
@ -2,7 +2,9 @@
#
# Manages permanent and temporary VMs with lifecycle tracking.
# Rule 1: Single purpose, Rule 4: Pure functions, Rule 5: Atomic operations
# Error handling: Result pattern (hybrid, do/complete for bash operations)

use ../result.nu *
use ./persistence.nu *
use ./lifecycle.nu *

@ -33,23 +35,16 @@ export def "register-permanent-vm" [
start_order: 100
}

# Save persistence data
# Save persistence data using json-write helper (no inline try-catch)
let persist_file = (get-persistence-file $vm_config.name)
let write_result = (json-write $persist_file $persistence_info)

try {
bash -c $"cat > ($persist_file) << 'EOF'\n($persistence_info | to json)\nEOF"

{
success: true
vm_name: $vm_config.name
message: "Registered as permanent VM"
}
} catch {|err|
{
success: false
error: $"Failed to register permanent VM: ($err)"
}
# Guard: Check write result
if (is-err $write_result) {
return {success: false, error: $write_result.err}
}

{success: true, vm_name: $vm_config.name, message: "Registered as permanent VM"}
}

export def "register-temporary-vm" [
@ -87,22 +82,19 @@ export def "register-temporary-vm" [
}

let persist_file = (get-persistence-file $vm_config.name)
let write_result = (json-write $persist_file $persistence_info)

try {
bash -c $"cat > ($persist_file) << 'EOF'\n($persistence_info | to json)\nEOF"
# Guard: Check write result
if (is-err $write_result) {
return {success: false, error: $write_result.err}
}

{
success: true
vm_name: $vm_config.name
ttl_hours: $ttl_hours
cleanup_scheduled_at: $cleanup_time
message: $"Registered as temporary VM (cleanup in ($ttl_hours) hours)"
}
} catch {|err|
{
success: false
error: $"Failed to register temporary VM: ($err)"
}
{
success: true
vm_name: $vm_config.name
ttl_hours: $ttl_hours
cleanup_scheduled_at: $cleanup_time
message: $"Registered as temporary VM (cleanup in ($ttl_hours) hours)"
}
}

@ -113,15 +105,16 @@ export def "get-vm-persistence-info" [

let persist_file = (get-persistence-file $vm_name)

try {
open $persist_file | from json
} catch {
{
vm_name: $vm_name
mode: "unknown"
error: "No persistence info found"
}
# Guard: File exists check
if not ($persist_file | path exists) {
return {vm_name: $vm_name, mode: "unknown", error: "No persistence info found"}
}

# Read using json-read helper (no inline try-catch)
(json-read $persist_file)
| match-result
{|data| $data} # On success, return data
{|_err| {vm_name: $vm_name, mode: "unknown", error: "No persistence info found"}} # On error, return default
}

export def "list-permanent-vms" []: table {
@ -133,26 +126,33 @@ export def "list-permanent-vms" []: table {
return []
}

try {
# Use do/complete for bash command (no try-catch)
let ls_result = (do {
bash -c $"ls -1 ($persist_dir)/*.json 2>/dev/null"
| lines
| where {|f| ($f | length) > 0}
| map {|f|
try {
let data = (open $f | from json)
if ($data.mode // "unknown") == "permanent" {
$data
} else {
null
}
} catch {
} | complete)

if $ls_result.exit_code != 0 {
return []
}

$ls_result.stdout
| lines
| where {|f| ($f | length) > 0}
| map {|f|
# Guard: Check if file can be opened and parsed as JSON
let json_result = (do { open $f | from json } | complete)
if $json_result.exit_code == 0 {
let data = ($json_result.stdout)
if ($data.mode // "unknown") == "permanent" {
$data
} else {
null
}
} else {
null
}
| compact
} catch {
[]
}
| compact
}

export def "list-temporary-vms" []: table {
@ -164,26 +164,33 @@ export def "list-temporary-vms" []: table {
return []
}

try {
# Use do/complete for bash command (no try-catch)
let ls_result = (do {
bash -c $"ls -1 ($persist_dir)/*.json 2>/dev/null"
| lines
| where {|f| ($f | length) > 0}
| map {|f|
try {
let data = (open $f | from json)
if ($data.mode // "unknown") == "temporary" {
$data
} else {
null
}
} catch {
} | complete)

if $ls_result.exit_code != 0 {
return []
}

$ls_result.stdout
| lines
| where {|f| ($f | length) > 0}
| map {|f|
# Guard: Check if file can be opened and parsed as JSON
let json_result = (do { open $f | from json } | complete)
if $json_result.exit_code == 0 {
let data = ($json_result.stdout)
if ($data.mode // "unknown") == "temporary" {
$data
} else {
null
}
} else {
null
}
| compact
} catch {
[]
}
| compact
}

export def "find-expired-vms" []: table {
@ -353,22 +360,25 @@ export def "extend-vm-ttl" [

let persist_file = (get-persistence-file $vm_name)

try {
# Use do/complete for bash command (no try-catch)
let write_result = (do {
bash -c $"cat > ($persist_file) << 'EOF'\n($updated_info | to json)\nEOF"
} | complete)

{
success: true
vm_name: $vm_name
additional_hours: $additional_hours
new_cleanup_time: $new_cleanup_time
message: $"Extended TTL by ($additional_hours) hours"
}
} catch {|err|
{
if $write_result.exit_code != 0 {
return {
success: false
error: $err
error: $write_result.stderr
}
}

{
success: true
vm_name: $vm_name
additional_hours: $additional_hours
new_cleanup_time: $new_cleanup_time
message: $"Extended TTL by ($additional_hours) hours"
}
}

def get-persistence-dir []: string {
@ -404,12 +414,16 @@ def update-cleanup-status [

let persist_file = (get-persistence-file $vm_name)

try {
# Use do/complete for bash command (no try-catch)
let write_result = (do {
bash -c $"cat > ($persist_file) << 'EOF'\n($updated | to json)\nEOF"
{success: true}
} catch {|err|
{success: false, error: $err}
} | complete)

if $write_result.exit_code != 0 {
return {success: false, error: $write_result.stderr}
}

{success: true}
}

export def "get-vm-persistence-stats" []: record {

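The persistence module above delegates to json-write, json-read, is-err, and match-result from ../result.nu, whose definitions are not part of this diff. A plausible shape for two of them, assuming an {ok: ...}/{err: ...} record convention; this is a hypothetical reconstruction, not the actual result.nu:

# Hypothetical sketch of the result.nu helpers (not shown in this diff)
def json-write [path: string, data: record] {
    let res = (do { $data | to json | save --force $path } | complete)
    if $res.exit_code == 0 { {ok: true} } else { {err: $res.stderr} }
}

def is-err [result: record] {
    "err" in ($result | columns)
}
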
@ -38,23 +38,29 @@ export def "volume-create" [
path: $"($volume_dir)/($name).img"
}

try {
# Create backing file
bash -c $"qemu-img create -f qcow2 ($volume_meta.path) ($size_gb)G" | complete
# Create backing file (no try-catch)
let create_result = (do { bash -c $"qemu-img create -f qcow2 ($volume_meta.path) ($size_gb)G" } | complete)
if $create_result.exit_code != 0 {
return {success: false, error: $"Failed to create volume: ($create_result.stderr)"}
}

# Save metadata
bash -c $"mkdir -p ($volume_dir)/meta" | complete
bash -c $"cat > ($volume_dir)/meta/($name).json << 'EOF'\n($volume_meta | to json)\nEOF" | complete
# Save metadata (no try-catch)
let mkdir_result = (do { bash -c $"mkdir -p ($volume_dir)/meta" } | complete)
if $mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create metadata directory: ($mkdir_result.stderr)"}
}

{
success: true
volume_name: $name
volume_path: $volume_meta.path
size_gb: $size_gb
mount_path: $mount_path
}
} catch {|err|
{success: false, error: $err}
let save_result = (do { bash -c $"cat > ($volume_dir)/meta/($name).json << 'EOF'\n($volume_meta | to json)\nEOF" } | complete)
if $save_result.exit_code != 0 {
return {success: false, error: $"Failed to save metadata: ($save_result.stderr)"}
}

{
success: true
volume_name: $name
volume_path: $volume_meta.path
size_gb: $size_gb
mount_path: $mount_path
}
}

@ -72,8 +78,10 @@ export def "volume-list" []: table {
bash -c $"ls -1 ($volume_dir)/meta/*.json 2>/dev/null"
| lines
| each {|file|
try {
let meta = (open $file | from json)
# Guard: Check if file can be opened and parsed as JSON (no try-catch)
let json_result = (do { open $file | from json } | complete)
if $json_result.exit_code == 0 {
let meta = ($json_result.stdout)
{
name: $meta.name
type: $meta.type
@ -82,7 +90,7 @@ export def "volume-list" []: table {
status: $meta.status
created: $meta.created_at
}
} catch {
} else {
null
}
}
@ -103,25 +111,27 @@ export def "volume-info" [
return {success: false, error: "Volume not found"}
}

try {
let meta = (open $meta_file | from json)
let usage = (
bash -c $"du -h ($meta.path) 2>/dev/null | cut -f1" | str trim
)
# Load metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load volume metadata: ($meta_result.stderr)"}
}

{
success: true
name: $meta.name
type: $meta.type
size_gb: $meta.size_gb
used: $usage
mount_path: $meta.mount_path
readonly: $meta.readonly
created: $meta.created_at
status: $meta.status
}
} catch {|err|
{success: false, error: $err}
let meta = ($meta_result.stdout)
let usage = (
bash -c $"du -h ($meta.path) 2>/dev/null | cut -f1" | str trim
)

{
success: true
name: $meta.name
type: $meta.type
size_gb: $meta.size_gb
used: $usage
mount_path: $meta.mount_path
readonly: $meta.readonly
created: $meta.created_at
status: $meta.status
}
}

@ -141,28 +151,37 @@ export def "volume-attach" [
return {success: false, error: "Volume not found"}
}

try {
let meta = (open $meta_file | from json)
let mount = (if ($mount_path | is-empty) {$meta.mount_path} else {$mount_path})
# Load metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load volume metadata: ($meta_result.stderr)"}
}

# Record attachment
let attachment = {
vm_name: $vm_name
attached_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
mount_path: $mount
}
let meta = ($meta_result.stdout)
let mount = (if ($mount_path | is-empty) {$meta.mount_path} else {$mount_path})

bash -c $"mkdir -p ($volume_dir)/attachments" | complete
bash -c $"cat >> ($volume_dir)/attachments/($volume_name).txt << 'EOF'\n($vm_name)|($mount)\nEOF" | complete
# Record attachment (no try-catch)
let attachment = {
vm_name: $vm_name
attached_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
mount_path: $mount
}

{
success: true
volume: $volume_name
vm: $vm_name
mount_path: $mount
}
} catch {|err|
{success: false, error: $err}
let mkdir_result = (do { bash -c $"mkdir -p ($volume_dir)/attachments" } | complete)
if $mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create attachments directory: ($mkdir_result.stderr)"}
}

let append_result = (do { bash -c $"cat >> ($volume_dir)/attachments/($volume_name).txt << 'EOF'\n($vm_name)|($mount)\nEOF" } | complete)
if $append_result.exit_code != 0 {
return {success: false, error: $"Failed to record attachment: ($append_result.stderr)"}
}

{
success: true
volume: $volume_name
vm: $vm_name
mount_path: $mount
}
}

@ -181,16 +200,15 @@ export def "volume-detach" [
return {success: false, error: "No attachments found"}
}

try {
# Remove attachment entry
bash -c $"grep -v ($vm_name) ($attachments_file) > ($attachments_file).tmp && mv ($attachments_file).tmp ($attachments_file)" | complete
# Remove attachment entry (no try-catch)
let detach_result = (do { bash -c $"grep -v ($vm_name) ($attachments_file) > ($attachments_file).tmp && mv ($attachments_file).tmp ($attachments_file)" } | complete)
if $detach_result.exit_code != 0 {
return {success: false, error: $"Failed to detach volume: ($detach_result.stderr)"}
}

{
success: true
message: $"Volume detached from VM"
}
} catch {|err|
{success: false, error: $err}
{
success: true
message: $"Volume detached from VM"
}
}

@ -210,36 +228,55 @@ export def "volume-snapshot" [
return {success: false, error: "Volume not found"}
}

try {
let meta = (open $meta_file | from json)
let snapshot_path = $"($volume_dir)/snapshots/($volume_name)/($snapshot_name).qcow2"
# Load metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load volume metadata: ($meta_result.stderr)"}
}

bash -c $"mkdir -p $(dirname ($snapshot_path))" | complete
let meta = ($meta_result.stdout)
let snapshot_path = $"($volume_dir)/snapshots/($volume_name)/($snapshot_name).qcow2"

# Create snapshot
bash -c $"qemu-img snapshot -c ($snapshot_name) ($meta.path)" | complete
bash -c $"qemu-img convert -f qcow2 -O qcow2 -o backing_file=($meta.path) ($snapshot_path)" | complete
let mkdir_result = (do { bash -c $"mkdir -p $(dirname ($snapshot_path))" } | complete)
if $mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create snapshot directory: ($mkdir_result.stderr)"}
}

# Save snapshot metadata
let snapshot_meta = {
name: $snapshot_name
volume: $volume_name
path: $snapshot_path
created_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
description: $description
}
# Create snapshot (no try-catch)
let snapshot_result = (do { bash -c $"qemu-img snapshot -c ($snapshot_name) ($meta.path)" } | complete)
if $snapshot_result.exit_code != 0 {
return {success: false, error: $"Failed to create snapshot: ($snapshot_result.stderr)"}
}

bash -c $"mkdir -p ($volume_dir)/snapshots/($volume_name)" | complete
bash -c $"cat > ($volume_dir)/snapshots/($volume_name)/($snapshot_name).json << 'EOF'\n($snapshot_meta | to json)\nEOF" | complete
let convert_result = (do { bash -c $"qemu-img convert -f qcow2 -O qcow2 -o backing_file=($meta.path) ($snapshot_path)" } | complete)
if $convert_result.exit_code != 0 {
return {success: false, error: $"Failed to convert snapshot: ($convert_result.stderr)"}
}

{
success: true
snapshot: $snapshot_name
volume: $volume_name
path: $snapshot_path
}
} catch {|err|
{success: false, error: $err}
# Save snapshot metadata (no try-catch)
let snapshot_meta = {
name: $snapshot_name
volume: $volume_name
path: $snapshot_path
created_at: (date now | format date "%Y-%m-%dT%H:%M:%SZ")
description: $description
}

let meta_mkdir_result = (do { bash -c $"mkdir -p ($volume_dir)/snapshots/($volume_name)" } | complete)
if $meta_mkdir_result.exit_code != 0 {
return {success: false, error: $"Failed to create snapshot metadata directory: ($meta_mkdir_result.stderr)"}
}

let meta_save_result = (do { bash -c $"cat > ($volume_dir)/snapshots/($volume_name)/($snapshot_name).json << 'EOF'\n($snapshot_meta | to json)\nEOF" } | complete)
if $meta_save_result.exit_code != 0 {
return {success: false, error: $"Failed to save snapshot metadata: ($meta_save_result.stderr)"}
}

{
success: true
snapshot: $snapshot_name
volume: $volume_name
path: $snapshot_path
}
}

@ -259,22 +296,34 @@ export def "volume-restore" [
return {success: false, error: "Snapshot not found"}
}

try {
let snapshot_meta = (open $snapshot_meta_file | from json)
let meta_file = $"($volume_dir)/meta/($volume_name).json"
let meta = (open $meta_file | from json)
# Load snapshot metadata (no try-catch)
let snap_result = (do { open $snapshot_meta_file | from json } | complete)
if $snap_result.exit_code != 0 {
return {success: false, error: $"Failed to load snapshot metadata: ($snap_result.stderr)"}
}

# Restore from snapshot
bash -c $"qemu-img snapshot -a ($snapshot_name) ($meta.path)" | complete
let snapshot_meta = ($snap_result.stdout)
let meta_file = $"($volume_dir)/meta/($volume_name).json"

{
success: true
message: $"Volume restored from snapshot"
volume: $volume_name
snapshot: $snapshot_name
}
} catch {|err|
{success: false, error: $err}
# Load volume metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load volume metadata: ($meta_result.stderr)"}
}

let meta = ($meta_result.stdout)

# Restore from snapshot (no try-catch)
let restore_result = (do { bash -c $"qemu-img snapshot -a ($snapshot_name) ($meta.path)" } | complete)
if $restore_result.exit_code != 0 {
return {success: false, error: $"Failed to restore snapshot: ($restore_result.stderr)"}
}

{
success: true
message: $"Volume restored from snapshot"
volume: $volume_name
snapshot: $snapshot_name
}
}

@ -293,31 +342,51 @@ export def "volume-delete" [
return {success: false, error: "Volume not found"}
}

try {
let meta = (open $meta_file | from json)
# Load metadata (no try-catch)
let meta_result = (do { open $meta_file | from json } | complete)
if $meta_result.exit_code != 0 {
return {success: false, error: $"Failed to load volume metadata: ($meta_result.stderr)"}
}

# Check if in use
let attachments_file = $"($volume_dir)/attachments/($name).txt"
if (($attachments_file | path exists) and (not $force)) {
let count = (bash -c $"wc -l < ($attachments_file)" | str trim | into int)
let meta = ($meta_result.stdout)

# Check if in use (no try-catch)
let attachments_file = $"($volume_dir)/attachments/($name).txt"
if (($attachments_file | path exists) and (not $force)) {
let count_result = (do { bash -c $"wc -l < ($attachments_file)" } | complete)
if $count_result.exit_code == 0 {
let count = ($count_result.stdout | str trim | into int)
return {
success: false
error: $"Volume in use by ($count) VM(s)"
}
}
}

# Delete files
bash -c $"rm -f ($meta.path)" | complete
bash -c $"rm -f ($meta_file)" | complete
bash -c $"rm -rf ($volume_dir)/snapshots/($name)" | complete
bash -c $"rm -f ($attachments_file)" | complete
# Delete files (no try-catch)
let rm_img_result = (do { bash -c $"rm -f ($meta.path)" } | complete)
if $rm_img_result.exit_code != 0 {
return {success: false, error: $"Failed to delete volume image: ($rm_img_result.stderr)"}
}

{
success: true
message: $"Volume deleted"
}
} catch {|err|
{success: false, error: $err}
let rm_meta_result = (do { bash -c $"rm -f ($meta_file)" } | complete)
if $rm_meta_result.exit_code != 0 {
return {success: false, error: $"Failed to delete metadata file: ($rm_meta_result.stderr)"}
}

let rm_snapshots_result = (do { bash -c $"rm -rf ($volume_dir)/snapshots/($name)" } | complete)
if $rm_snapshots_result.exit_code != 0 {
return {success: false, error: $"Failed to delete snapshots: ($rm_snapshots_result.stderr)"}
}

let rm_attachments_result = (do { bash -c $"rm -f ($attachments_file)" } | complete)
if $rm_attachments_result.exit_code != 0 {
return {success: false, error: $"Failed to delete attachments: ($rm_attachments_result.stderr)"}
}

{
success: true
message: $"Volume deleted"
}
}

@ -35,19 +35,22 @@ export def provisioning_init [
str replace "-h" "" | str replace $module "" | str trim | split row " "
)
if ($cmd_args | length) > 0 {
# _print $"---($module)-- ($env.PROVISIONING_NAME) -mod '($module)' ($cmd_args) help"
^$"((get-provisioning-name))" "-mod" $"($module | str replace ' ' '|')" ...$cmd_args help
# let str_mod_0 = ($cmd_args | try { get 0 } catch { "") }
# let str_mod_1 = ($cmd_args | try { get 1 } catch { "") }
# if $str_mod_1 != "" {
# let final_args = ($cmd_args | drop nth 0 1)
# _print $"---($module)-- ($env.PROVISIONING_NAME) -mod '($str_mod_0) ($str_mod_1)' ($cmd_args | drop nth 0) help"
# ^$"($env.PROVISIONING_NAME)" "-mod" $"'($str_mod_0) ($str_mod_1)'" ...$final_args help
# } else {
# let final_args = ($cmd_args | drop nth 0)
# _print $"---($module)-- ($env.PROVISIONING_NAME) -mod ($str_mod_0) ($cmd_args | drop nth 0) help"
# ^$"($env.PROVISIONING_NAME)" "-mod" ($str_mod_0) ...$final_args help
# }
# Refactored from try-catch to do/complete for explicit error handling
let str_mod_0_result = (do { $cmd_args | get 0 } | complete)
let str_mod_0 = if $str_mod_0_result.exit_code == 0 { ($str_mod_0_result.stdout | str trim) } else { "" }

let str_mod_1_result = (do { $cmd_args | get 1 } | complete)
let str_mod_1 = if $str_mod_1_result.exit_code == 0 { ($str_mod_1_result.stdout | str trim) } else { "" }

if $str_mod_1 != "" {
let final_args = ($cmd_args | drop nth 0 1)
^$"((get-provisioning-name))" "-mod" $"'($str_mod_0) ($str_mod_1)'" ...$final_args help
} else if $str_mod_0 != "" {
let final_args = ($cmd_args | drop nth 0)
^$"((get-provisioning-name))" "-mod" ($str_mod_0) ...$final_args help
} else {
^$"((get-provisioning-name))" "-mod" $"($module | str replace ' ' '|')" ...$cmd_args help
}
} else {
^$"((get-provisioning-name))" help
}

@ -1,5 +1,6 @@
# Workspace Configuration Migration: YAML → Nickel
# Converts existing provisioning.yaml workspace configs to Nickel format
# Error handling: do/complete pattern with exit_code checks (no try-catch)

use ../config/accessor.nu *

@ -123,9 +124,8 @@ def migrate_single_workspace [
}

# Load YAML config
let yaml_config = try {
open $yaml_file
} catch {
let yaml_load_result = (do { open $yaml_file } | complete)
if $yaml_load_result.exit_code != 0 {
if $verbose {
print $" ❌ Failed to parse YAML"
}
@ -136,21 +136,10 @@ def migrate_single_workspace [
error: "Failed to parse YAML"
}
}
let yaml_config = $yaml_load_result.stdout

# Convert YAML to Nickel
let nickel_content = try {
yaml_to_nickel $yaml_config $workspace_name
} catch {|e|
if $verbose {
print $" ❌ Conversion failed: ($e)"
}
return {
workspace: $workspace_name
success: false
skipped: false
error: $"Conversion failed: ($e)"
}
}
let nickel_content = (yaml_to_nickel $yaml_config $workspace_name)

if $check {
if $verbose {
@ -171,54 +160,50 @@ def migrate_single_workspace [
# Create backup if requested
if $backup and ($yaml_file | path exists) {
let backup_file = $"($yaml_file).backup"
try {
cp $yaml_file $backup_file
let backup_result = (do { cp $yaml_file $backup_file } | complete)
if $backup_result.exit_code == 0 {
if $verbose {
print $" 📦 Backed up to ($backup_file)"
}
} catch {
if $verbose {
print $" ⚠️ Failed to create backup"
}
} else if $verbose {
print $" ⚠️ Failed to create backup"
}
}

# Write Nickel file
try {
$nickel_content | save $decl_file
let save_result = (do { $nickel_content | save $decl_file } | complete)
if $save_result.exit_code != 0 {
if $verbose {
print $" ✅ Created ($decl_file)"
}

# Validate Nickel
try {
let _ = (nickel export $decl_file --format json)
if $verbose {
print $" ✅ Nickel validation passed"
}
} catch {
if $verbose {
print $" ⚠️ Nickel validation warning (may still be usable)"
}
}

return {
workspace: $workspace_name
success: true
skipped: false
error: null
}
} catch {|e|
if $verbose {
print $" ❌ Failed to write Nickel file: ($e)"
print $" ❌ Failed to write Nickel file: ($save_result.stderr)"
}
return {
workspace: $workspace_name
success: false
skipped: false
error: $"Failed to write Nickel file: ($e)"
error: $"Failed to write Nickel file: ($save_result.stderr)"
}
}

if $verbose {
print $" ✅ Created ($decl_file)"
}

# Validate Nickel
let validate_result = (do { nickel export $decl_file --format json } | complete)
if $validate_result.exit_code == 0 {
if $verbose {
print $" ✅ Nickel validation passed"
}
} else if $verbose {
print $" ⚠️ Nickel validation warning (may still be usable)"
}

return {
workspace: $workspace_name
success: true
skipped: false
error: null
}
}

# ============================================================================

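After the rewrite, migrate_single_workspace reads top to bottom as a flat series of checks with no nested catch blocks. Its skeleton, condensed from the hunks above (verbose printing and the backup branch elided):

let loaded = (do { open $yaml_file } | complete)
if $loaded.exit_code != 0 {
    return {workspace: $workspace_name, success: false, skipped: false, error: "Failed to parse YAML"}
}

let nickel_content = (yaml_to_nickel $loaded.stdout $workspace_name)

let saved = (do { $nickel_content | save $decl_file } | complete)
if $saved.exit_code != 0 {
    return {workspace: $workspace_name, success: false, skipped: false, error: $"Failed to write Nickel file: ($saved.stderr)"}
}

{workspace: $workspace_name, success: true, skipped: false, error: null}
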
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -4,6 +4,24 @@
use ../../../lib_provisioning *
use ../../flags.nu *

# Validate identifier is safe from path/command injection
def validate_safe_identifier [id: string] {
# Returns true if INVALID (contains dangerous patterns)
let has_slash = ($id | str contains "/")
let has_dotdot = ($id | str contains "..")
let starts_slash = ($id | str starts-with "/")
let has_semicolon = ($id | str contains ";")
let has_pipe = ($id | str contains "|")
let has_ampersand = ($id | str contains "&")
let has_dollar = ($id | str contains "$")
let has_backtick = ($id | str contains "`")

if $has_slash or $has_dotdot or $starts_slash or $has_semicolon or $has_pipe or $has_ampersand or $has_dollar or $has_backtick {
return true
}
false
}

# Main providers command handler - Manage infrastructure providers
export def handle_providers [ops: string, flags: record] {
use ../../../lib_provisioning/module_loader.nu *
@ -91,6 +109,12 @@ def handle_providers_info [args: list, flags: record] {
}

let provider_name = $args | get 0

# Validate provider name
if validate_safe_identifier $provider_name {
error make { msg: "Invalid provider name - contains invalid characters" }
}

let show_nickel = ($args | any { |x| $x == "--nickel" })
let no_cache = ($args | any { |x| $x == "--no-cache" })

@ -149,6 +173,14 @@ def handle_providers_install [args: list, flags: record] {
let provider_name = $args | get 0
let infra_name = $args | get 1

# Validate provider and infrastructure names
if validate_safe_identifier $provider_name {
error make { msg: "Invalid provider name - contains invalid characters" }
}
if validate_safe_identifier $infra_name {
error make { msg: "Invalid infrastructure name - contains invalid characters" }
}

# Extract version flag if present
let version_idx = ($args | enumerate | where item == "--version" | get 0?.index | default (-1))
let version = if $version_idx >= 0 and ($args | length) > ($version_idx + 1) {
@ -187,6 +219,15 @@ def handle_providers_remove [args: list, flags: record] {

let provider_name = $args | get 0
let infra_name = $args | get 1

# Validate provider and infrastructure names
if validate_safe_identifier $provider_name {
error make { msg: "Invalid provider name - contains invalid characters" }
}
if validate_safe_identifier $infra_name {
error make { msg: "Invalid infrastructure name - contains invalid characters" }
}

let force = ($args | any { |x| $x == "--force" })

# Resolve infrastructure path
@ -223,6 +264,11 @@ def handle_providers_installed [args: list, flags: record] {

let infra_name = $args | get 0

# Validate infrastructure name
if validate_safe_identifier $infra_name {
error make { msg: "Invalid infrastructure name - contains invalid characters" }
}

# Parse format flag
let format_idx = ($args | enumerate | where item == "--format" | get 0?.index | default (-1))
let format = if $format_idx >= 0 and ($args | length) > ($format_idx + 1) {
@ -282,6 +328,12 @@ def handle_providers_validate [args: list, flags: record] {
}

let infra_name = $args | get 0

# Validate infrastructure name
if validate_safe_identifier $infra_name {
error make { msg: "Invalid infrastructure name - contains invalid characters" }
}

let no_cache = ($args | any { |x| $x == "--no-cache" })

print $"(_ansi blue_bold)🔍 Validating providers for ($infra_name)...(_ansi reset)"

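Note the validator's inverted convention: it returns true when the name is unsafe, so every call site stays a one-line guard. A usage sketch with a deliberately hostile input (the call is parenthesized here for clarity):

let provider_name = "aws;rm -rf /tmp/x"
if (validate_safe_identifier $provider_name) {
    error make { msg: "Invalid provider name - contains invalid characters" }
}
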
@ -4,6 +4,15 @@
use ../../../lib_provisioning *
use ../../flags.nu *

# Validate infrastructure name is safe from path injection
def validate_infra_name [infra: string] {
# Returns true if INVALID (contains dangerous patterns)
if ($infra | str contains "/") or ($infra | str contains "..") or ($infra | str starts-with "/") or ($infra | str contains " ") {
return true
}
false
}

# Nu shell command handler - Start Nushell with provisioning library loaded
export def handle_nu [ops: string, flags: record] {
let run_ops = if ($ops | str trim | str starts-with "-") {
@ -13,8 +22,14 @@ export def handle_nu [ops: string, flags: record] {
if ($parts | is-empty) { "" } else { $parts | first }
}

if ($flags.infra | is-not-empty) and ($env.PROVISIONING_INFRA_PATH | path join $flags.infra | path exists) {
cd ($env.PROVISIONING_INFRA_PATH | path join $flags.infra)
if ($flags.infra | is-not-empty) {
# Validate infra name to prevent path injection
if validate_infra_name $flags.infra {
error make { msg: "Invalid infrastructure name - contains path traversal characters" }
}
if ($env.PROVISIONING_INFRA_PATH | path join $flags.infra | path exists) {
cd ($env.PROVISIONING_INFRA_PATH | path join $flags.infra)
}
}

if ($flags.output_format | is-empty) {

Some files were not shown because too many files have changed in this diff