chore: fix more try/catch and errors
This commit is contained in:
parent 08563bc973
commit 825d1f0e88
@@ -27,7 +27,7 @@ export def is_valid_ipv6 [ip: string]: nothing -> bool {
 }

 # Format record as table for display
-export def format_server_table [servers: list]: nothing -> null {
+export def format_server_table [servers: list]: nothing -> nothing {
     let columns = ["id", "name", "status", "public_net", "server_type"]

     let formatted = $servers | map {|s|
@@ -63,7 +63,7 @@ export def extract_api_error [response: any]: nothing -> string {
 # Validate server configuration
 export def validate_server_config [server: record]: nothing -> bool {
     let required = ["hostname", "server_type", "location"]
-    let missing = $required | filter {|f| not ($server | has $f)}
+    let missing = $required | where {|f| not ($server | has $f)}

     if not ($missing | is-empty) {
         error make {msg: $"Missing required fields: ($missing | str join ", ")"}
@@ -74,7 +74,7 @@ export def validate_server_config [server: record]: nothing -> bool {

 # Convert timestamp to human readable format
 export def format_timestamp [timestamp: int]: nothing -> string {
-    let date = (date now | date to-record)
+    let date = (now | format date "%Y-%m-%dT%H:%M:%SZ")
     $"($timestamp) (UTC)"
 }
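The `null` to `nothing` change above reflects how return types are spelled in recent Nushell signatures: `nothing` is the type, `null` is a value. A minimal sketch of the target style (hypothetical command, not from this repo):

```nu
# Sketch: a command that prints and returns nothing.
def log_line [msg: string]: nothing -> nothing {
    print $"[info] ($msg)"
}
```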
@@ -281,11 +281,27 @@ export def load-provisioning-config [
     $final_config = (apply-user-context-overrides $final_config $user_context_data)
 }

-# Apply environment-specific overrides from environments section
+# Apply environment-specific overrides
+# Per ADR-003: Nickel is source of truth for environments (provisioning/schemas/config/environments/main.ncl)
 if ($current_environment | is-not-empty) {
+    # Priority: 1) Nickel environments schema (preferred), 2) config.defaults.toml (fallback)
+
+    # Try to load from Nickel first
+    let nickel_environments = (load-environments-from-nickel)
+    let env_config = if ($nickel_environments | is-empty) {
+        # Fallback: try to get from current config TOML
         let current_config = $final_config
-        let env_result = (do { $current_config | get $"environments.($current_environment)" } | complete)
-        let env_config = if $env_result.exit_code == 0 { $env_result.stdout } else { {} }
+        let toml_environments = ($current_config | get -o environments | default {})
+        if ($toml_environments | is-empty) {
+            {} # No environment config found
+        } else {
+            ($toml_environments | get -o $current_environment | default {})
+        }
+    } else {
+        # Use Nickel environments
+        ($nickel_environments | get -o $current_environment | default {})
+    }

     if ($env_config | is-not-empty) {
         if $debug {
             # log debug $"Applying environment overrides for: ($current_environment)"
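The new lookup order reads as a small pure function: prefer the Nickel-exported record, else fall back to the TOML config, else empty. A hedged sketch, where `$nickel_envs` and `$cfg` are hypothetical stand-ins for the records the loader actually builds:

```nu
def pick-env-config [nickel_envs: record, cfg: record, env_name: string]: nothing -> record {
    if ($nickel_envs | is-not-empty) {
        $nickel_envs | get -o $env_name | default {}
    } else {
        $cfg | get -o environments | default {} | get -o $env_name | default {}
    }
}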
@@ -547,8 +563,7 @@ export def deep-merge [

     for key in ($override | columns) {
         let override_value = ($override | get $key)
-        let base_result = (do { $base | get $key } | complete)
-        let base_value = if $base_result.exit_code == 0 { $base_result.stdout } else { null }
+        let base_value = ($base | get -o $key | default null)

         if ($base_value | is-empty) {
             # Key doesn't exist in base, add it
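This hunk shows the idiom the whole commit standardizes on: `do { $rec | get key } | complete` treats a record lookup like an external command, while optional `get -o` simply yields null for a missing column, and it accepts nested cell paths too. A toy example:

```nu
let cfg = {paths: {base: "/opt/prov"}}
($cfg | get -o paths.base | default "")        # => "/opt/prov"
($cfg | get -o debug.enabled | default false)  # => false (key absent)
```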
@@ -572,8 +587,7 @@ export def interpolate-config [
     mut result = $config

     # Get base path for interpolation
-    let base_result = (do { $config | get paths.base } | complete)
-    let base_path = if $base_result.exit_code == 0 { $base_result.stdout } else { "" }
+    let base_path = ($config | get -o paths.base | default "")

     if ($base_path | is-not-empty) {
         # Interpolate the entire config structure
@@ -612,8 +626,7 @@ export def get-config-value [

     for part in $path_parts {
         let immutable_current = $current
-        let next_result = (do { $immutable_current | get $part } | complete)
-        let next_value = if $next_result.exit_code == 0 { $next_result.stdout } else { null }
+        let next_value = ($immutable_current | get -o $part | default null)
         if ($next_value | is-empty) {
             return $default_value
         }
@@ -632,8 +645,7 @@ export def validate-config-structure [
     mut warnings = []

     for section in $required_sections {
-        let section_result = (do { $config | get $section } | complete)
-        let section_value = if $section_result.exit_code == 0 { $section_result.stdout } else { null }
+        let section_value = ($config | get -o $section | default null)
         if ($section_value | is-empty) {
             $errors = ($errors | append {
                 type: "missing_section",
@@ -659,12 +671,10 @@ export def validate-path-values [
     mut errors = []
     mut warnings = []

-    let paths_result = (do { $config | get paths } | complete)
-    let paths = if $paths_result.exit_code == 0 { $paths_result.stdout } else { {} }
+    let paths = ($config | get -o paths | default {})

     for path_name in $required_paths {
-        let path_result = (do { $paths | get $path_name } | complete)
-        let path_value = if $path_result.exit_code == 0 { $path_result.stdout } else { null }
+        let path_value = ($paths | get -o $path_name | default null)

         if ($path_value | is-empty) {
             $errors = ($errors | append {
@@ -715,8 +725,7 @@ export def validate-data-types [
     mut warnings = []

     # Validate core.version follows semantic versioning pattern
-    let core_result = (do { $config | get core.version } | complete)
-    let core_version = if $core_result.exit_code == 0 { $core_result.stdout } else { null }
+    let core_version = ($config | get -o core.version | default null)
     if ($core_version | is-not-empty) {
         let version_pattern = "^\\d+\\.\\d+\\.\\d+(-.+)?$"
         let version_parts = ($core_version | split row ".")
@@ -732,8 +741,7 @@ export def validate-data-types [
     }

     # Validate debug.enabled is boolean
-    let debug_result = (do { $config | get debug.enabled } | complete)
-    let debug_enabled = if $debug_result.exit_code == 0 { $debug_result.stdout } else { null }
+    let debug_enabled = ($config | get -o debug.enabled | default null)
     if ($debug_enabled | is-not-empty) {
         if (($debug_enabled | describe) != "bool") {
             $errors = ($errors | append {
@@ -749,8 +757,7 @@ export def validate-data-types [
     }

     # Validate debug.metadata is boolean
-    let debug_meta_result = (do { $config | get debug.metadata } | complete)
-    let debug_metadata = if $debug_meta_result.exit_code == 0 { $debug_meta_result.stdout } else { null }
+    let debug_metadata = ($config | get -o debug.metadata | default null)
     if ($debug_metadata | is-not-empty) {
         if (($debug_metadata | describe) != "bool") {
             $errors = ($errors | append {
@@ -766,8 +773,7 @@ export def validate-data-types [
     }

     # Validate sops.use_sops is boolean
-    let sops_result = (do { $config | get sops.use_sops } | complete)
-    let sops_use = if $sops_result.exit_code == 0 { $sops_result.stdout } else { null }
+    let sops_use = ($config | get -o sops.use_sops | default null)
     if ($sops_use | is-not-empty) {
         if (($sops_use | describe) != "bool") {
             $errors = ($errors | append {
@@ -797,10 +803,8 @@ export def validate-semantic-rules [
     mut warnings = []

     # Validate provider configuration
-    let providers_result = (do { $config | get providers } | complete)
-    let providers = if $providers_result.exit_code == 0 { $providers_result.stdout } else { {} }
-    let default_result = (do { $providers | get default } | complete)
-    let default_provider = if $default_result.exit_code == 0 { $default_result.stdout } else { null }
+    let providers = ($config | get -o providers | default {})
+    let default_provider = ($providers | get -o default | default null)

     if ($default_provider | is-not-empty) {
         let valid_providers = ["aws", "upcloud", "local"]
@@ -817,8 +821,7 @@ export def validate-semantic-rules [
     }

     # Validate log level
-    let log_level_result = (do { $config | get debug.log_level } | complete)
-    let log_level = if $log_level_result.exit_code == 0 { $log_level_result.stdout } else { null }
+    let log_level = ($config | get -o debug.log_level | default null)
     if ($log_level | is-not-empty) {
         let valid_levels = ["trace", "debug", "info", "warn", "error"]
         if not ($log_level in $valid_levels) {
@@ -834,8 +837,7 @@ export def validate-semantic-rules [
     }

     # Validate output format
-    let output_result = (do { $config | get output.format } | complete)
-    let output_format = if $output_result.exit_code == 0 { $output_result.stdout } else { null }
+    let output_format = ($config | get -o output.format | default null)
     if ($output_format | is-not-empty) {
         let valid_formats = ["json", "yaml", "toml", "text"]
         if not ($output_format in $valid_formats) {
@@ -865,8 +867,7 @@ export def validate-file-existence [
     mut warnings = []

     # Check SOPS configuration file
-    let sops_cfg_result = (do { $config | get sops.config_path } | complete)
-    let sops_config = if $sops_cfg_result.exit_code == 0 { $sops_cfg_result.stdout } else { null }
+    let sops_config = ($config | get -o sops.config_path | default null)
     if ($sops_config | is-not-empty) {
         if not ($sops_config | path exists) {
             $warnings = ($warnings | append {
@@ -880,8 +881,7 @@ export def validate-file-existence [
     }

     # Check SOPS key files
-    let key_result = (do { $config | get sops.key_search_paths } | complete)
-    let key_paths = if $key_result.exit_code == 0 { $key_result.stdout } else { [] }
+    let key_paths = ($config | get -o sops.key_search_paths | default [])
     mut found_key = false

     for key_path in $key_paths {
@@ -903,8 +903,7 @@ export def validate-file-existence [
     }

     # Check critical configuration files
-    let settings_result = (do { $config | get paths.files.settings } | complete)
-    let settings_file = if $settings_result.exit_code == 0 { $settings_result.stdout } else { null }
+    let settings_file = ($config | get -o paths.files.settings | default null)
     if ($settings_file | is-not-empty) {
         if not ($settings_file | path exists) {
             $errors = ($errors | append {
@@ -1075,6 +1074,32 @@ export def init-user-config [
     }
 }

+# Load environment configurations from Nickel schema
+# Per ADR-003: Nickel as Source of Truth for all configuration
+def load-environments-from-nickel [] {
+    let project_root = (get-project-root)
+    let environments_ncl = ($project_root | path join "provisioning" "schemas" "config" "environments" "main.ncl")
+
+    if not ($environments_ncl | path exists) {
+        # Fallback: return empty if Nickel file doesn't exist
+        # Loader will then try to use config.defaults.toml if available
+        return {}
+    }
+
+    # Export Nickel to JSON and parse
+    let export_result = (do {
+        nickel export --format json $environments_ncl
+    } | complete)
+
+    if $export_result.exit_code != 0 {
+        # If Nickel export fails, fallback gracefully
+        return {}
+    }
+
+    # Parse JSON output
+    $export_result.stdout | from json
+}
+
 # Helper function to get project root directory
 def get-project-root [] {
     # Try to find project root by looking for key files
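Note that the new function keeps `do { ... } | complete` for the `nickel` call, which is the right split: `complete` captures an external command's exit code and output streams as a record instead of aborting the script. A minimal probe, assuming `nickel` is on PATH:

```nu
let res = (do { ^nickel --version } | complete)
if $res.exit_code == 0 {
    print $"nickel: ($res.stdout | str trim)"
} else {
    print $"nickel unavailable: ($res.stderr | str trim)"
}
```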
@@ -1160,8 +1185,7 @@ def interpolate-env-variables [

     for env_var in $safe_env_vars {
         let pattern = $"\\{\\{env\\.($env_var)\\}\\}"
-        let env_result = (do { $env | get $env_var } | complete)
-        let env_value = if $env_result.exit_code == 0 { $env_result.stdout } else { "" }
+        let env_value = ($env | get -o $env_var | default "")
         if ($env_value | is-not-empty) {
             $result = ($result | str replace --regex $pattern $env_value)
         }
@@ -1244,15 +1268,13 @@ def interpolate-sops-config [
     mut result = $text

     # SOPS key file path
-    let sops_key_result = (do { $config | get sops.age_key_file } | complete)
-    let sops_key_file = if $sops_key_result.exit_code == 0 { $sops_key_result.stdout } else { "" }
+    let sops_key_file = ($config | get -o sops.age_key_file | default "")
     if ($sops_key_file | is-not-empty) {
         $result = ($result | str replace --all "{{sops.key_file}}" $sops_key_file)
     }

     # SOPS config path
-    let sops_cfg_path_result = (do { $config | get sops.config_path } | complete)
-    let sops_config_path = if $sops_cfg_path_result.exit_code == 0 { $sops_cfg_path_result.stdout } else { "" }
+    let sops_config_path = ($config | get -o sops.config_path | default "")
     if ($sops_config_path | is-not-empty) {
         $result = ($result | str replace --all "{{sops.config_path}}" $sops_config_path)
     }
@@ -1268,22 +1290,19 @@ def interpolate-provider-refs [
     mut result = $text

     # AWS provider region
-    let aws_region_result = (do { $config | get providers.aws.region } | complete)
-    let aws_region = if $aws_region_result.exit_code == 0 { $aws_region_result.stdout } else { "" }
+    let aws_region = ($config | get -o providers.aws.region | default "")
     if ($aws_region | is-not-empty) {
         $result = ($result | str replace --all "{{providers.aws.region}}" $aws_region)
     }

     # Default provider
-    let default_prov_result = (do { $config | get providers.default } | complete)
-    let default_provider = if $default_prov_result.exit_code == 0 { $default_prov_result.stdout } else { "" }
+    let default_provider = ($config | get -o providers.default | default "")
     if ($default_provider | is-not-empty) {
         $result = ($result | str replace --all "{{providers.default}}" $default_provider)
     }

     # UpCloud zone
-    let upcloud_zone_result = (do { $config | get providers.upcloud.zone } | complete)
-    let upcloud_zone = if $upcloud_zone_result.exit_code == 0 { $upcloud_zone_result.stdout } else { "" }
+    let upcloud_zone = ($config | get -o providers.upcloud.zone | default "")
     if ($upcloud_zone | is-not-empty) {
         $result = ($result | str replace --all "{{providers.upcloud.zone}}" $upcloud_zone)
     }
@@ -1300,15 +1319,13 @@ def interpolate-advanced-features [

     # Function call: {{path.join(paths.base, "custom")}}
     if ($result | str contains "{{path.join(paths.base") {
-        let base_path_result = (do { $config | get paths.base } | complete)
-        let base_path = if $base_path_result.exit_code == 0 { $base_path_result.stdout } else { "" }
+        let base_path = ($config | get -o paths.base | default "")
         # Simple implementation for path.join with base path
        $result = ($result | str replace --regex "\\{\\{path\\.join\\(paths\\.base,\\s*\"([^\"]+)\"\\)\\}\\}" $"($base_path)/$1")
     }

     # Environment-aware paths: {{paths.base.${env}}}
-    let current_env_result = (do { $config | get current_environment } | complete)
-    let current_env = if $current_env_result.exit_code == 0 { $current_env_result.stdout } else { "dev" }
+    let current_env = ($config | get -o current_environment | default "dev")
     $result = ($result | str replace --all "{{paths.base.${env}}}" $"{{paths.base}}.($current_env)")

     $result
@@ -1584,8 +1601,7 @@ export def secure-interpolation [
     }

     # Apply interpolation with depth limiting
-    let base_path_sec_result = (do { $config | get paths.base } | complete)
-    let base_path = if $base_path_sec_result.exit_code == 0 { $base_path_sec_result.stdout } else { "" }
+    let base_path = ($config | get -o paths.base | default "")
     if ($base_path | is-not-empty) {
         interpolate-with-depth-limit $config $base_path $max_depth
     } else {
@@ -1923,8 +1939,7 @@ export def detect-current-environment [] {
 export def get-available-environments [
     config: record
 ] {
-    let env_section_result = (do { $config | get "environments" } | complete)
-    let environments_section = if $env_section_result.exit_code == 0 { $env_section_result.stdout } else { {} }
+    let environments_section = ($config | get -o "environments" | default {})
     $environments_section | columns
 }
@@ -1972,8 +1987,7 @@ export def apply-environment-variable-overrides [
     }

     for env_var in ($env_mappings | columns) {
-        let env_map_result = (do { $env | get $env_var } | complete)
-        let env_value = if $env_map_result.exit_code == 0 { $env_map_result.stdout } else { null }
+        let env_value = ($env | get -o $env_var | default null)
         if ($env_value | is-not-empty) {
             let mapping = ($env_mappings | get $env_var)
             let config_path = $mapping.path
@@ -2020,19 +2034,14 @@ def set-config-value [
     } else if ($path_parts | length) == 2 {
         let section = ($path_parts | first)
         let key = ($path_parts | last)
-        let immutable_result = $result
-        let section_result = (do { $immutable_result | get $section } | complete)
-        let section_data = if $section_result.exit_code == 0 { $section_result.stdout } else { {} }
+        let section_data = ($result | get -o $section | default {})
         $result | upsert $section ($section_data | upsert $key $value)
     } else if ($path_parts | length) == 3 {
         let section = ($path_parts | first)
         let subsection = ($path_parts | get 1)
         let key = ($path_parts | last)
-        let immutable_result = $result
-        let section_result = (do { $immutable_result | get $section } | complete)
-        let section_data = if $section_result.exit_code == 0 { $section_result.stdout } else { {} }
-        let subsection_result = (do { $section_data | get $subsection } | complete)
-        let subsection_data = if $subsection_result.exit_code == 0 { $subsection_result.stdout } else { {} }
+        let section_data = ($result | get -o $section | default {})
+        let subsection_data = ($section_data | get -o $subsection | default {})
         $result | upsert $section ($section_data | upsert $subsection ($subsection_data | upsert $key $value))
     } else {
         # For deeper nesting, use recursive approach
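The recursive branch referenced above can be sketched as a self-contained helper that walks the path parts and upserts the leaf, creating intermediate records as it goes (hypothetical name `set-in`, not the repo's actual function):

```nu
def set-in [config: record, parts: list<string>, value: any]: nothing -> record {
    if ($parts | length) == 1 {
        $config | upsert ($parts | first) $value
    } else {
        let head = ($parts | first)
        let child = ($config | get -o $head | default {})
        $config | upsert $head (set-in $child ($parts | skip 1) $value)
    }
}
# set-in {a: {}} ["a" "b" "c"] 1  => {a: {b: {c: 1}}}
```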
@@ -2051,8 +2060,7 @@ def set-config-value-recursive [
     } else {
         let current_key = ($path_parts | first)
         let remaining_parts = ($path_parts | skip 1)
-        let current_result = (do { $config | get $current_key } | complete)
-        let current_section = if $current_result.exit_code == 0 { $current_result.stdout } else { {} }
+        let current_section = ($config | get -o $current_key | default {})
         $config | upsert $current_key (set-config-value-recursive $current_section $remaining_parts $value)
     }
 }
@@ -2062,8 +2070,7 @@ def apply-user-context-overrides [
     config: record
     context: record
 ] {
-    let overrides_result = (do { $context | get overrides } | complete)
-    let overrides = if $overrides_result.exit_code == 0 { $overrides_result.stdout } else { {} }
+    let overrides = ($context | get -o overrides | default {})

     mut result = $config
@@ -2084,8 +2091,7 @@ def apply-user-context-overrides [
     }

     # Update last_used timestamp for the workspace
-    let ws_result = (do { $context | get workspace.name } | complete)
-    let workspace_name = if $ws_result.exit_code == 0 { $ws_result.stdout } else { null }
+    let workspace_name = ($context | get -o workspace.name | default null)
     if ($workspace_name | is-not-empty) {
         update-workspace-last-used-internal $workspace_name
     }
nulib/lib_provisioning/deploy.nu.example (new file, 558 lines)
@@ -0,0 +1,558 @@
#!/usr/bin/env nu

# Multi-Region HA Workspace Deployment Script
# Orchestrates deployment across US East (DigitalOcean), EU Central (Hetzner), Asia Pacific (AWS)
# Features: Regional health checks, VPN tunnels, global DNS, failover configuration

def main [--debug = false, --region: string = "all"] {
    print "🌍 Multi-Region High Availability Deployment"
    print "──────────────────────────────────────────────────"

    if $debug {
        print "✓ Debug mode enabled"
    }

    # Determine which regions to deploy
    let regions = if $region == "all" {
        ["us-east", "eu-central", "asia-southeast"]
    } else {
        [$region]
    }

    print $"\n📋 Deploying to regions: ($regions | str join ', ')"

    # Step 1: Validate configuration
    print "\n📋 Step 1: Validating configuration..."
    validate_environment

    # Step 2: Deploy US East (Primary)
    if ("us-east" in $regions) {
        print "\n☁️ Step 2a: Deploying US East (DigitalOcean - Primary)..."
        deploy_us_east_digitalocean
    }

    # Step 3: Deploy EU Central (Secondary)
    if ("eu-central" in $regions) {
        print "\n☁️ Step 2b: Deploying EU Central (Hetzner - Secondary)..."
        deploy_eu_central_hetzner
    }

    # Step 4: Deploy Asia Pacific (Tertiary)
    if ("asia-southeast" in $regions) {
        print "\n☁️ Step 2c: Deploying Asia Pacific (AWS - Tertiary)..."
        deploy_asia_pacific_aws
    }

    # Step 5: Setup VPN tunnels (only if deploying multiple regions)
    if (($regions | length) > 1) {
        print "\n🔐 Step 3: Setting up VPN tunnels for inter-region communication..."
        setup_vpn_tunnels
    }

    # Step 6: Configure global DNS
    if (($regions | length) == 3) {
        print "\n🌐 Step 4: Configuring global DNS and failover policies..."
        setup_global_dns
    }

    # Step 7: Configure database replication
    if (($regions | length) > 1) {
        print "\n🗄️ Step 5: Configuring database replication..."
        setup_database_replication
    }

    # Step 8: Verify deployment
    print "\n✅ Step 6: Verifying deployment across regions..."
    verify_multi_region_deployment

    print "\n🎉 Multi-region HA deployment complete!"
    print "✓ Application is now live across 3 geographic regions with automatic failover"
    print ""
    print "Next steps:"
    print "1. Configure SSL/TLS certificates for all regional endpoints"
    print "2. Deploy application to web servers in each region"
    print "3. Test failover by stopping a region and verifying automatic failover"
    print "4. Monitor replication lag and regional health status"
}

def validate_environment [] {
    # Check required environment variables
    let required = [
        "DIGITALOCEAN_TOKEN",
        "HCLOUD_TOKEN",
        "AWS_ACCESS_KEY_ID",
        "AWS_SECRET_ACCESS_KEY"
    ]

    print "  Checking required environment variables..."
    $required | each {|var|
        if ($env | has $var) {
            print $"  ✓ ($var) is set"
        } else {
            print $"  ✗ ($var) is not set"
            error make {msg: $"Missing required environment variable: ($var)"}
        }
    }

    # Verify CLI tools
    let tools = ["doctl", "hcloud", "aws", "nickel"]
    print "  Verifying CLI tools..."
    $tools | each {|tool|
        if (which $tool | is-not-empty) {
            print $"  ✓ ($tool) is installed"
        } else {
            print $"  ✗ ($tool) is not installed"
            error make {msg: $"Missing required tool: ($tool)"}
        }
    }

    # Validate Nickel configuration
    print "  Validating Nickel configuration..."
    let result = (nickel export workspace.ncl | complete)
    if $result.exit_code == 0 {
        print "  ✓ Nickel configuration is valid"
    } else {
        error make {msg: $"Nickel validation failed: ($result.stderr)"}
    }

    # Validate config.toml
    print "  Validating config.toml..."
    try {
        let config = (open config.toml)
        print "  ✓ config.toml is valid"
    } catch {|err|
        error make {msg: $"config.toml validation failed: ($err)"}
    }

    # Test provider connectivity
    print "  Testing provider connectivity..."
    try {
        doctl account get | null
        print "  ✓ DigitalOcean connectivity verified"
    } catch {|err|
        error make {msg: $"DigitalOcean connectivity failed: ($err)"}
    }

    try {
        hcloud server list | null
        print "  ✓ Hetzner connectivity verified"
    } catch {|err|
        error make {msg: $"Hetzner connectivity failed: ($err)"}
    }

    try {
        aws sts get-caller-identity | null
        print "  ✓ AWS connectivity verified"
    } catch {|err|
        error make {msg: $"AWS connectivity failed: ($err)"}
    }
}

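A side note on the env-var check above: `($env | has $var)` relies on this codebase's `has` helper. In stock Nushell, `$env` behaves like a record, so a dependency-free version of the same check might look like this (a sketch, not the script's actual helper):

```nu
let required = ["DIGITALOCEAN_TOKEN" "HCLOUD_TOKEN"]
for var in $required {
    if not ($var in $env) {
        error make {msg: $"Missing required environment variable: ($var)"}
    }
}
```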
def deploy_us_east_digitalocean [] {
    print "  Creating DigitalOcean VPC (10.0.0.0/16)..."

    let vpc = (doctl compute vpc create \
        --name "us-east-vpc" \
        --region "nyc3" \
        --ip-range "10.0.0.0/16" \
        --format ID \
        --no-header | into string)

    print $"  ✓ Created VPC: ($vpc)"

    print "  Creating DigitalOcean droplets (3x s-2vcpu-4gb)..."

    let ssh_keys = (doctl compute ssh-key list --no-header --format ID)

    if ($ssh_keys | is-empty) {
        error make {msg: "No SSH keys found in DigitalOcean. Please upload one first."}
    }

    let ssh_key_id = ($ssh_keys | first)

    # Create 3 web server droplets
    let droplet_ids = (
        1..3 | each {|i|
            let response = (doctl compute droplet create \
                $"us-app-($i)" \
                --region "nyc3" \
                --size "s-2vcpu-4gb" \
                --image "ubuntu-22-04-x64" \
                --ssh-keys $ssh_key_id \
                --enable-monitoring \
                --enable-backups \
                --format ID \
                --no-header | into string)

            print $"  ✓ Created droplet: us-app-($i)"
            $response
        }
    )

    # Wait for droplets to be ready
    print "  Waiting for droplets to be active..."
    sleep 30sec

    # Verify droplets are running
    $droplet_ids | each {|id|
        let droplet = (doctl compute droplet get $id --format Status --no-header)
        if $droplet != "active" {
            error make {msg: $"Droplet ($id) failed to start"}
        }
    }

    print "  ✓ All droplets are active"

    print "  Creating DigitalOcean load balancer..."
    let lb = (doctl compute load-balancer create \
        --name "us-lb" \
        --region "nyc3" \
        --forwarding-rules "entry_protocol:http,entry_port:80,target_protocol:http,target_port:80" \
        --format ID \
        --no-header | into string)

    print $"  ✓ Created load balancer: ($lb)"

    print "  Creating DigitalOcean PostgreSQL database (3-node Multi-AZ)..."

    try {
        doctl databases create \
            --engine pg \
            --version 14 \
            --region "nyc3" \
            --num-nodes 3 \
            --size "db-s-2vcpu-4gb" \
            --name "us-db-primary" | null

        print "  ✓ Database creation initiated (may take 10-15 minutes)"
    } catch {|err|
        print $"  ⚠ Database creation error (may already exist): ($err)"
    }
}

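The fixed `sleep 30sec` before the status check can race slow droplets. A hedged alternative is a bounded poll on doctl's reported status, using the same flags the script already uses:

```nu
def wait-droplet-active [id: string, max_tries: int = 30]: nothing -> nothing {
    for _ in 1..$max_tries {
        let status = (^doctl compute droplet get $id --format Status --no-header | str trim)
        if $status == "active" { return }
        sleep 10sec
    }
    error make {msg: $"Droplet ($id) not active after ($max_tries) checks"}
}
```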
def deploy_eu_central_hetzner [] {
    print "  Creating Hetzner private network (10.1.0.0/16)..."

    let network = (hcloud network create \
        --name "eu-central-network" \
        --ip-range "10.1.0.0/16" \
        --format json | from json)

    print $"  ✓ Created network: ($network.network.id)"

    print "  Creating Hetzner subnet..."
    hcloud network add-subnet eu-central-network \
        --ip-range "10.1.1.0/24" \
        --network-zone "eu-central"

    print "  ✓ Created subnet: 10.1.1.0/24"

    print "  Creating Hetzner servers (3x CPX21)..."

    let ssh_keys = (hcloud ssh-key list --format ID --no-header)

    if ($ssh_keys | is-empty) {
        error make {msg: "No SSH keys found in Hetzner. Please upload one first."}
    }

    let ssh_key_id = ($ssh_keys | first)

    # Create 3 servers
    let server_ids = (
        1..3 | each {|i|
            let response = (hcloud server create \
                --name $"eu-app-($i)" \
                --type cpx21 \
                --image ubuntu-22.04 \
                --location nbg1 \
                --ssh-key $ssh_key_id \
                --network eu-central-network \
                --format json | from json)

            print $"  ✓ Created server: eu-app-($i) (ID: ($response.server.id))"
            $response.server.id
        }
    )

    print "  Waiting for servers to be running..."
    sleep 30sec

    $server_ids | each {|id|
        let server = (hcloud server list --format ID,Status | where {|row| $row =~ $id} | get Status.0)
        if $server != "running" {
            error make {msg: $"Server ($id) failed to start"}
        }
    }

    print "  ✓ All servers are running"

    print "  Creating Hetzner load balancer..."
    let lb = (hcloud load-balancer create \
        --name "eu-lb" \
        --type lb21 \
        --location nbg1 \
        --format json | from json)

    print $"  ✓ Created load balancer: ($lb.load_balancer.id)"

    print "  Creating Hetzner backup volume (500GB)..."
    let volume = (hcloud volume create \
        --name "eu-backups" \
        --size 500 \
        --location nbg1 \
        --format json | from json)

    print $"  ✓ Created backup volume: ($volume.volume.id)"

    # Wait for volume to be ready
    print "  Waiting for volume to be available..."
    let max_wait = 60
    mut attempts = 0

    while $attempts < $max_wait {
        let status = (hcloud volume list --format ID,Status | where {|row| $row =~ $volume.volume.id} | get Status.0)

        if $status == "available" {
            print "  ✓ Volume is available"
            break
        }

        sleep 1sec
        $attempts = ($attempts + 1)
    }

    if $attempts >= $max_wait {
        error make {msg: "Hetzner volume failed to become available"}
    }
}

def deploy_asia_pacific_aws [] {
    print "  Creating AWS VPC (10.2.0.0/16)..."

    let vpc = (aws ec2 create-vpc \
        --region ap-southeast-1 \
        --cidr-block "10.2.0.0/16" \
        --tag-specifications "ResourceType=vpc,Tags=[{Key=Name,Value=asia-vpc}]" | from json)

    print $"  ✓ Created VPC: ($vpc.Vpc.VpcId)"

    print "  Creating AWS private subnet..."
    let subnet = (aws ec2 create-subnet \
        --region ap-southeast-1 \
        --vpc-id $vpc.Vpc.VpcId \
        --cidr-block "10.2.1.0/24" \
        --availability-zone "ap-southeast-1a" | from json)

    print $"  ✓ Created subnet: ($subnet.Subnet.SubnetId)"

    print "  Creating AWS security group..."
    let sg = (aws ec2 create-security-group \
        --region ap-southeast-1 \
        --group-name "asia-db-sg" \
        --description "Security group for Asia Pacific database access" \
        --vpc-id $vpc.Vpc.VpcId | from json)

    print $"  ✓ Created security group: ($sg.GroupId)"

    # Allow inbound traffic from all regions
    aws ec2 authorize-security-group-ingress \
        --region ap-southeast-1 \
        --group-id $sg.GroupId \
        --protocol tcp \
        --port 5432 \
        --cidr 10.0.0.0/8

    print "  ✓ Configured database access rules"

    print "  Creating AWS EC2 instances (3x t3.medium)..."

    let ami_id = "ami-09d56f8956ab235b7"

    # Create 3 EC2 instances
    let instance_ids = (
        1..3 | each {|i|
            let response = (aws ec2 run-instances \
                --region ap-southeast-1 \
                --image-id $ami_id \
                --instance-type t3.medium \
                --subnet-id $subnet.Subnet.SubnetId \
                --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=asia-app-($i)}]" | from json)

            let instance_id = $response.Instances.0.InstanceId
            print $"  ✓ Created instance: asia-app-($i) (ID: ($instance_id))"
            $instance_id
        }
    )

    print "  Waiting for instances to be running..."
    sleep 30sec

    $instance_ids | each {|id|
        let status = (aws ec2 describe-instances \
            --region ap-southeast-1 \
            --instance-ids $id \
            --query 'Reservations[0].Instances[0].State.Name' \
            --output text)

        if $status != "running" {
            error make {msg: $"Instance ($id) failed to start"}
        }
    }

    print "  ✓ All instances are running"

    print "  Creating AWS Application Load Balancer..."
    let lb = (aws elbv2 create-load-balancer \
        --region ap-southeast-1 \
        --name "asia-lb" \
        --subnets $subnet.Subnet.SubnetId \
        --scheme internet-facing \
        --type application | from json)

    print $"  ✓ Created ALB: ($lb.LoadBalancers.0.LoadBalancerArn)"

    print "  Creating AWS RDS read replica..."
    try {
        aws rds create-db-instance-read-replica \
            --region ap-southeast-1 \
            --db-instance-identifier "asia-db-replica" \
            --source-db-instance-identifier "us-db-primary" | null

        print "  ✓ Read replica creation initiated"
    } catch {|err|
        print $"  ⚠ Read replica creation error (may already exist): ($err)"
    }
}

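For the EC2 path, the AWS CLI ships blocking waiters, which avoids the sleep-then-check pattern entirely. The instance id below is a placeholder for the ids collected from run-instances:

```nu
^aws ec2 wait instance-running --region ap-southeast-1 --instance-ids i-0123456789abcdef0
```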
def setup_vpn_tunnels [] {
    print "  Setting up IPSec VPN tunnels between regions..."

    # US to EU VPN
    print "  Creating US East → EU Central VPN tunnel..."
    try {
        aws ec2 create-vpn-gateway \
            --region us-east-1 \
            --type ipsec.1 \
            --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=us-eu-vpn-gw}]" | null

        print "  ✓ VPN gateway created (manual completion required)"
    } catch {|err|
        print $"  ℹ VPN setup note: ($err)"
    }

    # EU to APAC VPN
    print "  Creating EU Central → Asia Pacific VPN tunnel..."
    print "  Note: VPN configuration between Hetzner and AWS requires manual setup"
    print "  See multi-provider-networking.md for StrongSwan configuration steps"

    print "  ✓ VPN tunnel configuration documented"
}

def setup_global_dns [] {
    print "  Setting up Route53 geolocation routing..."

    try {
        let hosted_zones = (aws route53 list-hosted-zones | from json)

        if (($hosted_zones.HostedZones | length) > 0) {
            let zone_id = $hosted_zones.HostedZones.0.Id

            print $"  ✓ Using hosted zone: ($zone_id)"

            print "  Creating regional DNS records with health checks..."
            print "  Note: DNS record creation requires actual endpoint IPs"
            print "  Run after regional deployment to get endpoint IPs"

            print "  US East endpoint: us.api.example.com"
            print "  EU Central endpoint: eu.api.example.com"
            print "  Asia Pacific endpoint: asia.api.example.com"
        } else {
            print "  ℹ No hosted zones found. Create one with:"
            print "  aws route53 create-hosted-zone --name api.example.com --caller-reference $(date +%s)"
        }
    } catch {|err|
        print $"  ⚠ Route53 setup note: ($err)"
    }
}

def setup_database_replication [] {
    print "  Configuring multi-region database replication..."

    print "  Waiting for primary database to be ready..."
    print "  This may take 10-15 minutes on first deployment"

    # Check if primary database is ready
    let max_attempts = 30
    mut attempts = 0

    while $attempts < $max_attempts {
        try {
            let db = (doctl databases get us-db-primary --format Status --no-header)
            if $db == "active" {
                print "  ✓ Primary database is active"
                break
            }
        } catch {
            # Database not ready yet
        }

        sleep 30sec
        $attempts = ($attempts + 1)
    }

    print "  Configuring read replicas..."
    print "  EU Central read replica: replication lag < 300s"
    print "  Asia Pacific read replica: replication lag < 300s"
    print "  ✓ Replication configuration complete"
}

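The bare `catch` in this poll swallows every error. In the spirit of the rest of this commit, the same readiness check can be driven by exit codes instead (a sketch):

```nu
let res = (do { ^doctl databases get us-db-primary --format Status --no-header } | complete)
if $res.exit_code == 0 and ($res.stdout | str trim) == "active" {
    print "  ✓ Primary database is active"
}
```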
def verify_multi_region_deployment [] {
    print "  Verifying DigitalOcean resources..."
    try {
        let do_droplets = (doctl compute droplet list --format Name,Status --no-header)
        print $"  ✓ Found ($do_droplets | split row "\n" | length) droplets"

        let do_lbs = (doctl compute load-balancer list --format Name --no-header)
        print $"  ✓ Found load balancer"
    } catch {|err|
        print $"  ⚠ Error checking DigitalOcean: ($err)"
    }

    print "  Verifying Hetzner resources..."
    try {
        let hz_servers = (hcloud server list --format Name,Status)
        print "  ✓ Hetzner servers verified"

        let hz_lbs = (hcloud load-balancer list --format Name)
        print "  ✓ Hetzner load balancer verified"
    } catch {|err|
        print $"  ⚠ Error checking Hetzner: ($err)"
    }

    print "  Verifying AWS resources..."
    try {
        let aws_instances = (aws ec2 describe-instances \
            --region ap-southeast-1 \
            --query 'Reservations[*].Instances[*].InstanceId' \
            --output text | split row " " | length)
        print $"  ✓ Found ($aws_instances) EC2 instances"

        let aws_lbs = (aws elbv2 describe-load-balancers \
            --region ap-southeast-1 \
            --query 'LoadBalancers[*].LoadBalancerName' \
            --output text)
        print "  ✓ Application Load Balancer verified"
    } catch {|err|
        print $"  ⚠ Error checking AWS: ($err)"
    }

    print ""
    print "  Summary:"
    print "  ✓ US East (DigitalOcean): Primary region, 3 droplets + LB + database"
    print "  ✓ EU Central (Hetzner): Secondary region, 3 servers + LB + read replica"
    print "  ✓ Asia Pacific (AWS): Tertiary region, 3 EC2 + ALB + read replica"
    print "  ✓ Multi-region deployment successful"
}

# Run main function
main --debug=$nu.env.DEBUG? --region=$nu.env.REGION?
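The `$nu.env` form in the last line is legacy Nushell; current releases expose environment variables on `$env`, with `?` for optional access. A hedged modern equivalent of that invocation:

```nu
main --debug=(($env.DEBUG? | default "false") == "true") --region=($env.REGION? | default "all")
```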
@@ -1,48 +1,42 @@
 # Hetzner Cloud caching operations
 use env.nu *

 # Initialize cache directory
-export def hetzner_start_cache_info [settings: record, server: string]: nothing -> null {
+export def hetzner_start_cache_info [settings: record, server: string]: nothing -> nothing {
     if not ($settings | has provider) or not ($settings.provider | has paths) {
-        return null
+        return
     }

     let cache_dir = $"($settings.provider.paths.cache)"

     if not ($cache_dir | path exists) {
-        mkdir $cache_dir
+        ^mkdir $cache_dir
     }

     null
 }

 # Create cache entry for server
-export def hetzner_create_cache [settings: record, server: string, error_exit: bool = true]: nothing -> null {
-    try {
+export def hetzner_create_cache [settings: record, server: string, error_exit: bool = true]: nothing -> nothing {
     hetzner_start_cache_info $settings $server

     let cache_dir = $"($settings.provider.paths.cache)"
-    let cache_file = $"($cache_dir)/($server).json"
+    if not ($cache_dir | path exists) {
+        if $error_exit {
+            error make {msg: $"Cache directory not available: ($cache_dir)"}
+        }
+        return
+    }

+    let cache_file = $"($cache_dir)/($server).json"
     let cache_data = {
         server: $server
-        timestamp: (now)
-        cached_at: (date now | date to-record)
+        timestamp: (now | into int)
+        cached_at: (now | format date "%Y-%m-%dT%H:%M:%SZ")
     }

     $cache_data | to json | save --force $cache_file
-    } catch {|err|
-        if $error_exit {
-            error make {msg: $"Failed to create cache: ($err.msg)"}
-        }
-    }
-
-    null
 }

 # Read cache entry
 export def hetzner_read_cache [settings: record, server: string, error_exit: bool = true]: nothing -> record {
-    try {
     let cache_dir = $"($settings.provider.paths.cache)"
     let cache_file = $"($cache_dir)/($server).json"

@@ -54,35 +48,20 @@ export def hetzner_read_cache [settings: record, server: string, error_exit: boo
     }

     open $cache_file | from json
-    } catch {|err|
-        if $error_exit {
-            error make {msg: $"Failed to read cache: ($err.msg)"}
-        }
-        {}
-    }
 }

 # Clean cache entry
-export def hetzner_clean_cache [settings: record, server: string, error_exit: bool = true]: nothing -> null {
-    try {
+export def hetzner_clean_cache [settings: record, server: string, error_exit: bool = true]: nothing -> nothing {
     let cache_dir = $"($settings.provider.paths.cache)"
     let cache_file = $"($cache_dir)/($server).json"

     if ($cache_file | path exists) {
         rm $cache_file
     }
-    } catch {|err|
-        if $error_exit {
-            error make {msg: $"Failed to clean cache: ($err.msg)"}
-        }
-    }
-
-    null
 }

 # Get IP from cache
 export def hetzner_ip_from_cache [settings: record, server: string, error_exit: bool = true]: nothing -> string {
-    try {
     let cache = (hetzner_read_cache $settings $server false)

     if ($cache | has ip) {
@@ -90,17 +69,20 @@ export def hetzner_ip_from_cache [settings: record, server: string, error_exit:
     } else {
         ""
     }
-    } catch {
-        ""
-    }
 }

 # Update cache with server data
-export def hetzner_update_cache [settings: record, server: record, error_exit: bool = true]: nothing -> null {
-    try {
+export def hetzner_update_cache [settings: record, server: record, error_exit: bool = true]: nothing -> nothing {
     hetzner_start_cache_info $settings $server.hostname

     let cache_dir = $"($settings.provider.paths.cache)"
+    if not ($cache_dir | path exists) {
+        if $error_exit {
+            error make {msg: $"Cache directory not available: ($cache_dir)"}
+        }
+        return
+    }

     let cache_file = $"($cache_dir)/($server.hostname).json"

     let cache_data = {
@@ -111,53 +93,41 @@ export def hetzner_update_cache [settings: record, server: record, error_exit: b
         status: ($server.status | default "")
         location: ($server.location.name | default "")
         server_type: ($server.server_type.name | default "")
-        timestamp: (now)
-        cached_at: (date now | date to-record)
+        timestamp: (now | into int)
+        cached_at: (now | format date "%Y-%m-%dT%H:%M:%SZ")
     }

     $cache_data | to json | save --force $cache_file
-    } catch {|err|
-        if $error_exit {
-            error make {msg: $"Failed to update cache: ($err.msg)"}
-        }
-    }
-
-    null
 }

 # Clean all cache
-export def hetzner_clean_all_cache [settings: record, error_exit: bool = true]: nothing -> null {
-    try {
+export def hetzner_clean_all_cache [settings: record, error_exit: bool = true]: nothing -> nothing {
     let cache_dir = $"($settings.provider.paths.cache)"

     if ($cache_dir | path exists) {
         rm -r $cache_dir
     }

-    mkdir $cache_dir
-    } catch {|err|
-        if $error_exit {
-            error make {msg: $"Failed to clean all cache: ($err.msg)"}
-        }
-    }
-
-    null
+    ^mkdir $cache_dir
 }

 # Get cache age in seconds
 export def hetzner_cache_age [cache_data: record]: nothing -> int {
     if not ($cache_data | has timestamp) {
-        return -1
-    }
-
+        -1
+    } else {
         let cached_ts = ($cache_data.timestamp | into int)
         let now_ts = (now | into int)
         $now_ts - $cached_ts
+    }
 }

 # Check if cache is still valid
 export def hetzner_cache_valid [cache_data: record, ttl_seconds: int = 3600]: nothing -> bool {
     let age = (hetzner_cache_age $cache_data)
-    if $age < 0 {return false}
+    if $age < 0 {
+        false
+    } else {
+        $age < $ttl_seconds
+    }
 }
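One caveat on the age math above: in stock Nushell, `date now | into int` yields nanoseconds since the epoch, so a seconds-based TTL comparison needs a conversion (sketch; the `now` used here may be a codebase wrapper that already returns seconds):

```nu
let now_secs = ((date now | into int) / 1_000_000_000)
```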
@@ -9,7 +9,7 @@ export use secrets *
 export use ai *
 export use context.nu *
 export use setup *
-export use deploy.nu *
+#export use deploy.nu *
 export use extensions *
 export use providers.nu *
 export use workspace *
@@ -26,7 +26,7 @@ export def "record-vm-creation" [
         base_image: $vm_config.base_image
         backend: ($vm_config.backend // "libvirt")
         taskservs: ($vm_config.taskservs // [])
-        created_at: (date now | date to-record | debug)
+        created_at: (now | format date "%Y-%m-%dT%H:%M:%SZ" | debug)
         ip_address: ""
         mac_address: ""
     }
@@ -69,7 +69,7 @@ export def "update-vm-state" [
     let updated = (
         $current
         | upsert state $new_state
-        | upsert last_action (date now | date to-record | debug)
+        | upsert last_action (now | format date "%Y-%m-%dT%H:%M:%SZ" | debug)
     )

     let state_dir = (get-vm-state-dir)
@@ -154,7 +154,7 @@ export def "cleanup-temporary-vms" [
     """Cleanup temporary VMs older than specified hours"""

     let all_vms = (list-all-vms)
-    let now = (date now | date to-record | debug)
+    let now = (now | format date "%Y-%m-%dT%H:%M:%SZ" | debug)

     let to_cleanup = (
         $all_vms
@@ -23,7 +23,7 @@ export def "register-permanent-vm" [
     }

     # Create persistence record
-    let now = (date now | date to-record)
+    let now = (now | format date "%Y-%m-%dT%H:%M:%SZ")
     let persistence_info = {
         vm_name: $vm_config.name
         mode: "permanent"
|
||||
}
|
||||
|
||||
# Calculate cleanup time
|
||||
let now = (date now | date to-record)
|
||||
let now = (now | format date "%Y-%m-%dT%H:%M:%SZ")
|
||||
let cleanup_time = (
|
||||
$now
|
||||
+ (($ttl_hours * 60 * 60) * 1_000_000_000) # Convert to nanoseconds
|
||||
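Since `$now` is now a formatted string, the nanosecond addition above may no longer behave as date arithmetic. With native datetimes and durations the cleanup-time calculation can stay arithmetic (a sketch):

```nu
let ttl_hours = 4
let cleanup_time = ((date now) + ($ttl_hours * 1hr))
```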
@@ -189,7 +189,7 @@ export def "list-temporary-vms" []: table {
 export def "find-expired-vms" []: table {
     """Find temporary VMs that have expired (TTL exceeded)"""

-    let now = (date now | date to-record)
+    let now = (now | format date "%Y-%m-%dT%H:%M:%SZ")
     let temp_vms = (list-temporary-vms)

     $temp_vms
@@ -257,7 +257,7 @@ export def "get-vm-uptime" [
     """Get VM uptime since creation"""

     let persist_info = (get-vm-persistence-info $vm_name)
-    let now = (date now | date to-record)
+    let now = (now | format date "%Y-%m-%dT%H:%M:%SZ")

     if ("created_at" in $persist_info) {
         let uptime_seconds = ($now - $persist_info.created_at)
@@ -286,7 +286,7 @@ export def "get-vm-time-to-cleanup" [
     """Get time remaining until cleanup for temporary VM"""

     let persist_info = (get-vm-persistence-info $vm_name)
-    let now = (date now | date to-record)
+    let now = (now | format date "%Y-%m-%dT%H:%M:%SZ")

     if ($persist_info.mode // "") != "temporary" {
         return {
@@ -389,7 +389,7 @@ def update-cleanup-status [
     """Update cleanup status in persistence file"""

     let persist_info = (get-vm-persistence-info $vm_name)
-    let now = (date now | date to-record)
+    let now = (now | format date "%Y-%m-%dT%H:%M:%SZ")

     let updated = (
         $persist_info
@@ -179,14 +179,7 @@ export def load-config-from-file [config_path: path]: nothing -> record {
         error make {msg: $"Config file not found: ($config_path)"}
     }

-    try {
     open $config_path | from toml
-    } catch {|err|
-        error make {
-            msg: $"Failed to parse config file: ($config_path)"
-            label: {text: $err.msg}
-        }
-    }
 }

 # Validate deployment configuration
@@ -295,11 +288,7 @@ export def check-deployment-health [config: record]: nothing -> record {
         let health_url = $"http://($config.domain):($svc.port)/health"
         print $"  Checking ($svc.name)..."

-        let result = try {
-            http get $health_url --max-time 5sec | get status? | default "failed"
-        } catch {
-            "failed"
-        }
+        let result = (http get $health_url --max-time 5sec | get status? | default "failed")

         if $result != "ok" {
             $svc.name
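Worth noting: `get status? | default "failed"` only covers a missing field; a network failure still raises from `http get`. If the old "failed" fallback matters, some guard is still needed, e.g. (a sketch):

```nu
let result = (try { http get $health_url --max-time 5sec | get status? | default "failed" } catch { "failed" })
```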
@@ -344,12 +333,12 @@ def rollback-docker [config: record]: nothing -> record {
     let compose_base = get-platform-path "docker-compose"
     let base_file = $compose_base | path join "docker-compose.yaml"

-    try {
-        ^docker-compose -f $base_file down --volumes
+    let result = (do --ignore-errors { ^docker-compose -f $base_file down --volumes } | complete)
+    if $result.exit_code == 0 {
         print "✅ Docker deployment rolled back successfully"
         {success: true, platform: "docker"}
-    } catch {|err|
-        {success: false, platform: "docker", error: $err.msg}
+    } else {
+        {success: false, platform: "docker", error: $result.stderr}
     }
 }
@@ -358,12 +347,12 @@ def rollback-podman [config: record]: nothing -> record {
     let compose_base = get-platform-path "docker-compose"
     let base_file = $compose_base | path join "docker-compose.yaml"

-    try {
-        ^podman-compose -f $base_file down --volumes
+    let result = (do --ignore-errors { ^podman-compose -f $base_file down --volumes } | complete)
+    if $result.exit_code == 0 {
         print "✅ Podman deployment rolled back successfully"
         {success: true, platform: "podman"}
-    } catch {|err|
-        {success: false, platform: "podman", error: $err.msg}
+    } else {
+        {success: false, platform: "podman", error: $result.stderr}
     }
 }
@@ -371,12 +360,12 @@ def rollback-podman [config: record]: nothing -> record {
 def rollback-kubernetes [config: record]: nothing -> record {
     let namespace = "provisioning-platform"

-    try {
-        ^kubectl delete namespace $namespace
+    let result = (do --ignore-errors { ^kubectl delete namespace $namespace } | complete)
+    if $result.exit_code == 0 {
         print "✅ Kubernetes deployment rolled back successfully"
         {success: true, platform: "kubernetes"}
-    } catch {|err|
-        {success: false, platform: "kubernetes", error: $err.msg}
+    } else {
+        {success: false, platform: "kubernetes", error: $result.stderr}
     }
 }

@@ -103,7 +103,7 @@ export def hetzner_api_create_server [config: record]: nothing -> record {
 }

 # Delete a server
-export def hetzner_api_delete_server [id: string]: nothing -> null {
+export def hetzner_api_delete_server [id: string]: nothing -> nothing {
     let response = (hetzner_api_request "DELETE" $"/servers/($id)")
     null
 }
@@ -187,7 +187,7 @@ export def hetzner_api_create_volume [config: record]: nothing -> record {
 }

 # Delete a volume
-export def hetzner_api_delete_volume [id: string]: nothing -> null {
+export def hetzner_api_delete_volume [id: string]: nothing -> nothing {
     hetzner_api_request "DELETE" $"/volumes/($id)"
     null
 }

@@ -127,7 +127,7 @@ export def "batch submit" [
         }
     } else {
         # For dev/test, require auth but allow skip
-        let allow_skip = (get-config-value "security.bypass.allow_skip_auth" false)
+        let allow_skip = (config-get "security.bypass.allow_skip_auth" false)
         if not $skip_auth and $allow_skip {
             require-auth $operation_name --allow-skip
         } else if not $skip_auth {

@@ -1,9 +1,8 @@
 # Guide Command Handler
 # Provides interactive access to guides and cheatsheets

 use ../flags.nu *
-use ../../lib_provisioning *
-use ../help_system.nu {resolve-doc-url}
+use lib_provisioning *
+use ../help_system.nu ["resolve-doc-url"]

 # Display condensed cheatsheet summary
 def display_cheatsheet_summary [] {

@@ -2,7 +2,7 @@
 # Domain: Provider discovery, installation, removal, validation, and information

 use ../../../lib_provisioning *
-use ../flags.nu *
+use ../../flags.nu *

 # Main providers command handler - Manage infrastructure providers
 export def handle_providers [ops: string, flags: record] {
@@ -298,11 +298,11 @@ def handle_providers_validate [args: list, flags: record] {
     # Refactored from mutable to immutable accumulation (Rule 3)
     let validation_result = (
         # Check manifest exists
-        let manifest_path = ($infra_path | path join "providers.manifest.yaml")
-        let initial = {has_manifest: false, errors: []}
+        let manifest_path = ($infra_path | path join "providers.manifest.yaml");
+        let initial = {has_manifest: false, errors: []};

         if not ($manifest_path | path exists) {
-            $initial | upsert has_manifest false | upsert errors [("providers.manifest.yaml not found")]
+            $initial | upsert has_manifest false | upsert errors ["providers.manifest.yaml not found"]
         } else {
             # Check each provider in manifest
             let manifest = (open $manifest_path)

@@ -2,7 +2,7 @@
 # Domain: Nushell environment, shell info, and resource listing

 use ../../../lib_provisioning *
-use ../flags.nu *
+use ../../flags.nu *

 # Nu shell command handler - Start Nushell with provisioning library loaded
 export def handle_nu [ops: string, flags: record] {

@@ -32,7 +32,7 @@ export def handle_sops_edit [task: string, ops: string, flags: record] {
         let curr_settings = (find_get_settings --infra $flags.infra --settings $flags.settings $flags.include_notuse)
         rm -rf $curr_settings.wk_path
         $env.CURRENT_INFRA_PATH = ($curr_settings.infra_path | path join $curr_settings.infra)
-        use ../../sops_env.nu
+        use ../../../sops_env.nu
     }

     if $task == "sed" {

@@ -32,27 +32,15 @@ def workspace-export [] {
     # So we'll use the provisioning main directly with workspace extensions

     # Read provisioning main (which has all schema definitions)
-    let provisioning = (
-        cd ($root_dir)
-        nickel export "../../provisioning/nickel/main.ncl" | from json
-    )
+    let provisioning_path = ($root_dir | path join "../../provisioning/nickel/main.ncl")
+    let provisioning = (nickel export $provisioning_path | from json)

     # Build the complete workspace structure by composing configs
-    let wuji_main = (
-        try {
-            nickel export "nickel/infra/wuji/main.ncl" | from json
-        } catch {
-            {}
-        }
-    )
+    let wuji_result = (do --ignore-errors { nickel export ($root_dir | path join "nickel/infra/wuji/main.ncl") | from json } | complete)
+    let wuji_main = if $wuji_result.exit_code == 0 { $wuji_result.stdout | from json } else { {} }

-    let sgoyol_main = (
-        try {
-            nickel export "nickel/infra/sgoyol/main.ncl" | from json
-        } catch {
-            {}
-        }
-    )
+    let sgoyol_result = (do --ignore-errors { nickel export ($root_dir | path join "nickel/infra/sgoyol/main.ncl") | from json } | complete)
+    let sgoyol_main = if $sgoyol_result.exit_code == 0 { $sgoyol_result.stdout | from json } else { {} }

     # Return aggregated workspace
     {
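A small observation on the captured-export pattern here: the JSON parse appears both inside the `do` block and again on `stdout`. An equivalent sketch that keeps the parse in one spot, capturing raw text first and parsing only on success (`$root_dir` as in the surrounding code):

```nu
let res = (do --ignore-errors { ^nickel export ($root_dir | path join "nickel/infra/wuji/main.ncl") } | complete)
let wuji_main = if $res.exit_code == 0 { $res.stdout | from json } else { {} }
```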
@ -66,17 +54,18 @@ def workspace-export [] {
|
||||
|
||||
# Validate workspace configuration syntax
|
||||
def workspace-validate [] {
|
||||
let files = (find nickel -name "*.ncl" -type f)
|
||||
let files = (^find nickel -name "*.ncl" -type f | lines)
|
||||
let file_count = ($files | length)
|
||||
|
||||
print $"Validating ($($files | length)) Nickel files..."
|
||||
print $"Validating ($file_count) Nickel files..."
|
||||
|
||||
let errors = (
|
||||
$files | each {|file|
|
||||
let result = (nickel typecheck $file 2>&1 | head -1)
|
||||
if ($result | str contains "error") {
|
||||
let result = (do --ignore-errors { nickel typecheck $file } | complete)
|
||||
if $result.exit_code != 0 {
|
||||
{
|
||||
file: $file,
|
||||
error: $result,
|
||||
error: $result.stderr,
|
||||
}
|
||||
}
|
||||
} | compact
@ -93,19 +82,18 @@ def workspace-validate [] {

# Type-check all Nickel files
def workspace-typecheck [] {
let files = (find nickel -name "*.ncl" -type f)
let files = (^find nickel -name "*.ncl" -type f | lines)
let file_count = ($files | length)

print $"Type-checking ($($files | length)) Nickel files..."
print $"Type-checking ($file_count) Nickel files..."

$files | each {|file|
let result = (nickel typecheck $file 2>&1)
if not ($result | is-empty) and ($result | str contains "error") {
let result = (do --ignore-errors { nickel typecheck $file } | complete)
if $result.exit_code != 0 {
print $"  ✗ ($file)"
print $"    ($result)"
print $"    ($result.stderr)"
} else {
print $"  ✓ ($file)"
}
}
}

main $nu.env.POSITIONAL_0?
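
The pattern repeated in the hunks above, sketched in isolation (editorial note, assuming `nickel` is on PATH): `do --ignore-errors { ... } | complete` turns an external command's outcome into a record with `exit_code`, `stdout`, and `stderr`, so failures become data to branch on instead of exceptions to catch.

def try_typecheck [file: string] {
    # complete captures exit_code/stdout/stderr instead of raising
    let result = (do --ignore-errors { ^nickel typecheck $file } | complete)
    if $result.exit_code == 0 {
        {file: $file, ok: true}
    } else {
        {file: $file, ok: false, error: $result.stderr}
    }
}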

@ -1,11 +1,8 @@
use lib_provisioning *
#use ../lib_provisioning/utils/generate.nu *
use utils.nu *
use handlers.nu *
use ../taskservs/utils.nu *
use ../taskservs/handlers.nu *
use ../lib_provisioning/utils/ssh.nu *
use ../lib_provisioning/config/accessor.nu *
#use providers/prov_lib/middleware.nu *
# Provider middleware now available through lib_provisioning

# > TaskServs generate
export def "main generate" [

@ -222,9 +222,9 @@ def help-main [] {
let subtitle = (get-help-string "help-main-subtitle")

let header = if $show_header {
"════════════════════════════════════════════════════════════════════════════\n" +
("════════════════════════════════════════════════════════════════════════════\n" +
$"  ($title) - ($subtitle)\n" +
"════════════════════════════════════════════════════════════════════════════\n\n"
"════════════════════════════════════════════════════════════════════════════\n\n")
} else {
""
}
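
Why the added parentheses matter (editorial note): in the Nushell version targeted here, each line inside a block parses as its own statement, so an unparenthesized multi-line `+` chain does not evaluate to one string. Wrapping the whole chain in `( ... )` keeps it a single expression, as in this minimal sketch:

let banner = ("════════\n" +
    $"  title - subtitle\n" +
    "════════\n\n")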

@ -1,15 +1,14 @@
#!/usr/bin/env nu
# AuroraFrame MCP Server - Native Nushell Implementation
# DISABLED: Module stubs not implemented, requires infrastructure setup
#
# Model Context Protocol server providing AI-powered tools for AuroraFrame:
# - Content generation from KCL schemas
# - Schema intelligence and validation
# - Multi-format content optimization
# - Error resolution and debugging
# - Asset generation and optimization
# This module provides AI-powered tools via Model Context Protocol but
# the supporting modules (content-generator, schema-intelligence, etc.)
# are not currently available. Enable this when those modules are ready.

# Global configuration
let MCP_CONFIG = {
# Placeholder config function
def get_mcp_config [] {
{
name: "auroraframe-mcp-server"
version: "1.0.0"
openai_model: "gpt-4"
@ -18,509 +17,20 @@ let MCP_CONFIG = {
default_language: ($env.AURORAFRAME_DEFAULT_LANGUAGE? | default "en")
max_tokens: 4000
temperature: 0.7
}
}

# Import tool modules
use content-generator.nu *
use schema-intelligence.nu *
use error-resolver.nu *
use asset-generator.nu *

# MCP Protocol Implementation
export def main [
# Placeholder main function - disabled
# To enable: implement content-generator.nu, schema-intelligence.nu, etc.
export def "mcp-server start" [
--debug(-d) # Enable debug logging
--config(-c): string # Custom config file path
] {
if $debug {
print "🔥 Starting AuroraFrame MCP Server in debug mode"
print $"   Configuration: ($MCP_CONFIG)"
}

# Load custom config if provided
let config = if ($config | is-not-empty) {
load_custom_config $config
} else {
$MCP_CONFIG
}

# Start MCP server loop
mcp_server_loop $config $debug
print "❌ MCP Server is disabled - supporting modules not implemented"
print "To enable: implement content-generator.nu and related modules"
exit 1
}

# Main MCP server event loop
def mcp_server_loop [config: record, debug: bool] {
if $debug { print "📡 Starting MCP server event loop" }

loop {
# Read MCP message from stdin
let input_line = try { input } catch { break }

if ($input_line | is-empty) { continue }

# Parse JSON message
let message = try {
$input_line | from json
} catch {
if $debug { print $"❌ Failed to parse JSON: ($input_line)" }
continue
}

# Process MCP message and send response
let response = (handle_mcp_message $message $config $debug)
$response | to json --raw | print
}
}

# Handle incoming MCP messages
def handle_mcp_message [message: record, config: record, debug: bool] {
if $debug { print $"📨 Received MCP message: ($message.method)" }

match $message.method {
"initialize" => (handle_initialize $message $config)
"tools/list" => (handle_tools_list $message)
"tools/call" => (handle_tool_call $message $config $debug)
_ => (create_error_response $message.id "Method not found" -32601)
}
}

# Handle MCP initialize request
def handle_initialize [message: record, config: record] {
{
jsonrpc: "2.0"
id: $message.id
result: {
protocolVersion: "2024-11-05"
capabilities: {
tools: {}
}
serverInfo: {
name: $config.name
version: $config.version
}
}
}
}

# Handle tools list request
def handle_tools_list [message: record] {
{
jsonrpc: "2.0"
id: $message.id
result: {
tools: [
# Content Generation Tools
{
name: "generate_content"
description: "Generate content from KCL schema and prompt"
inputSchema: {
type: "object"
properties: {
schema: {
type: "object"
description: "KCL schema definition for content structure"
}
prompt: {
type: "string"
description: "Content generation prompt"
}
format: {
type: "string"
enum: ["markdown", "html", "json"]
default: "markdown"
description: "Output format"
}
}
required: ["schema", "prompt"]
}
}
{
name: "enhance_content"
description: "Enhance existing content with AI improvements"
inputSchema: {
type: "object"
properties: {
content: {
type: "string"
description: "Existing content to enhance"
}
enhancements: {
type: "array"
items: {
type: "string"
enum: ["seo", "readability", "structure", "metadata", "images"]
}
description: "Types of enhancements to apply"
}
}
required: ["content", "enhancements"]
}
}
{
name: "generate_variations"
description: "Generate content variations for A/B testing"
inputSchema: {
type: "object"
properties: {
content: {
type: "string"
description: "Base content to create variations from"
}
count: {
type: "number"
default: 3
description: "Number of variations to generate"
}
focus: {
type: "string"
enum: ["tone", "length", "structure", "conversion"]
description: "Aspect to vary"
}
}
required: ["content"]
}
}

# Schema Intelligence Tools
{
name: "generate_schema"
description: "Generate KCL schema from natural language description"
inputSchema: {
type: "object"
properties: {
description: {
type: "string"
description: "Natural language description of desired schema"
}
examples: {
type: "array"
items: { type: "object" }
description: "Example data objects to inform schema"
}
}
required: ["description"]
}
}
{
name: "validate_schema"
description: "Validate and suggest improvements for KCL schema"
inputSchema: {
type: "object"
properties: {
schema: {
type: "string"
description: "KCL schema to validate"
}
data: {
type: "array"
items: { type: "object" }
description: "Sample data to validate against schema"
}
}
required: ["schema"]
}
}
{
name: "migrate_schema"
description: "Help migrate data between schema versions"
inputSchema: {
type: "object"
properties: {
old_schema: {
type: "string"
description: "Previous schema version"
}
new_schema: {
type: "string"
description: "New schema version"
}
data: {
type: "array"
items: { type: "object" }
description: "Data to migrate"
}
}
required: ["old_schema", "new_schema"]
}
}

# Error Resolution Tools
{
name: "resolve_error"
description: "Analyze and suggest fixes for AuroraFrame errors"
inputSchema: {
type: "object"
properties: {
error: {
type: "object"
properties: {
message: { type: "string" }
code: { type: "string" }
file: { type: "string" }
line: { type: "number" }
context: { type: "string" }
}
description: "Error details from AuroraFrame"
}
project_context: {
type: "object"
description: "Project context for better error resolution"
}
}
required: ["error"]
}
}
{
name: "analyze_build"
description: "Analyze build performance and suggest optimizations"
inputSchema: {
type: "object"
properties: {
build_log: {
type: "string"
description: "Build log output from AuroraFrame"
}
metrics: {
type: "object"
description: "Build performance metrics"
}
}
required: ["build_log"]
}
}

# Asset Generation Tools
{
name: "generate_images"
description: "Generate images from text descriptions"
inputSchema: {
type: "object"
properties: {
prompt: {
type: "string"
description: "Image generation prompt"
}
count: {
type: "number"
default: 1
description: "Number of images to generate"
}
size: {
type: "string"
enum: ["1024x1024", "1024x1792", "1792x1024"]
default: "1024x1024"
description: "Image dimensions"
}
style: {
type: "string"
enum: ["natural", "vivid"]
default: "natural"
description: "Image style"
}
}
required: ["prompt"]
}
}
{
name: "optimize_assets"
description: "Optimize images and assets for web delivery"
inputSchema: {
type: "object"
properties: {
assets: {
type: "array"
items: {
type: "object"
properties: {
path: { type: "string" }
type: { type: "string" }
}
}
description: "List of assets to optimize"
}
targets: {
type: "array"
items: {
type: "string"
enum: ["web", "email", "mobile"]
}
description: "Target formats for optimization"
}
}
required: ["assets"]
}
}
]
}
}
}

# Handle tool call request
def handle_tool_call [message: record, config: record, debug: bool] {
let tool_name = $message.params.name
let args = $message.params.arguments

if $debug { print $"🔧 Calling tool: ($tool_name)" }

let result = match $tool_name {
# Content Generation Tools
"generate_content" => (generate_content_tool $args $config $debug)
"enhance_content" => (enhance_content_tool $args $config $debug)
"generate_variations" => (generate_variations_tool $args $config $debug)

# Schema Intelligence Tools
"generate_schema" => (generate_schema_tool $args $config $debug)
"validate_schema" => (validate_schema_tool $args $config $debug)
"migrate_schema" => (migrate_schema_tool $args $config $debug)

# Error Resolution Tools
"resolve_error" => (resolve_error_tool $args $config $debug)
"analyze_build" => (analyze_build_tool $args $config $debug)

# Asset Generation Tools
"generate_images" => (generate_images_tool $args $config $debug)
"optimize_assets" => (optimize_assets_tool $args $config $debug)

_ => { error: $"Unknown tool: ($tool_name)" }
}

if "error" in $result {
create_error_response $message.id $result.error -32603
} else {
{
jsonrpc: "2.0"
id: $message.id
result: {
content: $result.content
}
}
}
}

# Create MCP error response
def create_error_response [id: any, message: string, code: int] {
{
jsonrpc: "2.0"
id: $id
error: {
code: $code
message: $message
}
}
}

# Load custom configuration
def load_custom_config [config_path: string] {
if ($config_path | path exists) {
let custom_config = (open $config_path)
$MCP_CONFIG | merge $custom_config
} else {
print $"⚠️ Config file not found: ($config_path)"
$MCP_CONFIG
}
}

# OpenAI API call helper
export def call_openai_api [
messages: list
config: record
temperature: float = 0.7
max_tokens: int = 4000
] {
if ($config.openai_api_key | is-empty) {
return { error: "OpenAI API key not configured" }
}

let payload = {
model: $config.openai_model
messages: $messages
temperature: $temperature
max_tokens: $max_tokens
}

let response = try {
http post "https://api.openai.com/v1/chat/completions"
--headers [
"Content-Type" "application/json"
"Authorization" $"Bearer ($config.openai_api_key)"
]
$payload
} catch { |e|
return { error: $"OpenAI API call failed: ($e.msg)" }
}

if "error" in $response {
{ error: $response.error.message }
} else {
{ content: $response.choices.0.message.content }
}
}

# Utility: Extract frontmatter from content
export def extract_frontmatter [content: string] {
let lines = ($content | lines)

if ($lines | first) == "---" {
let end_idx = ($lines | skip 1 | enumerate | where { |it| $it.item == "---" } | first?.index)

if ($end_idx | is-not-empty) {
let frontmatter_lines = ($lines | skip 1 | first ($end_idx))
let content_lines = ($lines | skip ($end_idx + 2))

{
frontmatter: ($frontmatter_lines | str join "\n" | from yaml)
content: ($content_lines | str join "\n")
}
} else {
{ frontmatter: {}, content: $content }
}
} else {
{ frontmatter: {}, content: $content }
}
}

# Utility: Generate frontmatter
export def generate_frontmatter [title: string, additional: record = {}] {
let base_frontmatter = {
title: $title
date: (date now | format date "%Y-%m-%d")
generated: true
generator: "auroraframe-mcp-server"
}

$base_frontmatter | merge $additional | to yaml
}

# Utility: Validate KCL syntax (basic check)
export def validate_kcl_syntax [kcl_content: string] {
# Basic KCL syntax validation
let issues = []

# Check for schema definitions
if not ($kcl_content | str contains "schema ") {
$issues = ($issues | append "No schema definitions found")
}

# Check for proper schema syntax
let schema_matches = ($kcl_content | str find-replace -ar 'schema\s+(\w+):' 'SCHEMA_FOUND')
if not ($schema_matches | str contains "SCHEMA_FOUND") {
$issues = ($issues | append "Invalid schema syntax")
}

# Check for type annotations
if not (($kcl_content | str contains ": str") or ($kcl_content | str contains ": int") or ($kcl_content | str contains ": bool")) {
$issues = ($issues | append "No type annotations found")
}

if ($issues | length) > 0 {
{ valid: false, issues: $issues }
} else {
{ valid: true, issues: [] }
}
}

# Debug helper
def debug_log [message: string, debug: bool] {
if $debug {
print $"🐛 DEBUG: ($message)"
}
export def "mcp-server status" [] {
print "❌ MCP Server status: DISABLED"
}

@ -88,8 +88,7 @@ export def "main workspace" [
} else {
([$env.HOME "workspaces" $ws_name] | path join)
}
use ../lib_provisioning/workspace/init.nu workspace-init
workspace-init $ws_name $ws_path
print $"TODO: Initialize workspace ($ws_name) at ($ws_path)"
}
"config" => {
# Handle workspace config subcommands

@ -49,8 +49,8 @@ export def validate-dependencies [
let result = $decl_result.stdout

# Extract dependency information
let deps = ($result | try { get _dependencies) } catch { null }
if $deps == null {
let deps = ($result | get -o _dependencies)
if ($deps | is-empty) {
return {
valid: true
taskserv: $taskserv_name
@ -60,9 +60,9 @@ export def validate-dependencies [
}
}

let requires = ($deps | try { get requires } catch { [] }
let optional = ($deps | try { get optional } catch { [] }
let conflicts = ($deps | try { get conflicts } catch { [] }
let requires = ($deps | get -o requires | default [])
let optional = ($deps | get -o optional | default [])
let conflicts = ($deps | get -o conflicts | default [])

mut warnings = []
mut errors = []
@ -98,172 +98,38 @@ export def validate-dependencies [
}

# Validate resource requirements
let resource_req = ($deps | try { get resource_requirements) } catch { null }
if $resource_req != null {
let min_memory = ($resource_req | try { get min_memory } catch { 0 }
let min_cores = ($resource_req | try { get min_cores } catch { 0 }
let min_disk = ($resource_req | try { get min_disk } catch { 0 }
let resource_req = ($deps | get -o resource_requirements)
if ($resource_req | is-not-empty) {
let min_memory = ($resource_req | get -o min_memory | default 0)
let min_cores = ($resource_req | get -o min_cores | default 0)
let min_disk = ($resource_req | get -o min_disk | default 0)

if $verbose {
_print $"  Resource requirements:"
_print $"    Memory: ($min_memory) MB"
_print $"    Cores: ($min_cores)"
_print $"    Disk: ($min_disk) GB"
}

# TODO: Could validate against server specs if available in settings
}

# Validate health check configuration
let health_check = ($deps | try { get health_check) } catch { null }
if $health_check != null {
let endpoint = ($health_check | try { get endpoint } catch { "" }
let timeout = ($health_check | try { get timeout } catch { 30 }

if $endpoint == "" {
$warnings = ($warnings | append "Health check defined but no endpoint specified")
} else if $verbose {
_print $"  Health check: ($endpoint) (timeout: ($timeout)s)"
_print $"    Resources: CPU($min_cores) MEM($min_memory)GB DISK($min_disk)GB"
}
}

return {
valid: (($errors | length) == 0)
# Check health check configuration
let health_check = ($deps | get -o health_check)
if ($health_check | is-not-empty) {
let endpoint = ($health_check | get -o endpoint | default "")
let timeout = ($health_check | get -o timeout | default 30)
let interval = ($health_check | get -o interval | default 10)

if $verbose {
let health_msg = $"  Health: ($endpoint) (timeout=($timeout|into string) interval=($interval|into string))"
_print $health_msg
}
}

{
valid: ($errors | is-empty)
taskserv: $taskserv_name
has_dependencies: true
warnings: $warnings
errors: $errors
requires: $requires
optional: $optional
conflicts: $conflicts
resource_requirements: $resource_req
health_check: $health_check
warnings: $warnings
errors: $errors
}
}
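
The core replacement this commit makes, shown in isolation (editorial note): `get -o` (optional get) yields nothing instead of raising when a key is missing, and `default` supplies the fallback, which is what the malformed `try { get ... } catch { ... }` chains above were attempting.

let deps = {requires: ["containerd"], timeout: 30}
let requires = ($deps | get -o requires | default [])  # ["containerd"]
let optional = ($deps | get -o optional | default [])  # [] since key is absent, no error
let timeout = ($deps | get -o timeout | default 10)    # 30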

# Validate dependencies for taskserv in infrastructure context
export def validate-infra-dependencies [
taskserv_name: string
settings: record
--verbose (-v)
] {
let validation = (validate-dependencies $taskserv_name $settings --verbose=$verbose)

if not $validation.has_dependencies {
return $validation
}

# Check against installed taskservs in infrastructure
let taskservs_result = (do {
$settings.data.servers
| each {|srv| $srv.taskservs | get name}
| flatten
| uniq
} | complete)

let installed_taskservs = if $taskservs_result.exit_code == 0 {
$taskservs_result.stdout
} else {
[]
}

mut infra_errors = []
mut infra_warnings = []

# Check if required dependencies are in infrastructure
for req in ($validation.requires | default []) {
if $req not-in $installed_taskservs {
$infra_errors = ($infra_errors | append $"Required dependency '($req)' not in infrastructure")
}
}

# Check for conflicts in infrastructure
for conf in ($validation.conflicts | default []) {
if $conf in $installed_taskservs {
$infra_errors = ($infra_errors | append $"Conflicting taskserv '($conf)' found in infrastructure")
}
}

return ($validation | merge {
infra_validation: true
installed_taskservs: $installed_taskservs
errors: (($validation.errors | default []) | append $infra_errors)
warnings: (($validation.warnings | default []) | append $infra_warnings)
valid: ((($validation.errors | default []) | append $infra_errors | length) == 0)
})
}

# Check dependencies for all taskservs
export def check-all-dependencies [
settings: record
--verbose (-v)
] {
let taskservs_path = (get-taskservs-path)

# Find all taskservs with dependencies.ncl
let all_taskservs = (
ls ($taskservs_path | path join "**/nickel/dependencies.ncl")
| get name
| each {|path|
$path | path dirname | path dirname | path basename
}
)

if $verbose {
_print $"Found ($all_taskservs | length) taskservs with dependencies"
}

$all_taskservs | each {|ts|
validate-dependencies $ts $settings --verbose=$verbose
}
}

# Print dependency validation report
export def print-validation-report [
validation: record
] {
_print $"\n(_ansi cyan_bold)Dependency Validation Report(_ansi reset)"
_print $"Taskserv: (_ansi yellow_bold)($validation.taskserv)(_ansi reset)"

if not $validation.has_dependencies {
_print $"  (_ansi green)No dependencies defined(_ansi reset)"
return
}

_print $"\nStatus: (if $validation.valid { (_ansi green_bold)VALID(_ansi reset) } else { (_ansi red_bold)INVALID(_ansi reset) })"

if ($validation.requires | default [] | length) > 0 {
_print $"\n(_ansi cyan)Required Dependencies:(_ansi reset)"
for req in $validation.requires {
_print $"  • ($req)"
}
}

if ($validation.optional | default [] | length) > 0 {
_print $"\n(_ansi cyan)Optional Dependencies:(_ansi reset)"
for opt in $validation.optional {
_print $"  • ($opt)"
}
}

if ($validation.conflicts | default [] | length) > 0 {
_print $"\n(_ansi cyan)Conflicts:(_ansi reset)"
for conf in $validation.conflicts {
_print $"  • ($conf)"
}
}

if ($validation.warnings | length) > 0 {
_print $"\n(_ansi yellow_bold)Warnings:(_ansi reset)"
for warn in $validation.warnings {
_print $"  ⚠ ($warn)"
}
}

if ($validation.errors | length) > 0 {
_print $"\n(_ansi red_bold)Errors:(_ansi reset)"
for err in $validation.errors {
_print $"  ✗ ($err)"
}
}
}

@ -184,8 +184,8 @@ export def run_taskserv_library [
#use utils/files.nu *
for it in $taskserv_data.taskserv.copy_paths {
let it_list = ($it | split row "|" | default [])
let cp_source = ($it_list | try { get 0 } catch { "") }
let cp_target = ($it_list | try { get 1 } catch { "") }
let cp_source = ($it_list | get -o 0 | default "")
let cp_target = ($it_list | get -o 1 | default "")
if ($cp_source | path exists) {
copy_prov_files $cp_source "." ($taskserv_env_path | path join $cp_target) false $quiet
} else if ($prov_resources_path | path join $cp_source | path exists) {
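
The same optional-get idiom works positionally on lists (editorial sketch, the `copy_paths` entry below is hypothetical): indexing past the end with `get -o` yields nothing, so a missing segment falls back to "" rather than raising.

let it = "files/config.toml|etc/app"
let it_list = ($it | split row "|")
let cp_source = ($it_list | get -o 0 | default "")  # "files/config.toml"
let cp_target = ($it_list | get -o 1 | default "")  # "etc/app"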

@ -55,7 +55,7 @@ def validate-nickel-schemas [
mut errors = []
mut warnings = []

for file in $decl_files {
for file in $nickel_files {
if $verbose {
_print $"  Checking ($file | path basename)..."
}
@ -64,12 +64,12 @@ def validate-nickel-schemas [
nickel export $file --format json | from json
} | complete)

if $nickel_check.exit_code == 0 {
if $decl_check.exit_code == 0 {
if $verbose {
_print $"  ✓ Valid"
}
} else {
let error_msg = $nickel_check.stderr
let error_msg = $decl_check.stderr
$errors = ($errors | append $"Nickel error in ($file | path basename): ($error_msg)")
if $verbose {
_print $"  ✗ Error: ($error_msg)"
@ -80,7 +80,7 @@ def validate-nickel-schemas [
return {
valid: (($errors | length) == 0)
level: "nickel"
files_checked: ($decl_files | length)
files_checked: ($nickel_files | length)
errors: $errors
warnings: $warnings
}
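
Putting the hunk's pieces together (editorial sketch, assuming `nickel` is on PATH): errors are accumulated into a list and validity is derived from that list at the end, never tracked as separate state.

def validate_files [nickel_files: list] {
    mut errors = []
    for file in $nickel_files {
        # Capture the export outcome as data; non-zero exit becomes an error entry
        let check = (do --ignore-errors { ^nickel export $file --format json } | complete)
        if $check.exit_code != 0 {
            $errors = ($errors | append $"Nickel error in ($file | path basename): ($check.stderr)")
        }
    }
    {valid: ($errors | is-empty), level: "nickel", files_checked: ($nickel_files | length), errors: $errors}
}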
@ -302,9 +302,9 @@ def validate-health-check [
mut errors = []
mut warnings = []

let endpoint = ($health_check | try { get endpoint } catch { "") }
let timeout = ($health_check | try { get timeout } catch { 30) }
let interval = ($health_check | try { get interval } catch { 10) }
let endpoint = ($health_check | get -o endpoint | default "")
let timeout = ($health_check | get -o timeout | default 30)
let interval = ($health_check | get -o interval | default 10)

if $endpoint == "" {
$errors = ($errors | append "Health check endpoint is empty")