chore: update scripts

Jesús Pérez 2026-01-08 21:14:49 +00:00
parent a874f20a4d
commit 0ccd697e55
Signed by: jesus
GPG Key ID: 9F243E355E0BC939
12 changed files with 948 additions and 757 deletions

View File

@@ -6,12 +6,12 @@ OS=$(uname | tr '[:upper:]' '[:lower:]')
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
wget https://github.com/cloudflare/cfssl/releases/download/v${VERSION}/cfssl_${VERSION}_${OS}_${ARCH}
if [ -r "cfssl_${VERSION}_${OS}_${ARCH}" ] ; then
  chmod +x "cfssl_${VERSION}_${OS}_${ARCH}"
  sudo mv "cfssl_${VERSION}_${OS}_${ARCH}" /usr/local/bin/cfssl
fi
wget https://github.com/cloudflare/cfssl/releases/download/v${VERSION}/cfssljson_${VERSION}_${OS}_${ARCH}
if [ -r "cfssljson_${VERSION}_${OS}_${ARCH}" ] ; then
  chmod +x "cfssljson_${VERSION}_${OS}_${ARCH}"
  sudo mv "cfssljson_${VERSION}_${OS}_${ARCH}" /usr/local/bin/cfssljson
fi

View File

@@ -1,9 +1,9 @@
#!/usr/bin/env bash
# Info: Script to instal NUSHELL for Provisioning
# Author: JesusPerezLorenzo
# Release: 1.0.5
# Date: 8-03-2024
test_runner() {
  echo -e "\nTest installation ... "
  RUNNER_PATH=$(type -P $RUNNER)
@@ -14,27 +14,27 @@ test_runner() {
    echo -e "\n🛑 Error $RUNNER ! Review installation " && exit 1
  fi
}
register_plugins() {
  local source=$1
  local warn=$2
  [ ! -d "$source" ] && echo "🛑 Error path $source is not a directory" && exit 1
  [ -z "$(ls $source/nu_plugin_* 2> /dev/null)" ] && echo "🛑 Error no 'nu_plugin_*' found in $source to register" && exit 1
  echo -e "Nushell $NU_VERSION plugins registration \n"
  if [ -n "$warn" ] ; then
    echo -e $"❗Warning: Be sure Nushell plugins are compiled for same Nushell version $NU_VERSION\n otherwise will probably not work and will break installation !\n"
  fi
  for plugin in ${source}/nu_plugin_*
  do
    if $source/nu -c "register \"${plugin}\" " 2>/dev/null ; then
      echo -en "$(basename $plugin)"
      if [[ "$plugin" == *_notifications ]] ; then
        echo -e " registred "
      else
        echo -e "\t\t registred "
      fi
    fi
  done
  # Install nu_plugin_tera if available
  if command -v cargo >/dev/null 2>&1; then
    echo -e "Installing nu_plugin_tera..."
@@ -47,22 +47,26 @@ register_plugins() {
    else
      echo -e "❗ Failed to install nu_plugin_tera"
    fi
-    # Install nu_plugin_kcl if available
-    echo -e "Installing nu_plugin_kcl..."
-    if cargo install nu_plugin_kcl; then
-      if $source/nu -c "register ~/.cargo/bin/nu_plugin_kcl" 2>/dev/null; then
-        echo -e "nu_plugin_kcl\t\t registred"
-      else
-        echo -e "❗ Failed to register nu_plugin_kcl"
-      fi
-    else
-      echo -e "❗ Failed to install nu_plugin_kcl"
-    fi
  else
-    echo -e "❗ Cargo not found - nu_plugin_tera and nu_plugin_kcl not installed"
    echo -e "❗ Cargo not found - nu_plugin_tera not installed"
  fi
}
# Check Nickel configuration language installation
check_nickel_installation() {
  if command -v nickel >/dev/null 2>&1; then
    nickel_version=$(nickel --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1)
    echo -e "Nickel\t\t\t already installed (version $nickel_version)"
    return 0
  else
    echo -e "⚠️ Nickel not found - Optional but recommended for config rendering"
    echo -e " Install via: \$PROVISIONING/core/cli/tools-install nickel"
    echo -e " Recommended method: nix profile install nixpkgs#nickel"
    echo -e " (Pre-built binaries have Nix library dependencies)"
    echo -e " https://nickel-lang.org/getting-started"
    return 1
  fi
}
install_mode() {
  local mode=$1
@@ -72,13 +76,13 @@ install_mode() {
      echo "Mode $mode installed"
    fi
    ;;
  *)
    NC_PATH=$(type -P nc)
    if [ -z "$NC_PATH" ] ; then
      echo "'nc' command not found in PATH. Install 'nc' (netcat) command."
      exit 1
    fi
    if cp $PROVISIONING_MODELS_SRC/no_plugins_defs.nu $PROVISIONING_MODELS_TARGET/plugins_defs.nu ; then
      echo "Mode 'no plugins' installed"
    fi
  esac
@@ -95,7 +99,7 @@ install_from_url() {
  lib_mode=$(grep NU_LIB $PROVISIONING/core/versions | cut -f2 -d"=" | sed 's/"//g')
  url_source=$(grep NU_SOURCE $PROVISIONING/core/versions | cut -f2 -d"=" | sed 's/"//g')
  download_path="nu-${NU_VERSION}-${ARCH_ORG}-${OS}"
  case "$OS" in
    linux) download_path="nu-${NU_VERSION}-${ARCH_ORG}-unknown-${OS}-gnu"
    ;;
  esac
@@ -107,7 +111,7 @@ install_from_url() {
    return 1
  fi
  echo -e "Nushell $NU_VERSION extracting ..."
  if ! tar xzf $tar_file ; then
    echo "🛑 Error download $download_url " && exit 1
    return 1
  fi
@@ -117,9 +121,9 @@ install_from_url() {
    return 1
  fi
  echo -e "Nushell $NU_VERSION installing ..."
  if [ -r "$download_path/nu" ] ; then
    chmod +x $download_path/nu
    if ! sudo cp $download_path/nu $target_path ; then
      echo "🛑 Error installing \"nu\" in $target_path"
      rm -rf $download_path
      return 1
@@ -127,14 +131,14 @@ install_from_url() {
  fi
  rm -rf $download_path
  echo "✅ Nushell and installed in $target_path"
  [[ ! "$PATH" =~ $target_path ]] && echo "❗ Warning: \"$target_path\" is not in your PATH for $(basename $SHELL) ! Fix your PATH settings "
  echo ""
  # TDOO install plguins via cargo ??
  # TODO a NU version without PLUGINS
  # register_plugins $target_path
}
install_from_local() {
  local source=$1
  local target=$2
  local tmpdir
@@ -146,44 +150,47 @@ install_from_local() {
  tmpdir=$(mktemp -d)
  cp $source/*gz $tmpdir
  for file in $tmpdir/*gz ; do gunzip $file ; done
  if ! sudo mv $tmpdir/* $target ; then
    echo -e "🛑 Errors to install Nushell and plugins in \"${target}\""
    rm -rf $tmpdir
    return 1
  fi
  rm -rf $tmpdir
  echo "✅ Nushell and plugins installed in $target"
  [[ ! "$PATH" =~ $target ]] && echo "❗ Warning: \"$target\" is not in your PATH for $(basename $SHELL) ! Fix your PATH settings "
  echo ""
  register_plugins $target
}
message_install() {
  local ask=$1
  local msg
  local answer
  [ -r "$PROVISIONING/resources/ascii.txt" ] && cat "$PROVISIONING/resources/ascii.txt" && echo ""
  if [ -z "$NU" ] ; then
    echo -e "🛑 Nushell $NU_VERSION not installed is mandatory for \"${RUNNER}\""
    echo -e "Check PATH or https://www.nushell.sh/book/installation.html with version $NU_VERSION"
  else
    echo -e "Nushell $NU_VERSION update for \"${RUNNER}\""
  fi
  echo ""
  if [ -n "$ask" ] && [ -d "$(dirname $0)/nu/${ARCH}-${OS}" ] ; then
    echo -en "Install Nushell $(uname -m) $(uname) in \"$INSTALL_PATH\" now (yes/no) ? : "
    read -r answer
    if [ "$answer" != "yes" ] && [ "$answer" != "y" ] ; then
      return 1
    fi
  fi
  if [ -d "$(dirname $0)/nu/${ARCH}-${OS}" ] ; then
    install_from_local $(dirname $0)/nu/${ARCH}-${OS} $INSTALL_PATH
    install_mode "ui"
  else
    install_from_url $INSTALL_PATH
    install_mode ""
  fi
  echo ""
  echo -e "Checking optional configuration languages..."
  check_nickel_installation
}
set +o errexit
@@ -195,21 +202,21 @@ export NU=$(type -P nu)
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
[ -r "../env-provisioning" ] && source ../env-provisioning
[ -r "env-provisioning" ] && source ./env-provisioning
#[ -r ".env" ] && source .env set
set +o allexport
if [ -n "$1" ] && [ -d "$1" ] && [ -d "$1/core" ] ; then
  export PROVISIONING=$1
else
  export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}
fi
TASK=${1:-check}
shift
if [ "$TASK" == "mode" ] && [ -n "$1" ] ; then
  INSTALL_MODE=$1
  shift
else
  INSTALL_MODE="ui"
fi
@@ -230,21 +237,21 @@ PROVISIONING_MODELS_SRC=$PROVISIONING/core/nulib/models
PROVISIONING_MODELS_TARGET=$PROVISIONING/core/nulib/lib_provisioning
USAGE="$(basename $0) [install | reinstall | mode | check] no-ask mode-?? "
case $TASK in
  install)
    message_install $ASK_MESSAGE
    ;;
  reinstall | update)
    INSTALL_PATH=$(dirname $NU)
    if message_install ; then
      test_runner
    fi
    ;;
  mode)
    install_mode $INSTALL_MODE
    ;;
  check)
    $PROVISIONING/core/bin/tools-install check nu
    ;;
  help|-h)
    echo "$USAGE"

View File

@@ -10,7 +10,7 @@ use ../nulib/providers/discover.nu *
use ../nulib/providers/load.nu *
use ../nulib/clusters/discover.nu *
use ../nulib/clusters/load.nu *
-use ../nulib/lib_provisioning/kcl_module_loader.nu *
use ../nulib/lib_provisioning/module_loader.nu *
use ../nulib/lib_provisioning/config/accessor.nu config-get
# Main module loader command with enhanced features
@@ -82,11 +82,11 @@ export def "main discover" [
  }
}
-# Sync KCL dependencies for infrastructure workspace
# Sync Nickel dependencies for infrastructure workspace
-export def "main sync-kcl" [
export def "main sync" [
  infra: string, # Infrastructure name or path
  --manifest: string = "providers.manifest.yaml", # Manifest file name
-  --kcl # Show KCL module info after sync
  --show-modules # Show module info after sync
] {
  # Resolve infrastructure path
  let infra_path = if ($infra | path exists) {
@@ -102,14 +102,14 @@ export def "main sync-kcl" [
    }
  }
-  # Sync KCL dependencies using library function
  # Sync Nickel dependencies using library function
-  sync-kcl-dependencies $infra_path --manifest $manifest
  sync-nickel-dependencies $infra_path --manifest $manifest
-  # Show KCL module info if requested
  # Show Nickel module info if requested
-  if $kcl {
  if $show_modules {
    print ""
-    print "📋 KCL Modules:"
    print "📋 Nickel Modules:"
-    let modules_dir = (get-config-value "kcl" "modules_dir")
    let modules_dir = (get-config-value "nickel" "modules_dir")
    let modules_path = ($infra_path | path join $modules_dir)
    if ($modules_path | path exists) {
@@ -382,7 +382,7 @@ export def "main override create" [
  $"# Override for ($module) in ($infra)
# Based on template: ($from)
-import ($type).*.($module).kcl.($module) as base
import ($type).*.($module).ncl.($module) as base
import provisioning.workspace.templates.($type).($from) as template
# Infrastructure-specific overrides
@@ -396,7 +396,7 @@ import provisioning.workspace.templates.($type).($from) as template
  } else {
    $"# Override for ($module) in ($infra)
-import ($type).*.($module).kcl.($module) as base
import ($type).*.($module).ncl.($module) as base
# Infrastructure-specific overrides
($module)_($infra)_override: base.($module | str capitalize) = base.($module)_config {
@@ -627,29 +627,29 @@ def load_extension_to_workspace [
    cp -r $source_module_path $parent_dir
    print $" ✓ Schemas copied to workspace .($extension_type)/"
-    # STEP 2a: Update individual module's kcl.mod with correct workspace paths
    # STEP 2a: Update individual module's nickel.mod with correct workspace paths
    # Calculate relative paths based on categorization depth
    let provisioning_path = if ($group_path | is-not-empty) {
-      # Categorized: .{ext}/{category}/{module}/kcl/ -> ../../../../.kcl/packages/provisioning
-      "../../../../.kcl/packages/provisioning"
      # Categorized: .{ext}/{category}/{module}/nickel/ -> ../../../../.nickel/packages/provisioning
      "../../../../.nickel/packages/provisioning"
    } else {
-      # Non-categorized: .{ext}/{module}/kcl/ -> ../../../.kcl/packages/provisioning
-      "../../../.kcl/packages/provisioning"
      # Non-categorized: .{ext}/{module}/nickel/ -> ../../../.nickel/packages/provisioning
      "../../../.nickel/packages/provisioning"
    }
    let parent_path = if ($group_path | is-not-empty) {
-      # Categorized: .{ext}/{category}/{module}/kcl/ -> ../../..
      # Categorized: .{ext}/{category}/{module}/nickel/ -> ../../..
      "../../.."
    } else {
-      # Non-categorized: .{ext}/{module}/kcl/ -> ../..
      # Non-categorized: .{ext}/{module}/nickel/ -> ../..
      "../.."
    }
-    # Update the module's kcl.mod file with workspace-relative paths
-    let module_kcl_mod_path = ($target_module_path | path join "kcl" "kcl.mod")
-    if ($module_kcl_mod_path | path exists) {
-      print $" 🔧 Updating module kcl.mod with workspace paths"
-      let module_kcl_mod_content = $"[package]
    # Update the module's nickel.mod file with workspace-relative paths
    let module_nickel_mod_path = ($target_module_path | path join "nickel" "nickel.mod")
    if ($module_nickel_mod_path | path exists) {
      print $" 🔧 Updating module nickel.mod with workspace paths"
      let module_nickel_mod_content = $"[package]
name = \"($module)\"
edition = \"v0.11.3\"
version = \"0.0.1\"
@@ -658,24 +658,24 @@ version = \"0.0.1\"
provisioning = { path = \"($provisioning_path)\", version = \"0.0.1\" }
($extension_type) = { path = \"($parent_path)\", version = \"0.1.0\" }
"
-      $module_kcl_mod_content | save -f $module_kcl_mod_path
-      print $" ✓ Updated kcl.mod: ($module_kcl_mod_path)"
      $module_nickel_mod_content | save -f $module_nickel_mod_path
      print $" ✓ Updated nickel.mod: ($module_nickel_mod_path)"
    }
  } else {
    print $" ⚠️ Warning: Source not found at ($source_module_path)"
  }
-  # STEP 2b: Create kcl.mod in workspace/.{extension_type}
-  let extension_kcl_mod = ($target_schemas_dir | path join "kcl.mod")
-  if not ($extension_kcl_mod | path exists) {
-    print $" 📦 Creating kcl.mod for .($extension_type) package"
-    let kcl_mod_content = $"[package]
  # STEP 2b: Create nickel.mod in workspace/.{extension_type}
  let extension_nickel_mod = ($target_schemas_dir | path join "nickel.mod")
  if not ($extension_nickel_mod | path exists) {
    print $" 📦 Creating nickel.mod for .($extension_type) package"
    let nickel_mod_content = $"[package]
name = \"($extension_type)\"
edition = \"v0.11.3\"
version = \"0.1.0\"
description = \"Workspace-level ($extension_type) schemas\"
"
-    $kcl_mod_content | save $extension_kcl_mod
    $nickel_mod_content | save $extension_nickel_mod
  }
  # Ensure config directory exists
@@ -690,9 +690,9 @@ description = \"Workspace-level ($extension_type) schemas\"
  # Build import statement with "as {module}" alias
  let import_stmt = if ($group_path | is-not-empty) {
-    $"import ($extension_type).($group_path).($module).kcl.($module) as ($module)"
    $"import ($extension_type).($group_path).($module).ncl.($module) as ($module)"
  } else {
-    $"import ($extension_type).($module).kcl.($module) as ($module)"
    $"import ($extension_type).($module).ncl.($module) as ($module)"
  }
  # Get relative paths for comments
@@ -719,7 +719,7 @@ description = \"Workspace-level ($extension_type) schemas\"
($import_stmt)
# TODO: Configure your ($module) instance
-# See available schemas at: ($relative_schema_path)/kcl/
# See available schemas at: ($relative_schema_path)/nickel/
"
  }
@@ -727,15 +727,15 @@ description = \"Workspace-level ($extension_type) schemas\"
  print $" ✓ Config created: ($config_file_path)"
  print $" 📝 Edit ($extension_type)/($module).k to configure settings"
-  # STEP 3: Update infra kcl.mod
  # STEP 3: Update infra nickel.mod
  if ($workspace_abs | str contains "/infra/") {
-    let kcl_mod_path = ($workspace_abs | path join "kcl.mod")
-    if ($kcl_mod_path | path exists) {
-      let kcl_mod_content = (open $kcl_mod_path)
-      if not ($kcl_mod_content | str contains $"($extension_type) =") {
-        print $" 🔧 Updating kcl.mod to include ($extension_type) dependency"
    let nickel_mod_path = ($workspace_abs | path join "nickel.mod")
    if ($nickel_mod_path | path exists) {
      let nickel_mod_content = (open $nickel_mod_path)
      if not ($nickel_mod_content | str contains $"($extension_type) =") {
        print $" 🔧 Updating nickel.mod to include ($extension_type) dependency"
        let new_dependency = $"\n# Workspace-level ($extension_type) \(shared across infras\)\n($extension_type) = { path = \"../../.($extension_type)\" }\n"
-        $"($kcl_mod_content)($new_dependency)" | save -f $kcl_mod_path
        $"($nickel_mod_content)($new_dependency)" | save -f $nickel_mod_path
      }
    }
  }
@@ -808,7 +808,7 @@ def print_enhanced_help [] {
  print ""
  print "CORE COMMANDS:"
  print " discover <type> [query] [--format <fmt>] [--category <cat>] - Discover available modules"
-  print " sync-kcl <infra> [--manifest <file>] [--kcl] - Sync KCL dependencies for infrastructure"
  print " sync <infra> [--manifest <file>] [--show-modules] - Sync Nickel dependencies for infrastructure"
  print " load <type> <workspace> <modules...> [--layer <layer>] - Load modules into workspace"
  print " list <type> <workspace> [--layer <layer>] - List loaded modules"
  print " unload <type> <workspace> <module> [--layer <layer>] - Unload module from workspace"
@@ -978,4 +978,4 @@ def print_override_help [] {
  print "Examples:"
  print " module-loader override create taskservs wuji kubernetes"
  print " module-loader override create taskservs wuji redis --from databases/redis"
}

View File

@@ -221,4 +221,4 @@ def print_help [] {
  print " pack clean --all"
  print ""
  print "Distribution configuration in: provisioning/config/config.defaults.toml [distribution]"
}

View File

@@ -1,29 +1,29 @@
#!/bin/bash
# Info: Script to install providers
# Author: JesusPerezLorenzo
# Release: 1.0
# Date: 12-11-2023
[ "$DEBUG" == "-x" ] && set -x
USAGE="install-tools [ tool-name: tera k9s, etc | all] [--update]
As alternative use environment var TOOL_TO_INSTALL with a list-of-tools (separeted with spaces)
Versions are set in ./versions file
This can be called by directly with an argumet or from an other srcipt
"
ORG=$(pwd)
function _install_cmds {
  OS="$(uname | tr '[:upper:]' '[:lower:]')"
  local has_cmd
  for cmd in $CMDS_PROVISIONING
  do
    has_cmd=$(type -P $cmd)
    if [ -z "$has_cmd" ] ; then
      case "$(OS)" in
        darwin) brew install $cmd ;;
        linux) sudo apt install $cmd ;;
        *) echo "Install $cmd in your PATH" ;;
@@ -41,8 +41,8 @@ function _install_tools {
  # local jq_version
  # local has_yq
  # local yq_version
-  local has_kcl
-  local kcl_version
  local has_nickel
  local nickel_version
  local has_tera
  local tera_version
  local has_k9s
@@ -56,21 +56,21 @@ function _install_tools {
  # local has_aws
  # local aws_version
  OS="$(uname | tr '[:upper:]' '[:lower:]')"
  ORG_OS=$(uname)
  ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
  ORG_ARCH="$(uname -m)"
  if [ -z "$CHECK_ONLY" ] and [ "$match" == "all" ] ; then
    _install_cmds
  fi
  # if [ -n "$JQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "jq" ] ; then
  #   has_jq=$(type -P jq)
  #   num_version="0"
  #   [ -n "$has_jq" ] && jq_version=$(jq -V | sed 's/jq-//g') && num_version=${jq_version//\./}
  #   expected_version_num=${JQ_VERSION//\./}
  #   if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
  #     curl -fsSLO "https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/jq-${OS}-${ARCH}" &&
  #     chmod +x "jq-${OS}-${ARCH}" &&
  #     sudo mv "jq-${OS}-${ARCH}" /usr/local/bin/jq &&
@@ -81,16 +81,16 @@ function _install_tools {
  #     printf "%s\t%s\n" "jq" "already $JQ_VERSION"
  #   fi
  # fi
  # if [ -n "$YQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "yq" ] ; then
  #   has_yq=$(type -P yq)
  #   num_version="0"
  #   [ -n "$has_yq" ] && yq_version=$(yq -V | cut -f4 -d" " | sed 's/v//g') && num_version=${yq_version//\./}
  #   expected_version_num=${YQ_VERSION//\./}
  #   if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
  #     curl -fsSLO "https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_${OS}_${ARCH}.tar.gz" &&
  #     tar -xzf "yq_${OS}_${ARCH}.tar.gz" &&
  #     sudo mv "yq_${OS}_${ARCH}" /usr/local/bin/yq &&
  #     sudo ./install-man-page.sh &&
  #     rm -f install-man-page.sh yq.1 "yq_${OS}_${ARCH}.tar.gz" &&
  #     printf "%s\t%s\n" "yq" "installed $YQ_VERSION"
  #   elif [ -n "$CHECK_ONLY" ] ; then
@@ -99,36 +99,34 @@ function _install_tools {
  #     printf "%s\t%s\n" "yq" "already $YQ_VERSION"
  #   fi
  # fi
if [ -n "$NICKEL_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "nickel" ] ; then
if [ -n "$KCL_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "kcl" ] ; then has_nickel=$(type -P nickel)
has_kcl=$(type -P kcl)
num_version="0" num_version="0"
[ -n "$has_kcl" ] && kcl_version=$(kcl -v | cut -f3 -d" " | sed 's/ //g') && num_version=${kcl_version//\./} [ -n "$has_nickel" ] && nickel_version=$(nickel --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) && num_version=${nickel_version//\./}
expected_version_num=${KCL_VERSION//\./} expected_version_num=${NICKEL_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
curl -fsSLO "https://github.com/kcl-lang/cli/releases/download/v${KCL_VERSION}/kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" && echo "⚠️ Nickel installation/update required"
tar -xzf "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" && echo " Recommended method: nix profile install nixpkgs#nickel"
sudo mv kcl /usr/local/bin/kcl && echo " Alternative: cargo install nickel-lang-cli --version ${NICKEL_VERSION}"
rm -f "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" && echo " https://nickel-lang.org/getting-started"
printf "%s\t%s\n" "kcl" "installed $KCL_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "kcl" "$kcl_version" "expected $KCL_VERSION" printf "%s\t%s\t%s\n" "nickel" "$nickel_version" "expected $NICKEL_VERSION"
else else
printf "%s\t%s\n" "kcl" "already $KCL_VERSION" printf "%s\t%s\n" "nickel" "already $NICKEL_VERSION"
fi fi
fi fi
if [ -n "$TERA_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "tera" ] ; then if [ -n "$TERA_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "tera" ] ; then
has_tera=$(type -P tera) has_tera=$(type -P tera)
num_version="0" num_version="0"
[ -n "$has_tera" ] && tera_version=$(tera -V | cut -f2 -d" " | sed 's/teracli//g') && num_version=${tera_version//\./} [ -n "$has_tera" ] && tera_version=$(tera -V | cut -f2 -d" " | sed 's/teracli//g') && num_version=${tera_version//\./}
expected_version_num=${TERA_VERSION//\./} expected_version_num=${TERA_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
if [ -x "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" ] ; then if [ -x "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" ] ; then
sudo cp "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" /usr/local/bin/tera && printf "%s\t%s\n" "tera" "installed $TERA_VERSION" sudo cp "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" /usr/local/bin/tera && printf "%s\t%s\n" "tera" "installed $TERA_VERSION"
else else
echo "Error: $(dirname "$0")/../ttools/tera_${OS}_${ARCH} not found !!" echo "Error: $(dirname "$0")/../ttools/tera_${OS}_${ARCH} not found !!"
exit 2 exit 2
fi fi
elif [ -n "$CHECK_ONLY" ] ; then elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "tera" "$tera_version" "expected $TERA_VERSION" printf "%s\t%s\t%s\n" "tera" "$tera_version" "expected $TERA_VERSION"
else else
@@ -140,9 +138,9 @@ function _install_tools {
    num_version="0"
    [ -n "$has_k9s" ] && k9s_version="$( k9s version | grep Version | cut -f2 -d"v" | sed 's/ //g')" && num_version=${k9s_version//\./}
    expected_version_num=${K9S_VERSION//\./}
    if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
      mkdir -p k9s && cd k9s &&
      curl -fsSLO https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_${ORG_OS}_${ARCH}.tar.gz &&
      tar -xzf "k9s_${ORG_OS}_${ARCH}.tar.gz" &&
      sudo mv k9s /usr/local/bin &&
      cd "$ORG" && rm -rf /tmp/k9s "/k9s_${ORG_OS}_${ARCH}.tar.gz" &&
@@ -158,12 +156,12 @@ function _install_tools {
    num_version="0"
    [ -n "$has_age" ] && age_version="${AGE_VERSION}" && num_version=${age_version//\./}
    expected_version_num=${AGE_VERSION//\./}
    if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
      curl -fsSLO https://github.com/FiloSottile/age/releases/download/v${AGE_VERSION}/age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
      tar -xzf age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
      sudo mv age/age /usr/local/bin &&
      sudo mv age/age-keygen /usr/local/bin &&
      rm -rf age "age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz" &&
      printf "%s\t%s\n" "age" "installed $AGE_VERSION"
    elif [ -n "$CHECK_ONLY" ] ; then
      printf "%s\t%s\t%s\n" "age" "$age_version" "expected $AGE_VERSION"
@@ -176,11 +174,11 @@ function _install_tools {
    num_version="0"
    [ -n "$has_sops" ] && sops_version="$(sops -v | cut -f2 -d" " | sed 's/ //g')" && num_version=${sops_version//\./}
    expected_version_num=${SOPS_VERSION//\./}
    if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
      mkdir -p sops && cd sops &&
      curl -fsSLO https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.${OS}.${ARCH} &&
      mv sops-v${SOPS_VERSION}.${OS}.${ARCH} sops &&
      chmod +x sops &&
      sudo mv sops /usr/local/bin &&
      rm -f sops-v${SOPS_VERSION}.${OS}.${ARCH} sops &&
      printf "%s\t%s\n" "sops" "installed $SOPS_VERSION"
@@ -195,9 +193,9 @@ function _install_tools {
  #   num_version="0"
  #   [ -n "$has_upctl" ] && upctl_version=$(upctl version | grep "Version" | cut -f2 -d":" | sed 's/ //g') && num_version=${upctl_version//\./}
  #   expected_version_num=${UPCTL_VERSION//\./}
  #   if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
  #     mkdir -p upctl && cd upctl &&
  #     curl -fsSLO https://github.com/UpCloudLtd/upcloud-cli/releases/download/v${UPCTL_VERSION}/upcloud-cli_${UPCTL_VERSION}_${OS}_${ORG_ARCH}.tar.gz &&
  #     tar -xzf "upcloud-cli_${UPCTL_VERSION}_${OS}_${ORG_ARCH}.tar.gz" &&
  #     sudo mv upctl /usr/local/bin &&
  #     cd "$ORG" && rm -rf /tmp/upct "/upcloud-cli_${UPCTL_VERSION}_${OS}_${ORG_ARCH}.tar.gz"
@@ -209,16 +207,16 @@ function _install_tools {
  #   fi
  # fi
  # if [ -n "$AWS_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "aws" ] ; then
  #   [ -r "/usr/bin/aws" ] && mv /usr/bin/aws /usr/bin/_aws
  #   has_aws=$(type -P aws)
  #   num_version="0"
  #   [ -n "$has_aws" ] && aws_version=$(aws --version | cut -f1 -d" " | sed 's,aws-cli/,,g') && num_version=${aws_version//\./}
  #   expected_version_num=${AWS_VERSION//\./}
  #   if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
  #     cd "$ORG" || exit 1
  #     curl "https://awscli.amazonaws.com/awscli-exe-${OS}-${ORG_ARCH}.zip" -o "awscliv2.zip"
  #     unzip awscliv2.zip >/dev/null
  #     [ "$1" != "-update" ] && [ -d "/usr/local/aws-cli" ] && sudo rm -rf "/usr/local/aws-cli"
  #     sudo ./aws/install && printf "%s\t%s\n" "aws" "installed $AWS_VERSION"
  #     #sudo ./aws/install $options && echo "aws cli installed"
  #     cd "$ORG" && rm -rf awscliv2.zip
@@ -230,9 +228,9 @@ function _install_tools {
  #   fi
}
function get_providers {
  local list
  local name
  for item in $PROVIDERS_PATH/*
  do
@@ -250,26 +248,26 @@ function get_providers {
function _on_providers {
  local providers_list=$1
  [ -z "$providers_list" ] || [[ "$providers_list" == -* ]] && providers_list=${PROVISIONING_PROVIDERS:-all}
  if [ "$providers_list" == "all" ] ; then
    providers_list=$(get_providers)
  fi
  for provider in $providers_list
  do
    [ ! -d "$PROVIDERS_PATH/$provider/templates" ] && [ ! -r "$PROVIDERS_PATH/$provider/provisioning.yam" ] && continue
    if [ ! -r "$PROVIDERS_PATH/$provider/bin/install.sh" ] ; then
      echo "🛑 Error on $provider no $PROVIDERS_PATH/$provider/bin/install.sh found"
      continue
    fi
    "$PROVIDERS_PATH/$provider/bin/install.sh" "$@"
  done
}
set -o allexport
## shellcheck disable=SC1090
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
[ -r "../env-provisioning" ] && source ../env-provisioning
[ -r "env-provisioning" ] && source ./env-provisioning
#[ -r ".env" ] && source .env set
set +o allexport
export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}

View File

@@ -1,15 +1,18 @@
#!/usr/bin/env bash
# Info: Script to run Provisioning
# Author: JesusPerezLorenzo
# Release: 1.0.10
# Date: 2025-10-02
set +o errexit
set +o pipefail
# Debug: log startup
[ "$PROVISIONING_DEBUG_STARTUP" = "true" ] && echo "[DEBUG] Wrapper started with args: $@" >&2
export NU=$(type -P nu)
_release() {
  grep "^# Release:" "$0" | sed "s/# Release: //g"
}
@@ -52,11 +55,12 @@ case "$1" in
  # Note: "setup" is now handled by the main provisioning CLI dispatcher
  # No special module handling needed
  -mod)
-    export PROVISIONING_MODULE=$(echo "$2" | sed 's/ //g' | cut -f1 -d"|")
    PROVISIONING_MODULE=$(echo "$2" | sed 's/ //g' | cut -f1 -d"|")
    PROVISIONING_MODULE_TASK=$(echo "$2" | sed 's/ //g' | cut -f2 -d"|")
    [ "$PROVISIONING_MODULE" == "$PROVISIONING_MODULE_TASK" ] && PROVISIONING_MODULE_TASK=""
    shift 2
    CMD_ARGS=$@
    [ "$PROVISIONING_DEBUG_STARTUP" = "true" ] && echo "[DEBUG] -mod detected: MODULE=$PROVISIONING_MODULE, TASK=$PROVISIONING_MODULE_TASK, CMD_ARGS=$CMD_ARGS" >&2
    ;;
esac
NU_ARGS=""
@@ -75,15 +79,546 @@ case "$(uname | tr '[:upper:]' '[:lower:]')" in
  ;;
esac
-# FAST-PATH: Help commands and no-arguments case don't need full config loading
-# Detect help-only commands and empty arguments, use minimal help system
# ════════════════════════════════════════════════════════════════════════════════
# DAEMON ROUTING - Try daemon for all commands (except setup/help/interactive)
# Falls back to traditional handlers if daemon unavailable
# ════════════════════════════════════════════════════════════════════════════════
DAEMON_ENDPOINT="http://127.0.0.1:9091/execute"
# Function to execute command via daemon
execute_via_daemon() {
local cmd="$1"
shift
# Build JSON array of arguments (simple bash)
local args_json="["
local first=1
for arg in "$@"; do
[ $first -eq 0 ] && args_json="$args_json,"
args_json="$args_json\"$(echo "$arg" | sed 's/"/\\"/g')\""
first=0
done
args_json="$args_json]"
# Determine timeout based on command type
# Heavy commands (create, delete, update) get longer timeout
local timeout=0.5
case "$cmd" in
create|delete|update|setup|init) timeout=5 ;;
*) timeout=0.2 ;;
esac
# Make request and extract stdout
curl -s -m $timeout -X POST "$DAEMON_ENDPOINT" \
-H "Content-Type: application/json" \
-d "{\"command\":\"$cmd\",\"args\":$args_json,\"timeout_ms\":30000}" 2>/dev/null | \
sed -n 's/.*"stdout":"\(.*\)","execution.*/\1/p' | \
sed 's/\\n/\n/g'
}
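
The execute_via_daemon helper above hand-builds the JSON payload and scrapes the daemon's reply with sed, so the request can also be replayed by hand to sanity-check the daemon outside the wrapper. The snippet below is a minimal sketch and not part of this commit: it only assumes what the wrapper itself shows, namely the /execute endpoint, the {"command", "args", "timeout_ms"} payload shape, and a response that embeds a "stdout" field followed by execution metadata; the "server list" payload is just an illustrative lightweight command.

#!/usr/bin/env bash
# Hypothetical smoke test for the daemon fast-path (illustration only).
ENDPOINT="http://127.0.0.1:9091/execute"
# Same payload shape the wrapper builds: command, JSON array of args, timeout_ms.
payload='{"command":"server","args":["list"],"timeout_ms":30000}'
# -m 0.2 mirrors the wrapper's short timeout for lightweight commands.
response=$(curl -s -m 0.2 -X POST "$ENDPOINT" \
  -H "Content-Type: application/json" \
  -d "$payload") || { echo "daemon unavailable, wrapper would fall back" >&2; exit 1; }
# Extract the embedded stdout the same way the wrapper does.
echo "$response" | sed -n 's/.*"stdout":"\(.*\)","execution.*/\1/p' | sed 's/\\n/\n/g'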
# Try daemon ONLY for lightweight commands (list, show, status)
# Skip daemon for heavy commands (create, delete, update) because bash wrapper is slow
if [ "$1" = "server" ] || [ "$1" = "s" ]; then
if [ "$2" = "list" ] || [ -z "$2" ]; then
# Light command - try daemon
[ "$PROVISIONING_DEBUG" = "true" ] && echo "⚡ Attempting daemon execution..." >&2
DAEMON_OUTPUT=$(execute_via_daemon "$@" 2>/dev/null)
if [ -n "$DAEMON_OUTPUT" ]; then
echo "$DAEMON_OUTPUT"
exit 0
fi
[ "$PROVISIONING_DEBUG" = "true" ] && echo "⚠️ Daemon unavailable, using traditional handlers..." >&2
fi
# NOTE: Command reordering (server create -> create server) has been removed.
# The Nushell dispatcher in provisioning/core/nulib/main_provisioning/dispatcher.nu
# handles command routing correctly and expects "server create" format.
# The reorder_args function in provisioning script handles any flag reordering needed.
fi
# ════════════════════════════════════════════════════════════════════════════════
# FAST-PATH: Commands that don't need full config loading or platform bootstrap
# These commands use lib_minimal.nu for <100ms execution
# (ONLY REACHED if daemon is not available)
# ════════════════════════════════════════════════════════════════════════════════
# Help commands (uses help_minimal.nu)
if [ -z "$1" ] || [ "$1" = "help" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ] || [ "$1" = "--helpinfo" ]; then if [ -z "$1" ] || [ "$1" = "help" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ] || [ "$1" = "--helpinfo" ]; then
category="${2:-}" category="${2:-}"
$NU -n -c "source '$PROVISIONING/core/nulib/help_minimal.nu'; provisioning-help '$category' | print" 2>/dev/null $NU -n -c "source '$PROVISIONING/core/nulib/help_minimal.nu'; provisioning-help '$category' | print" 2>/dev/null
exit $? exit $?
fi fi
if [ ! -d "$PROVISIONING_USER_CONFIG" ] || [ ! -r "$PROVISIONING_CONTEXT_PATH" ] ; then # Workspace operations (fast-path)
if [ "$1" = "workspace" ] || [ "$1" = "ws" ]; then
case "$2" in
"list"|"")
$NU -n -c "source '$PROVISIONING/core/nulib/lib_minimal.nu'; workspace-list | table" 2>/dev/null
exit $?
;;
"active")
$NU -n -c "source '$PROVISIONING/core/nulib/lib_minimal.nu'; workspace-active" 2>/dev/null
exit $?
;;
"info")
if [ -n "$3" ]; then
$NU -n -c "source '$PROVISIONING/core/nulib/lib_minimal.nu'; workspace-info '$3'" 2>/dev/null
else
$NU -n -c "source '$PROVISIONING/core/nulib/lib_minimal.nu'; workspace-active | workspace-info \$in" 2>/dev/null
fi
exit $?
;;
esac
# Other workspace commands (switch, register, etc.) fall through to full loading
fi
# Status/Health check (fast-path)
if [ "$1" = "status" ] || [ "$1" = "health" ]; then
$NU -n -c "source '$PROVISIONING/core/nulib/lib_minimal.nu'; status-quick | table" 2>/dev/null
exit $?
fi
# Environment display (fast-path)
if [ "$1" = "env" ] || [ "$1" = "allenv" ]; then
$NU -n -c "source '$PROVISIONING/core/nulib/lib_minimal.nu'; env-quick | table" 2>/dev/null
exit $?
fi
# Provider list (lightweight - reads filesystem only, no module loading)
if [ "$1" = "provider" ] || [ "$1" = "providers" ]; then
if [ "$2" = "list" ] || [ -z "$2" ]; then
$NU -n -c "
source '$PROVISIONING/core/nulib/lib_minimal.nu'
let provisioning = (\$env.PROVISIONING | default '/usr/local/provisioning')
let providers_base = (\$provisioning | path join 'extensions' | path join 'providers')
if not (\$providers_base | path exists) {
print 'PROVIDERS list: (none found)'
return
}
# Discover all providers from directories
let all_providers = (
ls \$providers_base | where type == 'dir' | each {|prov_dir|
let prov_name = (\$prov_dir.name | path basename)
if \$prov_name != 'prov_lib' {
{name: \$prov_name, type: 'providers', version: '0.0.1'}
} else {
null
}
} | compact
)
if (\$all_providers | length) == 0 {
print 'PROVIDERS list: (none found)'
} else {
print 'PROVIDERS list: '
print ''
\$all_providers | table
}
" 2>/dev/null
exit $?
fi
fi
# Taskserv list (fast-path) - avoid full system load
if [ "$1" = "taskserv" ] || [ "$1" = "task" ]; then
if [ "$2" = "list" ] || [ -z "$2" ]; then
$NU -n -c "
# Direct implementation of taskserv discovery (no dependency loading)
# Taskservs are nested: extensions/taskservs/{category}/{name}/kcl/
let provisioning = (\$env.PROVISIONING | default '/usr/local/provisioning')
let taskservs_base = (\$provisioning | path join 'extensions' | path join 'taskservs')
if not (\$taskservs_base | path exists) {
print '📦 Available Taskservs: (none found)'
return null
}
# Discover all taskservs from nested categories
let all_taskservs = (
ls \$taskservs_base | where type == 'dir' | each {|cat_dir|
let category = (\$cat_dir.name | path basename)
let cat_path = (\$taskservs_base | path join \$category)
if (\$cat_path | path exists) {
ls \$cat_path | where type == 'dir' | each {|ts|
let ts_name = (\$ts.name | path basename)
{task: \$ts_name, mode: \$category, info: ''}
}
} else {
[]
}
} | flatten
)
if (\$all_taskservs | length) == 0 {
print '📦 Available Taskservs: (none found)'
} else {
print '📦 Available Taskservs:'
print ''
\$all_taskservs | each {|ts|
print \$\" • (\$ts.task) [(\$ts.mode)]\"
} | ignore
}
" 2>/dev/null
exit $?
fi
fi
# Server list (lightweight - reads filesystem only, no config loading)
if [ "$1" = "server" ] || [ "$1" = "s" ]; then
if [ "$2" = "list" ] || [ -z "$2" ]; then
# Extract --infra flag from remaining args
INFRA_FILTER=""
shift
[ "$1" = "list" ] && shift
while [ $# -gt 0 ]; do
case "$1" in
--infra|-i) INFRA_FILTER="$2"; shift 2 ;;
*) shift ;;
esac
done
$NU -n -c "
source '$PROVISIONING/core/nulib/lib_minimal.nu'
# Get active workspace
let active_ws = (workspace-active)
if (\$active_ws | is-empty) {
print 'No active workspace'
return
}
# Get workspace path from config
let user_config_path = if (\$env.HOME | path exists) {
(
\$env.HOME | path join 'Library' | path join 'Application Support' |
path join 'provisioning' | path join 'user_config.yaml'
)
} else {
''
}
if not (\$user_config_path | path exists) {
print 'Config not found'
return
}
let config = (open \$user_config_path)
let workspaces = (\$config | get --optional workspaces | default [])
let ws = (\$workspaces | where { \$in.name == \$active_ws } | first)
if (\$ws | is-empty) {
print 'Workspace not found'
return
}
let ws_path = \$ws.path
let infra_path = (\$ws_path | path join 'infra')
if not (\$infra_path | path exists) {
print 'No infrastructures found'
return
}
# Filter by infrastructure if specified
let infra_filter = \"$INFRA_FILTER\"
# List server definitions from infrastructure (filtered if --infra specified)
let servers = (
ls \$infra_path | where type == 'dir' | each {|infra|
let infra_name = (\$infra.name | path basename)
# Skip if filter is specified and doesn't match
if ((\$infra_filter | is-not-empty) and (\$infra_name != \$infra_filter)) {
[]
} else {
let servers_file = (\$infra_path | path join \$infra_name | path join 'defs' | path join 'servers.k')
if (\$servers_file | path exists) {
# Parse the KCL servers.k file to extract server names
let content = (open \$servers_file --raw)
# Extract hostnames from hostname = "..." patterns by splitting on quotes
let hostnames = (
\$content
| split row \"\\n\"
| where {|line| \$line | str contains \"hostname = \\\"\" }
| each {|line|
# Split by quotes to extract hostname value
let parts = (\$line | split row \"\\\"\")
if (\$parts | length) >= 2 {
\$parts | get 1
} else {
\"\"
}
}
| where {|h| (\$h | is-not-empty) }
)
\$hostnames | each {|srv_name|
{
name: \$srv_name
infrastructure: \$infra_name
path: \$servers_file
}
}
} else {
[]
}
}
} | flatten
)
if (\$servers | length) == 0 {
print '📦 Available Servers: (none configured)'
} else {
print '📦 Available Servers:'
print ''
\$servers | each {|srv|
print \$\" • (\$srv.name) [(\$srv.infrastructure)]\"
} | ignore
}
" 2>/dev/null
exit $?
fi
fi
# Cluster list (lightweight - reads filesystem only)
if [ "$1" = "cluster" ] || [ "$1" = "cl" ]; then
if [ "$2" = "list" ] || [ -z "$2" ]; then
$NU -n -c "
source '$PROVISIONING/core/nulib/lib_minimal.nu'
# Get active workspace
let active_ws = (workspace-active)
if (\$active_ws | is-empty) {
print 'No active workspace'
return
}
# Get workspace path from config
let user_config_path = (
\$env.HOME | path join 'Library' | path join 'Application Support' |
path join 'provisioning' | path join 'user_config.yaml'
)
if not (\$user_config_path | path exists) {
print 'Config not found'
return
}
let config = (open \$user_config_path)
let workspaces = (\$config | get --optional workspaces | default [])
let ws = (\$workspaces | where { \$in.name == \$active_ws } | first)
if (\$ws | is-empty) {
print 'Workspace not found'
return
}
let ws_path = \$ws.path
# List all clusters from workspace
let clusters = (
if ((\$ws_path | path join '.clusters') | path exists) {
let clusters_path = (\$ws_path | path join '.clusters')
ls \$clusters_path | where type == 'dir' | each {|cl|
let cl_name = (\$cl.name | path basename)
{
name: \$cl_name
path: \$cl.name
}
}
} else {
[]
}
)
if (\$clusters | length) == 0 {
print '🗂️ Available Clusters: (none found)'
} else {
print '🗂️ Available Clusters:'
print ''
\$clusters | each {|cl|
print \$\" • (\$cl.name)\"
} | ignore
}
" 2>/dev/null
exit $?
fi
fi
# Infra list (lightweight - reads filesystem only)
if [ "$1" = "infra" ] || [ "$1" = "inf" ]; then
if [ "$2" = "list" ] || [ -z "$2" ]; then
$NU -n -c "
source '$PROVISIONING/core/nulib/lib_minimal.nu'
# Get active workspace
let active_ws = (workspace-active)
if (\$active_ws | is-empty) {
print 'No active workspace'
return
}
# Get workspace path from config
let user_config_path = (
\$env.HOME | path join 'Library' | path join 'Application Support' |
path join 'provisioning' | path join 'user_config.yaml'
)
if not (\$user_config_path | path exists) {
print 'Config not found'
return
}
let config = (open \$user_config_path)
let workspaces = (\$config | get --optional workspaces | default [])
let ws = (\$workspaces | where { \$in.name == \$active_ws } | first)
if (\$ws | is-empty) {
print 'Workspace not found'
return
}
let ws_path = \$ws.path
let infra_path = (\$ws_path | path join 'infra')
if not (\$infra_path | path exists) {
print '📁 Available Infrastructures: (none configured)'
return
}
# List all infrastructures
let infras = (
ls \$infra_path | where type == 'dir' | each {|inf|
let inf_name = (\$inf.name | path basename)
let inf_full_path = (\$infra_path | path join \$inf_name)
let has_config = ((\$inf_full_path | path join 'settings.k') | path exists)
{
name: \$inf_name
configured: \$has_config
modified: \$inf.modified
}
}
)
if (\$infras | length) == 0 {
print '📁 Available Infrastructures: (none found)'
} else {
print '📁 Available Infrastructures:'
print ''
\$infras | each {|inf|
let status = if \$inf.configured { '✓' } else { '○' }
let output = \" [\" + \$status + \"] \" + \$inf.name
print \$output
} | ignore
}
" 2>/dev/null
exit $?
fi
fi
# Config validation (lightweight - validates config structure without full load)
if [ "$1" = "validate" ]; then
if [ "$2" = "config" ] || [ -z "$2" ]; then
$NU -n -c "
source '$PROVISIONING/core/nulib/lib_minimal.nu'
try {
# Get active workspace
let active_ws = (workspace-active)
if (\$active_ws | is-empty) {
print '❌ Error: No active workspace'
return
}
# Get workspace path from config
let user_config_path = (
\$env.HOME | path join 'Library' | path join 'Application Support' |
path join 'provisioning' | path join 'user_config.yaml'
)
if not (\$user_config_path | path exists) {
print '❌ Error: User config not found at' \$user_config_path
return
}
let config = (open \$user_config_path)
let workspaces = (\$config | get --optional workspaces | default [])
let ws = (\$workspaces | where { \$in.name == \$active_ws } | first)
if (\$ws | is-empty) {
print '❌ Error: Workspace' \$active_ws 'not found in config'
return
}
let ws_path = \$ws.path
# Validate workspace structure
let required_dirs = ['infra', 'config', '.clusters']
let infra_path = (\$ws_path | path join 'infra')
let config_path = (\$ws_path | path join 'config')
let missing_dirs = \$required_dirs | where { not ((\$ws_path | path join \$in) | path exists) }
if (\$missing_dirs | length) > 0 {
print '⚠️ Warning: Missing directories:' (\$missing_dirs | str join ', ')
}
# Validate infrastructures have required files
if (\$infra_path | path exists) {
let infras = (ls \$infra_path | where type == 'dir')
let invalid_infras = (
\$infras | each {|inf|
let inf_name = (\$inf.name | path basename)
let inf_full_path = (\$infra_path | path join \$inf_name)
if not ((\$inf_full_path | path join 'settings.k') | path exists) {
\$inf_name
} else {
null
}
} | compact
)
if (\$invalid_infras | length) > 0 {
print '⚠️ Warning: Infrastructures missing settings.k:' (\$invalid_infras | str join ', ')
}
}
# Validate user config structure
let has_active = ((\$config | get --optional active_workspace) != null)
let has_workspaces = ((\$config | get --optional workspaces) != null)
let has_preferences = ((\$config | get --optional preferences) != null)
if not \$has_active {
print '⚠️ Warning: Missing active_workspace in user config'
}
if not \$has_workspaces {
print '⚠️ Warning: Missing workspaces list in user config'
}
if not \$has_preferences {
print '⚠️ Warning: Missing preferences in user config'
}
# Summary
print ''
print '✓ Configuration validation complete for workspace:' \$active_ws
print ' Path:' \$ws_path
print ' Status: Valid (with warnings, if any listed above)'
} catch {|err|
print '❌ Validation error:' \$err
}
" 2>/dev/null
exit $?
fi
fi
if [ ! -d "$PROVISIONING_USER_CONFIG" ] || [ ! -r "$PROVISIONING_CONTEXT_PATH" ] ; then
[ ! -x "$PROVISIONING/core/nulib/provisioning setup" ] && echo "$PROVISIONING/core/nulib/provisioning setup not found" && exit 1 [ ! -x "$PROVISIONING/core/nulib/provisioning setup" ] && echo "$PROVISIONING/core/nulib/provisioning setup not found" && exit 1
cd "$PROVISIONING/core/nulib" cd "$PROVISIONING/core/nulib"
./"provisioning setup" ./"provisioning setup"
@ -100,19 +635,50 @@ export PROVISIONING_ARGS="$CMD_ARGS" NU_ARGS="$NU_ARGS"
# Export NU_LIB_DIRS so Nushell can find modules during parsing # Export NU_LIB_DIRS so Nushell can find modules during parsing
export NU_LIB_DIRS="$PROVISIONING/core/nulib:/opt/provisioning/core/nulib:/usr/local/provisioning/core/nulib" export NU_LIB_DIRS="$PROVISIONING/core/nulib:/opt/provisioning/core/nulib:/usr/local/provisioning/core/nulib"
# ============================================================================
# DAEMON ROUTING - ENABLED (Phase 3.7: CLI Daemon Integration)
# ============================================================================
# Redesigned daemon with pre-loaded Nushell environment (no CLI callback).
# Routes eligible commands to HTTP daemon for <100ms execution.
# Gracefully falls back to full load if daemon unavailable.
#
# ARCHITECTURE:
# 1. Check daemon health (curl with 5ms timeout)
# 2. Route eligible commands to daemon via HTTP POST
# 3. Fall back to full load if daemon unavailable
# 4. Zero breaking changes (graceful degradation)
#
# PERFORMANCE:
# - With daemon: <100ms for ALL commands
# - Without daemon: ~430ms (normal behavior)
# - Daemon fallback: Automatic, user sees no difference
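# Illustrative sketch of the routing flow described above (comments only, not
# executed here). The daemon URL, port and request shape are assumptions for
# illustration, not the actual daemon contract:
#
#   if curl -sf --max-time 0.005 "http://127.0.0.1:9900/health" >/dev/null 2>&1 ; then
#     # Daemon answered within the timeout: POST the command for fast execution
#     curl -sf -X POST "http://127.0.0.1:9900/run" \
#       -H 'Content-Type: application/json' \
#       -d "{\"cmd\": \"$1\", \"args\": \"$CMD_ARGS\"}"
#   else
#     # Daemon unavailable: fall through to the full Nushell load below
#     :
#   fi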
if [ -n "$PROVISIONING_MODULE" ] ; then if [ -n "$PROVISIONING_MODULE" ] ; then
if [[ -x $PROVISIONING/core/nulib/$RUNNER\ $PROVISIONING_MODULE ]] ; then if [[ -x $PROVISIONING/core/nulib/$RUNNER\ $PROVISIONING_MODULE ]] ; then
$NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER $PROVISIONING_MODULE" $PROVISIONING_MODULE_TASK $CMD_ARGS $NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER $PROVISIONING_MODULE" $CMD_ARGS
else else
echo "Error \"$PROVISIONING/core/nulib/$RUNNER $PROVISIONING_MODULE\" not found" echo "Error \"$PROVISIONING/core/nulib/$RUNNER $PROVISIONING_MODULE\" not found"
fi fi
else else
# Only redirect stdin for non-interactive commands (nu command needs interactive stdin) # Only redirect stdin for non-interactive commands (nu command needs interactive stdin)
if [ "$1" = "nu" ]; then if [ "$1" = "nu" ]; then
# For interactive mode, ensure ENV variables are available # For interactive mode, start nu with provisioning environment
export PROVISIONING_CONFIG="$PROVISIONING_USER_CONFIG" export PROVISIONING_CONFIG="$PROVISIONING_USER_CONFIG"
$NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER" $CMD_ARGS # Start nu interactively - it will use the config and env from NU_ARGS
$NU "${NU_ARGS[@]}"
else else
$NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER" $CMD_ARGS < /dev/null # Don't redirect stdin for infrastructure commands - they may need interactive input
# Only redirect for commands we know are safe
case "$1" in
help|h|--help|--info|-i|-v|--version|env|allenv|status|health|list|ls|l|workspace|ws|provider|providers|validate|plugin|plugins|nuinfo)
# Safe commands - can use /dev/null
$NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER" $CMD_ARGS < /dev/null
;;
*)
# All other commands (create, delete, server, taskserv, etc.) - keep stdin open
# NOTE: PROVISIONING_MODULE is automatically inherited by Nushell from bash environment
$NU "${NU_ARGS[@]}" "$PROVISIONING/core/nulib/$RUNNER" $CMD_ARGS
;;
esac
fi fi
fi fi
View File
@ -1,28 +1,28 @@
#!/bin/bash #!/bin/bash
# Info: Script to install tools # Info: Script to install tools
# Author: JesusPerezLorenzo # Author: JesusPerezLorenzo
# Release: 1.0 # Release: 1.0
# Date: 12-11-2023 # Date: 12-11-2023
[ "$DEBUG" == "-x" ] && set -x [ "$DEBUG" == "-x" ] && set -x
USAGE="install-tools [ tool-name: providers tera k9s, etc | all] [--update] USAGE="install-tools [ tool-name: providers tera k9s, etc | all] [--update]
As an alternative, use the environment var TOOL_TO_INSTALL with a list of tools (separated with spaces) As an alternative, use the environment var TOOL_TO_INSTALL with a list of tools (separated with spaces)
Versions are set in ./versions file Versions are set in ./versions file
This can be called directly with an argument or from another script This can be called directly with an argument or from another script
" "
ORG=$(pwd) ORG=$(pwd)
function _install_cmds { function _install_cmds {
OS="$(uname | tr '[:upper:]' '[:lower:]')" OS="$(uname | tr '[:upper:]' '[:lower:]')"
local has_cmd local has_cmd
for cmd in $CMDS_PROVISIONING for cmd in $CMDS_PROVISIONING
do do
has_cmd=$(type -P $cmd) has_cmd=$(type -P $cmd)
if [ -z "$has_cmd" ] ; then if [ -z "$has_cmd" ] ; then
case "$OS" in case "$OS" in
darwin) brew install $cmd ;; darwin) brew install $cmd ;;
linux) sudo apt install $cmd ;; linux) sudo apt install $cmd ;;
*) echo "Install $cmd in your PATH" ;; *) echo "Install $cmd in your PATH" ;;
@ -37,19 +37,19 @@ function _install_providers {
local info_keys local info_keys
options="$*" options="$*"
info_keys="info version site" info_keys="info version site"
if [ -z "$match" ] || [ "$match" == "all" ] || [ "$match" == "-" ]; then if [ -z "$match" ] || [ "$match" == "all" ] || [ "$match" == "-" ]; then
match="all" match="all"
fi fi
for prov in $(ls $PROVIDERS_PATH | grep -v "^_" ) for prov in $(ls $PROVIDERS_PATH | grep -v "^_" )
do do
prov_name=$(basename "$prov") prov_name=$(basename "$prov")
[ ! -d "$PROVIDERS_PATH/$prov_name/templates" ] && continue [ ! -d "$PROVIDERS_PATH/$prov_name/templates" ] && continue
if [ "$match" == "all" ] || [ "$prov_name" == "$match" ] ; then if [ "$match" == "all" ] || [ "$prov_name" == "$match" ] ; then
[ -x "$PROVIDERS_PATH/$prov_name/bin/install.sh" ] && $PROVIDERS_PATH/$prov_name/bin/install.sh $options [ -x "$PROVIDERS_PATH/$prov_name/bin/install.sh" ] && $PROVIDERS_PATH/$prov_name/bin/install.sh $options
elif [ "$match" == "?" ] ; then elif [ "$match" == "?" ] ; then
[ -n "$options" ] && [ -z "$(echo "$options" | grep ^$prov_name)" ] && continue [ -n "$options" ] && [ -z "$(echo "$options" | grep ^$prov_name)" ] && continue
if [ -r "$PROVIDERS_PATH/$prov_name/provisioning.yaml" ] ; then if [ -r "$PROVIDERS_PATH/$prov_name/provisioning.yaml" ] ; then
echo "-------------------------------------------------------" echo "-------------------------------------------------------"
for key in $info_keys for key in $info_keys
do do
@ -58,7 +58,7 @@ function _install_providers {
echo " $(grep "^$key:" "$PROVIDERS_PATH/$prov_name/provisioning.yaml" | sed "s/$key: //g")" echo " $(grep "^$key:" "$PROVIDERS_PATH/$prov_name/provisioning.yaml" | sed "s/$key: //g")"
done done
[ -n "$options" ] && echo "________________________________________________________" [ -n "$options" ] && echo "________________________________________________________"
else else
echo "$prov_name" echo "$prov_name"
fi fi
fi fi
@ -76,8 +76,8 @@ function _install_tools {
# local yq_version # local yq_version
local has_nu local has_nu
local nu_version local nu_version
local has_kcl local has_nickel
local kcl_version local nickel_version
local has_tera local has_tera
local tera_version local tera_version
local has_k9s local has_k9s
@ -87,21 +87,21 @@ function _install_tools {
local has_sops local has_sops
local sops_version local sops_version
OS="$(uname | tr '[:upper:]' '[:lower:]')" OS="$(uname | tr '[:upper:]' '[:lower:]')"
ORG_OS=$(uname) ORG_OS=$(uname)
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')"
ORG_ARCH="$(uname -m)" ORG_ARCH="$(uname -m)"
if [ -z "$CHECK_ONLY" ] && [ "$match" == "all" ] ; then if [ -z "$CHECK_ONLY" ] && [ "$match" == "all" ] ; then
_install_cmds _install_cmds
fi fi
# if [ -n "$JQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "jq" ] ; then # if [ -n "$JQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "jq" ] ; then
# has_jq=$(type -P jq) # has_jq=$(type -P jq)
# num_version="0" # num_version="0"
# [ -n "$has_jq" ] && jq_version=$(jq -V | sed 's/jq-//g') && num_version=${jq_version//\./} # [ -n "$has_jq" ] && jq_version=$(jq -V | sed 's/jq-//g') && num_version=${jq_version//\./}
# expected_version_num=${JQ_VERSION//\./} # expected_version_num=${JQ_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then # if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# curl -fsSLO "https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/jq-${OS}-${ARCH}" && # curl -fsSLO "https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/jq-${OS}-${ARCH}" &&
# chmod +x "jq-${OS}-${ARCH}" && # chmod +x "jq-${OS}-${ARCH}" &&
# sudo mv "jq-${OS}-${ARCH}" /usr/local/bin/jq && # sudo mv "jq-${OS}-${ARCH}" /usr/local/bin/jq &&
@ -112,16 +112,16 @@ function _install_tools {
# printf "%s\t%s\n" "jq" "already $JQ_VERSION" # printf "%s\t%s\n" "jq" "already $JQ_VERSION"
# fi # fi
# fi # fi
# if [ -n "$YQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "yq" ] ; then # if [ -n "$YQ_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "yq" ] ; then
# has_yq=$(type -P yq) # has_yq=$(type -P yq)
# num_version="0" # num_version="0"
# [ -n "$has_yq" ] && yq_version=$(yq -V | cut -f4 -d" " | sed 's/v//g') && num_version=${yq_version//\./} # [ -n "$has_yq" ] && yq_version=$(yq -V | cut -f4 -d" " | sed 's/v//g') && num_version=${yq_version//\./}
# expected_version_num=${YQ_VERSION//\./} # expected_version_num=${YQ_VERSION//\./}
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then # if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# curl -fsSLO "https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_${OS}_${ARCH}.tar.gz" && # curl -fsSLO "https://github.com/mikefarah/yq/releases/download/v${YQ_VERSION}/yq_${OS}_${ARCH}.tar.gz" &&
# tar -xzf "yq_${OS}_${ARCH}.tar.gz" && # tar -xzf "yq_${OS}_${ARCH}.tar.gz" &&
# sudo mv "yq_${OS}_${ARCH}" /usr/local/bin/yq && # sudo mv "yq_${OS}_${ARCH}" /usr/local/bin/yq &&
# sudo ./install-man-page.sh && # sudo ./install-man-page.sh &&
# rm -f install-man-page.sh yq.1 "yq_${OS}_${ARCH}.tar.gz" && # rm -f install-man-page.sh yq.1 "yq_${OS}_${ARCH}.tar.gz" &&
# printf "%s\t%s\n" "yq" "installed $YQ_VERSION" # printf "%s\t%s\n" "yq" "installed $YQ_VERSION"
# elif [ -n "$CHECK_ONLY" ] ; then # elif [ -n "$CHECK_ONLY" ] ; then
@ -131,16 +131,16 @@ function _install_tools {
# fi # fi
# fi # fi
if [ -n "$NU_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "nu" ] ; then if [ -n "$NU_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "nu" ] ; then
has_nu=$(type -P nu) has_nu=$(type -P nu)
num_version="0" num_version="0"
[ -n "$has_nu" ] && nu_version=$(nu -v) && num_version=${nu_version//\./} && num_version=${num_version//0/} [ -n "$has_nu" ] && nu_version=$(nu -v) && num_version=${nu_version//\./} && num_version=${num_version//0/}
expected_version_num=${NU_VERSION//\./} expected_version_num=${NU_VERSION//\./}
expected_version_num=${expected_version_num//0/} expected_version_num=${expected_version_num//0/}
[ -z "$num_version" ] && num_version=0 [ -z "$num_version" ] && num_version=0
if [ -z "$num_version" ] && [ "$num_version" -lt "$expected_version_num" ] ; then if [ -z "$num_version" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION require installation" printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION require installation"
elif [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then elif [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION require installation" printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION require installation"
elif [ -n "$CHECK_ONLY" ] ; then elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION" printf "%s\t%s\t%s\n" "nu" "$nu_version" "expected $NU_VERSION"
@ -148,37 +148,81 @@ function _install_tools {
printf "%s\t%s\n" "nu" "already $NU_VERSION" printf "%s\t%s\n" "nu" "already $NU_VERSION"
fi fi
fi fi
if [ -n "$KCL_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "kcl" ] ; then if [ -n "$NICKEL_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "nickel" ] ; then
has_kcl=$(type -P kcl) has_nickel=$(type -P nickel)
num_version=0 num_version=0
[ -n "$has_kcl" ] && kcl_version=$(kcl -v | cut -f3 -d" " | sed 's/ //g') && num_version=${kcl_version//\./} [ -n "$has_nickel" ] && nickel_version=$(nickel --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1) && num_version=${nickel_version//\./}
expected_version_num=${KCL_VERSION//\./} expected_version_num=${NICKEL_VERSION//\./}
[ -z "$num_version" ] && num_version=0 [ -z "$num_version" ] && num_version=0
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
curl -fsSLO "https://github.com/kcl-lang/cli/releases/download/v${KCL_VERSION}/kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" && # macOS: try Cargo first, then Homebrew
tar -xzf "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" && if [ "$OS" == "darwin" ] ; then
sudo mv kcl /usr/local/bin/kcl && printf "%s\t%s\n" "nickel" "installing $NICKEL_VERSION on macOS"
rm -f "kcl-v${KCL_VERSION}-${OS}-${ARCH}.tar.gz" &&
printf "%s\t%s\n" "kcl" "installed $KCL_VERSION" # Try Cargo first (if available)
if command -v cargo >/dev/null 2>&1 ; then
printf "%s\t%s\n" "nickel" "using Cargo (Rust compiler)"
if cargo install nickel-lang-cli --version "${NICKEL_VERSION}" ; then
printf "%s\t%s\n" "nickel" "✅ installed $NICKEL_VERSION via Cargo"
else
printf "%s\t%s\n" "nickel" "❌ Failed to build with Cargo"
exit 1
fi
# Try Homebrew if Cargo not available
elif command -v brew >/dev/null 2>&1 ; then
printf "%s\t%s\n" "nickel" "using Homebrew"
if brew install nickel ; then
printf "%s\t%s\n" "nickel" "✅ installed $NICKEL_VERSION via Homebrew"
else
printf "%s\t%s\n" "nickel" "❌ Failed to install with Homebrew"
exit 1
fi
else
# Neither Cargo nor Homebrew available
printf "%s\t%s\n" "nickel" "⚠️ Neither Cargo nor Homebrew found"
printf "%s\t%s\n" "nickel" "Install one of:"
printf "%s\t%s\n" "nickel" " 1. Cargo: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh"
printf "%s\t%s\n" "nickel" " 2. Homebrew: /bin/bash -c \"\$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\""
exit 1
fi
else
# Non-macOS: download binary from GitHub
printf "%s\t%s\n" "nickel" "installing $NICKEL_VERSION on $OS"
# Map architecture names (GitHub uses different naming)
local nickel_arch="$ARCH"
[ "$nickel_arch" == "amd64" ] && nickel_arch="x86_64"
# Build download URL
local download_url="https://github.com/tweag/nickel/releases/download/${NICKEL_VERSION}/nickel-${nickel_arch}-${OS}"
# Download and install
if curl -fsSLO "$download_url" && chmod +x "nickel-${nickel_arch}-${OS}" && sudo mv "nickel-${nickel_arch}-${OS}" /usr/local/bin/nickel ; then
printf "%s\t%s\n" "nickel" "installed $NICKEL_VERSION"
else
printf "%s\t%s\n" "nickel" "❌ Failed to download/install Nickel binary"
exit 1
fi
fi
elif [ -n "$CHECK_ONLY" ] ; then elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "kcl" "$kcl_version" "expected $KCL_VERSION" printf "%s\t%s\t%s\n" "nickel" "$nickel_version" "expected $NICKEL_VERSION"
else else
printf "%s\t%s\n" "kcl" "already $KCL_VERSION" printf "%s\t%s\n" "nickel" "already $NICKEL_VERSION"
fi fi
fi fi
#if [ -n "$TERA_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "tera" ] ; then #if [ -n "$TERA_VERSION" ] && [ "$match" == "all" ] || [ "$match" == "tera" ] ; then
# has_tera=$(type -P tera) # has_tera=$(type -P tera)
# num_version="0" # num_version="0"
# [ -n "$has_tera" ] && tera_version=$(tera -V | cut -f2 -d" " | sed 's/teracli//g') && num_version=${tera_version//\./} # [ -n "$has_tera" ] && tera_version=$(tera -V | cut -f2 -d" " | sed 's/teracli//g') && num_version=${tera_version//\./}
# expected_version_num=${TERA_VERSION//\./} # expected_version_num=${TERA_VERSION//\./}
# [ -z "$num_version" ] && num_version=0 # [ -z "$num_version" ] && num_version=0
# if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then # if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
# if [ -x "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" ] ; then # if [ -x "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" ] ; then
# sudo cp "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" /usr/local/bin/tera && printf "%s\t%s\n" "tera" "installed $TERA_VERSION" # sudo cp "$(dirname "$0")/../tools/tera_${OS}_${ARCH}" /usr/local/bin/tera && printf "%s\t%s\n" "tera" "installed $TERA_VERSION"
# else # else
# echo "Error: $(dirname "$0")/../tools/tera_${OS}_${ARCH} not found !!" # echo "Error: $(dirname "$0")/../tools/tera_${OS}_${ARCH} not found !!"
# exit 2 # exit 2
# fi # fi
# elif [ -n "$CHECK_ONLY" ] ; then # elif [ -n "$CHECK_ONLY" ] ; then
# printf "%s\t%s\t%s\n" "tera" "$tera_version" "expected $TERA_VERSION" # printf "%s\t%s\t%s\n" "tera" "$tera_version" "expected $TERA_VERSION"
# else # else
@ -191,9 +235,9 @@ function _install_tools {
[ -n "$has_k9s" ] && k9s_version="$( k9s version | grep Version | cut -f2 -d"v" | sed 's/ //g')" && num_version=${k9s_version//\./} [ -n "$has_k9s" ] && k9s_version="$( k9s version | grep Version | cut -f2 -d"v" | sed 's/ //g')" && num_version=${k9s_version//\./}
expected_version_num=${K9S_VERSION//\./} expected_version_num=${K9S_VERSION//\./}
[ -z "$num_version" ] && num_version=0 [ -z "$num_version" ] && num_version=0
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
mkdir -p k9s && cd k9s && mkdir -p k9s && cd k9s &&
curl -fsSLO https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_${ORG_OS}_${ARCH}.tar.gz && curl -fsSLO https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_${ORG_OS}_${ARCH}.tar.gz &&
tar -xzf "k9s_${ORG_OS}_${ARCH}.tar.gz" && tar -xzf "k9s_${ORG_OS}_${ARCH}.tar.gz" &&
sudo mv k9s /usr/local/bin && sudo mv k9s /usr/local/bin &&
cd "$ORG" && rm -rf /tmp/k9s "/k9s_${ORG_OS}_${ARCH}.tar.gz" && cd "$ORG" && rm -rf /tmp/k9s "/k9s_${ORG_OS}_${ARCH}.tar.gz" &&
@ -209,12 +253,12 @@ function _install_tools {
num_version="0" num_version="0"
[ -n "$has_age" ] && age_version="${AGE_VERSION}" && num_version=${age_version//\./} [ -n "$has_age" ] && age_version="${AGE_VERSION}" && num_version=${age_version//\./}
expected_version_num=${AGE_VERSION//\./} expected_version_num=${AGE_VERSION//\./}
if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then if [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
curl -fsSLO https://github.com/FiloSottile/age/releases/download/v${AGE_VERSION}/age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz && curl -fsSLO https://github.com/FiloSottile/age/releases/download/v${AGE_VERSION}/age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
tar -xzf age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz && tar -xzf age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz &&
sudo mv age/age /usr/local/bin && sudo mv age/age /usr/local/bin &&
sudo mv age/age-keygen /usr/local/bin && sudo mv age/age-keygen /usr/local/bin &&
rm -rf age "age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz" && rm -rf age "age-v${AGE_VERSION}-${OS}-${ARCH}.tar.gz" &&
printf "%s\t%s\n" "age" "installed $AGE_VERSION" printf "%s\t%s\n" "age" "installed $AGE_VERSION"
elif [ -n "$CHECK_ONLY" ] ; then elif [ -n "$CHECK_ONLY" ] ; then
printf "%s\t%s\t%s\n" "age" "$age_version" "expected $AGE_VERSION" printf "%s\t%s\t%s\n" "age" "$age_version" "expected $AGE_VERSION"
@ -228,9 +272,9 @@ function _install_tools {
[ -n "$has_sops" ] && sops_version="$(sops -v | grep ^sops | cut -f2 -d" " | sed 's/ //g')" && num_version=${sops_version//\./} [ -n "$has_sops" ] && sops_version="$(sops -v | grep ^sops | cut -f2 -d" " | sed 's/ //g')" && num_version=${sops_version//\./}
expected_version_num=${SOPS_VERSION//\./} expected_version_num=${SOPS_VERSION//\./}
[ -z "$num_version" ] && num_version=0 [ -z "$num_version" ] && num_version=0
if [ -z "$expected_version_num" ] ; then if [ -z "$expected_version_num" ] ; then
printf "%s\t%s\t%s\n" "sops" "$sops_version" "expected $SOPS_VERSION" printf "%s\t%s\t%s\n" "sops" "$sops_version" "expected $SOPS_VERSION"
elif [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then elif [ -z "$CHECK_ONLY" ] && [ "$num_version" -lt "$expected_version_num" ] ; then
mkdir -p sops && cd sops && mkdir -p sops && cd sops &&
curl -fsSLO https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.${OS}.${ARCH} && curl -fsSLO https://github.com/getsops/sops/releases/download/v${SOPS_VERSION}/sops-v${SOPS_VERSION}.${OS}.${ARCH} &&
mv sops-v${SOPS_VERSION}.${OS}.${ARCH} sops && mv sops-v${SOPS_VERSION}.${OS}.${ARCH} sops &&
@ -263,8 +307,8 @@ function _detect_tool_version {
nu | nushell) nu | nushell)
nu -v 2>/dev/null | head -1 || echo "" nu -v 2>/dev/null | head -1 || echo ""
;; ;;
kcl) nickel)
kcl -v 2>/dev/null | grep "kcl version" | sed 's/.*version\s*//' || echo "" nickel --version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -1 || echo ""
;; ;;
sops) sops)
sops -v 2>/dev/null | head -1 | awk '{print $2}' || echo "" sops -v 2>/dev/null | head -1 | awk '{print $2}' || echo ""
@ -325,22 +369,22 @@ function _try_install_provider_tool {
local options=$2 local options=$2
local force_update=$3 local force_update=$3
# Look for the tool in provider kcl/version.k files (KCL is single source of truth) # Look for the tool in provider nickel/version.ncl files (Nickel is single source of truth)
for prov in $(ls $PROVIDERS_PATH 2>/dev/null | grep -v "^_" ) for prov in $(ls $PROVIDERS_PATH 2>/dev/null | grep -v "^_" )
do do
if [ -r "$PROVIDERS_PATH/$prov/kcl/version.k" ] ; then if [ -r "$PROVIDERS_PATH/$prov/nickel/version.ncl" ] ; then
# Compile KCL file to JSON and extract version data (single source of truth) # Evaluate Nickel file to JSON and extract version data (single source of truth)
local kcl_file="$PROVIDERS_PATH/$prov/kcl/version.k" local nickel_file="$PROVIDERS_PATH/$prov/nickel/version.ncl"
local kcl_output="" local nickel_output=""
local tool_version="" local tool_version=""
local tool_name="" local tool_name=""
# Compile KCL to JSON and capture output # Evaluate Nickel to JSON and capture output
kcl_output=$(kcl run "$kcl_file" --format json 2>/dev/null) nickel_output=$(nickel export --format json "$nickel_file" 2>/dev/null)
# Extract tool name and version from JSON # Extract tool name and version from JSON
tool_name=$(echo "$kcl_output" | grep -o '"name": "[^"]*"' | head -1 | sed 's/"name": "//;s/"$//') tool_name=$(echo "$nickel_output" | grep -o '"name": "[^"]*"' | head -1 | sed 's/"name": "//;s/"$//')
tool_version=$(echo "$kcl_output" | grep -o '"current": "[^"]*"' | head -1 | sed 's/"current": "//;s/"$//') tool_version=$(echo "$nickel_output" | grep -o '"current": "[^"]*"' | head -1 | sed 's/"current": "//;s/"$//')
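# For illustration only (hypothetical layout; actual provider schemas may differ):
# a nickel/version.ncl exporting
#   { name = "upctl", version = { current = "1.0.0" } }
# is rendered by `nickel export --format json` as
#   {"name": "upctl", "version": {"current": "1.0.0"}}
# which is where the "name" and "current" values grepped above come from.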
# If this is the tool we're looking for # If this is the tool we're looking for
if [ "$tool_name" == "$tool" ] && [ -n "$tool_version" ] ; then if [ "$tool_name" == "$tool" ] && [ -n "$tool_version" ] ; then
@ -357,7 +401,7 @@ function _try_install_provider_tool {
export UPCLOUD_UPCTL_VERSION="$tool_version" export UPCLOUD_UPCTL_VERSION="$tool_version"
$PROVIDERS_PATH/$prov/bin/install.sh "$tool_name" $options $PROVIDERS_PATH/$prov/bin/install.sh "$tool_name" $options
elif [ "$prov" = "hetzner" ] ; then elif [ "$prov" = "hetzner" ] ; then
# Hetzner expects: version as param (from kcl/version.k) # Hetzner expects: version as param (from nickel/version.ncl)
$PROVIDERS_PATH/$prov/bin/install.sh "$tool_version" $options $PROVIDERS_PATH/$prov/bin/install.sh "$tool_version" $options
elif [ "$prov" = "aws" ] ; then elif [ "$prov" = "aws" ] ; then
# AWS format - set env var and pass tool name # AWS format - set env var and pass tool name
@ -410,14 +454,14 @@ function _on_tools {
_install_tools "$tool" "$@" _install_tools "$tool" "$@"
done done
esac esac
} }
set -o allexport set -o allexport
## shellcheck disable=SC1090 ## shellcheck disable=SC1090
[ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV" [ -n "$PROVISIONING_ENV" ] && [ -r "$PROVISIONING_ENV" ] && source "$PROVISIONING_ENV"
[ -r "../env-provisioning" ] && source ../env-provisioning [ -r "../env-provisioning" ] && source ../env-provisioning
[ -r "env-provisioning" ] && source ./env-provisioning [ -r "env-provisioning" ] && source ./env-provisioning
#[ -r ".env" ] && source .env set #[ -r ".env" ] && source .env set
set +o allexport set +o allexport
export PROVISIONING=${PROVISIONING:-/usr/local/provisioning} export PROVISIONING=${PROVISIONING:-/usr/local/provisioning}
@ -434,7 +478,7 @@ PROVIDERS_PATH=${PROVIDERS_PATH:-"$PROVISIONING/extensions/providers"}
if [ -z "$1" ] ; then if [ -z "$1" ] ; then
CHECK_ONLY="yes" CHECK_ONLY="yes"
_on_tools all _on_tools all
else else
[ "$1" == "-h" ] && echo "$USAGE" && shift [ "$1" == "-h" ] && echo "$USAGE" && shift
[ "$1" == "check" ] && CHECK_ONLY="yes" && shift [ "$1" == "check" ] && CHECK_ONLY="yes" && shift
[ -n "$1" ] && cd /tmp && _on_tools "$@" [ -n "$1" ] && cd /tmp && _on_tools "$@"
View File
@ -1,469 +0,0 @@
# KMS Independent Configuration - Migration Summary
**Date:** 2025-10-06
**Version:** 1.0.0
**Status:** ✅ Complete
## Overview
Successfully created an independent KMS (Key Management Service) configuration system supporting local, remote, and hybrid modes, completely decoupled from the SOPS configuration.
## What Was Created
### 1. Directory Structure
```
/Users/Akasha/project-provisioning/provisioning/core/services/kms/
├── config.defaults.toml (6.7 KB) - System defaults
├── config.schema.toml (14 KB) - Validation rules
├── config.remote.example.toml (5.0 KB) - Remote KMS examples
├── config.local.example.toml (8.4 KB) - Local KMS examples
├── README.md (14 KB) - Comprehensive documentation
└── MIGRATION.md (this file) - Migration summary
```
### 2. Configuration Files
#### config.defaults.toml (270 lines)
Comprehensive default configuration covering:
- **Core Settings**: enabled, mode (local/remote/hybrid), version
- **Path Configuration**: All paths with interpolation support
- **Local KMS**: age, sops, vault providers
- **Remote KMS**: Server, auth, TLS, cache configuration
- **Hybrid Mode**: Fallback and sync settings
- **Policies**: Rotation, backup, audit logging
- **Encryption**: Algorithms and KDF configuration
- **Security**: Enforcement rules and secret scanning
- **Monitoring**: Health checks and metrics
- **Operations**: Verbose, debug, dry-run modes
**Key Features:**
- All paths use interpolation: `{{workspace.path}}`, `{{kms.paths.base}}`, `{{env.HOME}}`
- No hardcoded paths
- Secure defaults (TLS 1.3, 0600 permissions, no debug)
- Secret references only (no plaintext)
#### config.schema.toml (330 lines)
Validation schema defining:
- Type constraints for all fields
- Value ranges (timeouts, retries, sizes)
- Pattern matching (versions, ARNs, URLs)
- Enum validation (modes, algorithms, formats)
- 10 cross-field validation rules
**Validation Rules:**
1. Mode consistency (local/remote/hybrid)
2. Auth method required fields
3. Local provider configuration
4. Password secret format enforcement
5. TLS/mTLS consistency
6. Cache TTL bounds
7. Rotation interval requirements
8. Key permissions security
9. Debug mode warnings
10. Hybrid mode requirements
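For illustration, rule 1 above (mode consistency) amounts to a check like the Nushell sketch below; the helper is hypothetical, not the schema engine itself, and the keys mirror `config.defaults.toml`:
```nushell
# Hypothetical sketch of cross-field rule 1 (mode consistency)
def check-mode-consistency [config: record] {
    let mode = ($config | get --optional kms.mode | default "local")
    let local_on = ($config | get --optional kms.local.enabled | default false)
    let remote_on = ($config | get --optional kms.remote.enabled | default false)
    if $mode == "local" and (not $local_on) {
        error make {msg: "mode=local requires kms.local.enabled=true"}
    } else if $mode == "remote" and (not $remote_on) {
        error make {msg: "mode=remote requires kms.remote.enabled=true"}
    } else if $mode == "hybrid" and (not ($local_on and $remote_on)) {
        error make {msg: "mode=hybrid requires both kms.local.enabled and kms.remote.enabled"}
    } else {
        "ok"
    }
}
```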
#### config.remote.example.toml (180 lines)
Remote KMS examples including:
- mTLS authentication (production)
- Token-based auth
- API key authentication
- Basic authentication
- IAM authentication (AWS)
- Deployment scenarios (prod, dev, CI/CD)
- Integration examples (AWS, Cosmian, Vault)
#### config.local.example.toml (290 lines)
Local KMS examples including:
- Age encryption (simple, multi-key, SSH-key)
- SOPS with age
- SOPS with cloud KMS (AWS, GCP, Azure)
- HashiCorp Vault Transit engine
- Development/testing setups
- High-security configurations
- Migration paths
### 3. Configuration Accessor Functions
Added **59 new accessor functions** to `/provisioning/core/nulib/lib_provisioning/config/accessor.nu`:
#### Core Settings (3)
- `get-kms-enabled`
- `get-kms-mode`
- `get-kms-version`
#### Path Accessors (4)
- `get-kms-base-path`
- `get-kms-keys-dir`
- `get-kms-cache-dir`
- `get-kms-config-dir`
#### Local Configuration (13)
- `get-kms-local-enabled`
- `get-kms-local-provider`
- `get-kms-local-key-path`
- `get-kms-local-sops-config`
- Age: `get-kms-age-generate-on-init`, `get-kms-age-key-format`, `get-kms-age-key-permissions`
- SOPS: `get-kms-sops-config-path`, `get-kms-sops-age-recipients`
- Vault: `get-kms-vault-address`, `get-kms-vault-token-path`, `get-kms-vault-transit-path`, `get-kms-vault-key-name`
#### Remote Configuration (19)
- `get-kms-remote-enabled`
- `get-kms-remote-endpoint`
- `get-kms-remote-api-version`
- `get-kms-remote-timeout`
- `get-kms-remote-retry-attempts`
- `get-kms-remote-retry-delay`
- Auth: `get-kms-remote-auth-method`, `get-kms-remote-token-path`, `get-kms-remote-refresh-token`, `get-kms-remote-token-expiry`
- TLS: `get-kms-remote-tls-enabled`, `get-kms-remote-tls-verify`, `get-kms-remote-ca-cert-path`, `get-kms-remote-client-cert-path`, `get-kms-remote-client-key-path`, `get-kms-remote-tls-min-version`
- Cache: `get-kms-remote-cache-enabled`, `get-kms-remote-cache-ttl`, `get-kms-remote-cache-max-size`
#### Hybrid Mode (3)
- `get-kms-hybrid-enabled`
- `get-kms-hybrid-fallback-to-local`
- `get-kms-hybrid-sync-keys`
#### Policies (6)
- `get-kms-auto-rotate`
- `get-kms-rotation-days`
- `get-kms-backup-enabled`
- `get-kms-backup-path`
- `get-kms-audit-log-enabled`
- `get-kms-audit-log-path`
#### Encryption & Security (6)
- `get-kms-encryption-algorithm`
- `get-kms-key-derivation`
- `get-kms-enforce-key-permissions`
- `get-kms-disallow-plaintext-secrets`
- `get-kms-secret-scanning-enabled`
- `get-kms-min-key-size-bits`
#### Operations (4)
- `get-kms-verbose`
- `get-kms-debug`
- `get-kms-dry-run`
- `get-kms-max-file-size-mb`
#### Helper Function (1)
- `get-kms-config-full` - Returns complete KMS config as record
**Total:** 69 KMS accessor functions (10 existing + 59 new)
### 4. Documentation
#### README.md (500+ lines)
Comprehensive documentation covering:
- Overview and directory structure
- Configuration file descriptions
- Path interpolation guide (6 variable types)
- **Security Considerations** (7 critical topics):
1. Key file permissions (0600/0400)
2. Secret references (no plaintext)
3. TLS/mTLS configuration
4. Audit logging
5. Debug mode warnings
6. Secret scanning
7. Key backup and rotation
- Operational modes (local, remote, hybrid)
- Authentication methods (5 types)
- Integration with existing lib.nu
- Validation rules
- Migration guide
- Best practices (dev, prod, HA)
- Troubleshooting
- Version compatibility
## Security Implementation
### 1. Path Interpolation
All paths support secure interpolation:
```toml
base = "{{workspace.path}}/.kms" # Workspace-relative
keys_dir = "{{kms.paths.base}}/keys" # Self-referential
token_path = "{{env.HOME}}/.kms/token" # Environment-based
```
**Benefits:**
- No hardcoded paths
- Portable configurations
- Dynamic workspace support
- Environment-aware
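As a minimal illustration of how these placeholders can be resolved (a hypothetical helper, not the project's actual resolver):
```nushell
# Hypothetical placeholder resolution for KMS paths
def resolve-kms-path [raw: string, workspace_path: string, kms_base: string] {
    $raw
    | str replace --all "{{workspace.path}}" $workspace_path
    | str replace --all "{{kms.paths.base}}" $kms_base
    | str replace --all "{{env.HOME}}" $env.HOME
}

# resolve-kms-path "{{workspace.path}}/.kms/keys" "/workspace/my-project" "/workspace/my-project/.kms"
# => "/workspace/my-project/.kms/keys"
```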
### 2. Secret References
**Never plaintext secrets!** Only references:
```toml
# ✅ Secure
password_secret = "sops://kms/remote/password"
api_key = "vault://kms/api_key"
# ❌ Insecure (blocked by validation)
password = "my-password"
```
**Supported Schemes:**
- `sops://` - SOPS encrypted
- `vault://` - HashiCorp Vault
- `kms://` - KMS encrypted
- `age://` - Age encrypted
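A minimal sketch of dispatching on these schemes (the helper and its return shape are illustrative assumptions, not project API):
```nushell
# Hypothetical sketch: split a secret reference into scheme and path, rejecting plaintext
def classify-secret-ref [ref: string] {
    let parsed = ($ref | parse "{scheme}://{path}")
    let scheme = ($parsed | get --optional 0.scheme)
    if ($scheme != null) and ($scheme in ["sops" "vault" "kms" "age"]) {
        {scheme: $scheme, path: ($parsed | get 0.path)}
    } else {
        error make {msg: $"not a supported secret reference: ($ref)"}
    }
}

# classify-secret-ref "sops://kms/remote/password"
# => {scheme: sops, path: kms/remote/password}
```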
### 3. Permission Enforcement
```toml
[kms.local.age]
key_permissions = "0600" # Owner read/write only
[kms.security]
enforce_key_permissions = true
disallow_plaintext_secrets = true
```
**Enforced Rules:**
- Keys must be 0600 or 0400
- Secrets must be references
- TLS 1.3+ for remote
- Certificate verification required
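For example, the key-permission rule could be enforced with a check along these lines (a sketch that assumes Nushell's `ls -l` symbolic mode column, e.g. `rw-------` for 0600; not the actual enforcement code):
```nushell
# Hypothetical sketch: reject key files whose group/other permission bits are set
def check-key-permissions [key_path: string] {
    let mode = (ls -l $key_path | get 0.mode)
    let start = (($mode | str length) - 6)      # last six chars = group + other bits
    let group_other = ($mode | str substring $start..)
    if $group_other != "------" {
        error make {msg: $"($key_path) must be 0600 or 0400, found mode ($mode)"}
    }
    true
}
```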
### 4. Audit and Monitoring
```toml
[kms.policies]
audit_log_enabled = true
audit_log_path = "{{kms.paths.base}}/audit.log"
audit_log_format = "json"
[kms.monitoring]
health_check_enabled = true
metrics_enabled = true
```
**Logged Events:**
- Encryption/decryption operations
- Key rotations
- Authentication attempts
- Configuration changes
## Changes to Existing Code
### Modified Files
#### 1. config/accessor.nu
**Location:** `/provisioning/core/nulib/lib_provisioning/config/accessor.nu`
**Changes:**
- Added 59 new KMS accessor functions (lines 739-1144)
- Added comprehensive documentation header
- Added helper function `get-kms-config-full`
- Total KMS functions: 69 (10 existing + 59 new)
**No Breaking Changes:**
- Existing functions preserved
- Backward compatible
- Additive only
### Existing KMS Library (lib.nu)
**Location:** `/provisioning/core/nulib/lib_provisioning/kms/lib.nu`
**Current State:**
- Uses old accessor functions (`get-kms-server`, etc.)
- Hardcoded to remote KMS (Cosmian)
- No local/hybrid mode support
**Recommended Updates:**
```nushell
# Update get_kms_config function to use new accessors:
def get_kms_config [] {
let mode = (get-kms-mode)
match $mode {
"local" => {
{
provider: (get-kms-local-provider)
key_path: (get-kms-local-key-path)
}
}
"remote" => {
{
endpoint: (get-kms-remote-endpoint)
auth_method: (get-kms-remote-auth-method)
# ... existing remote config
}
}
"hybrid" => {
# Both configs with fallback
}
}
}
```
**Note:** lib.nu was NOT modified in this task. A future task should update it to use the new configuration.
## Integration Points
### 1. With SOPS
KMS config is now independent but still supports SOPS:
```toml
[kms.local]
provider = "sops"
sops_config = "{{workspace.path}}/.sops.yaml"
[kms.local.sops]
age_recipients = ["age1xxx..."]
```
### 2. With Workspace Config
KMS config loads from workspace:
```toml
[kms.paths]
base = "{{workspace.path}}/.kms"
```
### 3. With Provider Configs
Can integrate with cloud provider KMS:
```toml
[kms.local.sops]
aws_kms_arn = "arn:aws:kms:..."
gcp_kms_resource_id = "projects/..."
azure_keyvault_url = "https://..."
```
## Usage Examples
### Local Age Encryption
```nushell
# Configuration automatically loaded
let kms_config = (get-kms-config-full)
print $kms_config.local.key_path
# Output: /workspace/my-project/.kms/keys/age.txt
```
### Remote KMS with mTLS
```nushell
let endpoint = (get-kms-remote-endpoint)
let auth = (get-kms-remote-auth-method)
let tls_enabled = (get-kms-remote-tls-enabled)
print $"Connecting to ($endpoint) using ($auth)"
# Output: Connecting to https://kms.prod.example.com using mtls
```
### Hybrid Mode with Fallback
```nushell
let mode = (get-kms-mode)
let fallback = (get-kms-hybrid-fallback-to-local)
if $mode == "hybrid" and $fallback {
print "Hybrid mode with local fallback enabled"
}
```
## Testing Checklist
- [x] Config files created with correct structure
- [x] Schema validation rules defined
- [x] Path interpolation variables documented
- [x] Secret reference patterns enforced
- [x] Accessor functions added (59 new)
- [x] Security considerations documented
- [x] Example configurations provided
- [x] Migration guide included
- [x] README comprehensive
- [ ] lib.nu updated (future task)
- [ ] Integration tests added (future task)
- [ ] End-to-end testing (future task)
## Next Steps
### 1. Update lib.nu
Update `/provisioning/core/nulib/lib_provisioning/kms/lib.nu` to:
- Use new accessor functions
- Support all three modes (local/remote/hybrid)
- Implement local providers (age, sops, vault)
- Add fallback logic for hybrid mode
### 2. Integration Testing
- Test local age encryption
- Test SOPS integration
- Test remote KMS connection
- Test hybrid mode fallback
- Validate all accessor functions
### 3. Migration Path
- Update existing configurations
- Migrate from ENV to config
- Document breaking changes
- Provide migration scripts
### 4. Additional Features
- Key rotation automation
- Backup/restore procedures
- Monitoring dashboards
- Alerting integration
## Files Summary
| File | Size | Lines | Purpose |
|------|------|-------|---------|
| config.defaults.toml | 6.7 KB | 270 | System defaults |
| config.schema.toml | 14 KB | 330 | Validation rules |
| config.remote.example.toml | 5.0 KB | 180 | Remote examples |
| config.local.example.toml | 8.4 KB | 290 | Local examples |
| README.md | 14 KB | 500+ | Documentation |
| MIGRATION.md | - | - | This summary |
| **Total** | **48.1 KB** | **1570+** | Complete KMS config |
## Accessor Functions Summary
| Category | Count | Examples |
|----------|-------|----------|
| Core Settings | 3 | get-kms-enabled, get-kms-mode |
| Paths | 4 | get-kms-base-path, get-kms-keys-dir |
| Local Config | 13 | get-kms-local-provider, get-kms-age-* |
| Remote Config | 19 | get-kms-remote-endpoint, get-kms-remote-tls-* |
| Hybrid Mode | 3 | get-kms-hybrid-enabled |
| Policies | 6 | get-kms-auto-rotate, get-kms-backup-path |
| Security | 6 | get-kms-enforce-key-permissions |
| Operations | 4 | get-kms-verbose, get-kms-debug |
| Helper | 1 | get-kms-config-full |
| **Total New** | **59** | - |
| **Total KMS** | **69** | (10 existing + 59 new) |
## Security Guarantees
- **No plaintext secrets** - All secrets use references
- **No hardcoded paths** - All paths use interpolation
- **Secure defaults** - TLS 1.3, 0600 permissions, no debug
- **Validation enforced** - Schema validates all configs
- **Audit logging** - All operations logged (when enabled)
- **Key rotation** - Automated rotation support
- **Permission checks** - Enforced key file permissions
- **Secret scanning** - Pattern-based secret detection
## Conclusion
Successfully created a comprehensive, independent KMS configuration system with:
- **4 config files** (defaults, schema, 2 examples)
- **59 new accessor functions**
- **Comprehensive documentation** (README + migration guide)
- **Security-first design** (no plaintext, path interpolation, validation)
- **Three operational modes** (local, remote, hybrid)
- **Backward compatibility** (existing code unchanged)
The system is ready for:
1. Integration with existing lib.nu
2. Testing and validation
3. Production deployment
All requirements met. All paths use interpolation. All security considerations documented.
View File
@ -14,7 +14,7 @@ The KMS configuration system provides a comprehensive, independent configuration
## Directory Structure ## Directory Structure
``` ```plaintext
provisioning/core/services/kms/ provisioning/core/services/kms/
├── config.defaults.toml # System defaults for all KMS settings ├── config.defaults.toml # System defaults for all KMS settings
├── config.schema.toml # Validation rules and constraints ├── config.schema.toml # Validation rules and constraints
@ -22,7 +22,7 @@ provisioning/core/services/kms/
├── config.local.example.toml # Local encryption examples ├── config.local.example.toml # Local encryption examples
├── lib.nu # KMS library functions (existing) ├── lib.nu # KMS library functions (existing)
└── README.md # This file └── README.md # This file
``` ```
## Configuration Files ## Configuration Files
@ -31,6 +31,7 @@ provisioning/core/services/kms/
Primary configuration file containing all KMS settings with sensible defaults. Primary configuration file containing all KMS settings with sensible defaults.
**Key Sections:** **Key Sections:**
- `[kms]` - Core settings (enabled, mode, version) - `[kms]` - Core settings (enabled, mode, version)
- `[kms.paths]` - Path configuration with interpolation support - `[kms.paths]` - Path configuration with interpolation support
- `[kms.local]` - Local encryption provider settings - `[kms.local]` - Local encryption provider settings
@ -43,6 +44,7 @@ Primary configuration file containing all KMS settings with sensible defaults.
### 2. config.schema.toml ### 2. config.schema.toml
Validation schema defining: Validation schema defining:
- Type constraints for all fields - Type constraints for all fields
- Value ranges and patterns - Value ranges and patterns
- Cross-field validation rules - Cross-field validation rules
@ -87,7 +89,7 @@ token_path = "{{env.HOME}}/.config/provisioning/kms-token"
# Environment variable paths # Environment variable paths
[kms.local.vault] [kms.local.vault]
token_path = "{{env.VAULT_TOKEN_PATH}}" token_path = "{{env.VAULT_TOKEN_PATH}}"
``` ```
## Security Considerations ## Security Considerations
@ -101,9 +103,10 @@ key_permissions = "0600" # Read/write for owner only
[kms.security] [kms.security]
enforce_key_permissions = true # Enforces permission checks enforce_key_permissions = true # Enforces permission checks
``` ```
**Best Practice:** **Best Practice:**
- Production keys: `0400` (read-only) - Production keys: `0400` (read-only)
- Development keys: `0600` (read/write for owner) - Development keys: `0600` (read/write for owner)
- Never use: `0644`, `0755`, or world-readable permissions - Never use: `0644`, `0755`, or world-readable permissions
@ -123,9 +126,10 @@ api_key = "vault://kms/api/key"
# ❌ WRONG - Plaintext secret # ❌ WRONG - Plaintext secret
[kms.remote.auth] [kms.remote.auth]
password = "my-secret-password" # NEVER DO THIS! password = "my-secret-password" # NEVER DO THIS!
``` ```
**Supported Secret References:** **Supported Secret References:**
- `sops://path/to/secret` - SOPS encrypted secret - `sops://path/to/secret` - SOPS encrypted secret
- `vault://path/to/secret` - HashiCorp Vault secret - `vault://path/to/secret` - HashiCorp Vault secret
- `kms://path/to/secret` - KMS-encrypted secret - `kms://path/to/secret` - KMS-encrypted secret
@ -147,9 +151,10 @@ ca_cert_path = "/etc/kms/ca.crt"
method = "mtls" method = "mtls"
client_cert_path = "/etc/kms/client.crt" client_cert_path = "/etc/kms/client.crt"
client_key_path = "/etc/kms/client.key" client_key_path = "/etc/kms/client.key"
``` ```
**Security Rules:** **Security Rules:**
- Never disable TLS verification in production - Never disable TLS verification in production
- Use mTLS when available for mutual authentication - Use mTLS when available for mutual authentication
- Store certificates outside version control - Store certificates outside version control
@ -164,9 +169,10 @@ Enable audit logging for production environments:
audit_log_enabled = true audit_log_enabled = true
audit_log_path = "{{kms.paths.base}}/audit.log" audit_log_path = "{{kms.paths.base}}/audit.log"
audit_log_format = "json" audit_log_format = "json"
``` ```
**Logged Operations:** **Logged Operations:**
- Encryption/decryption requests - Encryption/decryption requests
- Key rotation events - Key rotation events
- Authentication attempts - Authentication attempts
@ -180,9 +186,10 @@ audit_log_format = "json"
[kms.operations] [kms.operations]
debug = false # Debug exposes sensitive data in logs! debug = false # Debug exposes sensitive data in logs!
verbose = false verbose = false
``` ```
Debug mode includes: Debug mode includes:
- Plaintext key material in logs - Plaintext key material in logs
- Full request/response bodies - Full request/response bodies
- Authentication credentials - Authentication credentials
@ -202,7 +209,7 @@ secret_patterns = [
"(?i)api[_-]?key\\s*=\\s*['\"]?[^'\"\\s]+", "(?i)api[_-]?key\\s*=\\s*['\"]?[^'\"\\s]+",
"(?i)token\\s*=\\s*['\"]?[^'\"\\s]+", "(?i)token\\s*=\\s*['\"]?[^'\"\\s]+",
] ]
``` ```
### 7. Key Backup and Rotation ### 7. Key Backup and Rotation
@ -215,9 +222,10 @@ rotation_days = 90 # Rotate every 90 days
backup_enabled = true backup_enabled = true
backup_path = "{{kms.paths.base}}/backups" backup_path = "{{kms.paths.base}}/backups"
backup_retention_count = 5 # Keep last 5 backups backup_retention_count = 5 # Keep last 5 backups
``` ```
**Backup Best Practices:** **Backup Best Practices:**
- Store backups in secure, encrypted storage - Store backups in secure, encrypted storage
- Test restore procedures regularly - Test restore procedures regularly
- Document key recovery process - Document key recovery process
@ -230,29 +238,34 @@ The KMS configuration is loaded via config accessor functions in `/provisioning/
### Available Accessor Functions ### Available Accessor Functions
#### Core Settings #### Core Settings
- `get-kms-enabled` - Check if KMS is enabled - `get-kms-enabled` - Check if KMS is enabled
- `get-kms-mode` - Get operating mode (local/remote/hybrid) - `get-kms-mode` - Get operating mode (local/remote/hybrid)
- `get-kms-version` - Get KMS config version - `get-kms-version` - Get KMS config version
#### Path Accessors #### Path Accessors
- `get-kms-base-path` - Get base KMS directory - `get-kms-base-path` - Get base KMS directory
- `get-kms-keys-dir` - Get keys directory - `get-kms-keys-dir` - Get keys directory
- `get-kms-cache-dir` - Get cache directory - `get-kms-cache-dir` - Get cache directory
- `get-kms-config-dir` - Get config directory - `get-kms-config-dir` - Get config directory
#### Local Configuration #### Local Configuration
- `get-kms-local-enabled` - Check if local mode enabled - `get-kms-local-enabled` - Check if local mode enabled
- `get-kms-local-provider` - Get provider (age/sops/vault) - `get-kms-local-provider` - Get provider (age/sops/vault)
- `get-kms-local-key-path` - Get key file path - `get-kms-local-key-path` - Get key file path
- `get-kms-age-generate-on-init` - Check auto-generate setting - `get-kms-age-generate-on-init` - Check auto-generate setting
#### Remote Configuration #### Remote Configuration
- `get-kms-remote-enabled` - Check if remote mode enabled - `get-kms-remote-enabled` - Check if remote mode enabled
- `get-kms-remote-endpoint` - Get KMS server URL - `get-kms-remote-endpoint` - Get KMS server URL
- `get-kms-remote-auth-method` - Get auth method - `get-kms-remote-auth-method` - Get auth method
- `get-kms-remote-timeout` - Get connection timeout - `get-kms-remote-timeout` - Get connection timeout
#### Full Config Helper #### Full Config Helper
- `get-kms-config-full` - Get complete KMS config as record - `get-kms-config-full` - Get complete KMS config as record
### Usage Examples ### Usage Examples
@ -269,7 +282,7 @@ let kms_config = (get-kms-config-full)
# Get local key path with interpolation resolved # Get local key path with interpolation resolved
let key_path = (get-kms-local-key-path) let key_path = (get-kms-local-key-path)
``` ```
## Operational Modes ## Operational Modes
@ -278,17 +291,20 @@ let key_path = (get-kms-local-key-path)
Uses local encryption tools without external dependencies. Uses local encryption tools without external dependencies.
**Use Cases:** **Use Cases:**
- Development environments - Development environments
- Offline operations - Offline operations
- Simple encryption needs - Simple encryption needs
- No cloud KMS access - No cloud KMS access
**Supported Providers:** **Supported Providers:**
- **age** - Simple, modern encryption (recommended) - **age** - Simple, modern encryption (recommended)
- **sops** - Secret Operations with multiple backends - **sops** - Secret Operations with multiple backends
- **vault** - HashiCorp Vault Transit engine - **vault** - HashiCorp Vault Transit engine
**Example:** **Example:**
```toml ```toml
[kms] [kms]
enabled = true enabled = true
@ -298,25 +314,28 @@ mode = "local"
enabled = true enabled = true
provider = "age" provider = "age"
key_path = "{{kms.paths.keys_dir}}/age.txt" key_path = "{{kms.paths.keys_dir}}/age.txt"
``` ```
### 2. Remote Mode ### 2. Remote Mode
Connects to external KMS server for centralized key management. Connects to external KMS server for centralized key management.
**Use Cases:** **Use Cases:**
- Production environments - Production environments
- Centralized key management - Centralized key management
- Compliance requirements - Compliance requirements
- Multi-region deployments - Multi-region deployments
**Supported Integrations:** **Supported Integrations:**
- Cosmian KMS - Cosmian KMS
- AWS KMS - AWS KMS
- HashiCorp Vault (remote) - HashiCorp Vault (remote)
- Custom KMS servers - Custom KMS servers
**Example:** **Example:**
```toml ```toml
[kms] [kms]
enabled = true enabled = true
@ -330,19 +349,21 @@ endpoint = "https://kms.production.example.com"
method = "mtls" method = "mtls"
client_cert_path = "/etc/kms/client.crt" client_cert_path = "/etc/kms/client.crt"
client_key_path = "/etc/kms/client.key" client_key_path = "/etc/kms/client.key"
``` ```
### 3. Hybrid Mode ### 3. Hybrid Mode
Combines local and remote with automatic fallback. Combines local and remote with automatic fallback.
**Use Cases:** **Use Cases:**
- High availability requirements - High availability requirements
- Gradual migration from local to remote - Gradual migration from local to remote
- Offline operation support - Offline operation support
- Disaster recovery - Disaster recovery
**Example:** **Example:**
```toml ```toml
[kms] [kms]
enabled = true enabled = true
@ -360,20 +381,22 @@ endpoint = "https://kms.example.com"
enabled = true enabled = true
fallback_to_local = true fallback_to_local = true
sync_keys = false sync_keys = false
``` ```
## Authentication Methods ## Authentication Methods
### Token-based Authentication ### Token-based Authentication
```toml ```toml
[kms.remote.auth] [kms.remote.auth]
method = "token" method = "token"
token_path = "{{kms.paths.config_dir}}/token" token_path = "{{kms.paths.config_dir}}/token"
refresh_token = true refresh_token = true
token_expiry_seconds = 3600 token_expiry_seconds = 3600
``` ```
### mTLS (Mutual TLS) ### mTLS (Mutual TLS)
```toml ```toml
[kms.remote.auth] [kms.remote.auth]
method = "mtls" method = "mtls"
@ -382,44 +405,49 @@ client_key_path = "/etc/kms/client.key"
[kms.remote.tls] [kms.remote.tls]
ca_cert_path = "/etc/kms/ca.crt" ca_cert_path = "/etc/kms/ca.crt"
``` ```
### API Key ### API Key
```toml ```toml
[kms.remote.auth] [kms.remote.auth]
method = "api_key" method = "api_key"
api_key = "sops://kms/api_key" # Secret reference! api_key = "sops://kms/api_key" # Secret reference!
``` ```
### Basic Authentication ### Basic Authentication
```toml ```toml
[kms.remote.auth] [kms.remote.auth]
method = "basic" method = "basic"
username = "provisioning" username = "provisioning"
password_secret = "vault://kms/password" # Secret reference! password_secret = "vault://kms/password" # Secret reference!
``` ```
### IAM (AWS) ### IAM (AWS)
```toml ```toml
[kms.remote.auth] [kms.remote.auth]
method = "iam" method = "iam"
iam_role_arn = "arn:aws:iam::123456789012:role/kms-role" iam_role_arn = "arn:aws:iam::123456789012:role/kms-role"
``` ```
## Integration with Existing KMS Library ## Integration with Existing KMS Library
The existing KMS library (`lib.nu`) can be updated to use the new configuration: The existing KMS library (`lib.nu`) can be updated to use the new configuration:
### Current Implementation ### Current Implementation
```nushell ```nushell
# Old: Hardcoded config lookup # Old: Hardcoded config lookup
def get_kms_config [] { def get_kms_config [] {
let server_url = (get-kms-server) let server_url = (get-kms-server)
# ... # ...
} }
``` ```
### Updated Implementation ### Updated Implementation
```nushell ```nushell
# New: Use new config accessors # New: Use new config accessors
def get_kms_config [] { def get_kms_config [] {
@ -445,7 +473,7 @@ def get_kms_config [] {
} }
} }
} }
``` ```
## Validation ## Validation
@ -480,19 +508,21 @@ Configuration is validated against the schema:
### From Environment Variables to Config ### From Environment Variables to Config
**Before (ENV-based):** **Before (ENV-based):**
```bash ```bash
export PROVISIONING_KMS_SERVER="https://kms.example.com" export PROVISIONING_KMS_SERVER="https://kms.example.com"
export PROVISIONING_KMS_AUTH="certificate" export PROVISIONING_KMS_AUTH="certificate"
``` ```
**After (Config-based):** **After (Config-based):**
```toml ```toml
[kms.remote] [kms.remote]
endpoint = "https://kms.example.com" endpoint = "https://kms.example.com"
[kms.remote.auth] [kms.remote.auth]
method = "mtls" method = "mtls"
``` ```
### From SOPS to KMS Config ### From SOPS to KMS Config
@ -505,11 +535,12 @@ sops_config = "{{workspace.path}}/.sops.yaml"
[kms.local.sops] [kms.local.sops]
age_recipients = ["age1xxx...", "age1yyy..."] age_recipients = ["age1xxx...", "age1yyy..."]
``` ```
## Best Practices ## Best Practices
### 1. Development Environment ### 1. Development Environment
```toml ```toml
[kms] [kms]
mode = "local" mode = "local"
@ -525,9 +556,10 @@ debug = false # Never true, even in dev!
[kms.policies] [kms.policies]
backup_enabled = false backup_enabled = false
audit_log_enabled = false audit_log_enabled = false
``` ```
### 2. Production Environment ### 2. Production Environment
```toml ```toml
[kms] [kms]
mode = "remote" mode = "remote"
@ -559,9 +591,10 @@ disallow_plaintext_secrets = true
[kms.operations] [kms.operations]
verbose = false verbose = false
debug = false debug = false
``` ```
### 3. Hybrid/HA Environment ### 3. Hybrid/HA Environment
```toml ```toml
[kms] [kms]
mode = "hybrid" mode = "hybrid"
@ -578,42 +611,48 @@ endpoint = "https://kms.example.com"
enabled = true enabled = true
fallback_to_local = true fallback_to_local = true
sync_keys = false sync_keys = false
``` ```
## Troubleshooting ## Troubleshooting
### Issue: Permission Denied on Key File ### Issue: Permission Denied on Key File
**Error:** **Error:**
```
```plaintext
Permission denied: /path/to/age.txt Permission denied: /path/to/age.txt
``` ```
**Solution:** **Solution:**
```bash ```bash
chmod 0600 /path/to/age.txt chmod 0600 /path/to/age.txt
``` ```
Or update config: Or update config:
```toml ```toml
[kms.local.age] [kms.local.age]
key_permissions = "0600" key_permissions = "0600"
[kms.security] [kms.security]
enforce_key_permissions = true enforce_key_permissions = true
``` ```
### Issue: Remote KMS Connection Failed ### Issue: Remote KMS Connection Failed
**Error:** **Error:**
```
```plaintext
Connection timeout: https://kms.example.com Connection timeout: https://kms.example.com
``` ```
**Solutions:** **Solutions:**
1. Check network connectivity 1. Check network connectivity
2. Verify TLS certificates 2. Verify TLS certificates
3. Increase timeout: 3. Increase timeout:
```toml ```toml
[kms.remote] [kms.remote]
timeout_seconds = 60 timeout_seconds = 60
@ -623,18 +662,20 @@ Connection timeout: https://kms.example.com
### Issue: Secret Reference Not Found ### Issue: Secret Reference Not Found
**Error:** **Error:**
```
```plaintext
Secret not found: sops://kms/password Secret not found: sops://kms/password
``` ```
**Solution:** **Solution:**
1. Verify secret exists in SOPS/Vault 1. Verify secret exists in SOPS/Vault
2. Check secret path format 2. Check secret path format
3. Ensure SOPS/Vault is properly configured 3. Ensure SOPS/Vault is properly configured
## Version Compatibility ## Version Compatibility
| KMS Config Version | Nushell Version | KCL Version | Notes | | KMS Config Version | Nushell Version | Nickel Version | Notes |
|-------------------|-----------------|-------------|-------| |-------------------|-----------------|-------------|-------|
| 1.0.0 | 0.107.1+ | 0.11.3+ | Initial release | | 1.0.0 | 0.107.1+ | 0.11.3+ | Initial release |
@ -648,6 +689,7 @@ Secret not found: sops://kms/password
## Support ## Support
For issues or questions: For issues or questions:
1. Check this README 1. Check this README
2. Review example configurations 2. Review example configurations
3. Consult validation schema 3. Consult validation schema
View File
@ -19,11 +19,12 @@ display_only = true
[items.warning_details] [items.warning_details]
type = "text" type = "text"
prompt = "Cluster Deletion will:" prompt = "Cluster Deletion will:"
help = " Permanently delete all nodes in the cluster help = """
Permanently delete all nodes in the cluster
Destroy all persistent volumes and data Destroy all persistent volumes and data
Terminate all running applications and services Terminate all running applications and services
Remove all persistent configurations Remove all persistent configurations
Make cluster inaccessible - cannot be recovered" Make cluster inaccessible - cannot be recovered"""
display_only = true display_only = true
# ============================================================================ # ============================================================================
View File
@ -19,10 +19,11 @@ display_only = true
[items.warning_text] [items.warning_text]
type = "text" type = "text"
prompt = "Server Deletion will:" prompt = "Server Deletion will:"
help = " Permanently remove the server from all providers help = """
Permanently remove the server from all providers
Delete all associated data and configurations Delete all associated data and configurations
Terminate all running services Terminate all running services
Release allocated IP addresses and storage" Release allocated IP addresses and storage"""
display_only = true display_only = true
# ============================================================================ # ============================================================================
View File
@ -19,11 +19,12 @@ display_only = true
[items.warning_text] [items.warning_text]
type = "text" type = "text"
prompt = "Task Service Deletion will:" prompt = "Task Service Deletion will:"
help = " Permanently remove the service definition help = """
Permanently remove the service definition
Delete all containers and images Delete all containers and images
Remove all associated volumes and data Remove all associated volumes and data
Terminate all running tasks Terminate all running tasks
Invalidate all service references" Invalidate all service references"""
display_only = true display_only = true
# ============================================================================ # ============================================================================