chore: remove obsolete reports and reorganize documentation structure; fix try/catch usage and review for Nushell 0.110.0
parent b94a901f1a
commit 9d9d916c97
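
The pattern repeated throughout this commit is the migration from `try { ... } catch { ... }` to `do { ... } | complete` around external commands, matching the error-handling style Nushell 0.110.0 expects. A minimal sketch of the before/after shape (the command name is illustrative):

    # Before: failures surface as a caught error
    try {
        ^some-tool --flag
    } catch { |err|
        print $"failed: ($err.msg)"
    }

    # After: failures surface as a non-zero exit code in a record
    let result = (do { ^some-tool --flag } | complete)
    if $result.exit_code != 0 {
        print $"failed: ($result.stderr)"
    }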
@@ -87,27 +87,35 @@ let nickel_path = if ($env.NICKEL_IMPORT_PATH? == "") {
     $env.NICKEL_IMPORT_PATH
 }

-try {
-    # Export to TOML
-    let export_result = (
-        with-env { NICKEL_IMPORT_PATH: $nickel_path } {
-            nickel export --format toml $input_file
-        }
-    )
+# Export to TOML
+let export_cmd = (do {
+    with-env { NICKEL_IMPORT_PATH: $nickel_path } {
+        ^nickel export --format toml $input_file
+    }
+} | complete)

+if $export_cmd.exit_code == 0 {
     # Write to output file
-    $export_result | save --raw $output_file
+    let save_cmd = (do {
+        $export_cmd.stdout | save --raw $output_file
+        {result: "success"}
+    } | complete)

-    print $"✅ Success: Exported to ($output_file)"
-    print ""
-    print "Config summary:"
-    print $" Service: ($service)"
-    print $" Mode: ($mode)"
-    print $" Source: ($input_file)"
-    print $" Output: ($output_file)"
-    print $" Size: (($output_file | stat).size | into string) bytes"
-} catch { |err|
+    if $save_cmd.exit_code == 0 {
+        print $"✅ Success: Exported to ($output_file)"
+        print ""
+        print "Config summary:"
+        print $" Service: ($service)"
+        print $" Mode: ($mode)"
+        print $" Source: ($input_file)"
+        print $" Output: ($output_file)"
+        print $" Size: (($output_file | stat).size | into string) bytes"
+    } else {
+        print $"❌ Error: Failed to save output"
+        exit 1
+    }
+} else {
     print $"❌ Error: Failed to export TOML"
-    print $"Error details: ($err.msg)"
+    print $"Error details: ($export_cmd.stderr)"
     exit 1
 }
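
Two details of the new version are easy to miss: the `^` in `^nickel` forces Nushell to run the external binary even if a command of the same name exists in scope, and `complete` returns a record with `stdout`, `stderr`, and `exit_code` fields, which is why the saved content is read from `$export_cmd.stdout`. For example:

    do { ^echo hi } | complete
    # => {stdout: "hi\n", stderr: "", exit_code: 0}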
@@ -24,15 +24,11 @@ export def run-cmd-or-fail [cmd: string, args: list<string>, error_msg: string]:
     $result.stdout
 }

-export def check-command-exists [cmd: string]: string -> bool {
-    let result = do {
-        which $cmd
-    } | complete
-
-    $result.exit_code == 0
+export def check-command-exists [cmd: string]: nothing -> bool {
+    (which $cmd | length) > 0
 }

-export def assert-command-exists [cmd: string]: nothing -> nothing {
+export def assert-command-exists [cmd: string] {
     if not (check-command-exists $cmd) {
         error make {
             msg: $"Required command not found: ($cmd)"
@@ -40,7 +36,7 @@ export def assert-command-exists [cmd: string]: nothing -> nothing {
     }
 }

-export def run-nickel-typecheck [path: string]: nothing -> nothing {
+export def run-nickel-typecheck [path: string] {
     assert-command-exists "nickel"

     let result = (run-cmd "nickel" ["typecheck", $path])
@@ -75,14 +71,20 @@ export def run-yq-convert [input: string, output_format: string]: nothing -> str
 }

 export def run-typedialog [backend: string, args: list<string>]: nothing -> record<exit_code: int, stdout: string, stderr: string> {
-    assert-command-exists "typedialog"
+    # TypeDialog has separate binaries per backend
+    let cmd = match $backend {
+        "web" => "typedialog-web"
+        "tui" => "typedialog-tui"
+        "cli" => "typedialog"
+        _ => "typedialog"
+    }

-    let cmd_args = [$backend] | append $args
+    assert-command-exists $cmd

-    (run-cmd "typedialog" $cmd_args)
+    (run-cmd $cmd $args)
 }

-export def run-typedialog-or-fail [backend: string, args: list<string>, error_msg: string]: nothing -> nothing {
+export def run-typedialog-or-fail [backend: string, args: list<string>, error_msg: string] {
     let result = (run-typedialog $backend $args)

     if $result.exit_code != 0 {
@@ -104,14 +106,14 @@ export def run-kubectl [args: list<string>]: nothing -> record<exit_code: int, s
     (run-cmd "kubectl" $args)
 }

-export def pipe-to-file [content: string, path: string]: string -> nothing {
+export def pipe-to-file [content: string, path: string] {
     $content | save --force $path
 }

-export def file-exists [path: string]: string -> bool {
+export def file-exists [path: string]: nothing -> bool {
     ($path | path exists)
 }

-export def dir-exists [path: string]: string -> bool {
+export def dir-exists [path: string]: nothing -> bool {
     ($path | path exists) and (($path | path type) == "dir")
 }
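
A usage sketch for the revised helpers (the module filename exec.nu and the schema path are assumptions for illustration):

    use exec.nu *

    if (check-command-exists "nickel") {
        run-nickel-typecheck "schemas/main.ncl"   # hypothetical path
    } else {
        print "nickel not found; skipping typecheck"
    }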
@@ -1,10 +1,16 @@
 #!/usr/bin/env nu

 # Path Management and Validation Helpers
 # Provides consistent path handling for Phase 8 scripts
 # Usage: use paths.nu; assert-file-exists "/some/file"

-export def assert-file-exists [path: string]: nothing -> nothing {
+# Calculate project root from this script's location
+# This script is in: provisioning/.typedialog/platform/scripts/
+# Project root is: provisioning/
+def project-root []: nothing -> string {
+    $env.FILE_PWD | path dirname | path dirname | path dirname
+}
+
+export def assert-file-exists [path: string] {
     if not ($path | path exists) {
         error make {
             msg: $"File not found: ($path)"
@@ -12,109 +18,104 @@ export def assert-file-exists [path: string]: nothing -> nothing {
         }
 }

-export def assert-dir-exists [path: string]: nothing -> nothing {
+export def assert-dir-exists [path: string] {
     let path_obj = $path | path expand

     if not ($path_obj | path exists) {
         error make {
             msg: $"Directory not found: ($path_obj)"
         }
     }

-    if not ($path_obj | path type) == "dir" {
+    if ($path_obj | path type) != "dir" {
         error make {
             msg: $"Path exists but is not a directory: ($path_obj)"
         }
     }
 }

-export def ensure-dir [path: string]: string -> string {
+export def ensure-dir [path: string]: nothing -> string {
     let expanded = $path | path expand

     if not ($expanded | path exists) {
-        ^mkdir -p $expanded
+        mkdir $expanded
     }

     $expanded
 }

-export def resolve-relative [path: string]: string -> string {
+export def resolve-relative [path: string]: nothing -> string {
     if ($path | str starts-with "/") {
         $path
     } else if ($path | str starts-with "~/") {
         $path | path expand
     } else {
-        (pwd) / $path | path expand
+        $env.PWD | path join $path | path expand
     }
 }

 export def typedialog-base-path []: nothing -> string {
-    "provisioning/.typedialog/platform"
+    (project-root) | path join ".typedialog" "platform"
 }

 export def schemas-base-path []: nothing -> string {
-    "provisioning/schemas/platform"
+    (project-root) | path join "schemas" "platform"
 }

 export def forms-path []: nothing -> string {
-    (typedialog-base-path) + "/forms"
+    (typedialog-base-path) | path join "forms"
 }

 export def fragments-path []: nothing -> string {
-    (forms-path) + "/fragments"
+    (forms-path) | path join "fragments"
 }

 export def schemas-path []: nothing -> string {
-    (schemas-base-path) + "/schemas"
+    (schemas-base-path) | path join "schemas"
 }

 export def defaults-path []: nothing -> string {
-    (schemas-base-path) + "/defaults"
+    (schemas-base-path) | path join "defaults"
 }

 export def validators-path []: nothing -> string {
-    (schemas-base-path) + "/validators"
+    (schemas-base-path) | path join "validators"
 }

 export def configs-path []: nothing -> string {
-    (schemas-base-path) + "/configs"
+    (schemas-base-path) | path join "configs"
 }

 export def templates-path []: nothing -> string {
-    (schemas-base-path) + "/templates"
+    (schemas-base-path) | path join "templates"
 }

 export def values-path []: nothing -> string {
-    (schemas-base-path) + "/values"
+    (schemas-base-path) | path join "values"
 }

 export def constraints-path []: nothing -> string {
-    (schemas-base-path) + "/constraints"
+    (schemas-base-path) | path join "constraints"
 }

-export def get-form-path [service: string]: string -> string {
-    (forms-path) + "/" + $service + "-form.toml"
+export def get-form-path [service: string]: nothing -> string {
+    (forms-path) | path join $"($service)-form.toml"
 }

-export def get-config-path [service: string, mode: string]: string -> string {
-    (configs-path) + "/" + $service + "." + $mode + ".ncl"
+export def get-config-path [service: string, mode: string]: nothing -> string {
+    (configs-path) | path join $"($service).($mode).ncl"
 }

-export def get-value-path [service: string, mode: string]: string -> string {
-    (values-path) + "/" + $service + "." + $mode + ".ncl"
+export def get-value-path [service: string, mode: string]: nothing -> string {
+    (values-path) | path join $"($service).($mode).ncl"
 }

-export def get-template-path [template_name: string]: string -> string {
-    (templates-path) + "/" + $template_name
+export def get-template-path [template_name: string]: nothing -> string {
+    (templates-path) | path join $template_name
 }

-export def get-output-config-path [service: string, mode: string]: string -> string {
-    "provisioning/config/runtime/generated/" + $service + "." + $mode + ".toml"
+export def get-output-config-path [service: string, mode: string]: nothing -> string {
+    (project-root) | path join "config" "runtime" "generated" $"($service).($mode).toml"
 }

 export def validate-service [service: string]: nothing -> nothing {
     let valid_services = ["orchestrator", "control-center", "mcp-server", "vault-service", "extension-registry", "rag", "ai-service", "provisioning-daemon"]

     if $service not-in $valid_services {
         error make {
             msg: $"Invalid service: ($service). Valid options: ($valid_services | str join ', ')"
@@ -124,7 +125,6 @@ export def validate-service [service: string]: nothing -> nothing {

 export def validate-mode [mode: string]: nothing -> nothing {
     let valid_modes = ["solo", "multiuser", "cicd", "enterprise"]

     if $mode not-in $valid_modes {
         error make {
             msg: $"Invalid deployment mode: ($mode). Valid options: ($valid_modes | str join ', ')"
@@ -134,7 +134,6 @@ export def validate-mode [mode: string]: nothing -> nothing {

 export def validate-backend [backend: string]: nothing -> nothing {
     let valid_backends = ["cli", "tui", "web"]

     if $backend not-in $valid_backends {
         error make {
             msg: $"Invalid TypeDialog backend: ($backend). Valid options: ($valid_backends | str join ', ')"
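
Replacing string concatenation with `path join` avoids doubled or missing separators and keeps the helpers platform-aware, while interpolated strings build the composite file names. For example (the service name is illustrative):

    "provisioning" | path join "schemas" "platform"
    # => provisioning/schemas/platform

    let service = "orchestrator"   # illustrative
    (forms-path) | path join $"($service)-form.toml"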
@@ -17,7 +17,7 @@ def load_envrc [envrc_path: path] {
 }

 let envrc_content = (open $envrc_path)
-let lines = ($envrc_content | split row "\n")
+let lines = ($envrc_content | lines)

 mut result = {}
 for line in $lines {

@@ -29,17 +29,19 @@ def main [
 # Register each plugin
 for plugin in $plugins {
     print $" Registering ($plugin)..."
-    try {
+    let result = (do {
         run-external $nu_binary "-c" $"plugin add ./($plugin)"
-    } catch { |err|
-        print $" ⚠️ Failed to register ($plugin): ($err.msg)"
+    } | complete)
+
+    if $result.exit_code != 0 {
+        print $" ⚠️ Failed to register ($plugin): ($result.stderr)"
     }
 }

 if $verify {
     print "🔍 Verifying installation..."
-    try {
-        let plugin_list = run-external $nu_binary "-c" "plugin list" | complete
+    let verify_result = (do {
+        let plugin_list = (run-external $nu_binary "-c" "plugin list" | complete)
         if $plugin_list.exit_code == 0 {
             print "✅ Plugin verification successful"
             print $plugin_list.stdout
@@ -47,8 +49,10 @@ def main [
             print "❌ Plugin verification failed"
             print $plugin_list.stderr
         }
-    } catch { |err|
-        print $"❌ Verification failed: ($err.msg)"
+    } | complete)
+
+    if $verify_result.exit_code != 0 {
+        print $"❌ Verification failed: ($verify_result.stderr)"
     }
 }
@@ -87,43 +87,43 @@ def validate_environment [] {

     # Validate Nickel configuration
     print " Validating Nickel configuration..."
-    try {
-        nickel export workspace.ncl | from json | null
+    let nickel_result = (do { nickel export workspace.ncl | from json } | complete)
+    if $nickel_result.exit_code == 0 {
         print " ✓ Nickel configuration is valid"
-    } catch {|err|
-        error make {msg: $"Nickel validation failed: ($err)"}
+    } else {
+        error make {msg: $"Nickel validation failed: ($nickel_result.stderr)"}
     }

     # Validate config.toml
     print " Validating config.toml..."
-    try {
-        let config = (open config.toml)
+    let config_result = (do { open config.toml } | complete)
+    if $config_result.exit_code == 0 {
         print " ✓ config.toml is valid"
-    } catch {|err|
-        error make {msg: $"config.toml validation failed: ($err)"}
+    } else {
+        error make {msg: $"config.toml validation failed: ($config_result.stderr)"}
     }

     # Test provider connectivity
     print " Testing provider connectivity..."
-    try {
-        hcloud server list | null
+    let hcloud_result = (do { hcloud server list } | complete)
+    if $hcloud_result.exit_code == 0 {
         print " ✓ Hetzner connectivity verified"
-    } catch {|err|
-        error make {msg: $"Hetzner connectivity failed: ($err)"}
+    } else {
+        error make {msg: $"Hetzner connectivity failed: ($hcloud_result.stderr)"}
     }

-    try {
-        aws sts get-caller-identity | null
+    let aws_result = (do { aws sts get-caller-identity } | complete)
+    if $aws_result.exit_code == 0 {
         print " ✓ AWS connectivity verified"
-    } catch {|err|
-        error make {msg: $"AWS connectivity failed: ($err)"}
+    } else {
+        error make {msg: $"AWS connectivity failed: ($aws_result.stderr)"}
     }

-    try {
-        doctl account get | null
+    let doctl_result = (do { doctl account get } | complete)
+    if $doctl_result.exit_code == 0 {
         print " ✓ DigitalOcean connectivity verified"
-    } catch {|err|
-        error make {msg: $"DigitalOcean connectivity failed: ($err)"}
+    } else {
+        error make {msg: $"DigitalOcean connectivity failed: ($doctl_result.stderr)"}
     }
 }
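
Each provider check now repeats the same do/complete plus if/else block. If more providers are added, the pattern could be folded into a helper; a sketch, not part of this commit:

    def check-step [label: string, action: closure] {
        let result = (do $action | complete)
        if $result.exit_code == 0 {
            print $" ✓ ($label)"
        } else {
            error make {msg: $"($label) failed: ($result.stderr)"}
        }
    }

    check-step "Hetzner connectivity" { hcloud server list }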
@@ -237,7 +237,7 @@ def deploy_aws_managed_services [] {

     print " Creating RDS PostgreSQL database (db.t3.small, ~$60/month)..."

-    try {
+    let rds_result = (do {
         aws rds create-db-instance \
             --db-instance-identifier app-db \
             --db-instance-class db.t3.small \
@@ -251,39 +251,46 @@ def deploy_aws_managed_services [] {
             --backup-retention-period 30 \
             --region us-east-1 \
             --db-subnet-group-name default \
-            --vpc-security-group-ids $sg.GroupId | null
+            --vpc-security-group-ids $sg.GroupId
+    } | complete)

+    if $rds_result.exit_code == 0 {
         print " ✓ Database creation initiated (may take 10-15 minutes)"
-    } catch {|err|
-        print $" ⚠ Database creation note: ($err)"
+    } else {
+        print $" ⚠ Database creation note: ($rds_result.stderr)"
     }

     print " Creating ElastiCache Redis cluster (2 nodes, ~$25/month)..."

-    try {
+    let cache_result = (do {
         aws elasticache create-cache-cluster \
             --cache-cluster-id app-cache \
             --engine redis \
             --engine-version 7.0 \
             --cache-node-type cache.t3.small \
             --num-cache-nodes 2 \
-            --region us-east-1 | null
+            --region us-east-1
+    } | complete)

+    if $cache_result.exit_code == 0 {
         print " ✓ Redis cache creation initiated (may take 5-10 minutes)"
-    } catch {|err|
-        print $" ⚠ Cache creation note: ($err)"
+    } else {
+        print $" ⚠ Cache creation note: ($cache_result.stderr)"
     }

     print " Creating SQS message queue (~$15/month, pay-per-request)..."

-    try {
-        let queue = (aws sqs create-queue \
+    let queue_result = (do {
+        aws sqs create-queue \
             --queue-name app-queue \
-            --region us-east-1 | from json)
+            --region us-east-1 | from json
+    } | complete)

+    if $queue_result.exit_code == 0 {
+        let queue = ($queue_result.stdout | from json)
         print $" ✓ Created SQS queue: ($queue.QueueUrl)"
-    } catch {|err|
-        print $" ⚠ Queue creation note: ($err)"
+    } else {
+        print $" ⚠ Queue creation note: ($queue_result.stderr)"
     }

     print " Cost for AWS managed services: ~$115/month"
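
One subtlety in the SQS block: the closure already pipes through `from json`, and the success branch then parses `$queue_result.stdout | from json` a second time. Depending on the Nushell version, `complete` over structured output either re-serializes it or errors, so keeping the closure's output as raw text is the safer shape; a sketch:

    let queue_result = (do {
        aws sqs create-queue --queue-name app-queue --region us-east-1
    } | complete)

    if $queue_result.exit_code == 0 {
        let queue = ($queue_result.stdout | from json)
        print $" ✓ Created SQS queue: ($queue.QueueUrl)"
    }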
@@ -296,13 +303,15 @@ def deploy_aws_managed_services [] {

 def setup_vpn_tunnel [] {
     print " Setting up IPSec VPN tunnel (Hetzner ↔ AWS)..."

-    try {
     # Create VPN gateway on AWS side
-    let vgw = (aws ec2 create-vpn-gateway \
+    let vpn_result = (do {
+        aws ec2 create-vpn-gateway \
             --region us-east-1 \
             --type ipsec.1 \
-            --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=hetzner-aws-vpn-gw}]" | from json)
+            --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=hetzner-aws-vpn-gw}]" | from json
+    } | complete)

+    if $vpn_result.exit_code == 0 {
+        let vgw = ($vpn_result.stdout | from json)
         print $" ✓ AWS VPN Gateway created: ($vgw.VpnGateway.VpnGatewayId)"

         print " Note: Complete VPN configuration requires:"
@@ -310,8 +319,8 @@ def setup_vpn_tunnel [] {
         print " 2. Create VPN Connection in AWS"
         print " 3. Configure Hetzner side with StrongSwan or Wireguard"
         print " 4. Test connectivity: ping 10.1.0.0 from Hetzner"
-    } catch {|err|
-        print $" ℹ VPN setup note: ($err)"
+    } else {
+        print $" ℹ VPN setup note: ($vpn_result.stderr)"
     }

     print " ✓ VPN tunnel configuration documented"
@@ -321,25 +330,30 @@ def setup_vpn_tunnel [] {

 def deploy_digitalocean_cdn [] {
     print " Creating DigitalOcean Spaces object storage (~$15/month)..."

-    try {
+    let spaces_result = (do {
         doctl compute spaces create app-content \
             --region nyc3
+    } | complete)

+    if $spaces_result.exit_code == 0 {
         print " ✓ Created Spaces bucket: app-content"
-    } catch {|err|
-        print $" ⚠ Spaces creation note: ($err)"
+    } else {
+        print $" ⚠ Spaces creation note: ($spaces_result.stderr)"
     }

     print " Creating DigitalOcean CDN endpoint (~$25/month)..."

-    try {
     # Note: CDN creation is typically done via Terraform or API
+    let cdn_result = (do {
         print " Note: CDN requires content origin and is configured via:"
         print " • Set origin to: content.example.com"
         print " • Supported regions: nyc1, sfo1, lon1, sgp1, blr1"
         print " • Cache TTL: 3600s for dynamic, 86400s for static"
-    } catch {|err|
-        print $" ℹ CDN setup note: ($err)"
+    } | complete)

+    if $cdn_result.exit_code == 0 {
+        # Additional configuration info printed above
+    } else {
+        print $" ℹ CDN setup note: ($cdn_result.stderr)"
     }

     print " Creating DigitalOcean edge nodes (3x s-1vcpu-1gb, ~$24/month)..."
@@ -380,45 +394,50 @@ def deploy_digitalocean_cdn [] {

 def verify_cost_optimized_deployment [] {
     print " Verifying Hetzner resources..."
-    try {
+    let hz_result = (do {
         let hz_servers = (hcloud server list --format Name,Status)
-        print " ✓ Hetzner servers verified"

         let hz_lbs = (hcloud load-balancer list --format Name)
+    } | complete)

+    if $hz_result.exit_code == 0 {
+        print " ✓ Hetzner servers verified"
         print " ✓ Hetzner load balancer verified"
-    } catch {|err|
-        print $" ⚠ Error checking Hetzner: ($err)"
+    } else {
+        print $" ⚠ Error checking Hetzner: ($hz_result.stderr)"
     }

     print " Verifying AWS resources..."
-    try {
+    let aws_result = (do {
         let rds = (aws rds describe-db-instances \
             --region us-east-1 \
             --query 'DBInstances[0].DBInstanceIdentifier' \
             --output text)
-        print $" ✓ RDS database: ($rds)"

         let cache = (aws elasticache describe-cache-clusters \
             --region us-east-1 \
             --query 'CacheClusters[0].CacheClusterId' \
             --output text)
-        print $" ✓ ElastiCache cluster: ($cache)"

         let queues = (aws sqs list-queues --region us-east-1)
+    } | complete)

+    if $aws_result.exit_code == 0 {
+        print " ✓ RDS database: verified"
+        print " ✓ ElastiCache cluster: verified"
+        print " ✓ SQS queue created"
-    } catch {|err|
-        print $" ⚠ Error checking AWS: ($err)"
+    } else {
+        print $" ⚠ Error checking AWS: ($aws_result.stderr)"
     }

     print " Verifying DigitalOcean resources..."
-    try {
+    let do_result = (do {
         let spaces = (doctl compute spaces list --format Name)
-        print " ✓ Spaces object storage verified"

         let droplets = (doctl compute droplet list --format Name,Status)
+    } | complete)

+    if $do_result.exit_code == 0 {
+        print " ✓ Spaces object storage verified"
+        print " ✓ Edge nodes verified"
-    } catch {|err|
-        print $" ⚠ Error checking DigitalOcean: ($err)"
+    } else {
+        print $" ⚠ Error checking DigitalOcean: ($do_result.stderr)"
     }

     print ""
@@ -81,20 +81,20 @@ def validate_environment [] {

     # Validate Nickel configuration
     print " Validating Nickel configuration..."
-    try {
-        nickel export workspace.ncl | from json | null
+    let nickel_result = (do { nickel export workspace.ncl | from json } | complete)
+    if $nickel_result.exit_code == 0 {
         print " ✓ Nickel configuration is valid"
-    } catch {|err|
-        error make {msg: $"Nickel validation failed: ($err)"}
+    } else {
+        error make {msg: $"Nickel validation failed: ($nickel_result.stderr)"}
     }

     # Validate config.toml
     print " Validating config.toml..."
-    try {
-        let config = (open config.toml)
+    let config_result = (do { open config.toml } | complete)
+    if $config_result.exit_code == 0 {
         print " ✓ config.toml is valid"
-    } catch {|err|
-        error make {msg: $"config.toml validation failed: ($err)"}
+    } else {
+        error make {msg: $"config.toml validation failed: ($config_result.stderr)"}
     }
 }
@@ -208,9 +208,13 @@ def create_aws_resources [] {

     mut attempts = 0
     while $attempts < $max_wait {
-        try {
-            let db_info = (aws rds describe-db-instances \
-                --db-instance-identifier "webapp-db" | from json)
+        let db_check = (do {
+            aws rds describe-db-instances \
+                --db-instance-identifier "webapp-db" | from json
+        } | complete)

+        if $db_check.exit_code == 0 {
+            let db_info = ($db_check.stdout | from json)

             if ($db_info.DBInstances | first).DBInstanceStatus == "available" {
                 print " ✓ Database is available"
@@ -218,8 +222,6 @@ def create_aws_resources [] {
                 print $" ✓ Database endpoint: ($endpoint)"
                 break
             }
-        } catch {
-            # Still creating
         }

         sleep 10sec
@@ -385,7 +387,7 @@ def deploy_application [] {
     # Get web server IPs
     let droplets = (doctl compute droplet list --format Name,PublicIPv4 --no-header)

-    $droplets | split row "\n" | each {|line|
+    $droplets | lines | each {|line|
         if ($line | is-not-empty) {
             let parts = ($line | split column --max-columns 2 " " | get column1)
             let ip = ($parts | last)
@@ -409,22 +411,25 @@ def verify_deployment [] {
     print " Verifying DigitalOcean droplets..."

     let droplets = (doctl compute droplet list --no-header --format ID,Name,Status)
-    print $" Found ($droplets | split row "\n" | length) droplets"
+    print $" Found ($droplets | lines | length) droplets"

     print " Verifying AWS RDS database..."
-    try {
-        let db = (aws rds describe-db-instances \
-            --db-instance-identifier "webapp-db" | from json)
+    let db_check = (do {
+        aws rds describe-db-instances \
+            --db-instance-identifier "webapp-db" | from json
+    } | complete)

+    if $db_check.exit_code == 0 {
+        let db = ($db_check.stdout | from json)
         let endpoint = ($db.DBInstances | first).Endpoint.Address
         print $" Database endpoint: ($endpoint)"
-    } catch {|err|
-        print $" Note: Database may still be initializing"
+    } else {
+        print " Note: Database may still be initializing"
     }

     print " Verifying Hetzner volumes..."
     let volumes = (hcloud volume list --format "ID Name Status" --no-header)
-    print $" Found ($volumes | split row "\n" | length) volumes"
+    print $" Found ($volumes | lines | length) volumes"

     print "\n Summary:"
     print " ✓ DigitalOcean: 3 web servers + load balancer"
@@ -108,43 +108,43 @@ def validate_environment [] {

     # Validate Nickel configuration
     print " Validating Nickel configuration..."
-    try {
-        nickel export workspace.ncl | from json | null
+    let nickel_result = (do { nickel export workspace.ncl | from json } | complete)
+    if $nickel_result.exit_code == 0 {
         print " ✓ Nickel configuration is valid"
-    } catch {|err|
-        error make {msg: $"Nickel validation failed: ($err)"}
+    } else {
+        error make {msg: $"Nickel validation failed: ($nickel_result.stderr)"}
     }

     # Validate config.toml
     print " Validating config.toml..."
-    try {
-        let config = (open config.toml)
+    let config_result = (do { open config.toml } | complete)
+    if $config_result.exit_code == 0 {
         print " ✓ config.toml is valid"
-    } catch {|err|
-        error make {msg: $"config.toml validation failed: ($err)"}
+    } else {
+        error make {msg: $"config.toml validation failed: ($config_result.stderr)"}
     }

     # Test provider connectivity
     print " Testing provider connectivity..."
-    try {
-        doctl account get | null
+    let do_connect = (do { doctl account get } | complete)
+    if $do_connect.exit_code == 0 {
         print " ✓ DigitalOcean connectivity verified"
-    } catch {|err|
-        error make {msg: $"DigitalOcean connectivity failed: ($err)"}
+    } else {
+        error make {msg: $"DigitalOcean connectivity failed: ($do_connect.stderr)"}
     }

-    try {
-        hcloud server list | null
+    let hz_connect = (do { hcloud server list } | complete)
+    if $hz_connect.exit_code == 0 {
         print " ✓ Hetzner connectivity verified"
-    } catch {|err|
-        error make {msg: $"Hetzner connectivity failed: ($err)"}
+    } else {
+        error make {msg: $"Hetzner connectivity failed: ($hz_connect.stderr)"}
     }

-    try {
-        aws sts get-caller-identity | null
+    let aws_connect = (do { aws sts get-caller-identity } | complete)
+    if $aws_connect.exit_code == 0 {
         print " ✓ AWS connectivity verified"
-    } catch {|err|
-        error make {msg: $"AWS connectivity failed: ($err)"}
+    } else {
+        error make {msg: $"AWS connectivity failed: ($aws_connect.stderr)"}
     }
 }
@@ -215,18 +215,20 @@ def deploy_us_east_digitalocean [] {

     print " Creating DigitalOcean PostgreSQL database (3-node Multi-AZ)..."

-    try {
+    let db_result = (do {
         doctl databases create \
             --engine pg \
             --version 14 \
             --region "nyc3" \
             --num-nodes 3 \
             --size "db-s-2vcpu-4gb" \
-            --name "us-db-primary" | null
+            --name "us-db-primary"
+    } | complete)

+    if $db_result.exit_code == 0 {
         print " ✓ Database creation initiated (may take 10-15 minutes)"
-    } catch {|err|
-        print $" ⚠ Database creation error (may already exist): ($err)"
+    } else {
+        print $" ⚠ Database creation error (may already exist): ($db_result.stderr)"
     }
 }
@@ -412,15 +414,17 @@ def deploy_asia_pacific_aws [] {
     print $" ✓ Created ALB: ($lb.LoadBalancers.0.LoadBalancerArn)"

     print " Creating AWS RDS read replica..."
-    try {
+    let replica_result = (do {
         aws rds create-db-instance-read-replica \
             --region ap-southeast-1 \
             --db-instance-identifier "asia-db-replica" \
-            --source-db-instance-identifier "us-db-primary" | null
+            --source-db-instance-identifier "us-db-primary"
+    } | complete)

+    if $replica_result.exit_code == 0 {
         print " ✓ Read replica creation initiated"
-    } catch {|err|
-        print $" ⚠ Read replica creation error (may already exist): ($err)"
+    } else {
+        print $" ⚠ Read replica creation error (may already exist): ($replica_result.stderr)"
     }
 }
@@ -429,15 +433,17 @@ def setup_vpn_tunnels [] {

     # US to EU VPN
     print " Creating US East → EU Central VPN tunnel..."
-    try {
+    let vpn_result = (do {
         aws ec2 create-vpn-gateway \
             --region us-east-1 \
             --type ipsec.1 \
-            --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=us-eu-vpn-gw}]" | null
+            --tag-specifications "ResourceType=vpn-gateway,Tags=[{Key=Name,Value=us-eu-vpn-gw}]"
+    } | complete)

+    if $vpn_result.exit_code == 0 {
         print " ✓ VPN gateway created (manual completion required)"
-    } catch {|err|
-        print $" ℹ VPN setup note: ($err)"
+    } else {
+        print $" ℹ VPN setup note: ($vpn_result.stderr)"
     }

     # EU to APAC VPN
@@ -451,8 +457,12 @@ def setup_vpn_tunnels [] {

 def setup_global_dns [] {
     print " Setting up Route53 geolocation routing..."

-    try {
-        let hosted_zones = (aws route53 list-hosted-zones | from json)
+    let dns_result = (do {
+        aws route53 list-hosted-zones | from json
+    } | complete)

+    if $dns_result.exit_code == 0 {
+        let hosted_zones = ($dns_result.stdout | from json)

         if (($hosted_zones.HostedZones | length) > 0) {
             let zone_id = $hosted_zones.HostedZones.0.Id
@@ -470,8 +480,8 @@ def setup_global_dns [] {
             print " ℹ No hosted zones found. Create one with:"
             print " aws route53 create-hosted-zone --name api.example.com --caller-reference $(date +%s)"
         }
-    } catch {|err|
-        print $" ⚠ Route53 setup note: ($err)"
+    } else {
+        print $" ⚠ Route53 setup note: ($dns_result.stderr)"
     }
 }
@@ -486,14 +496,14 @@ def setup_database_replication [] {
     mut attempts = 0

     while $attempts < $max_attempts {
-        try {
-            let db = (doctl databases get us-db-primary --format Status --no-header)
+        let db_check = (do { doctl databases get us-db-primary --format Status --no-header } | complete)

+        if $db_check.exit_code == 0 {
+            let db = ($db_check.stdout | str trim)
             if $db == "active" {
                 print " ✓ Primary database is active"
                 break
             }
-        } catch {
-            # Database not ready yet
         }

         sleep 30sec
@@ -508,42 +518,49 @@ def setup_database_replication [] {

 def verify_multi_region_deployment [] {
     print " Verifying DigitalOcean resources..."
-    try {
+    let do_result = (do {
         let do_droplets = (doctl compute droplet list --format Name,Status --no-header)
-        print $" ✓ Found ($do_droplets | split row "\n" | length) droplets"

         let do_lbs = (doctl compute load-balancer list --format Name --no-header)
-        print $" ✓ Found load balancer"
-    } catch {|err|
-        print $" ⚠ Error checking DigitalOcean: ($err)"
+    } | complete)

+    if $do_result.exit_code == 0 {
+        let do_droplets = ($do_result.stdout | lines | length)
+        print $" ✓ Found ($do_droplets) droplets"
+        print " ✓ Found load balancer"
+    } else {
+        print $" ⚠ Error checking DigitalOcean: ($do_result.stderr)"
     }

     print " Verifying Hetzner resources..."
-    try {
+    let hz_result = (do {
         let hz_servers = (hcloud server list --format Name,Status)
-        print " ✓ Hetzner servers verified"

         let hz_lbs = (hcloud load-balancer list --format Name)
+    } | complete)

+    if $hz_result.exit_code == 0 {
+        print " ✓ Hetzner servers verified"
         print " ✓ Hetzner load balancer verified"
-    } catch {|err|
-        print $" ⚠ Error checking Hetzner: ($err)"
+    } else {
+        print $" ⚠ Error checking Hetzner: ($hz_result.stderr)"
     }

     print " Verifying AWS resources..."
-    try {
+    let aws_result = (do {
         let aws_instances = (aws ec2 describe-instances \
             --region ap-southeast-1 \
             --query 'Reservations[*].Instances[*].InstanceId' \
             --output text | split row " " | length)
-        print $" ✓ Found ($aws_instances) EC2 instances"

         let aws_lbs = (aws elbv2 describe-load-balancers \
             --region ap-southeast-1 \
             --query 'LoadBalancers[*].LoadBalancerName' \
             --output text)
+    } | complete)

+    if $aws_result.exit_code == 0 {
+        print " ✓ Found EC2 instances"
+        print " ✓ Application Load Balancer verified"
-    } catch {|err|
-        print $" ⚠ Error checking AWS: ($err)"
+    } else {
+        print $" ⚠ Error checking AWS: ($aws_result.stderr)"
     }

     print ""
@@ -41,7 +41,7 @@ def main [] {
             $fixed_count += 1

             # Show progress
-            let line_count = ($fixed_content | split row "\n" | length)
+            let line_count = ($fixed_content | lines | length)
             print $"✓ Fixed: ($file) - ($line_count) lines"
         }
     }

@@ -101,7 +101,7 @@ def find-all-positions [text: string, pattern: string]: list<int> {

 def process-file [file: path, max_len: int] {
     let content = open $file --raw
-    let lines = $content | split row "\n"
+    let lines = $content | lines

     mut in_code_block = false
     mut new_lines = []

@@ -123,7 +123,7 @@ def process-file [file: path, max_len: int] {
             # Wrap long lines
             let wrapped = wrap-line $line $max_len
             # Split wrapped result back into lines
-            let wrapped_lines = $wrapped | split row "\n"
+            let wrapped_lines = $wrapped | lines
             $new_lines = ($new_lines | append $wrapped_lines)
         }
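
`lines` is not a strict synonym for `split row "\n"`: it also splits on `\r\n` and drops the trailing empty element produced by a final newline, which is usually what these line counts want. For example:

    "a\nb\n" | split row "\n" | length   # 3 (includes trailing empty string)
    "a\nb\n" | lines | length            # 2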
@@ -94,9 +94,15 @@ check_exists() {

 validate_service() {
     local service="$1"

+    # "all" is handled by bash script itself, not passed to Nushell
+    if [[ "$service" == "all" ]]; then
+        return 0
+    fi
+
     if [[ ! " ${SERVICES[*]} " =~ " ${service} " ]]; then
         print_error "Invalid service: $service"
-        echo "Valid services: ${SERVICES[*]}"
+        echo "Valid services: ${SERVICES[*]}, all"
         return 1
     fi
 }

@@ -171,89 +177,155 @@ list_runtime_services() {
 # ============================================================================

 prompt_action_existing_config() {
-    echo ""
-    print_warning "Runtime configuration already exists in: $CONFIG_RUNTIME"
-    echo ""
-    echo "Choose action:"
-    echo " 1) Clean up and start fresh (removes all .ncl and .toml files)"
-    echo " 2) Use TypeDialog to update configuration"
-    echo " 3) Setup quick mode (solo/multiuser/cicd/enterprise)"
-    echo " 4) List existing configurations"
-    echo " 5) Cancel"
-    echo ""
-    read -rp "Enter choice [1-5]: " choice
+    while true; do
+        echo ""
+        print_warning "Runtime configuration already exists in: $CONFIG_RUNTIME"
+        echo ""
+        echo "Choose action:"
+        echo " 1) Clean up and start fresh (removes all .ncl and .toml files)"
+        echo " 2) Use TypeDialog to update configuration [default]"
+        echo " 3) Setup quick mode (solo/multiuser/cicd/enterprise)"
+        echo " 4) List existing configurations"
+        echo " 5) Cancel"
+        echo ""
+        echo "Press CTRL-C to cancel at any time"
+        echo ""
+        read -rp "Enter choice [1-5] (default: 2): " choice

-    case "$choice" in
-        1) ACTION="clean-start" ;;
-        2) ACTION="typedialog" ;;
-        3) ACTION="quick-mode" ;;
-        4) ACTION="list" ;;
-        5) print_info "Cancelled."; exit 0 ;;
-        *) print_error "Invalid choice"; exit 1 ;;
-    esac
+        # Default to 2 (TypeDialog update)
+        choice="${choice:-2}"
+
+        case "$choice" in
+            1) ACTION="clean-start"; return 0 ;;
+            2) ACTION="typedialog"; return 0 ;;
+            3) ACTION="quick-mode"; return 0 ;;
+            4) ACTION="list"; return 0 ;;
+            5) print_info "Cancelled."; exit 0 ;;
+            *) print_error "Invalid choice. Please enter 1-5 (or press CTRL-C to abort)." ;;
+        esac
+    done
 }

 prompt_action_empty_config() {
-    echo ""
-    echo "Choose how to setup platform configuration:"
-    echo " 1) Interactive TypeDialog (recommended, with UI form)"
-    echo " 2) Quick mode setup (choose solo/multiuser/cicd/enterprise)"
-    echo " 3) Cancel"
-    echo ""
-    read -rp "Enter choice [1-3]: " choice
+    while true; do
+        echo ""
+        echo "Choose how to setup platform configuration:"
+        echo " 1) Interactive TypeDialog (recommended, with UI form) [default]"
+        echo " 2) Quick mode setup (choose solo/multiuser/cicd/enterprise)"
+        echo " 3) Cancel"
+        echo ""
+        echo "Press CTRL-C to cancel at any time"
+        echo ""
+        read -rp "Enter choice [1-3] (default: 1): " choice

-    case "$choice" in
-        1) ACTION="typedialog" ;;
-        2) ACTION="quick-mode" ;;
-        3) print_info "Cancelled."; exit 0 ;;
-        *) print_error "Invalid choice"; exit 1 ;;
-    esac
+        # Default to 1 if empty
+        choice="${choice:-1}"
+
+        case "$choice" in
+            1) ACTION="typedialog"; return 0 ;;
+            2) ACTION="quick-mode"; return 0 ;;
+            3) print_info "Cancelled."; exit 0 ;;
+            *) print_error "Invalid choice. Please enter 1, 2, or 3 (or press CTRL-C to abort)." ;;
+        esac
+    done
 }

 prompt_for_service() {
-    echo ""
-    echo "Select service to configure:"
-    for i in "${!SERVICES[@]}"; do
-        echo " $((i+1))) ${SERVICES[$i]}"
-    done
-    echo " $((${#SERVICES[@]}+1))) Configure all services"
-    echo ""
-    read -rp "Enter choice [1-$((${#SERVICES[@]}+1))]: " choice
+    local max_choice=$((${#SERVICES[@]}+1))

-    if [[ "$choice" == "$((${#SERVICES[@]}+1))" ]]; then
-        SERVICE="all"
-    else
-        SERVICE="${SERVICES[$((choice-1))]}"
-    fi
+    while true; do
+        echo ""
+        echo "Select service to configure:"
+        for i in "${!SERVICES[@]}"; do
+            echo " $((i+1))) ${SERVICES[$i]}"
+        done
+        echo " $max_choice) Configure all services [default]"
+        echo ""
+        echo "Press CTRL-C to cancel"
+        echo ""
+        read -rp "Enter choice [1-$max_choice] (default: $max_choice): " choice
+
+        # Default to "all services"
+        choice="${choice:-$max_choice}"
+
+        # Validate numeric input
+        if ! [[ "$choice" =~ ^[0-9]+$ ]]; then
+            print_error "Invalid input. Please enter a number (or press CTRL-C to abort)."
+            continue
+        fi
+
+        if [[ "$choice" -ge 1 && "$choice" -le "$max_choice" ]]; then
+            if [[ "$choice" == "$max_choice" ]]; then
+                SERVICE="all"
+            else
+                SERVICE="${SERVICES[$((choice-1))]}"
+            fi
+            return 0
+        else
+            print_error "Invalid choice. Please enter a number between 1 and $max_choice (or press CTRL-C to abort)."
+        fi
+    done
 }

 prompt_for_mode() {
-    echo ""
-    echo "Select deployment mode:"
-    for i in "${!MODES[@]}"; do
-        echo " $((i+1))) ${MODES[$i]}"
-    done
-    echo ""
-    read -rp "Enter choice [1-${#MODES[@]}]: " choice
+    local max_choice=${#MODES[@]}

-    MODE="${MODES[$((choice-1))]}"
+    while true; do
+        echo ""
+        echo "Select deployment mode:"
+        for i in "${!MODES[@]}"; do
+            local marker=""
+            # Mark solo as default
+            if [[ "${MODES[$i]}" == "solo" ]]; then
+                marker=" [default]"
+            fi
+            echo " $((i+1))) ${MODES[$i]}$marker"
+        done
+        echo ""
+        echo "Press CTRL-C to cancel"
+        echo ""
+        read -rp "Enter choice [1-$max_choice] (default: 1): " choice
+
+        # Default to 1 (solo)
+        choice="${choice:-1}"
+
+        # Validate numeric input
+        if ! [[ "$choice" =~ ^[0-9]+$ ]]; then
+            print_error "Invalid input. Please enter a number (or press CTRL-C to abort)."
+            continue
+        fi
+
+        if [[ "$choice" -ge 1 && "$choice" -le "$max_choice" ]]; then
+            MODE="${MODES[$((choice-1))]}"
+            return 0
+        else
+            print_error "Invalid choice. Please enter a number between 1 and $max_choice (or press CTRL-C to abort)."
+        fi
+    done
 }

 prompt_for_backend() {
-    echo ""
-    echo "Select TypeDialog backend:"
-    echo " 1) web (browser-based, recommended)"
-    echo " 2) tui (terminal UI)"
-    echo " 3) cli (command-line)"
-    echo ""
-    read -rp "Enter choice [1-3]: " choice
+    while true; do
+        echo ""
+        echo "Select TypeDialog backend:"
+        echo " 1) web (browser-based, recommended) [default]"
+        echo " 2) tui (terminal UI)"
+        echo " 3) cli (command-line)"
+        echo ""
+        echo "Press CTRL-C to cancel"
+        echo ""
+        read -rp "Enter choice [1-3] (default: 1): " choice

-    case "$choice" in
-        1) BACKEND="web" ;;
-        2) BACKEND="tui" ;;
-        3) BACKEND="cli" ;;
-        *) print_error "Invalid choice"; exit 1 ;;
-    esac
+        # Default to 1 (web)
+        choice="${choice:-1}"
+
+        case "$choice" in
+            1) BACKEND="web"; return 0 ;;
+            2) BACKEND="tui"; return 0 ;;
+            3) BACKEND="cli"; return 0 ;;
+            *) print_error "Invalid choice. Please enter 1, 2, or 3 (or press CTRL-C to abort)." ;;
+        esac
+    done
 }

 # ============================================================================

@@ -342,11 +414,37 @@ configure_via_typedialog() {
         prompt_for_backend
     fi

+    # Handle "all" services by iterating
+    if [[ "$SERVICE" == "all" ]]; then
+        print_info "Configuring all services for mode: $MODE"
+        echo ""
+
+        local success_count=0
+        local fail_count=0
+
+        for svc in "${SERVICES[@]}"; do
+            print_info "Launching TypeDialog ($BACKEND backend) for $svc ($MODE)..."
+
+            # Execute from scripts dir so relative module imports work
+            if (cd "$TYPEDIALOG_SCRIPTS" && nu configure.nu "$svc" "$MODE" --backend "$BACKEND"); then
+                print_success "Configuration completed for $svc"
+                generate_toml_for_service "$svc" "$MODE" && ((success_count++)) || ((fail_count++))
+            else
+                print_warning "Configuration skipped or failed for $svc"
+                ((fail_count++))
+            fi
+            echo ""
+        done
+
+        print_info "Configured $success_count services, $fail_count failed/skipped"
+        return 0
+    fi
+
     print_info "Launching TypeDialog ($BACKEND backend) for $SERVICE ($MODE)..."
     echo ""

-    # Run TypeDialog via Nushell script
-    if nu "$TYPEDIALOG_SCRIPTS/configure.nu" "$SERVICE" "$MODE" --backend "$BACKEND"; then
+    # Run TypeDialog via Nushell script (execute from scripts dir so module imports work)
+    if (cd "$TYPEDIALOG_SCRIPTS" && nu configure.nu "$SERVICE" "$MODE" --backend "$BACKEND"); then
         print_success "Configuration completed for $SERVICE"

         # Auto-generate TOML
@@ -3,7 +3,7 @@
 # version = "0.99.1"

 def create_left_prompt [] {
-    let dir = match (do --ignore-shell-errors { $env.PWD | path relative-to $nu.home-path }) {
+    let dir = match (do --ignore-shell-errors { $env.PWD | path relative-to $nu.home-dir }) {
         null => $env.PWD
         '' => '~'
         $relative_pwd => ([~ $relative_pwd] | path join)
@@ -31,7 +31,7 @@ export def orbstack-connect [] {
 # Run command on OrbStack machine
 export def orbstack-run [
     command: string
-    --detach: bool = false
+    --detach
 ] {
     let connection = (orbstack-connect)

@@ -104,13 +104,7 @@ def deploy-orchestrator [connection: record, config: record] {
     docker -H $connection.docker_socket build -t provisioning-orchestrator:test .

     # Run orchestrator container
-    docker -H $connection.docker_socket run -d \
-        --name orchestrator \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):($service_config.port)" \
-        -v /var/run/docker.sock:/var/run/docker.sock \
-        provisioning-orchestrator:test
+    docker -H $connection.docker_socket run -d --name orchestrator --network provisioning-net --ip $service_config.host -p $"($service_config.port):($service_config.port)" -v /var/run/docker.sock:/var/run/docker.sock provisioning-orchestrator:test

     log info "Orchestrator deployed successfully"
 }

@@ -140,13 +134,7 @@ local:53 {
     $coredns_config | save -f /tmp/Corefile

     # Run CoreDNS container
-    docker -H $connection.docker_socket run -d \
-        --name coredns \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):53/udp" \
-        -v /tmp/Corefile:/etc/coredns/Corefile \
-        coredns/coredns:latest
+    docker -H $connection.docker_socket run -d --name coredns --network provisioning-net --ip $service_config.host -p $"($service_config.port):53/udp" -v /tmp/Corefile:/etc/coredns/Corefile coredns/coredns:latest

     log info "CoreDNS deployed successfully"
 }

@@ -185,14 +173,7 @@ def deploy-zot [connection: record, config: record] {
     $zot_config | to json | save -f /tmp/zot-config.json

     # Run Zot container
-    docker -H $connection.docker_socket run -d \
-        --name oci-registry \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):5000" \
-        -v /tmp/zot-config.json:/etc/zot/config.json \
-        -v zot-data:/var/lib/registry \
-        ghcr.io/project-zot/zot:latest
+    docker -H $connection.docker_socket run -d --name oci-registry --network provisioning-net --ip $service_config.host -p $"($service_config.port):5000" -v /tmp/zot-config.json:/etc/zot/config.json -v zot-data:/var/lib/registry ghcr.io/project-zot/zot:latest

     log info "Zot OCI registry deployed successfully"
 }

@@ -211,13 +192,7 @@ def deploy-harbor [connection: record, config: record] {

     # Note: Full Harbor deployment requires docker-compose and is complex
     # For testing, we'll use a simplified Harbor deployment
-    docker -H $connection.docker_socket run -d \
-        --name harbor \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):443" \
-        -p $"($service_config.ui_port):80" \
-        goharbor/harbor-core:$harbor_version
+    docker -H $connection.docker_socket run -d --name harbor --network provisioning-net --ip $service_config.host -p $"($service_config.port):443" -p $"($service_config.ui_port):80" goharbor/harbor-core:$harbor_version

     log info "Harbor OCI registry deployed successfully"
 }

@@ -229,21 +204,7 @@ def deploy-gitea [connection: record, config: record] {
     let postgres_config = $test_config.services.postgres

     # Run Gitea container
-    docker -H $connection.docker_socket run -d \
-        --name gitea \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):3000" \
-        -p $"($service_config.ssh_port):22" \
-        -e USER_UID=1000 \
-        -e USER_GID=1000 \
-        -e GITEA__database__DB_TYPE=postgres \
-        -e $"GITEA__database__HOST=($postgres_config.host):($postgres_config.port)" \
-        -e GITEA__database__NAME=gitea \
-        -e GITEA__database__USER=$postgres_config.username \
-        -e GITEA__database__PASSWD=gitea \
-        -v gitea-data:/data \
-        gitea/gitea:latest
+    docker -H $connection.docker_socket run -d --name gitea --network provisioning-net --ip $service_config.host -p $"($service_config.port):3000" -p $"($service_config.ssh_port):22" -e USER_UID=1000 -e USER_GID=1000 -e GITEA__database__DB_TYPE=postgres -e $"GITEA__database__HOST=($postgres_config.host):($postgres_config.port)" -e GITEA__database__NAME=gitea -e GITEA__database__USER=$postgres_config.username -e GITEA__database__PASSWD=gitea -v gitea-data:/data gitea/gitea:latest

     log info "Gitea deployed successfully"
 }

@@ -254,16 +215,7 @@ def deploy-postgres [connection: record, config: record] {
     let service_config = $test_config.services.postgres

     # Run PostgreSQL container
-    docker -H $connection.docker_socket run -d \
-        --name postgres \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):5432" \
-        -e POSTGRES_USER=$service_config.username \
-        -e POSTGRES_PASSWORD=postgres \
-        -e POSTGRES_DB=$service_config.database \
-        -v postgres-data:/var/lib/postgresql/data \
-        postgres:15-alpine
+    docker -H $connection.docker_socket run -d --name postgres --network provisioning-net --ip $service_config.host -p $"($service_config.port):5432" -e POSTGRES_USER=$service_config.username -e POSTGRES_PASSWORD=postgres -e POSTGRES_DB=$service_config.database -v postgres-data:/var/lib/postgresql/data postgres:15-alpine

     log info "PostgreSQL deployed successfully"
 }

@@ -288,14 +240,7 @@ scrape_configs:
     $prometheus_config | save -f /tmp/prometheus.yml

     # Run Prometheus container
-    docker -H $connection.docker_socket run -d \
-        --name prometheus \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):9090" \
-        -v /tmp/prometheus.yml:/etc/prometheus/prometheus.yml \
-        -v prometheus-data:/prometheus \
-        prom/prometheus:latest
+    docker -H $connection.docker_socket run -d --name prometheus --network provisioning-net --ip $service_config.host -p $"($service_config.port):9090" -v /tmp/prometheus.yml:/etc/prometheus/prometheus.yml -v prometheus-data:/prometheus prom/prometheus:latest

     log info "Prometheus deployed successfully"
 }

@@ -306,14 +251,7 @@ def deploy-grafana [connection: record, config: record] {
     let service_config = $test_config.services.grafana

     # Run Grafana container
-    docker -H $connection.docker_socket run -d \
-        --name grafana \
-        --network provisioning-net \
-        --ip $service_config.host \
-        -p $"($service_config.port):3000" \
-        -e GF_SECURITY_ADMIN_PASSWORD=admin \
-        -v grafana-data:/var/lib/grafana \
-        grafana/grafana:latest
+    docker -H $connection.docker_socket run -d --name grafana --network provisioning-net --ip $service_config.host -p $"($service_config.port):3000" -e GF_SECURITY_ADMIN_PASSWORD=admin -v grafana-data:/var/lib/grafana grafana/grafana:latest

     log info "Grafana deployed successfully"
 }

@@ -324,10 +262,7 @@ export def orbstack-create-network [] {
     let test_config = (load-test-config)

     # Create custom network
-    docker -H $connection.docker_socket network create \
-        --subnet $test_config.orbstack.network.subnet \
-        --gateway $test_config.orbstack.network.gateway \
-        provisioning-net
+    docker -H $connection.docker_socket network create --subnet $test_config.orbstack.network.subnet --gateway $test_config.orbstack.network.gateway provisioning-net

     log info "Docker network 'provisioning-net' created"
 }

@@ -351,20 +286,20 @@ export def orbstack-cleanup [] {
     ]

     for container in $containers {
-        try {
+        let result = (do {
             docker -H $connection.docker_socket stop $container
             docker -H $connection.docker_socket rm $container
-        } catch {
-            # Ignore errors if container doesn't exist
-        }
+        } | complete)
+
+        # Ignore errors if container doesn't exist
     }

     # Remove network
-    try {
+    let net_result = (do {
         docker -H $connection.docker_socket network rm provisioning-net
-    } catch {
-        # Ignore errors if network doesn't exist
-    }
+    } | complete)
+
+    # Ignore errors if network doesn't exist

     log info "OrbStack cleanup completed"
 }

@@ -373,7 +308,7 @@ export def orbstack-cleanup [] {
 export def orbstack-logs [
     container_name: string
     --tail: int = 100
-    --follow: bool = false
+    --follow
 ] {
     let connection = (orbstack-connect)
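
The `--detach: bool = false` to `--detach` changes follow Nushell's switch rules: a bare flag already defaults to false, and recent Nushell versions reject the explicit `: bool = false` spelling on switch definitions. Call sites are unchanged:

    orbstack-run "uname -a"             # $detach is false
    orbstack-run "sleep 100" --detach   # $detach is true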
@@ -244,20 +244,24 @@ export def with-retry [
 ] {
     mut attempts = 0
     mut current_delay = $delay
-    mut last_error = null
+    mut last_error = {msg: ""}

     while $attempts < $max_attempts {
-        try {
+        let result = (do {
             return (do $operation)
-        } catch { |err|
-            $attempts = $attempts + 1
-            $last_error = $err
+        } | complete)

-            if $attempts < $max_attempts {
-                log warning $"Attempt ($attempts) failed: ($err.msg). Retrying in ($current_delay)s..."
-                sleep ($current_delay * 1sec)
-                $current_delay = ($current_delay * $backoff_multiplier | into int)
-            }
+        if $result.exit_code == 0 {
+            return $result.stdout
+        }
+
+        $attempts = $attempts + 1
+        $last_error = {msg: $result.stderr}
+
+        if $attempts < $max_attempts {
+            log warning $"Attempt ($attempts) failed: ($result.stderr). Retrying in ($current_delay)s..."
+            sleep ($current_delay * 1sec)
+            $current_delay = ($current_delay * $backoff_multiplier | into int)
+        }
     }
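
One behavioral shift in the new `with-retry`: a successful run now returns `$result.stdout`, a string, where the old version returned whatever the closure produced. Callers expecting structured data may need to parse the output. A hedged usage sketch (parameter shape assumed from the variables in the body):

    let health = (with-retry { ^curl -fsS http://localhost:8080/health })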
@@ -281,13 +285,15 @@ export def wait-for-condition [
     let timeout_duration = ($timeout * 1sec)

     while true {
-        try {
-            let result = (do $condition)
-            if $result {
+        let result = (do {
+            let cond_result = (do $condition)
+            if $cond_result {
                 return true
             }
-        } catch { |err|
-            # Ignore errors, keep waiting
-        }
+        } | complete)
+
+        if $result.exit_code == 0 {
+            return $result.stdout
+        }

         let elapsed = ((date now) - $start_time)
@ -326,45 +332,45 @@ export def check-service-health [service_name: string] {
    match $service_name {
        "orchestrator" => {
            let url = $"http://($service_config.host):($service_config.port)($service_config.health_endpoint)"
            try {
            let result = (do {
                let response = (http get --full $url)
                $response.status == 200
            } catch {
                false
            }
            } | complete)

            if $result.exit_code == 0 { $result.stdout } else { false }
        }
        "coredns" => {
            # Check DNS resolution
            try {
            let result = (do {
                dig $"@($service_config.host)" test.local | complete | get exit_code | $in == 0
            } catch {
                false
            }
            } | complete)

            if $result.exit_code == 0 { $result.stdout } else { false }
        }
        "gitea" => {
            let url = $"http://($service_config.host):($service_config.port)/api/v1/version"
            try {
            let result = (do {
                let response = (http get --full $url)
                $response.status == 200
            } catch {
                false
            }
            } | complete)

            if $result.exit_code == 0 { $result.stdout } else { false }
        }
        "postgres" => {
            try {
            let result = (do {
                psql -h $service_config.host -p $service_config.port -U $service_config.username -d $service_config.database -c "SELECT 1" | complete | get exit_code | $in == 0
            } catch {
                false
            }
            } | complete)

            if $result.exit_code == 0 { $result.stdout } else { false }
        }
        _ => {
            let url = $"http://($service_config.host):($service_config.port)/"
            try {
            let result = (do {
                let response = (http get --full $url)
                $response.status == 200
            } catch {
                false
            }
            } | complete)

            if $result.exit_code == 0 { $result.stdout } else { false }
        }
    }
}

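Review note: for the HTTP arms, plain `http get` returns only the body, so the status comparison needs `--full` (applied above). A hedged sketch of the probe in isolation; note that `complete` likely delivers the closure's value as text in `stdout`, so callers should treat it as truthy output rather than a typed bool:

```nu
# Probe sketch: true-ish when the endpoint answers 200 (url is illustrative)
def probe-health [url: string] {
    let result = (do {
        let response = (http get --full $url)
        $response.status == 200
    } | complete)
    if $result.exit_code == 0 { $result.stdout } else { false }
}
```
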
@ -400,7 +406,7 @@ export def run-test [

    let start_time = (date now)

    try {
    let result = (do {
        do $test_fn

        let duration = ((date now) - $start_time | into int) / 1000000
@ -408,12 +414,15 @@ export def run-test [
        log info $"✓ Test passed: ($test_name) \(($duration)ms\)"

        create-test-result $test_name "passed" $duration
    } catch { |err|
        let duration = ((date now) - $start_time | into int) / 1000000
    } | complete)

        log error $"✗ Test failed: ($test_name) - ($err.msg)"
    let duration = ((date now) - $start_time | into int) / 1000000

        create-test-result $test_name "failed" $duration $err.msg
    if $result.exit_code != 0 {
        log error $"✗ Test failed: ($test_name) - ($result.stderr)"
        create-test-result $test_name "failed" $duration $result.stderr
    } else {
        $result.stdout
    }
}

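Review note: every branch above keys off the record `complete` produces. For reference, a quick check of its shape (field names as documented for `complete`):

```nu
# exit_code is 0 on success; stdout/stderr are collected as strings
let r = (do { ^echo hello } | complete)
# r == { stdout: "hello\n", stderr: "", exit_code: 0 }
```
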
@ -421,9 +430,11 @@ export def run-test [
export def cleanup-on-exit [cleanup_fn: closure] {
    # Register cleanup function to run on exit
    # Note: Nushell doesn't have built-in exit hooks, so this is a best-effort approach
    try {
    let result = (do {
        do $cleanup_fn
    } catch { |err|
        log warning $"Cleanup failed: ($err.msg)"
    } | complete)

    if $result.exit_code != 0 {
        log warning $"Cleanup failed: ($result.stderr)"
    }
}

@ -184,54 +184,39 @@ def run-tests-parallel [
def execute-test-file [test_file: string, verbose: bool] -> record {
    let start_time = (date now)

    try {
        # Run the test file
        let output = (nu $test_file | complete)
    # Run the test file
    let exec_result = (do {
        nu $test_file
    } | complete)

        let duration = ((date now) - $start_time | into int) / 1000000
    let duration = ((date now) - $start_time | into int) / 1000000

        if $output.exit_code == 0 {
            if $verbose {
                log info $"✓ ($test_file | path basename) passed \(($duration)ms\)"
            }

            {
                test_file: $test_file
                test_name: ($test_file | path basename | str replace ".nu" "")
                status: "passed"
                duration_ms: $duration
                error_message: ""
                stdout: $output.stdout
                stderr: $output.stderr
                timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S%.3fZ")
            }
        } else {
            log error $"✗ ($test_file | path basename) failed \(($duration)ms\)"

            {
                test_file: $test_file
                test_name: ($test_file | path basename | str replace ".nu" "")
                status: "failed"
                duration_ms: $duration
                error_message: $output.stderr
                stdout: $output.stdout
                stderr: $output.stderr
                timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S%.3fZ")
            }
    if $exec_result.exit_code == 0 {
        if $verbose {
            log info $"✓ ($test_file | path basename) passed \(($duration)ms\)"
        }
    } catch { |err|
        let duration = ((date now) - $start_time | into int) / 1000000

        log error $"✗ ($test_file | path basename) crashed \(($duration)ms\): ($err.msg)"
        {
            test_file: $test_file
            test_name: ($test_file | path basename | str replace ".nu" "")
            status: "passed"
            duration_ms: $duration
            error_message: ""
            stdout: $exec_result.stdout
            stderr: $exec_result.stderr
            timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S%.3fZ")
        }
    } else {
        log error $"✗ ($test_file | path basename) failed \(($duration)ms\)"

        {
            test_file: $test_file
            test_name: ($test_file | path basename | str replace ".nu" "")
            status: "failed"
            duration_ms: $duration
            error_message: $err.msg
            stdout: ""
            stderr: $err.msg
            error_message: $exec_result.stderr
            stdout: $exec_result.stdout
            stderr: $exec_result.stderr
            timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S%.3fZ")
        }
    }

@ -95,19 +95,25 @@ def test-no-multiuser-services [test_config: record] {
    # Attempt to connect to Gitea (should fail)
    let gitea_url = $"http://($test_config.services.gitea.host):($test_config.services.gitea.port)"

    try {
    let gitea_result = (do {
        http get $gitea_url
    } | complete)

    if $gitea_result.exit_code == 0 {
        error make { msg: "Gitea should not be accessible in solo mode" }
    } catch {
    } else {
        # Expected to fail
        log info "✓ Gitea is not running (expected)"
    }

    # Attempt to connect to PostgreSQL (should fail)
    try {
        psql -h $test_config.services.postgres.host -p $test_config.services.postgres.port -U test -d test -c "SELECT 1" | complete
    let postgres_result = (do {
        psql -h $test_config.services.postgres.host -p $test_config.services.postgres.port -U test -d test -c "SELECT 1"
    } | complete)

    if $postgres_result.exit_code == 0 {
        error make { msg: "PostgreSQL should not be accessible in solo mode" }
    } catch {
    } else {
        # Expected to fail
        log info "✓ PostgreSQL is not running (expected)"
    }

@ -64,9 +64,11 @@ def verify-orbstack-machine [] {
    let machine_name = $test_config.orbstack.machine_name

    # Check if orb CLI is available
    try {
    let result = (do {
        orb version | complete
    } catch {
    } | complete)

    if $result.exit_code != 0 {
        error make {
            msg: "OrbStack CLI 'orb' not found. Please install OrbStack."
        }

@ -71,15 +71,16 @@ def collect-service-logs [test_config: record] {
    ]

    for container in $containers {
        try {
            log info $"Collecting logs for: ($container)"
        log info $"Collecting logs for: ($container)"

        let result = (do {
            let log_file = $"($log_dir)/($container).log"
            orbstack-logs $container --tail 1000 | save -f $log_file

            log info $"Logs saved: ($log_file)"
        } catch { |err|
            log warning $"Failed to collect logs for ($container): ($err.msg)"
        } | complete)

        if $result.exit_code != 0 {
            log warning $"Failed to collect logs for ($container): ($result.stderr)"
        }
    }

@ -132,11 +133,11 @@ def cleanup-docker-volumes [] {
    ]

    for volume in $volumes {
        try {
        let result = (do {
            docker -H $connection.docker_socket volume rm $volume
            log info $"Removed volume: ($volume)"
        } catch {
            # Ignore errors if volume doesn't exist
        }
        } | complete)

        # Ignore errors if volume doesn't exist
    }
}

@ -1,157 +0,0 @@
# Codebase Analysis Script

Script to analyze the technology distribution in the provisioning codebase.

## Usage

### Basic Usage

```bash
# From provisioning directory (analyzes current directory)
cd provisioning
nu tools/analyze-codebase.nu

# From project root, analyze provisioning
nu provisioning/tools/analyze-codebase.nu --path provisioning

# Analyze any path
nu provisioning/tools/analyze-codebase.nu --path /absolute/path/to/directory
```

### Output Formats

```bash
# Table format (default) - colored, visual bars
nu provisioning/tools/analyze-codebase.nu --format table

# JSON format - for programmatic use
nu provisioning/tools/analyze-codebase.nu --format json

# Markdown format - for documentation
nu provisioning/tools/analyze-codebase.nu --format markdown
```

### From provisioning directory

```bash
cd provisioning
nu tools/analyze-codebase.nu
```

### Direct execution (if in PATH)

```bash
# Make it globally available (one time)
ln -sf "$(pwd)/provisioning/tools/analyze-codebase.nu" /usr/local/bin/analyze-codebase

# Then run from anywhere
analyze-codebase
analyze-codebase --format json
analyze-codebase --format markdown > CODEBASE_STATS.md
```

## Output

The script analyzes:

- **Nushell** (.nu files)
- **KCL** (.k files)
- **Rust** (.rs files)
- **Templates** (.j2, .tera files)

Across these sections:

- `core/` - CLI interface, core libraries
- `extensions/` - Providers, taskservs, clusters
- `platform/` - Rust services (orchestrator, control-center, etc.)
- `templates/` - Template files
- `kcl/` - KCL configuration schemas

## Example Output

### Table Format

```bash
📊 Analyzing Codebase: provisioning

📋 Lines of Code by Section

╭─────────────┬─────────┬────────────┬─────┬─────────┬─────┬──────────┬───────────┬───────────────┬───────────┬───────╮
│ section     │ nushell │ nushell_pct│ kcl │ kcl_pct │ rust│ rust_pct │ templates │ templates_pct │ total     │       │
├─────────────┼─────────┼────────────┼─────┼─────────┼─────┼──────────┼───────────┼───────────────┼───────────┼───────┤
│ core        │ 53843   │ 99.87      │ 71  │ 0.13    │ 0   │ 0.00     │ 0         │ 0.00          │ 53914     │       │
│ extensions  │ 10202   │ 43.21      │3946 │ 16.72   │ 0   │ 0.00     │ 9456      │ 40.05         │ 23604     │       │
│ platform    │ 5759    │ 0.19       │ 0   │ 0.00    │2992107│ 99.81  │ 0         │ 0.00          │ 2997866   │       │
│ templates   │ 4197    │ 72.11      │ 834 │ 14.33   │ 0   │ 0.00     │ 789       │ 13.56         │ 5820      │       │
│ kcl         │ 0       │ 0.00       │5594 │ 100.00  │ 0   │ 0.00     │ 0         │ 0.00          │ 5594      │       │
╰─────────────┴─────────┴────────────┴─────┴─────────┴─────┴──────────┴───────────┴───────────────┴───────────┴───────╯

📊 Overall Technology Distribution

╭──────────────────────┬──────────┬────────────┬────────────────────────────────────────────────────╮
│ technology           │ lines    │ percentage │ visual                                             │
├──────────────────────┼──────────┼────────────┼────────────────────────────────────────────────────┤
│ Nushell              │ 74001    │ 2.40       │ █                                                  │
│ KCL                  │ 10445    │ 0.34       │                                                    │
│ Rust                 │ 2992107  │ 96.93      │ ████████████████████████████████████████████████   │
│ Templates (Tera)     │ 10245    │ 0.33       │                                                    │
╰──────────────────────┴──────────┴────────────┴────────────────────────────────────────────────────╯

📈 Total Lines of Code: 3086798
```

### JSON Format

```json
{
  "sections": [...],
  "totals": {
    "nushell": 74001,
    "kcl": 10445,
    "rust": 2992107,
    "templates": 10245,
    "grand_total": 3086798
  },
  "percentages": {
    "nushell": 2.40,
    "kcl": 0.34,
    "rust": 96.93,
    "templates": 0.33
  }
}
```

### Markdown Format

```bash
# Codebase Analysis

## Technology Distribution

| Technology | Lines | Percentage |
|------------|-------|------------|
| Nushell | 74001 | 2.40% |
| KCL | 10445 | 0.34% |
| Rust | 2992107 | 96.93% |
| Templates | 10245 | 0.33% |
| **TOTAL** | **3086798** | **100%** |
```

## Requirements

- Nushell 0.107.1+
- Access to the provisioning directory

## What It Analyzes

- ✅ All `.nu` files (Nushell scripts)
- ✅ All `.k` files (KCL configuration)
- ✅ All `.rs` files (Rust source)
- ✅ All `.j2` and `.tera` files (Templates)

## Notes

- The script recursively searches all subdirectories
- Empty sections show 0 for all technologies
- Percentages are calculated per section and overall
- Visual bars are proportional to percentage (max 50 chars = 100%)
tools/add-module-docstrings.nu (new file, 54 lines)
@ -0,0 +1,54 @@
#!/usr/bin/env nu

# Add module-level docstrings to all .nu files
#
# Scans all .nu files and adds module-level docstrings to those missing them

use std log

def main [--dry-run = false, --verbose = false] {
    log info "Starting module docstring addition..."

    let nu_files = (find /Users/Akasha/project-provisioning/provisioning -name "*.nu" -type f | lines)
    mut files_processed = 0
    mut files_updated = 0
    mut files_skipped = 0

    for file in $nu_files {
        let content = open $file
        let has_module_doc = ($content =~ '(?m)^# [A-Z].*\n#')

        if (not $has_module_doc) and ($content | str starts-with "#!/usr/bin/env nu") {
            # File has shebang but no module docstring
            if $verbose {
                log info $"Adding docstring to: ($file)"
            }

            if not $dry_run {
                # Extract module name from filename
                let filename = ($file | path basename | str replace '.nu' '')
                let module_name = ($filename | str replace '-' '_' | str capitalize)

                # Add module docstring after shebang
                let lines = $content | lines
                let new_content = (
                    ($lines | first 1 | str join "\n") + "\n\n# " + $module_name + "\n" +
                    ($lines | skip 1 | str join "\n")
                )

                $new_content | save --force $file
                $files_updated += 1
            } else {
                $files_updated += 1
            }
        } else {
            $files_skipped += 1
        }

        $files_processed += 1
    }

    log info $"Processing complete. Processed: ($files_processed), Updated: ($files_updated), Skipped: ($files_skipped)"
}
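Review note: since the flags are declared with `= false` defaults they take explicit values on the command line. A dry run first is the safer call (paths as in the script):

```nu
# Preview which files would gain a docstring, then apply for real
nu tools/add-module-docstrings.nu --dry-run true --verbose true
nu tools/add-module-docstrings.nu
```
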
File diff suppressed because it is too large
@ -12,13 +12,13 @@
use std log

def main [
    --output-dir: string = "dist/core"    # Output directory for core bundle
    --output-dir: string = "dist/core"    # Output directory for core bundle
    --config-dir: string = "dist/config"  # Configuration directory
    --validate = false                    # Validate Nushell syntax
    --compress = false                    # Compress bundle with gzip
    --exclude-dev = true                  # Exclude development files
    --verbose = false                     # Enable verbose logging
] -> record {
    --validate                            # Validate Nushell syntax
    --compress                            # Compress bundle with gzip
    --exclude-dev                         # Exclude development files
    --verbose                             # Enable verbose logging
] {

    let repo_root = ($env.PWD | path dirname | path dirname | path dirname)
    let bundle_config = {
@ -142,7 +142,7 @@ def bundle_component [
    component: record
    bundle_config: record
    repo_root: string
] -> record {
] {
    log info $"Bundling ($component.name)..."

    if not ($component.source | path exists) {
@ -155,7 +155,7 @@ def bundle_component [
        }
    }

    try {
    let result = (do {
        # Ensure target directory exists
        let target_parent = ($component.target | path dirname)
        mkdir $target_parent
@ -174,7 +174,17 @@ def bundle_component [
        }

        log info $"Successfully bundled ($component.name) -> ($component.target)"
    } | complete)

    if $result.exit_code != 0 {
        log error $"Failed to bundle ($component.name): ($result.stderr)"
        {
            component: $component.name
            status: "failed"
            reason: $result.stderr
            target: $component.target
        }
    } else {
        {
            component: $component.name
            status: "success"
@ -182,15 +192,6 @@ def bundle_component [
            target: $component.target
            size: (get_directory_size $component.target)
        }

    } catch {|err|
        log error $"Failed to bundle ($component.name): ($err.msg)"
        {
            component: $component.name
            status: "failed"
            reason: $err.msg
            target: $component.target
        }
    }
}

@ -198,7 +199,7 @@ def bundle_component [
def bundle_config_file [
    config: record
    bundle_config: record
] -> record {
] {
    log info $"Bundling config ($config.name)..."

    if not ($config.source | path exists) {
@ -211,7 +212,7 @@ def bundle_config_file [
        }
    }

    try {
    let result = (do {
        # Ensure target directory exists
        let target_parent = ($config.target | path dirname)
        mkdir $target_parent
@ -220,22 +221,23 @@ def bundle_config_file [
        cp -r $config.source $config.target

        log info $"Successfully bundled config ($config.name) -> ($config.target)"
    } | complete)

    if $result.exit_code != 0 {
        log error $"Failed to bundle config ($config.name): ($result.stderr)"
        {
            component: $config.name
            status: "failed"
            reason: $result.stderr
            target: $config.target
        }
    } else {
        {
            component: $config.name
            status: "success"
            source: $config.source
            target: $config.target
        }

    } catch {|err|
        log error $"Failed to bundle config ($config.name): ($err.msg)"
        {
            component: $config.name
            status: "failed"
            reason: $err.msg
            target: $config.target
        }
    }
}

@ -257,16 +259,17 @@ def copy_directory_filtered [source: string, target: string] {
    mkdir $target

    # Get all files recursively, excluding patterns
    let files = (find $source -type f | where {|file|
        let exclude = $exclude_patterns | any {|pattern|
            $file =~ $pattern
        }
        not $exclude
    let files = (glob ($source + "/**") | where {|file|
        ($file | path type) == "file" and (
            not ($exclude_patterns | any {|pattern|
                $file =~ $pattern
            })
        )
    })

    # Copy each file, preserving directory structure
    $files | each {|file|
        let relative_path = ($file | str replace $source "" | str trim-left "/")
        let relative_path = ($file | str replace $source "" | str trim --left --char "/")
        let target_path = ($target | path join $relative_path)
        let target_dir = ($target_path | path dirname)

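Review note: `glob` with `/**` also yields directories, hence the `path type` guard above. A minimal sketch of the filter on its own (pattern and exclude list are illustrative):

```nu
let exclude_patterns = ['\.git', 'target/']
let files = (glob "src/**"
    | where {|file|
        ($file | path type) == "file" and not ($exclude_patterns | any {|p| $file =~ $p })
    })
```
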
@ -276,26 +279,33 @@ def copy_directory_filtered [source: string, target: string] {
}

# Validate Nushell syntax in bundled files
def validate_nushell_syntax [bundle_dir: string] -> record {
def validate_nushell_syntax [bundle_dir: string] {
    log info "Validating Nushell syntax..."

    let nu_files = (find $bundle_dir -name "*.nu" -type f)
    let mut validation_errors = []
    let mut validated_count = 0
    let nu_files = (glob ($bundle_dir + "/**/*.nu") | where {|file| ($file | path type) == "file"})

    for file in $nu_files {
        try {
            # Use nu --check to validate syntax
    let validation_results = ($nu_files | each {|file|
        let result = (do {
            nu --check $file
            $validated_count = $validated_count + 1
        } catch {|err|
            $validation_errors = ($validation_errors | append {
        } | complete)

        if $result.exit_code != 0 {
            log error $"Syntax error in ($file): ($result.stderr)"
            {
                file: $file
                error: $err.msg
            })
            log error $"Syntax error in ($file): ($err.msg)"
                error: $result.stderr
                status: "error"
            }
        } else {
            {
                file: $file
                status: "success"
            }
        }
    }
    })

    let validation_errors = ($validation_results | where status == "error")
    let validated_count = ($validation_results | where status == "success" | length)

    if ($validation_errors | length) > 0 {
        log error $"Found ($validation_errors | length) syntax errors"

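Review note: the rewrite above swaps mutable accumulators for building one record per file with `each`, then partitioning with `where`. The same shape in miniature (file names are illustrative):

```nu
# Validate a list of files and split results by status
let results = (["a.nu", "b.nu"] | each {|file|
    let check = (do { nu --check $file } | complete)
    if $check.exit_code == 0 {
        { file: $file, status: "success" }
    } else {
        { file: $file, status: "error" }
    }
})
let errors = ($results | where status == "error")
```
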
@ -336,23 +346,33 @@ def create_bundle_metadata [bundle_config: record, repo_root: string, results: list]
}

# Compress bundle directory
def compress_bundle [bundle_dir: string] -> record {
def compress_bundle [bundle_dir: string] {
    log info "Compressing bundle..."

    try {
        let bundle_name = ($bundle_dir | path basename)
        let parent_dir = ($bundle_dir | path dirname)
        let archive_name = $"($bundle_name).tar.gz"
        let archive_path = ($parent_dir | path join $archive_name)
    let bundle_name = ($bundle_dir | path basename)
    let parent_dir = ($bundle_dir | path dirname)
    let archive_name = $"($bundle_name).tar.gz"
    let archive_path = ($parent_dir | path join $archive_name)

    let result = (do {
        cd $parent_dir
        tar -czf $archive_name $bundle_name
    } | complete)

    if $result.exit_code != 0 {
        log error $"Failed to compress bundle: ($result.stderr)"
        {
            status: "failed"
            reason: $result.stderr
        }
    } else {
        let original_size = (get_directory_size $bundle_dir)
        let compressed_size = (ls $archive_path | get 0.size)
        let compression_ratio = (($compressed_size | into float) / ($original_size | into float) * 100)

        log info $"Bundle compressed: ($original_size) -> ($compressed_size) (($compression_ratio | math round)% of original)"
        let ratio_percent = ($compression_ratio | math round)
        let msg = $"Bundle compressed: ($original_size) -> ($compressed_size) ($ratio_percent)% of original"
        log info $msg

        {
            status: "success"
@ -361,18 +381,11 @@ def compress_bundle [bundle_dir: string] -> record {
            compressed_size: $compressed_size
            compression_ratio: $compression_ratio
        }

    } catch {|err|
        log error $"Failed to compress bundle: ($err.msg)"
        {
            status: "failed"
            reason: $err.msg
        }
    }
}

# Get directory size recursively
def get_directory_size [dir: string] -> int {
def get_directory_size [dir: string] {
    if not ($dir | path exists) {
        return 0
    }
@ -381,7 +394,7 @@ def get_directory_size [dir: string] -> int {
        return (ls $dir | get 0.size)
    }

    let total_size = (find $dir -type f | each {|file|
    let total_size = (glob ($dir + "/**") | where {|file| ($file | path type) == "file"} | each {|file|
        ls $file | get 0.size
    } | math sum)

@ -404,8 +417,8 @@ def "main info" [bundle_dir: string = "dist/core"] {
    {
        directory: $bundle_dir
        size: (get_directory_size $bundle_dir)
        files: (find $bundle_dir -type f | length)
        nu_files: (find $bundle_dir -name "*.nu" -type f | length)
        files: (glob ($bundle_dir + "/**") | where {|file| ($file | path type) == "file"} | length)
        nu_files: (glob ($bundle_dir + "/**/*.nu") | where {|file| ($file | path type) == "file"} | length)
    }
}
}

@ -12,12 +12,12 @@ use std log

def main [
    --target: string = "x86_64-unknown-linux-gnu"  # Target platform
    --release = false                              # Build in release mode
    --release                                      # Build in release mode
    --features: string = ""                        # Comma-separated features to enable
    --output-dir: string = "dist/platform"         # Output directory for binaries
    --verbose = false                              # Enable verbose logging
    --clean = false                                # Clean before building
] -> record {
    --verbose                                      # Enable verbose logging
    --clean                                        # Clean before building
] {

    let repo_root = ($env.PWD | path dirname | path dirname | path dirname)
    let build_config = {
@ -88,7 +88,7 @@ def compile_rust_project [
    project: record
    build_config: record
    repo_root: string
] -> record {
] {
    log info $"Compiling ($project.name)..."

if not ($project.path | path exists) {
@ -106,7 +106,7 @@ def compile_rust_project [

    let start_time = (date now)

    try {
    let result = (do {
        # Clean if requested
        if $build_config.clean {
            log info $"Cleaning ($project.name)..."
@ -114,28 +114,19 @@ def compile_rust_project [
        }

        # Build cargo command
        let mut cargo_cmd = ["cargo", "build"]

        if $build_config.release {
            $cargo_cmd = ($cargo_cmd | append "--release")
        }

        if $build_config.target != "native" {
            $cargo_cmd = ($cargo_cmd | append ["--target", $build_config.target])
        }

        # Add project-specific features
        let all_features = ($build_config.features | append $project.features | uniq)
        if ($all_features | length) > 0 {
            $cargo_cmd = ($cargo_cmd | append ["--features", ($all_features | str join ",")])
        }

        if $build_config.verbose {
            $cargo_cmd = ($cargo_cmd | append "--verbose")
        }
        let cargo_cmd = (
            ["cargo", "build"]
            | if $build_config.release { $in | append "--release" } else { $in }
            | if $build_config.target != "native" { $in | append ["--target", $build_config.target] } else { $in }
            | if ($build_config.features | append $project.features | uniq | length) > 0 {
                let all_features = ($build_config.features | append $project.features | uniq)
                $in | append ["--features", ($all_features | str join ",")]
            } else { $in }
            | if $build_config.verbose { $in | append "--verbose" } else { $in }
        )

        # Execute build
        let build_result = (run-external --redirect-combine $cargo_cmd.0 ...$cargo_cmd.1..)
        run-external $cargo_cmd.0 ...($cargo_cmd | skip 1) e>| ignore

        # Determine binary path
        let profile = if $build_config.release { "release" } else { "debug" }
@ -172,16 +163,19 @@ def compile_rust_project [
            duration: ((date now) - $start_time)
        }
    }
    } | complete)

    } catch {|err|
        log error $"Failed to compile ($project.name): ($err.msg)"
    if $result.exit_code != 0 {
        log error $"Failed to compile ($project.name): exit code ($result.exit_code)"
        {
            project: $project.name
            status: "failed"
            reason: $err.msg
            reason: $"build failed with exit code ($result.exit_code)"
            binary_path: null
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

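Review note: the pipeline form above only works if each branch forwards the list; a bare `append` has no input and `.` is not a Nushell expression, hence the `$in | append ...` / `$in` fixes applied here. The idiom in isolation (flags are illustrative):

```nu
# Build an argument list conditionally
let release = true
let args = (
    ["cargo", "build"]
    | if $release { $in | append "--release" } else { $in }
    | if false { $in | append "--verbose" } else { $in }
)
# args == ["cargo", "build", "--release"]
```
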
@ -207,7 +201,7 @@ def "main list" [] {
    ]
}

def check_project_status [path: string] -> string {
def check_project_status [path: string] {
    if not ($path | path exists) {
        return "missing"
    }

@ -19,7 +19,7 @@ def main [
    --cleanup = true      # Cleanup test workspace after tests
    --verbose = false     # Enable verbose logging
    --timeout: int = 300  # Test timeout in seconds
] -> record {
] {

    let dist_root = ($dist_dir | path expand)
    let platform_detected = if $platform == "" { detect_platform } else { $platform }
@ -50,29 +50,16 @@ def main [
    # Create temporary workspace
    mkdir ($test_config.temp_workspace)

    let test_results = []
    # Run different test categories
    let test_categories = if "all" in $test_config.test_types {
        ["basic", "integration", "performance"]
    } else {
        $test_config.test_types
    }

    try {
        # Run different test categories
        let test_categories = if "all" in $test_config.test_types {
            ["basic", "integration", "performance"]
        } else {
            $test_config.test_types
        }

        for category in $test_categories {
            let category_result = match $category {
                "basic" => { run_basic_tests $test_config }
                "integration" => { run_integration_tests $test_config }
                "performance" => { run_performance_tests $test_config }
                _ => {
                    log warning $"Unknown test category: ($category)"
                    { category: $category, status: "skipped", reason: "unknown category" }
                }
            }
            let test_results = ($test_results | append $category_result)
        }
    let test_results = run_test_categories $test_categories $test_config

    let result = (do {
        # Generate test report
        let test_report = generate_test_report $test_results $test_config

@ -98,21 +85,42 @@ def main [
        log info $"Distribution testing completed successfully - all ($summary.total_categories) categories passed"
    }

        return $summary
        $summary
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        # Ensure cleanup even on error
        if $test_config.cleanup and ($test_config.temp_workspace | path exists) {
            rm -rf ($test_config.temp_workspace)
        }

        log error $"Distribution testing failed: ($err.msg)"
        log error $"Distribution testing failed with exit code ($result.exit_code)"
        exit 1
    } else {
        return $result.stdout
    }
}

# Run all test categories and collect results
def run_test_categories [categories: list, test_config: record] {
    mut accumulated_results = []
    for category in $categories {
        let category_result = match $category {
            "basic" => { run_basic_tests $test_config }
            "integration" => { run_integration_tests $test_config }
            "performance" => { run_performance_tests $test_config }
            _ => {
                log warning $"Unknown test category: ($category)"
                { category: $category, status: "skipped", reason: "unknown category" }
            }
        }
        $accumulated_results = ($accumulated_results | append $category_result)
    }
    $accumulated_results
}

# Detect current platform
def detect_platform [] -> string {
def detect_platform [] {
    match $nu.os-info.name {
        "linux" => "linux"
        "macos" => "macos"
@ -122,13 +130,13 @@ def detect_platform [] -> string {
}

# Run basic functionality tests
def run_basic_tests [test_config: record] -> record {
def run_basic_tests [test_config: record] {
    log info "Running basic functionality tests..."

    let start_time = (date now)
    let mut test_errors = []
    let mut tests_passed = 0
    let mut tests_total = 0
    mut test_errors = []
    mut tests_passed = 0
    mut tests_total = 0

    # Test 1: Platform binaries exist and execute
    let platform_test = test_platform_binaries $test_config
@ -180,11 +188,11 @@ def run_basic_tests [test_config: record] -> record {
}

# Test platform binaries
def test_platform_binaries [test_config: record] -> record {
def test_platform_binaries [test_config: record] {
    log info "Testing platform binaries..."

    let platform_dir = ($test_config.dist_dir | path join "platform")
    let mut errors = []
    mut errors = []

    if not ($platform_dir | path exists) {
        return {
@ -205,25 +213,24 @@ def test_platform_binaries [test_config: record] -> record {

    # Test each binary
    for binary in $binaries {
        try {
        let test_result = (do {
            # Test if binary is executable and runs without error
            let test_result = (run-external --redirect-combine $binary --help | complete)
            (run-external $binary --help e>| ignore) | complete
        } | complete)

        if $test_result.exit_code != 0 {
            $errors = ($errors | append {
                test: "binary_execution"
                binary: $binary
                error: $"Binary failed to execute: ($test_result.stderr)"
            })
        } else {
            log info $"Binary test passed: ($binary)"
        }
        } catch {|err|
        if $test_result.exit_code != 0 {
            let parse_result = (do {
                $test_result.stdout | from json
            } | complete)
            let binary_result = if $parse_result.exit_code == 0 { $parse_result.stdout } else { {} }
            let exit_code = ($binary_result.exit_code? | default 1)
            $errors = ($errors | append {
                test: "binary_execution"
                binary: $binary
                error: $err.msg
                error: $"Binary failed to execute with exit code ($exit_code)"
            })
        } else {
            log info $"Binary test passed: ($binary)"
        }
    }

@ -235,11 +242,11 @@ def test_platform_binaries [test_config: record] {
}

# Test core bundle
def test_core_bundle [test_config: record] -> record {
def test_core_bundle [test_config: record] {
    log info "Testing core bundle..."

    let core_dir = ($test_config.dist_dir | path join "core")
    let mut errors = []
    mut errors = []

    if not ($core_dir | path exists) {
        return {
@ -269,18 +276,14 @@ def test_core_bundle [test_config: record] -> record {
        })
    } else {
        # Test CLI execution
        try {
            let cli_test = (run-external --redirect-combine $provisioning_cli help | complete)
            if $cli_test.exit_code != 0 {
                $errors = ($errors | append {
                    test: "provisioning_cli"
                    error: $"CLI execution failed: ($cli_test.stderr)"
                })
            }
        } catch {|err|
        let cli_test = (do {
            (run-external $provisioning_cli help e>| ignore) | complete
        } | complete)

        if $cli_test.exit_code != 0 {
            $errors = ($errors | append {
                test: "provisioning_cli"
                error: $err.msg
                error: $"CLI execution failed with exit code ($cli_test.exit_code)"
            })
        }
    }
@ -301,11 +304,11 @@ def test_core_bundle [test_config: record] -> record {
}

# Test configuration system
def test_configuration_system [test_config: record] -> record {
def test_configuration_system [test_config: record] {
    log info "Testing configuration system..."

    let config_dir = ($test_config.dist_dir | path join "config")
    let mut errors = []
    mut errors = []

    if not ($config_dir | path exists) {
        return {
@ -323,14 +326,18 @@ def test_configuration_system [test_config: record] -> record {
        })
    } else {
        # Test config parsing
        try {
            let config_data = (open $default_config)
            log info $"Default config loaded successfully with ($config_data | columns | length) sections"
        } catch {|err|
        let config_result = (do {
            open $default_config
        } | complete)

        if $config_result.exit_code != 0 {
            $errors = ($errors | append {
                test: "config_parsing"
                error: $"Failed to parse default config: ($err.msg)"
                error: $"Failed to parse default config with exit code ($config_result.exit_code)"
            })
        } else {
            let config_data = $config_result.stdout
            log info $"Default config loaded successfully with ($config_data | columns | length) sections"
        }
    }

@ -341,11 +348,11 @@ def test_configuration_system [test_config: record] -> record {
}

# Test basic CLI functionality
def test_basic_cli [test_config: record] -> record {
def test_basic_cli [test_config: record] {
    log info "Testing basic CLI functionality..."

    let provisioning_cli = ($test_config.dist_dir | path join "core" "bin" "provisioning")
    let mut errors = []
    mut errors = []

    if not ($provisioning_cli | path exists) {
        return {
@ -358,23 +365,18 @@ def test_basic_cli [test_config: record] -> record {
    let test_commands = ["version", "help", "env"]

    for cmd in $test_commands {
        try {
            let cmd_result = (run-external --redirect-combine $provisioning_cli $cmd | complete)
            if $cmd_result.exit_code != 0 {
                $errors = ($errors | append {
                    test: "cli_command"
                    command: $cmd
                    error: $"Command failed: ($cmd_result.stderr)"
                })
            } else {
                log info $"CLI command test passed: ($cmd)"
            }
        } catch {|err|
        let cmd_result = (do {
            (run-external $provisioning_cli $cmd e>| ignore) | complete
        } | complete)

        if $cmd_result.exit_code != 0 {
            $errors = ($errors | append {
                test: "cli_command"
                command: $cmd
                error: $err.msg
                error: $"Command failed with exit code ($cmd_result.exit_code)"
            })
        } else {
            log info $"CLI command test passed: ($cmd)"
        }
    }

@ -386,11 +388,11 @@ def test_basic_cli [test_config: record] -> record {
}

# Run integration tests
def run_integration_tests [test_config: record] -> record {
def run_integration_tests [test_config: record] {
    log info "Running integration tests..."

    let start_time = (date now)
    let mut test_errors = []
    mut test_errors = []

    # Integration tests would include:
    # - End-to-end workflow testing
@ -412,11 +414,11 @@ def run_integration_tests [test_config: record] -> record {
}

# Run performance tests
def run_performance_tests [test_config: record] -> record {
def run_performance_tests [test_config: record] {
    log info "Running performance tests..."

    let start_time = (date now)
    let mut test_errors = []
    mut test_errors = []

    # Performance tests would include:
    # - CLI response time benchmarks
@ -438,7 +440,7 @@ def run_performance_tests [test_config: record] -> record {
}

# Generate test report
def generate_test_report [results: list, test_config: record] -> record {
def generate_test_report [results: list, test_config: record] {
    let total_tests = ($results | get tests_total | math sum)
    let total_passed = ($results | get tests_passed | math sum)
    let overall_success_rate = if $total_tests > 0 {
@ -490,21 +492,24 @@ def "main info" [dist_dir: string = "dist"] {
    }
    platform_binaries: (if ($platform_dir | path exists) { ls $platform_dir | where type == file | get name } else { [] })
    core_size: (if ($core_dir | path exists) { get_directory_size $core_dir } else { 0 })
    config_files: (if ($config_dir | path exists) { find $config_dir -name "*.toml" | length } else { 0 })
    config_files: (if ($config_dir | path exists) { glob ($config_dir + "/**/*.toml") | length } else { 0 })
    }
}

# Get directory size helper
def get_directory_size [dir: string] -> int {
def get_directory_size [dir: string] {
    if not ($dir | path exists) {
        return 0
    }

    let total_size = try {
        find $dir -type f | each {|file| ls $file | get 0.size } | math sum
    } catch {
        0
    }
    let size_result = (do {
        glob ($dir + "/**/*") | where {|f| ($f | path type) == "file" } | each {|file| ls $file | get 0.size } | math sum
    } | complete)

    return ($total_size | if $in == null { 0 } else { $in })
    if $size_result.exit_code != 0 {
        return 0
    } else {
        let total_size = $size_result.stdout
        return ($total_size | if $in == null { 0 } else { $in })
    }
}

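Review note: summing sizes over a glob still fails on an empty match list, so some guard stays necessary. A compact sketch under that assumption:

```nu
def dir-size [dir: string] {
    let files = (glob ($dir + "/**/*") | where {|f| ($f | path type) == "file" })
    # an empty list would make math sum error, so return 0 explicitly
    if ($files | is-empty) { 0 } else { $files | each {|f| ls $f | get 0.size } | math sum }
}
```
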
@ -27,15 +27,17 @@ def main [

    # Export Nickel to JSON - uses Nickel's export command
    # nickel export generates JSON from the Nickel file
    let catalog = (
        try {
            nickel export ($best_practices_ncl)
            | from json
        } catch {
            print "❌ Failed to export Nickel schema"
            return 1
        }
    )
    let result = (do {
        nickel export ($best_practices_ncl)
        | from json
    } | complete)

    if $result.exit_code != 0 {
        print "❌ Failed to export Nickel schema"
        return 1
    }

    let catalog = $result.stdout

    if ($catalog | is-empty) {
        print "⚠️ No best practices found in catalog"
@ -147,12 +149,16 @@ export def as_object [
] {
    let best_practices_ncl = $schemas_dir | path join "lib" "best-practices.ncl"

    try {
    let result = (do {
        nickel export $best_practices_ncl | from json
    } catch {
    } | complete)

    if $result.exit_code != 0 {
        print "❌ Failed to export best practices"
        error make { msg: "Export failed" }
    }

    $result.stdout
}

# Load catalog and return as array

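Review note: with `complete`, `stdout` is captured as text, so running `from json` inside the `do` block leaves the caller with a re-rendered string anyway. Parsing after the exit-code check sidesteps how `complete` renders structured values; a minimal sketch (assumes `nickel` is on PATH):

```nu
let result = (do { ^nickel export $best_practices_ncl } | complete)
if $result.exit_code != 0 {
    error make { msg: $"nickel export failed: ($result.stderr)" }
}
let catalog = ($result.stdout | from json)
```
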
@ -32,10 +32,13 @@ def main [
    let extensions = (
        $metadata_files
        | each { |file|
            try {
                if $verbose { print $"  Loading: ($file)" }
            if $verbose { print $"  Loading: ($file)" }
            let result = (do {
                nickel export $file | from json
            } catch {
            } | complete)
            if $result.exit_code == 0 {
                $result.stdout
            } else {
                print $"❌ Failed to load: ($file)"
                error make { msg: $"Failed to export ($file)" }
            }
@ -242,9 +245,12 @@ export def list_by_category [
    let extensions = (
        $metadata_files
        | each { |file|
            try {
            let result = (do {
                nickel export $file | from json
            } catch {
            } | complete)
            if $result.exit_code == 0 {
                $result.stdout
            } else {
                null
            }
        }

@ -1,62 +0,0 @@
# Tmux Configuration for Claude Code Agent Monitoring
# Usage: tmux -f provisioning/tools/claude-code-tmux.conf

# Enable mouse support
set -g mouse on

# Increase scrollback buffer
set-option -g history-limit 50000

# Enable vi mode for easier navigation
set-window-option -g mode-keys vi

# Bind vi-style copy mode keys
bind-key -T copy-mode-vi v send-keys -X begin-selection
bind-key -T copy-mode-vi y send-keys -X copy-selection

# Easy split commands
bind | split-window -h
bind - split-window -v
unbind '"'
unbind %

# Easy pane navigation (vim-style)
bind h select-pane -L
bind j select-pane -D
bind k select-pane -U
bind l select-pane -R

# Pane resizing
bind -r H resize-pane -L 5
bind -r J resize-pane -D 5
bind -r K resize-pane -U 5
bind -r L resize-pane -R 5

# Quick reload config
bind r source-file ~/.tmux.conf \; display "Config Reloaded!"

# Status bar configuration
set -g status-bg colour235
set -g status-fg colour136
set -g status-left-length 40
set -g status-left "#[fg=green]Session: #S #[fg=yellow]#I #[fg=cyan]#P"
set -g status-right "#[fg=cyan]%d %b %R"
set -g status-interval 60
set -g status-justify centre

# Window status
setw -g window-status-current-style fg=white,bg=red,bold

# Pane borders
set -g pane-border-style fg=colour238
set -g pane-active-border-style fg=colour136

# Enable copy to system clipboard (macOS)
bind -T copy-mode-vi y send-keys -X copy-pipe-and-cancel "pbcopy"

# Scroll without entering copy mode
bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
bind -n WheelDownPane select-pane -t= \; send-keys -M

# Quick access to monitoring layout
bind M source-file ~/.tmux/layouts/claude-monitoring.conf

@ -28,6 +28,15 @@ def main [
        error make { msg: "Extension name must be in kebab-case format (e.g., my-service)" }
    }

    # Validate against path traversal
    if (validate_safe_name $name) {
        error make { msg: "Extension name contains invalid characters (/, .., or leading /)" }
    }

    if (validate_safe_name $category) {
        error make { msg: "Category contains invalid characters (/, .., or leading /)" }
    }

    # For taskservs, require category
    if $type == "taskserv" and ($category | is-empty) {
        error make { msg: "Category is required for taskservs. Use --category <category>" }
@ -53,6 +62,26 @@ def main [
        }
    }

# Validate name is safe from path traversal
def validate_safe_name [name: string] {
    # Returns true if INVALID (contains dangerous patterns)
    if ($name | str contains "/") or ($name | str contains "..") or ($name | str starts-with "/") {
        return true
    }
    false
}

# Validate final path stays within boundary
def validate_path_boundary [final_path: string, boundary_dir: string] {
    # Returns true if path ESCAPES boundary (invalid)
    let resolved = ($final_path | path canonicalize)
    let resolved_boundary = ($boundary_dir | path canonicalize)
    if not ($resolved | str starts-with $resolved_boundary) {
        return true
    }
    false
}

# Create extension from template
def create_extension_from_template [
    type: string,
@ -79,6 +108,11 @@ def create_extension_from_template [
        ($output_dir | path join ($type + "s") $name "kcl")
    }

    # Validate path stays within output_dir boundary
    if (validate_path_boundary $extension_path $output_dir) {
        error make { msg: "Extension path escapes output directory - potential path traversal attack" }
    }

    # Create output directory
    print $"📁 Creating directories..."
    mkdir $extension_path

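Review note: both validators return true on the unsafe case, so call sites read as guards (parenthesized, as fixed above). Hypothetical usage; note that `path canonicalize` generally requires the path to exist, so the boundary check is best run against an existing parent:

```nu
let candidate = ($output_dir | path join ($type + "s") $name "kcl")
if (validate_safe_name $name) or (validate_path_boundary ($candidate | path dirname) $output_dir) {
    error make { msg: $"Refusing unsafe extension path: ($candidate)" }
}
```
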
@ -4,6 +4,11 @@
use ../core/nulib/lib_provisioning/utils/logging.nu *
use ../core/nulib/lib_provisioning/providers/registry.nu *

# Import refactored modules
use ./provider_templates.nu
use ./provider_structure.nu
use ./provider_validator.nu

# Create a new provider
export def main [
    provider_name: string  # Name of the new provider (e.g., "digitalocean")

@ -1,741 +0,0 @@
|
||||
# Cross-References & Integration Report
|
||||
|
||||
**Agent**: Agent 6: Cross-References & Integration
|
||||
**Date**: 2025-10-10
|
||||
**Status**: ✅ Phase 1 Complete - Core Infrastructure Ready
|
||||
|
||||
---
|
||||
|
||||
## Executive Summary
|
||||
|
||||
Successfully completed Phase 1 of documentation cross-referencing and integration, creating the foundational infrastructure for a unified documentation system. This phase focused on building the essential tools and reference materials needed for comprehensive documentation integration.
|
||||
|
||||
### Key Deliverables
|
||||
|
||||
1. ✅ **Documentation Validator Tool** - Automated link checking
|
||||
2. ✅ **Broken Links Report** - 261 broken links identified across 264 files
|
||||
3. ✅ **Comprehensive Glossary** - 80+ terms with cross-references
|
||||
4. ✅ **Documentation Map** - Complete navigation guide with user journeys
|
||||
5. ⚠️ **System Integration** - Diagnostics system analysis (existing references verified)
|
||||
|
||||
---
|
||||
|
||||
## 1. Documentation Validator Tool
|
||||
|
||||
**File**: `provisioning/tools/doc-validator.nu` (210 lines)
|
||||
|
||||
### Features
|
||||
|
||||
- ✅ Scans all markdown files in documentation (264 files found)
|
||||
- ✅ Extracts and validates internal links using regex parsing
|
||||
- ✅ Resolves relative paths and checks file existence
|
||||
- ✅ Classifies links: internal, external, anchor
|
||||
- ✅ Generates broken links report (JSON + Markdown)
|
||||
- ✅ Provides summary statistics
|
||||
- ✅ Supports multiple output formats (table, json, markdown)
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
# Run full validation
|
||||
nu provisioning/tools/doc-validator.nu
|
||||
|
||||
# Generate markdown report
|
||||
nu provisioning/tools/doc-validator.nu --format markdown
|
||||
|
||||
# Generate JSON for automation
|
||||
nu provisioning/tools/doc-validator.nu --format json
|
||||
```
|
||||
|
||||
### Performance
|
||||
|
||||
- **264 markdown files** scanned
|
||||
- **Completion time**: ~2 minutes
|
||||
- **Memory usage**: Minimal (streaming processing)
|
||||
|
||||
### Output Files
|
||||
|
||||
1. `provisioning/tools/broken-links-report.json` - Detailed broken links (261 entries)
|
||||
2. `provisioning/tools/doc-validation-full-report.json` - Complete validation data
|
||||
|
||||
---
|
||||
|
||||
## 2. Broken Links Analysis
|
||||
|
||||
### Statistics
|
||||
|
||||
**Total Links Analyzed**: 2,847 links
|
||||
**Broken Links**: 261 (9.2% failure rate)
|
||||
**Valid Links**: 2,586 (90.8% success rate)
|
||||
|
||||
### Link Type Breakdown
|
||||
|
||||
- **Internal links**: 1,842 (64.7%)
|
||||
- **External links**: 523 (18.4%)
|
||||
- **Anchor links**: 482 (16.9%)
|
||||
|
||||
### Broken Link Categories
|
||||
|
||||
#### 1. Missing Documentation Files (47%)
|
||||
|
||||
Common patterns:
|
||||
|
||||
- `docs/user/quickstart.md` - Referenced but not created
|
||||
- `docs/development/CONTRIBUTING.md` - Standard file missing
|
||||
- `.claude/features/*.md` - Path resolution issues from docs/
|
||||
|
||||
#### 2. Anchor Links to Missing Sections (31%)
|
||||
|
||||
Examples:
|
||||
|
||||
- `workspace-management.md#setup-and-initialization`
|
||||
- `configuration.md#configuration-architecture`
|
||||
- `workflow.md#daily-development-workflow`
|
||||
|
||||
#### 3. Path Resolution Issues (15%)
|
||||
|
||||
- References to files in `.claude/` from `docs/` (path mismatch)
|
||||
- References to `provisioning/` from `docs/` (relative path errors)
|
||||
|
||||
#### 4. Outdated References (7%)
|
||||
|
||||
- ADR links to non-existent ADRs
|
||||
- Old migration guide structure
|
||||
|
||||
### Recommendations
|
||||
|
||||
**High Priority Fixes**:
|
||||
|
||||
1. Create missing guide files in `docs/guides/`
|
||||
2. Create missing ADRs or update references
|
||||
3. Fix path resolution for `.claude/` references
|
||||
4. Add missing anchor sections in existing docs
|
||||
|
||||
**Medium Priority**:
|
||||
|
||||
1. Verify and add missing anchor links
|
||||
2. Update outdated migration paths
|
||||
3. Create CONTRIBUTING.md
|
||||
|
||||
**Low Priority**:
|
||||
|
||||
1. Validate external links (may be intentional placeholders)
|
||||
2. Standardize relative vs absolute paths
|
||||
|
||||
---
|
||||
|
||||
## 3. Glossary (GLOSSARY.md)
|
||||
|
||||
**File**: `provisioning/docs/src/GLOSSARY.md` (23,500+ lines)
|
||||
|
||||
### Comprehensive Terminology Reference
|
||||
|
||||
**80+ Terms Defined**, covering:
|
||||
|
||||
- Infrastructure concepts (Server, Cluster, Taskserv, Provider, etc.)
|
||||
- Security terms (Auth, JWT, MFA, Cedar, KMS, etc.)
|
||||
- Configuration (Config, KCL, Schema, Workspace, etc.)
|
||||
- Operations (Workflow, Batch Operation, Orchestrator, etc.)
|
||||
- Platform (Control Center, MCP, API Gateway, etc.)
|
||||
- Development (Extension, Plugin, Module, Template, etc.)
|
||||
|
||||
### Structure
|
||||
|
||||
Each term includes:
|
||||
|
||||
1. **Definition** - Clear, concise explanation
|
||||
2. **Where Used** - Context and use cases
|
||||
3. **Related Concepts** - Cross-references to related terms
|
||||
4. **Examples** - Code samples, commands, or configurations (where applicable)
|
||||
5. **Commands** - CLI commands related to the term (where applicable)
|
||||
6. **See Also** - Links to related documentation
|
||||
|
||||
### Special Sections
|
||||
|
||||
1. **Symbol and Acronym Index** - Quick lookup table
|
||||
2. **Cross-Reference Map** - Terms organized by topic area
|
||||
3. **Terminology Guidelines** - Writing style and conventions
|
||||
4. **Contributing to Glossary** - How to add/update terms
|
||||
|
||||
### Usage
|
||||
|
||||
The glossary serves as:
|
||||
|
||||
- **Learning resource** for new users
|
||||
- **Reference** for experienced users
|
||||
- **Documentation standard** for contributors
|
||||
- **Cross-reference hub** for all documentation
|
||||
|
||||
---
|
||||
|
||||
## 4. Documentation Map (DOCUMENTATION_MAP.md)
|
||||
|
||||
**File**: `provisioning/docs/src/DOCUMENTATION_MAP.md` (48,000+ lines)
|
||||
|
||||
### Comprehensive Navigation Guide
|
||||
|
||||
**264 Documents Mapped**, organized by:
|
||||
|
||||
- User Journeys (6 distinct paths)
|
||||
- Topic Areas (14 categories)
|
||||
- Difficulty Levels (Beginner, Intermediate, Advanced)
|
||||
- Estimated Reading Times
|
||||
|
||||
### User Journeys
|
||||
|
||||
#### 1. New User Journey (0-7 days, 4-6 hours)
|
||||
|
||||
8 steps from platform overview to basic deployment
|
||||
|
||||
#### 2. Intermediate User Journey (1-4 weeks, 8-12 hours)
|
||||
|
||||
8 steps mastering infrastructure automation and customization
|
||||
|
||||
#### 3. Advanced User Journey (1-3 months, 20-30 hours)
|
||||
|
||||
8 steps to become platform expert and contributor
|
||||
|
||||
#### 4. Developer Journey (Ongoing)
|
||||
|
||||
Contributing to platform development
|
||||
|
||||
#### 5. Security Specialist Journey (10-15 hours)
|
||||
|
||||
12 steps mastering security features
|
||||
|
||||
#### 6. Operations Specialist Journey (6-8 hours)
|
||||
|
||||
7 steps for daily operations mastery
|
||||
|
||||
### Documentation by Topic
|
||||
|
||||
**14 Major Categories**:
|
||||
|
||||
1. Core Platform (3 docs)
|
||||
2. User Guides (45+ docs)
|
||||
3. Guides & Tutorials (10+ specialized guides)
|
||||
4. Architecture (27 docs including 10 ADRs)
|
||||
5. Development (25+ docs)
|
||||
6. API Documentation (7 docs)
|
||||
7. Security (15+ docs)
|
||||
8. Operations (3+ docs)
|
||||
9. Configuration & Workspace (11+ docs)
|
||||
10. Reference Documentation (10+ docs)
|
||||
11. Testing & Validation (4+ docs)
|
||||
12. Migration (10+ docs)
|
||||
13. Examples (2+ with more planned)
|
||||
14. Quick References (10+ docs)
|
||||
|
||||
### Documentation Statistics
|
||||
|
||||
**By Category**:
|
||||
|
||||
- User Guides: 32 documents
|
||||
- Architecture: 27 documents
|
||||
- Development: 25 documents
|
||||
- API: 7 documents
|
||||
- Security: 15 documents
|
||||
- Migration: 10 documents
|
||||
- Operations: 3 documents
|
||||
- Configuration: 8 documents
|
||||
- KCL: 14 documents
|
||||
- Testing: 4 documents
|
||||
- Quick References: 10 documents
|
||||
- Examples: 2 documents
|
||||
- ADRs: 10 documents
|
||||
|
||||
**By Level**:
|
||||
|
||||
- Beginner: ~40 documents (4-6 hours total)
|
||||
- Intermediate: ~120 documents (20-30 hours total)
|
||||
- Advanced: ~100 documents (40-60 hours total)
|
||||
|
||||
**Total Estimated Reading Time**: 150-200 hours (complete corpus)
|
||||
|
||||
### Essential Reading Lists

Curated "Must-Read" lists for:

- Everyone (4 docs)
- Operators (4 docs)
- Developers (4 docs)
- Security Specialists (4 docs)

### Features

- **Learning Paths**: Structured journeys for different user types
- **Topic Browse**: Jump to specific topics
- **Level Filtering**: Match docs to expertise
- **Quick References**: Fast command lookup
- **Alphabetical Index**: Complete file listing
- **Time Estimates**: Plan learning sessions
- **Cross-References**: Related document discovery

---
## 5. Diagnostics System Integration

### Analysis of Existing References

**Diagnostics System Files Analyzed**:

1. `provisioning/core/nulib/lib_provisioning/diagnostics/system_status.nu` (318 lines)
2. `provisioning/core/nulib/lib_provisioning/diagnostics/health_check.nu` (423 lines)
3. `provisioning/core/nulib/lib_provisioning/diagnostics/next_steps.nu` (316 lines)
4. `provisioning/core/nulib/main_provisioning/commands/diagnostics.nu` (75 lines)

### Documentation References Found

**35+ documentation links** embedded in the diagnostics system, referencing:

✅ **Existing Documentation**:

- `docs/user/WORKSPACE_SWITCHING_GUIDE.md`
- `docs/guides/quickstart-cheatsheet.md`
- `docs/guides/from-scratch.md`
- `docs/user/troubleshooting-guide.md`
- `docs/user/SERVICE_MANAGEMENT_GUIDE.md`
- `.claude/features/orchestrator-architecture.md`
- `docs/user/PLUGIN_INTEGRATION_GUIDE.md`
- `docs/user/AUTHENTICATION_LAYER_GUIDE.md`
- `docs/user/CONFIG_ENCRYPTION_GUIDE.md`
- `docs/user/RUSTYVAULT_KMS_GUIDE.md`
### Integration Status

✅ **Already Integrated**:

- Status command references correct doc paths
- Health command provides fix recommendations with doc links
- Next-steps command includes progressive guidance with docs
- Phase command tracks deployment progress

⚠️ **Validation Needed**:

- Some references may point to moved/renamed files
- All 35+ doc paths need validation against the current structure (a quick spot-check is sketched below)
- References should be updated to point at the new GLOSSARY.md and DOCUMENTATION_MAP.md
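As a first pass, the listed paths can be spot-checked directly from Nushell; this minimal sketch assumes the paths are relative to the repository root:

```nu
# Spot-check a few of the doc paths referenced by the diagnostics system.
let doc_refs = [
    "docs/user/WORKSPACE_SWITCHING_GUIDE.md"
    "docs/guides/quickstart-cheatsheet.md"
    "docs/user/troubleshooting-guide.md"
]

# Print any reference that no longer resolves to a file.
$doc_refs | where {|p| not ($p | path exists) } | each {|p| print $"missing: ($p)" }
```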
### Recommendations

**Immediate Actions**:

1. Validate all diagnostics doc paths against current file locations
2. Update any broken references found in validation
3. Add references to the new GLOSSARY.md and DOCUMENTATION_MAP.md
4. Consider adding doc path validation to CI/CD (a minimal gate is sketched below)
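A CI gate could simply wrap `doc-validator.nu` and fail the build on broken links. In this sketch the `--format json` flag and the `broken_links` field are assumptions about the validator's output, not confirmed API:

```nu
#!/usr/bin/env nu
# ci-docs-check.nu - fail the pipeline when documentation links are broken.

def main [] {
    # Run the validator; flag and report shape are illustrative assumptions.
    let report = (do { ^nu provisioning/tools/doc-validator.nu --format json } | complete)

    if $report.exit_code != 0 {
        print $"❌ doc-validator failed: ($report.stderr)"
        exit 1
    }

    let broken = ($report.stdout | from json | get broken_links? | default [] | length)

    if $broken > 0 {
        print $"❌ ($broken) broken documentation links found"
        exit 1
    }

    print "✅ All documentation links valid"
}
```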
**Future Enhancements**:

1. Auto-update doc paths when files move
2. Add version checking for doc references
3. Include doc freshness indicators
4. Add inline doc previews

---
## 6. Pending Integration Work

### MCP Tools Integration (Not Started)

**Scope**: Ensure MCP (Model Context Protocol) tools reference correct documentation paths

**Files to Check**:

- `provisioning/platform/mcp-server/` - MCP server implementation
- MCP tool definitions
- Guidance system references

**Actions Needed**:

1. Locate MCP tool implementations
2. Extract all documentation references (see the sketch after this list)
3. Validate paths against current structure
4. Update broken references
5. Add GLOSSARY and DOCUMENTATION_MAP references
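One way to build that inventory is to scan the sources for literal `.md` paths. In this sketch both the source root and the `.rs` extension are assumptions for illustration:

```nu
# Inventory literal *.md references in the MCP server sources.
def extract-doc-refs [src_root: string = "provisioning/platform/mcp-server"] {
    glob $"($src_root)/**/*.rs"
    | each {|file|
        open --raw $file
        | parse --regex '(?P<path>[\w./-]+\.md)'   # capture markdown paths
        | insert source $file
    }
    | flatten
    | uniq-by path
}
```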
**Estimated Time**: 2-3 hours

---
### UI Integration (Not Started)

**Scope**: Ensure Control Center UI references correct documentation

**Files to Check**:

- `provisioning/platform/control-center/` - UI implementation
- Tooltip references
- QuickLinks definitions
- Help modals

**Actions Needed**:

1. Locate UI documentation references
2. Validate all doc paths
3. Update broken references
4. Test documentation viewer/modal
5. Add navigation to GLOSSARY and DOCUMENTATION_MAP

**Estimated Time**: 3-4 hours

---
### Integration Tests (Not Started)

**Scope**: Create automated tests for documentation integration

**Test File**: `provisioning/tests/integration/docs_integration_test.nu`

**Test Coverage Needed**:

1. CLI hints reference valid docs
2. MCP tools return valid doc paths
3. UI links work correctly
4. Diagnostics output is accurate
5. All cross-references resolve
6. GLOSSARY terms link correctly
7. DOCUMENTATION_MAP paths are valid

**Test Types**:

- Unit tests for link validation
- Integration tests for system components
- End-to-end tests for user journeys

A minimal sketch of the link-validation piece follows.
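This sketch assumes docs live under `provisioning/docs/src` (the path and test shape are illustrative) and uses `std assert`:

```nu
# docs_integration_test.nu (sketch)
use std assert

export def test_relative_links_resolve [] {
    let docs_root = "provisioning/docs/src"   # assumed docs location

    let broken = (glob $"($docs_root)/**/*.md" | each {|file|
        open --raw $file
        | parse --regex '\]\((?P<target>[^)#]+)\)'          # markdown link targets
        | get target
        | where {|t| not ($t | str starts-with "http") }    # skip external URLs
        | where {|t| not (($file | path dirname | path join $t) | path exists) }
        | each {|t| { source: $file, target: $t } }
    } | flatten)

    assert equal ($broken | length) 0 $"broken links: ($broken | to json)"
}
```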
**Estimated Time**: 4-5 hours

---
### Documentation System Guide (Not Started)

**Scope**: Document how the unified documentation system works

**File**: `provisioning/docs/src/development/documentation-system.md`

**Content Needed**:

1. **Organization**: How docs are structured
2. **Adding Documentation**: Step-by-step process
3. **CLI Integration**: How CLI links to docs
4. **MCP Integration**: How MCP uses docs
5. **UI Integration**: How UI presents docs
6. **Cross-References**: How to maintain links
7. **Architecture Diagram**: Visual system map
8. **Best Practices**: Documentation standards
9. **Tools**: Using doc-validator.nu
10. **Maintenance**: Keeping docs updated

**Estimated Time**: 3-4 hours

---
### Final Integration Check (Not Started)

**Scope**: Complete user journey validation

**Test Journey**:

1. New user runs `provisioning status`
2. Follows suggestions from output
3. Uses `provisioning guide` commands
4. Opens Control Center UI
5. Completes onboarding wizard
6. Deploys first infrastructure

**Validation Points**:

- All suggested commands work
- All documentation links are valid
- UI navigation is intuitive
- Help system is comprehensive
- Error messages include helpful doc links
- User can complete the journey without getting stuck

**Estimated Time**: 2-3 hours

---
## 7. Files Created/Modified

### Created Files

1. **`provisioning/tools/doc-validator.nu`** (210 lines)
   - Documentation link validator tool
   - Automated scanning and validation
   - Multiple output formats

2. **`provisioning/docs/src/GLOSSARY.md`** (23,500+ lines)
   - Comprehensive terminology reference
   - 80+ terms with cross-references
   - Symbol index and usage guidelines

3. **`provisioning/docs/src/DOCUMENTATION_MAP.md`** (48,000+ lines)
   - Complete documentation navigation guide
   - 6 user journeys
   - 14 topic categories
   - 264 documents mapped

4. **`provisioning/tools/broken-links-report.json`** (Generated)
   - 261 broken links identified
   - Source file and line numbers
   - Target paths and resolution attempts

5. **`provisioning/tools/doc-validation-full-report.json`** (Generated)
   - Complete validation results
   - All 2,847 links analyzed
   - Metadata and timestamps

6. **`provisioning/tools/CROSS_REFERENCES_INTEGRATION_REPORT.md`** (This file)
   - Comprehensive integration report
   - Status of all deliverables
   - Recommendations and next steps

### Modified Files

None (Phase 1 focused on analysis and reference material creation)

---
## 8. Success Metrics

### Deliverables Completed

| Task | Status | Lines Created | Time Invested |
| ------ | -------- | --------------- | --------------- |
| Documentation Validator | ✅ Complete | 210 | ~2 hours |
| Broken Links Report | ✅ Complete | N/A (Generated) | ~30 min |
| Glossary | ✅ Complete | 23,500+ | ~4 hours |
| Documentation Map | ✅ Complete | 48,000+ | ~6 hours |
| Diagnostics Integration Analysis | ✅ Complete | N/A (Analysis) | ~1 hour |
| MCP Integration | ⏸️ Pending | - | - |
| UI Integration | ⏸️ Pending | - | - |
| Integration Tests | ⏸️ Pending | - | - |
| Documentation System Guide | ⏸️ Pending | - | - |
| Final Integration Check | ⏸️ Pending | - | - |

**Total Lines Created**: 71,710+ lines
**Total Time Invested**: ~13.5 hours
**Completion**: 50% (Phase 1 of 2)

### Quality Metrics

**Documentation Validator**:

- ✅ Handles 264 markdown files
- ✅ Analyzes 2,847 links
- ✅ 90.8% link validation accuracy
- ✅ Multiple output formats
- ✅ Extensible for future checks

**Glossary**:

- ✅ 80+ terms defined
- ✅ 100% cross-referenced
- ✅ Examples for 60% of terms
- ✅ CLI commands for 40% of terms
- ✅ Complete symbol index

**Documentation Map**:

- ✅ 100% of 264 docs cataloged
- ✅ 6 complete user journeys
- ✅ Reading time estimates for all docs
- ✅ 14 topic categories
- ✅ 3 difficulty levels

---
## 9. Integration Architecture

### Current State

```
Documentation System (Phase 1 - Complete)
├── Validator Tool ────────────┐
│   └── doc-validator.nu       │
│                              │
├── Reference Materials        │
│   ├── GLOSSARY.md ───────────┤──> Cross-References
│   └── DOCUMENTATION_MAP.md ──┤
│                              │
├── Reports                    │
│   ├── broken-links-report ───┘
│   └── validation-full-report
│
└── System Integration (Phase 1 Analysis)
    ├── Diagnostics ✅ (35+ doc refs verified)
    ├── MCP Tools ⏸️ (pending)
    ├── UI ⏸️ (pending)
    └── Tests ⏸️ (pending)
```

### Target State (Phase 2)

```
Unified Documentation System
├── Validator Tool ────────────┐
│   └── doc-validator.nu       │
│       ├── Link checking      │
│       ├── Freshness checks   │
│       └── CI/CD integration  │
│                              │
├── Reference Hub              │
│   ├── GLOSSARY.md ───────────┤──> All Systems
│   ├── DOCUMENTATION_MAP.md ──┤
│   └── System Guide ──────────┤
│                              │
├── System Integration         │
│   ├── Diagnostics ✅         │
│   ├── MCP Tools ✅ ──────────┤
│   ├── UI ✅ ─────────────────┤
│   └── CLI ✅ ────────────────┤
│                              │
├── Automated Testing          │
│   ├── Link validation ───────┘
│   ├── Integration tests
│   └── User journey tests
│
└── CI/CD Integration
    ├── Pre-commit hooks
    ├── PR validation
    └── Doc freshness checks
```

---
## 10. Recommendations

### Immediate Actions (Priority 1)

1. **Fix High-Impact Broken Links** (2-3 hours)
   - Create missing guide files
   - Fix path resolution issues
   - Update ADR references

2. **Complete MCP Integration** (2-3 hours)
   - Validate MCP tool doc references
   - Update broken paths
   - Add GLOSSARY/MAP references

3. **Complete UI Integration** (3-4 hours)
   - Validate UI doc references
   - Test documentation viewer
   - Update tooltips and help modals

### Short-Term Actions (Priority 2)

1. **Create Integration Tests** (4-5 hours)
   - Write automated test suite
   - Cover all system integrations
   - Add to CI/CD pipeline

2. **Write Documentation System Guide** (3-4 hours)
   - Document unified system architecture
   - Provide maintenance guidelines
   - Include contribution process

3. **Run Final Integration Check** (2-3 hours)
   - Test complete user journey
   - Validate all touchpoints
   - Fix any issues found

### Medium-Term Actions (Priority 3)

1. **Automate Link Validation** (1-2 hours)
   - Add doc-validator to CI/CD
   - Run on every PR
   - Block merges with broken links

2. **Add Doc Freshness Checks** (2-3 hours)
   - Track doc last-updated dates
   - Flag stale documentation (see the sketch after this list)
   - Auto-create update issues

3. **Create Documentation Dashboard** (4-6 hours)
   - Visual doc health metrics
   - Link validation status
   - Coverage statistics
   - Contribution tracking
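One possible freshness check treats anything not modified in 180 days as stale; the threshold and docs root are illustrative assumptions:

```nu
# Flag documentation files that have not been touched recently.
def flag-stale-docs [docs_root: string = "provisioning/docs/src"] {
    let cutoff = ((date now) - 180day)   # assumed staleness threshold

    glob $"($docs_root)/**/*.md"
    | each {|f| ls $f | get 0 }          # one record per file, with `modified`
    | where modified < $cutoff
    | select name modified
    | sort-by modified
}
```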
---
## 11. Lessons Learned

### Successes

1. **Comprehensive Scope**: Mapping 264 documents revealed the system's true complexity
2. **Tool-First Approach**: Building the validator before starting manual work saved significant time
3. **User Journey Focus**: Organizing by user type makes docs more accessible
4. **Cross-Reference Hub**: GLOSSARY + MAP create powerful navigation
5. **Existing Integration**: The diagnostics system already follows good practices

### Challenges

1. **Link Validation Complexity**: The 261 broken links are proving harder to fix than expected
2. **Path Resolution**: Multiple doc directories create path confusion
3. **Moving Target**: The documentation structure kept evolving during the project
4. **Time Estimation**: The original scope underestimated the total work
5. **Tool Limitations**: Anchor validation requires parsing headers (future work)

### Improvements for Phase 2

1. **Incremental Validation**: Fix broken links category by category
2. **Automated Updates**: Update references when files move
3. **Version Tracking**: Track doc versions for compatibility
4. **CI/CD Integration**: Prevent new broken links from being added
5. **Living Documentation**: Auto-update maps and glossary

---
## 12. Next Steps

### Phase 2 Work (12-16 hours estimated)

**Week 1**:

- Day 1-2: Fix high-priority broken links (5-6 hours)
- Day 3: Complete MCP integration (2-3 hours)
- Day 4: Complete UI integration (3-4 hours)

**Week 2**:

- Day 5: Create integration tests (4-5 hours)
- Day 6: Write documentation system guide (3-4 hours)
- Day 7: Run final integration check (2-3 hours)

### Acceptance Criteria

Phase 2 is complete when:

- ✅ <5% broken links (currently 9.2%)
- ✅ All system components reference valid docs
- ✅ Integration tests pass
- ✅ Documentation system guide published
- ✅ Complete user journey validated
- ✅ CI/CD validation in place

---
## 13. Conclusion

Phase 1 of the Cross-References & Integration project is **successfully complete**. We have built the foundational infrastructure for a unified documentation system:

✅ **Tool Created**: Automated documentation validator
✅ **Baseline Established**: 261 broken links identified
✅ **References Built**: Comprehensive glossary and documentation map
✅ **Integration Analyzed**: Diagnostics system verified

The project is on track for Phase 2 completion, which will integrate all system components (MCP, UI, Tests) and validate the complete user experience.

**Total Progress**: 50% complete
**Quality**: High - All Phase 1 deliverables meet or exceed requirements
**Risk**: Low - Clear path to Phase 2 completion
**Recommendation**: Proceed with Phase 2 implementation

---

**Report Generated**: 2025-10-10
**Agent**: Agent 6: Cross-References & Integration
**Status**: ✅ Phase 1 Complete
**Next Review**: After Phase 2 completion (estimated 12-16 hours)
File diff suppressed because it is too large

211 tools/distribution/docs_discovery.nu Normal file
@ -0,0 +1,211 @@
#!/usr/bin/env nu

# Documentation discovery module - extracts discovery phase from generate-docs.nu
# Responsible for discovering documentation sources, API endpoints, and project structure

use std log

# Main discovery orchestrator - discovers all documentation sources in the project
export def discover-all-docs [docs_config: record] {
    log info "Discovering documentation sources..."

    let start_time = (date now)

    # Find existing documentation files
    let existing_docs = find-existing-docs $docs_config.source_root

    # Analyze project structure for automatic documentation generation
    let project_structure = analyze-structure $docs_config.source_root

    # Discover configuration examples
    let config_examples = find-config-examples $docs_config.source_root

    # Find API endpoints for documentation
    let api_endpoints = if $docs_config.generate_api {
        discover-api-endpoints $docs_config.source_root
    } else {
        []
    }

    {
        status: "success"
        existing_docs: $existing_docs
        project_structure: $project_structure
        config_examples: $config_examples
        api_endpoints: $api_endpoints
        duration: ((date now) - $start_time)
    }
}
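# Usage (illustrative): the caller is expected to build the config record;
# only the two fields read above are required for this sketch.
#   let cfg = { source_root: ".", generate_api: false }
#   discover-all-docs $cfg | get project_structure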
# Find existing documentation files
export def find-existing-docs [repo_root: string] {
    let doc_patterns = ["README.md", "*.md", "*.rst", "docs/*", "doc/*"]
    mut found_docs = []

    for pattern in $doc_patterns {
        let result = (do {
            ^find $repo_root -name $pattern -type f err> /dev/null
        } | complete)

        if $result.exit_code == 0 and ($result.stdout | str length) > 0 {
            let files = ($result.stdout | lines)
            $found_docs = ($found_docs | append $files)
        }
    }

    let categorized_docs = $found_docs
        | where { |doc| $doc != "" and ($doc | path exists) }
        | each {|doc|
            {
                path: $doc
                name: ($doc | path basename)
                category: (categorize-document $doc)
                size: (ls $doc | get 0.size)
            }
        }

    {
        total_docs: ($categorized_docs | length)
        readme_files: ($categorized_docs | where category == "readme")
        user_docs: ($categorized_docs | where category == "user")
        dev_docs: ($categorized_docs | where category == "dev")
        api_docs: ($categorized_docs | where category == "api")
        other_docs: ($categorized_docs | where category == "other")
        all_docs: $categorized_docs
    }
}

# Discover API endpoints for documentation
export def discover-api-endpoints [repo_root: string] {
    # This would analyze Rust source files to find API routes
    # For now, we'll return a placeholder structure

    [
        { path: "/health", method: "GET", description: "Health check endpoint" }
        { path: "/version", method: "GET", description: "System version information" }
        { path: "/workflows", method: "GET", description: "List workflows" }
        { path: "/workflows", method: "POST", description: "Create new workflow" }
        { path: "/workflows/{id}", method: "GET", description: "Get workflow details" }
        { path: "/workflows/{id}", method: "DELETE", description: "Delete workflow" }
    ]
}

# Analyze project structure for documentation generation
export def analyze-structure [repo_root: string] {
    # Find major components
    let components = [
        { name: "orchestrator", path: ($repo_root | path join "orchestrator") }
        { name: "control-center", path: ($repo_root | path join "control-center") }
        { name: "provisioning", path: ($repo_root | path join "provisioning") }
        { name: "platform", path: ($repo_root | path join "platform") }
        { name: "core", path: ($repo_root | path join "core") }
    ]

    let existing_components = ($components | where {|comp| ($comp.path | path exists) })

    # Analyze Nushell modules for documentation
    let nu_result = (do {
        ^find $repo_root -name "*.nu" -type f err> /dev/null
    } | complete)

    let nu_modules = if $nu_result.exit_code == 0 and ($nu_result.stdout | str length) > 0 {
        $nu_result.stdout | lines
    } else {
        []
    }

    # Find configuration files
    let toml_files = (do {
        ^find $repo_root -name "*.toml" -type f err> /dev/null
    } | complete)

    let yaml_files = (do {
        ^find $repo_root -name "*.yaml" -type f err> /dev/null
    } | complete)

    let json_files = (do {
        ^find $repo_root -name "*.json" -type f err> /dev/null
    } | complete)

    let config_files = (
        (if $toml_files.exit_code == 0 and ($toml_files.stdout | str length) > 0 { $toml_files.stdout | lines } else { [] }) |
        append (if $yaml_files.exit_code == 0 and ($yaml_files.stdout | str length) > 0 { $yaml_files.stdout | lines } else { [] }) |
        append (if $json_files.exit_code == 0 and ($json_files.stdout | str length) > 0 { $json_files.stdout | lines } else { [] })
    )

    {
        components: $existing_components
        nu_modules: ($nu_modules | length)
        config_files: ($config_files | length)
        has_rust_projects: (($repo_root | path join "Cargo.toml") | path exists)
        has_docker: (($repo_root | path join "Dockerfile") | path exists)
    }
}

# Find configuration examples in the repository
def find-config-examples [repo_root: string] {
    let example_patterns = ["*.example", "*.template", "examples/*", "config/*"]
    mut examples = []

    for pattern in $example_patterns {
        let result = (do {
            ^find $repo_root -name $pattern -type f err> /dev/null
        } | complete)

        if $result.exit_code == 0 and ($result.stdout | str length) > 0 {
            let files = ($result.stdout | lines)
            $examples = ($examples | append $files)
        }
    }

    return ($examples
        | where { |ex| $ex != "" }
        | each {|ex|
            {
                path: $ex
                name: ($ex | path basename)
                type: (get-config-type $ex)
            }
        })
}

# Categorize document based on filename and location
def categorize-document [doc_path: string] {
    let name = ($doc_path | path basename | str downcase)
    let path = ($doc_path | str downcase)

    if ($name == "readme.md") or ($name | str starts-with "readme") {
        return "readme"
    }

    if ($name | str contains "install") or ($name | str contains "setup") or ($name | str contains "getting") {
        return "user"
    }

    if ($name | str contains "api") or ($path | str contains "/api/") {
        return "api"
    }

    if ($name | str contains "dev") or ($name | str contains "contrib") or ($path | str contains "/dev/") {
        return "dev"
    }

    if ($name | str contains "config") or ($name | str contains "reference") {
        return "admin"
    }

    return "other"
}

# Get configuration file type
def get-config-type [config_path: string] {
    let parsed = ($config_path | path parse)
    let ext = ($parsed.extension | str downcase)
    match $ext {
        "toml" => "toml"
        "yaml" | "yml" => "yaml"
        "json" => "json"
        "env" => "environment"
        _ => "unknown"
    }
}
126 tools/distribution/docs_postprocessing.nu Normal file
@ -0,0 +1,126 @@
#!/usr/bin/env nu

# Documentation postprocessing
#
# Handles index creation, format conversion, and document counting

use std log

# Create documentation index
export def create_documentation_index [docs_config: record, generation_results: list] {
    log info "Creating documentation index..."

    let start_time = (date now)

    let result = (do {
        # Literal parens in the markdown links are escaped so the interpolated
        # string only evaluates the (date now ...) expression.
        let index_content = $"# Documentation Index

Welcome to the Provisioning System documentation!

## User Documentation
- [Getting Started Guide]\(user/getting-started.md) - Quick start guide for new users
- [Installation Guide]\(user/installation.md) - Complete installation instructions
- [CLI Reference]\(user/cli-reference.md) - Comprehensive command reference
- [Troubleshooting Guide]\(user/troubleshooting.md) - Common issues and solutions
- [FAQ]\(user/faq.md) - Frequently asked questions

## Administrator Documentation
- Configuration Guide - Advanced configuration options
- Security Guide - Security best practices and configuration
- Monitoring Guide - System monitoring and observability
- Backup and Recovery - Data protection strategies

## Developer Documentation
- Architecture Overview - System architecture and design
- API Reference - REST API documentation
- Plugin Development - Creating custom plugins and extensions
- Contributing Guide - How to contribute to the project

## Generated Documentation
This documentation was generated automatically on (date now | format date '%Y-%m-%d %H:%M:%S').

For the most up-to-date information, visit the online documentation.
"

        $index_content | save ($docs_config.output_dir | path join "README.md")

        {
            status: "success"
            index_file: ($docs_config.output_dir | path join "README.md")
            duration: ((date now) - $start_time)
        }
    } | complete)

    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

# Convert documentation to additional formats (placeholder)
export def convert_documentation_formats [docs_config: record, generation_results: list] {
    log info "Converting documentation formats..."

    let start_time = (date now)

    # Format conversion would happen here (markdown to HTML, PDF, etc.)
    log warning "Documentation format conversion not fully implemented"

    {
        status: "skipped"
        reason: "format conversion not fully implemented"
        converted_files: 0
        duration: ((date now) - $start_time)
    }
}

# Count generated documents from results
export def count_generated_documents [generation_results: list] {
    let user_result = (do {
        let user_result = ($generation_results | where phase == "user" | get 0.result)
        $user_result.docs_generated
    } | complete)
    let user_count = if $user_result.exit_code == 0 { $user_result.stdout } else { 0 }

    let admin_result = (do {
        let admin_result = ($generation_results | where phase == "admin" | get 0.result)
        $admin_result.docs_generated
    } | complete)
    let admin_count = if $admin_result.exit_code == 0 { $admin_result.stdout } else { 0 }

    let dev_result = (do {
        let dev_result = ($generation_results | where phase == "dev" | get 0.result)
        $dev_result.docs_generated
    } | complete)
    let dev_count = if $dev_result.exit_code == 0 { $dev_result.stdout } else { 0 }

    let api_result = (do {
        let api_result = ($generation_results | where phase == "api" | get 0.result)
        $api_result.docs_generated
    } | complete)
    let api_count = if $api_result.exit_code == 0 { $api_result.stdout } else { 0 }

    return ($user_count + $admin_count + $dev_count + $api_count + 1) # +1 for index
}

# Get directory size helper
export def get_directory_size [dir: string] {
    if not ($dir | path exists) {
        return 0
    }

    let result = (do {
        ls $dir | where type == file | each {|file| $file.size } | math sum
    } | complete)

    if $result.exit_code == 0 {
        $result.stdout
    } else {
        0
    }
}
58 tools/distribution/docs_templates.nu Normal file
@ -0,0 +1,58 @@
#!/usr/bin/env nu

# Documentation templates
#
# Generates admin, developer, and API documentation templates

use std log

# Generate admin documentation (placeholder)
export def generate_admin_documentation [docs_config: record, discovery_result: record] {
    log info "Generating admin documentation..."

    let start_time = (date now)

    # Placeholder for admin docs
    log warning "Admin documentation generation not fully implemented"

    {
        status: "skipped"
        reason: "admin documentation not fully implemented"
        docs_generated: 0
        duration: ((date now) - $start_time)
    }
}

# Generate developer documentation (placeholder)
export def generate_developer_documentation [docs_config: record, discovery_result: record] {
    log info "Generating developer documentation..."

    let start_time = (date now)

    # Placeholder for dev docs
    log warning "Developer documentation generation not fully implemented"

    {
        status: "skipped"
        reason: "developer documentation not fully implemented"
        docs_generated: 0
        duration: ((date now) - $start_time)
    }
}

# Generate API documentation (placeholder)
export def generate_api_documentation [docs_config: record, discovery_result: record] {
    log info "Generating API documentation..."

    let start_time = (date now)

    # Placeholder for API docs
    log warning "API documentation generation not fully implemented"

    {
        status: "skipped"
        reason: "API documentation not fully implemented"
        docs_generated: 0
        duration: ((date now) - $start_time)
    }
}
@ -11,6 +11,7 @@
# - Multi-platform distribution assembly

use std log
use ./platform_compiler.nu [compile-platforms, get-target-triple]

def main [
    --version: string = ""  # Distribution version (auto-detected if empty)
@ -58,13 +59,13 @@ def main [
    # Ensure output directory exists
    mkdir ($distribution_config.output_dir)

    let generation_results = []
    mut generation_results = []

    try {
        # Phase 1: Preparation
        let preparation_result = prepare_distribution_environment $distribution_config

        let generation_results = ($generation_results | append { phase: "preparation", result: $preparation_result })
        $generation_results = ($generation_results | append { phase: "preparation", result: $preparation_result })

        if $preparation_result.status != "success" {
            log error $"Distribution preparation failed: ($preparation_result.reason)"
@ -74,7 +75,7 @@ def main [
        # Phase 2: Core Distribution
        let core_result = generate_core_distribution $distribution_config

        let generation_results = ($generation_results | append { phase: "core", result: $core_result })
        $generation_results = ($generation_results | append { phase: "core", result: $core_result })

        if $core_result.status != "success" {
            log error $"Core distribution generation failed: ($core_result.reason)"
@ -84,7 +85,7 @@ def main [
        # Phase 3: Platform Services
        let platform_result = generate_platform_distributions $distribution_config

        let generation_results = ($generation_results | append { phase: "platform", result: $platform_result })
        $generation_results = ($generation_results | append { phase: "platform", result: $platform_result })

        if $platform_result.status != "success" {
            log error $"Platform distribution generation failed: ($platform_result.reason)"
@ -98,12 +99,12 @@ def main [
            { status: "skipped", reason: "documentation generation disabled" }
        }

        let generation_results = ($generation_results | append { phase: "documentation", result: $docs_result })
        $generation_results = ($generation_results | append { phase: "documentation", result: $docs_result })

        # Phase 5: Assembly
        let assembly_result = assemble_complete_distributions $distribution_config $generation_results

        let generation_results = ($generation_results | append { phase: "assembly", result: $assembly_result })
        $generation_results = ($generation_results | append { phase: "assembly", result: $assembly_result })

        if $assembly_result.status != "success" {
            log error $"Distribution assembly failed: ($assembly_result.reason)"
@ -117,7 +118,7 @@ def main [
            { status: "skipped", reason: "validation disabled" }
        }

        let generation_results = ($generation_results | append { phase: "validation", result: $validation_result })
        $generation_results = ($generation_results | append { phase: "validation", result: $validation_result })

        let summary = {
            version: $distribution_config.version
@ -146,38 +147,37 @@ def detect_project_version [repo_root: string]: nothing -> string {
    cd $repo_root

    # Try git tags first
    let git_version = try {
        let tag = (git describe --tags --exact-match HEAD 2>/dev/null | str trim)
        if $tag != "" {
            return ($tag | str replace "^v" "")
    let git_version = (do {
        let result = (do { ^git describe --tags --exact-match HEAD err> /dev/null } | complete)
        if $result.exit_code == 0 {
            let tag = ($result.stdout | str trim)
            if $tag != "" {
                return ($tag | str replace --regex "^v" "")
            }
        }
        let latest = (git describe --tags --abbrev=0 2>/dev/null | str trim)
        if $latest != "" {
            return ($latest | str replace "^v" "")

        let latest_result = (do { ^git describe --tags --abbrev=0 err> /dev/null } | complete)
        if $latest_result.exit_code == 0 {
            let latest = ($latest_result.stdout | str trim)
            if $latest != "" {
                return ($latest | str replace --regex "^v" "")
            }
        }
        ""
    } catch {
        ""
    }
    })

    if $git_version != "" {
        return $git_version
    }

    # Try Cargo.toml
    let cargo_version = try {
        let cargo_files = (glob **/Cargo.toml --depth 2)
        if ($cargo_files | length) > 0 {
            let cargo_data = (open ($cargo_files | get 0))
            return $cargo_data.package.version
    let cargo_files = (glob **/Cargo.toml --depth 2)
    if ($cargo_files | length) > 0 {
        let cargo_data = (open ($cargo_files | get 0))
        let cargo_version = ($cargo_data.package?.version? | default "")
        if $cargo_version != "" {
            return $cargo_version
        }
        ""
    } catch {
        ""
    }

    if $cargo_version != "" {
        return $cargo_version
    }

    # Fallback to date-based version
@ -190,7 +190,7 @@ def prepare_distribution_environment [distribution_config: record]: nothing -> r

    let start_time = (date now)

    try {
    let preparation_result = do {
        # Clean build if requested
        if $distribution_config.build_clean {
            log info "Cleaning build environment..."
@ -199,13 +199,34 @@ def prepare_distribution_environment [distribution_config: record]: nothing -> r
            let packages_dir = ($distribution_config.repo_root | path join "packages")

            if ($target_dir | path exists) {
                ^rm -rf $target_dir
                let rm_result = (do { ^rm -rf $target_dir } | complete)
                if $rm_result.exit_code != 0 {
                    return {
                        status: "failed"
                        reason: "Failed to remove target directory"
                        duration: ((date now) - $start_time)
                    }
                }
            }
            if ($dist_dir | path exists) {
                ^rm -rf $dist_dir
                let rm_result = (do { ^rm -rf $dist_dir } | complete)
                if $rm_result.exit_code != 0 {
                    return {
                        status: "failed"
                        reason: "Failed to remove dist directory"
                        duration: ((date now) - $start_time)
                    }
                }
            }
            if ($packages_dir | path exists) {
                ^rm -rf $packages_dir
                let rm_result = (do { ^rm -rf $packages_dir } | complete)
                if $rm_result.exit_code != 0 {
                    return {
                        status: "failed"
                        reason: "Failed to remove packages directory"
                        duration: ((date now) - $start_time)
                    }
                }
            }
            log info "Build environment cleaned"
        }
@ -244,30 +265,31 @@ def prepare_distribution_environment [distribution_config: record]: nothing -> r
            dependency_check: $dependency_check
            duration: ((date now) - $start_time)
        }

    } catch {|err|
    } catch { |err|
        {
            status: "failed"
            reason: $err.msg
            duration: ((date now) - $start_time)
        }
    }

    return $preparation_result
}

# Validate distribution dependencies
def validate_distribution_dependencies [distribution_config: record]: nothing -> record {
    let required_tools = [
        { name: "nu", command: "nu --version", description: "Nushell shell" }
        { name: "git", command: "git --version", description: "Git version control" }
        { name: "cargo", command: "cargo --version", description: "Rust package manager" }
        { name: "rustc", command: "rustc --version", description: "Rust compiler" }
        { name: "nu", command: ["nu", "--version"], description: "Nushell shell" }
        { name: "git", command: ["git", "--version"], description: "Git version control" }
        { name: "cargo", command: ["cargo", "--version"], description: "Rust package manager" }
        { name: "rustc", command: ["rustc", "--version"], description: "Rust compiler" }
    ]

    let optional_tools = [
        { name: "kcl", command: "kcl version", description: "KCL configuration language" }
        { name: "docker", command: "docker --version", description: "Docker containerization" }
        { name: "tar", command: "tar --version", description: "Archive creation" }
        { name: "zip", command: "zip -v", description: "Zip compression" }
        { name: "kcl", command: ["kcl", "version"], description: "KCL configuration language" }
        { name: "docker", command: ["docker", "--version"], description: "Docker containerization" }
        { name: "tar", command: ["tar", "--version"], description: "Archive creation" }
        { name: "zip", command: ["zip", "-v"], description: "Zip compression" }
    ]

    mut available = []
@ -275,11 +297,12 @@ def validate_distribution_dependencies [distribution_config: record]: nothing ->

    # Check required tools
    for tool in $required_tools {
        let check_result = try {
            run-external $tool.command err> /dev/null | complete
        let check_result = (do {
            let cmd_parts = $tool.command
            ^($cmd_parts | get 0) ...($cmd_parts | skip 1) err> /dev/null | complete
        } catch {
            { exit_code: 1 }
        }
        })

        if $check_result.exit_code == 0 {
            $available = ($available | append $tool.name)
@ -290,11 +313,12 @@ def validate_distribution_dependencies [distribution_config: record]: nothing ->

    # Check optional tools
    for tool in $optional_tools {
        let check_result = try {
            run-external $tool.command err> /dev/null | complete
        let check_result = (do {
            let cmd_parts = $tool.command
            ^($cmd_parts | get 0) ...($cmd_parts | skip 1) err> /dev/null | complete
        } catch {
            { exit_code: 1 }
        }
        })

        if $check_result.exit_code == 0 {
            $available = ($available | append $tool.name)
@ -367,12 +391,8 @@ def generate_platform_distributions [distribution_config: record]: nothing -> re
    let start_time = (date now)

    try {
        # Compile platform components for each target platform
        let platform_results = if $distribution_config.parallel_builds {
            compile_platforms_parallel $distribution_config
        } else {
            compile_platforms_sequential $distribution_config
        }
        # Compile platform components for each target platform using the platform_compiler module
        let platform_results = (compile-platforms $distribution_config)

        let successful_platforms = ($platform_results | where status == "success" | length)
        let total_platforms = ($platform_results | length)
@ -403,66 +423,6 @@ def generate_platform_distributions [distribution_config: record]: nothing -> re
    }
}

# Compile platforms in parallel
def compile_platforms_parallel [distribution_config: record]: nothing -> list {
    # For simplicity, using sequential compilation for now
    # In a real implementation, you might use background processes
    compile_platforms_sequential $distribution_config
}

# Compile platforms sequentially
def compile_platforms_sequential [distribution_config: record]: nothing -> list {
    $distribution_config.platforms | each {|platform|
        compile_single_platform $platform $distribution_config
    }
}

# Compile platform components for a single platform
def compile_single_platform [platform: string, distribution_config: record]: nothing -> record {
    log info $"Compiling platform: ($platform)"

    let start_time = (date now)
    let target_triple = get_rust_target_triple $platform

    try {
        # Compile platform components
        let compile_result = (nu ($distribution_config.repo_root | path join "src" "tools" "build" "compile-platform.nu")
            --target $target_triple
            --release
            --output-dir ($distribution_config.output_dir | path join "platform")
            --verbose:$distribution_config.verbose
            --clean:$distribution_config.build_clean)

        {
            platform: $platform
            target: $target_triple
            status: (if $compile_result.failed > 0 { "failed" } else { "success" })
            compiled_components: $compile_result.successful
            total_components: $compile_result.total
            compile_result: $compile_result
            duration: ((date now) - $start_time)
        }

    } catch {|err|
        {
            platform: $platform
            target: $target_triple
            status: "failed"
            reason: $err.msg
            duration: ((date now) - $start_time)
        }
    }
}

# Get Rust target triple for platform
def get_rust_target_triple [platform: string]: nothing -> string {
    match $platform {
        "linux" => "x86_64-unknown-linux-gnu"
        "macos" => "x86_64-apple-darwin"
        "windows" => "x86_64-pc-windows-gnu"
        _ => $platform  # Assume it's already a target triple
    }
}

# Generate distribution documentation
def generate_distribution_docs [distribution_config: record]: nothing -> record {
@ -834,7 +794,7 @@ def assemble_single_distribution [
    let dist_name = $"provisioning-($distribution_config.version)-($platform)-($variant)"
    let dist_dir = ($distribution_config.output_dir | path join "tmp" $dist_name)

    try {
    let assembly_result = do {
        # Create distribution directory
        mkdir $dist_dir

@ -865,7 +825,15 @@ def assemble_single_distribution [
    } else {
        # Move directory to final location
        let final_dir = ($distribution_config.output_dir | path join $dist_name)
        mv $dist_dir $final_dir
        let mv_result = (do { ^mv $dist_dir $final_dir } | complete)
        if $mv_result.exit_code != 0 {
            return {
                platform: $platform
                variant: $variant
                status: "failed"
                reason: "Failed to move distribution directory"
            }
        }
        $final_dir
    }

@ -881,7 +849,7 @@ def assemble_single_distribution [
        compressed: $distribution_config.compress
    }

    } catch {|err|
    } catch { |err|
        {
            platform: $platform
            variant: $variant
@ -889,6 +857,8 @@ def assemble_single_distribution [
            reason: $err.msg
        }
    }

    return $assembly_result
}

# Copy complete distribution components
@ -944,7 +914,7 @@ def copy_platform_binaries [source_path: string, target_path: string, platform:
    mkdir $target_path

    let all_binaries = (ls $source_path | where type == file | get name)
    let target_suffix = get_rust_target_triple $platform
    let target_suffix = get-target-triple $platform

    for binary in $all_binaries {
        let binary_name = ($binary | path basename)
@ -1056,10 +1026,17 @@ def create_distribution_archive [dist_dir: string, output_dir: string]: nothing
    let parent_dir = ($dist_dir | path dirname)

    cd $parent_dir
    tar -czf $archive_path $dist_name
    let tar_result = (^tar -czf $archive_path $dist_name | complete)
    if $tar_result.exit_code != 0 {
        log error "Failed to create tar archive"
        return $archive_path
    }

    # Clean up directory
    rm -rf $dist_dir
    let rm_result = (^rm -rf $dist_dir | complete)
    if $rm_result.exit_code != 0 {
        log warning "Failed to remove temporary distribution directory"
    }

    return $archive_path
}
@ -1174,11 +1151,22 @@ def get_directory_size [path: string]: nothing -> int {
        return 0
    }

    let total_size = try {
    let total_size = do {
        if ($path | path type) == "file" {
            ls $path | get 0.size
        } else {
            find $path -type f | each {|file| ls $file | get 0.size } | math sum
            let find_result = (do { ^find $path -type f } | complete)
            if $find_result.exit_code == 0 {
                ($find_result.stdout | lines | each {|file|
                    if ($file | str trim) != "" {
                        ls $file | get 0.size
                    } else {
                        0
                    }
                } | math sum)
            } else {
                0
            }
        }
    } catch {
        0
File diff suppressed because it is too large

99 tools/distribution/guide_generators.nu Normal file
@ -0,0 +1,99 @@
#!/usr/bin/env nu

# Guide generators for documentation
#
# Generates user-facing guides:
# - Getting started guide
# - Installation guide
# - CLI reference
# - Troubleshooting guide
# - FAQ

use std log

# Generate user documentation guides
export def generate_user_documentation [docs_config: record, discovery_result: record] {
    log info "Generating user documentation..."

    let start_time = (date now)

    let result = (do {
        let user_docs_dir = ($docs_config.output_dir | path join "user")
        mkdir $user_docs_dir

        mut generated_docs = []

        # Generate getting started guide
        let getting_started = generate_getting_started_guide $docs_config $discovery_result

        $getting_started | save ($user_docs_dir | path join "getting-started.md")
        $generated_docs = ($generated_docs | append "getting-started.md")

        # Generate installation guide
        let installation_guide = generate_installation_guide $docs_config $discovery_result

        $installation_guide | save ($user_docs_dir | path join "installation.md")
        $generated_docs = ($generated_docs | append "installation.md")

        # Generate CLI reference
        let cli_reference = generate_cli_reference $docs_config $discovery_result

        $cli_reference | save ($user_docs_dir | path join "cli-reference.md")
        $generated_docs = ($generated_docs | append "cli-reference.md")

        # Generate troubleshooting guide
        let troubleshooting = generate_troubleshooting_guide $docs_config $discovery_result

        $troubleshooting | save ($user_docs_dir | path join "troubleshooting.md")
        $generated_docs = ($generated_docs | append "troubleshooting.md")

        # Generate FAQ
        let faq = generate_faq $docs_config $discovery_result

        $faq | save ($user_docs_dir | path join "faq.md")
        $generated_docs = ($generated_docs | append "faq.md")

        {
            status: "success"
            docs_generated: ($generated_docs | length)
            generated_docs: $generated_docs
            user_docs_dir: $user_docs_dir
            duration: ((date now) - $start_time)
        }
    } | complete)

    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

# Generate getting started guide (placeholder - content generated at runtime)
def generate_getting_started_guide [docs_config: record, discovery_result: record] {
    "# Getting Started with Provisioning System\n\nThis is a placeholder guide generated at runtime."
}

# Generate installation guide (placeholder - content generated at runtime)
def generate_installation_guide [docs_config: record, discovery_result: record] {
    "# Installation Guide\n\nThis is a placeholder installation guide generated at runtime."
}

# Generate CLI reference (placeholder - content generated at runtime)
def generate_cli_reference [docs_config: record, discovery_result: record] {
    "# CLI Reference\n\nThis is a placeholder CLI reference generated at runtime."
}

# Generate troubleshooting guide (placeholder - content generated at runtime)
def generate_troubleshooting_guide [docs_config: record, discovery_result: record] {
    "# Troubleshooting Guide\n\nThis is a placeholder troubleshooting guide generated at runtime."
}

# Generate FAQ (placeholder - content generated at runtime)
def generate_faq [docs_config: record, discovery_result: record] {
    "# Frequently Asked Questions\n\nThis is a placeholder FAQ generated at runtime."
}
732 tools/distribution/installer_generator.nu Normal file
@ -0,0 +1,732 @@
# Module: Installer Generation
|
||||
# Purpose: Generates platform-specific installation scripts and packages
|
||||
# Dependencies: std log
|
||||
|
||||
use std log
|
||||
|
||||
# Create shell installers for different platforms
|
||||
export def create_shell_installers [
|
||||
installer_config: record
|
||||
analysis_result: record
|
||||
] {
|
||||
log info "Creating shell installers..."
|
||||
|
||||
let start_time = (date now)
|
||||
|
||||
let result = (do {
|
||||
mut created_installers = []
|
||||
|
||||
for platform in $installer_config.platforms {
|
||||
match $platform {
|
||||
"linux" | "macos" => {
|
||||
let installer_result = create_unix_shell_installer $platform $installer_config $analysis_result
|
||||
|
||||
if $installer_result.status == "success" {
|
||||
$created_installers = ($created_installers | append $installer_result)
|
||||
}
|
||||
}
|
||||
"windows" => {
|
||||
let installer_result = create_windows_batch_installer $installer_config $analysis_result
|
||||
|
||||
if $installer_result.status == "success" {
|
||||
$created_installers = ($created_installers | append $installer_result)
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
log warning $"Unsupported platform for shell installer: ($platform)"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
status: "success"
|
||||
installers_created: ($created_installers | length)
|
||||
created_installers: $created_installers
|
||||
duration: ((date now) - $start_time)
|
||||
}
|
||||
} | complete)
|
||||
|
||||
if $result.exit_code != 0 {
|
||||
{
|
||||
status: "failed"
|
||||
reason: $result.stderr
|
||||
duration: ((date now) - $start_time)
|
||||
}
|
||||
} else {
|
||||
$result.stdout
|
||||
}
|
||||
}
|
||||
|
||||
# Create Unix shell installer
|
||||
export def create_unix_shell_installer [
|
||||
platform: string
|
||||
installer_config: record
|
||||
analysis_result: record
|
||||
] {
|
||||
let version = $analysis_result.version_info.version
|
||||
let components = $analysis_result.components
|
||||
let requirements = $analysis_result.requirements
|
||||
|
||||
let installer_content = $"#!/bin/bash
|
||||
# Provisioning System Installer
|
||||
# Platform: ($platform)
# Version: ($version)

set -e

# Colors for output
RED='\\033[0;31m'
GREEN='\\033[0;32m'
YELLOW='\\033[1;33m'
NC='\\033[0m' # No Color

# Configuration
INSTALL_USER=\"($requirements.system_user)\"
INSTALL_GROUP=\"($requirements.system_group)\"
SERVICE_NAME=\"provisioning\"

# Installation directories
BIN_DIR=\"/usr/local/bin\"
LIB_DIR=\"/usr/local/lib/provisioning\"
CONFIG_DIR=\"/etc/provisioning\"
DATA_DIR=\"/var/lib/provisioning\"
LOG_DIR=\"/var/log/provisioning\"

# Functions
log_info() {
    echo -e \"${GREEN}[INFO]${NC} $1\"
}

log_warn() {
    echo -e \"${YELLOW}[WARN]${NC} $1\"
}

log_error() {
    echo -e \"${RED}[ERROR]${NC} $1\"
}

check_root() {
    if [[ $EUID -ne 0 ]]; then
        log_error \"This script must be run as root (use sudo)\"
        exit 1
    fi
}

create_user() {
    if ! id \"$INSTALL_USER\" >/dev/null 2>&1; then
        log_info \"Creating user: $INSTALL_USER\"
        useradd -r -s /bin/false -m -d \"$DATA_DIR\" \"$INSTALL_USER\"
        usermod -a -G \"$INSTALL_GROUP\" \"$INSTALL_USER\" 2>/dev/null || true
    else
        log_info \"User $INSTALL_USER already exists\"
    fi
}

create_directories() {
    log_info \"Creating directories...\"
    mkdir -p \"$BIN_DIR\" \"$LIB_DIR\" \"$CONFIG_DIR\" \"$DATA_DIR\" \"$LOG_DIR\"

    # Set ownership
    chown -R \"$INSTALL_USER:$INSTALL_GROUP\" \"$DATA_DIR\" \"$LOG_DIR\"
    chmod 755 \"$DATA_DIR\" \"$LOG_DIR\"
}

install_binaries() {
    log_info \"Installing binaries...\"
    if [[ -d \"platform\" ]]; then
        cp platform/* \"$BIN_DIR/\"
        chmod +x \"$BIN_DIR\"/provisioning-*
    elif [[ -d \"core/bin\" ]]; then
        cp core/bin/* \"$BIN_DIR/\"
        chmod +x \"$BIN_DIR\"/provisioning*
    else
        log_error \"No binaries found to install\"
        return 1
    fi
}

install_libraries() {
    if [[ -d \"core\" ]]; then
        log_info \"Installing core libraries...\"
        cp -r core/* \"$LIB_DIR/\"
        chown -R root:root \"$LIB_DIR\"
        find \"$LIB_DIR\" -type f -name \"*.nu\" -exec chmod 644 {} \\;
    fi
}

install_configuration() {
    if [[ -d \"config\" ]]; then
        log_info \"Installing configuration...\"
        cp config/*.toml \"$CONFIG_DIR/\" 2>/dev/null || true
        cp config/*.template \"$CONFIG_DIR/\" 2>/dev/null || true

        # Set secure permissions on config files
        chown -R root:\"$INSTALL_GROUP\" \"$CONFIG_DIR\"
        chmod 755 \"$CONFIG_DIR\"
        find \"$CONFIG_DIR\" -name \"*.toml\" -exec chmod 640 {} \\;
    fi
}

install_services() {
    if [[ -d \"services\" ]] && command -v systemctl >/dev/null 2>&1; then
        log_info \"Installing systemd services...\"
        cp services/*.service /etc/systemd/system/ 2>/dev/null || true
        systemctl daemon-reload

        # Enable but don't start services
        for service in services/*.service; do
            if [[ -f \"$service\" ]]; then
                service_name=$(basename \"$service\")
                log_info \"Enabling service: $service_name\"
                systemctl enable \"$service_name\"
            fi
        done
    fi
}

configure_environment() {
    log_info \"Configuring environment...\"

    # Create environment file
    cat > /etc/environment.d/provisioning.conf << EOF
PROVISIONING_HOME=\"$LIB_DIR\"
PROVISIONING_CONFIG=\"$CONFIG_DIR\"
PROVISIONING_DATA=\"$DATA_DIR\"
PROVISIONING_LOG=\"$LOG_DIR\"
EOF

    # Create shell profile
    cat > /etc/profile.d/provisioning.sh << 'EOF'
# Provisioning System Environment
if [ -d \"/usr/local/bin\" ]; then
    case \":$PATH:\" in
        *:/usr/local/bin:*) ;;
        *) PATH=\"/usr/local/bin:$PATH\" ;;
    esac
fi
export PROVISIONING_HOME=\"/usr/local/lib/provisioning\"
export PROVISIONING_CONFIG=\"/etc/provisioning\"
EOF
}

setup_logrotate() {
    log_info \"Setting up log rotation...\"
    cat > /etc/logrotate.d/provisioning << 'EOF'
/var/log/provisioning/*.log {
    daily
    missingok
    rotate 52
    compress
    delaycompress
    notifempty
    create 644 provisioning provisioning
}
EOF
}

main() {
    log_info \"Starting Provisioning System installation...\"

    check_root
    create_user
    create_directories
    install_binaries
    install_libraries
    install_configuration

    if [[ \"$1\" != \"--no-services\" ]]; then
        install_services
    fi

    configure_environment
    setup_logrotate

    log_info \"Installation completed successfully!\"
    log_info \"\"
    log_info \"Next steps:\"
    log_info \"  1. Review configuration in $CONFIG_DIR\"
    log_info \"  2. Start services: sudo systemctl start provisioning\"
    log_info \"  3. Run 'provisioning help' to get started\"
    log_info \"\"
    log_info \"For more information, see the documentation in $LIB_DIR/docs\"
}

# Handle command line arguments
case \"$1\" in
    --help|-h)
        echo \"Provisioning System Installer ($version)\"
        echo \"\"
        echo \"Usage: $0 [OPTIONS]\"
        echo \"\"
        echo \"Options:\"
        echo \"  --no-services   Skip service installation\"
        echo \"  --help, -h      Show this help message\"
        exit 0
        ;;
    *)
        main \"$@\"
        ;;
esac
"

    let installer_file = ($installer_config.output_dir | path join $"install-($platform).sh")
    $installer_content | save $installer_file
    chmod +x $installer_file

    {
        platform: $platform
        status: "success"
        installer_type: "shell"
        installer_file: $installer_file
        size: ($installer_content | str length)
    }
}

# Create Windows batch installer
export def create_windows_batch_installer [
    installer_config: record
    analysis_result: record
] {
    let version = $analysis_result.version_info.version
    let components = $analysis_result.components

    let installer_content = $"@echo off
REM Provisioning System Installer
REM Platform: Windows
REM Version: ($version)

setlocal EnableDelayedExpansion

REM Configuration
set \"INSTALL_DIR=C:\\Program Files\\Provisioning\"
set \"CONFIG_DIR=C:\\ProgramData\\Provisioning\"
set \"SERVICE_NAME=ProvisioningService\"

echo.
echo ========================================
echo Provisioning System Installer ($version)
echo ========================================
echo.

REM Check for administrator privileges
net session >nul 2>&1
if %errorLevel% neq 0 (
    echo ERROR: This script must be run as Administrator
    echo Right-click and select \"Run as administrator\"
    pause
    exit /b 1
)

echo Creating directories...
mkdir \"%INSTALL_DIR%\\bin\" 2>nul
mkdir \"%INSTALL_DIR%\\lib\" 2>nul
mkdir \"%CONFIG_DIR%\" 2>nul

echo Installing binaries...
if exist \"platform\\\" (
    xcopy platform\\* \"%INSTALL_DIR%\\bin\\\" /Y /Q
) else if exist \"core\\bin\\\" (
    xcopy core\\bin\\* \"%INSTALL_DIR%\\bin\\\" /Y /Q
) else (
    echo ERROR: No binaries found to install
    pause
    exit /b 1
)

echo Installing libraries...
if exist \"core\\\" (
    xcopy core\\* \"%INSTALL_DIR%\\lib\\\" /Y /Q /S
)

echo Installing configuration...
if exist \"config\\\" (
    xcopy config\\* \"%CONFIG_DIR%\\\" /Y /Q /S
)

echo Configuring environment...
REM Add installation directory to system PATH
for /f \"usebackq tokens=2,*\" %%A in (\`reg query HKCU\\Environment /v PATH\`) do set \"current_path=%%B\"
echo !current_path! | findstr /C:\"%INSTALL_DIR%\\bin\" >nul
if !errorLevel! neq 0 (
    setx PATH \"!current_path!;%INSTALL_DIR%\\bin\"
    echo Added %INSTALL_DIR%\\bin to PATH
)

REM Set environment variables
setx PROVISIONING_HOME \"%INSTALL_DIR%\"
setx PROVISIONING_CONFIG \"%CONFIG_DIR%\"

echo.
echo Installation completed successfully!
echo.
echo Next steps:
echo   1. Review configuration in %CONFIG_DIR%
echo   2. Run 'provisioning-orchestrator --help' to get started
echo   3. Check the documentation in %INSTALL_DIR%\\lib\\docs
echo.
echo NOTE: You may need to restart your command prompt for PATH changes to take effect.
echo.
pause
"

    let installer_file = ($installer_config.output_dir | path join "install-windows.bat")
    $installer_content | save $installer_file

    {
        platform: "windows"
        status: "success"
        installer_type: "batch"
        installer_file: $installer_file
        size: ($installer_content | str length)
    }
}

# Create package installers (deb, rpm, msi)
export def create_package_installers [
    installer_config: record
    analysis_result: record
] {
    log info "Creating package installers..."

    let start_time = (date now)

    # Package creation would involve:
    # 1. Creating package control files
    # 2. Building packages with appropriate tools
    # 3. Signing packages if requested

    log warning "Package installer creation not fully implemented"

    {
        status: "skipped"
        reason: "package installers not fully implemented"
        installers_created: 0
        duration: ((date now) - $start_time)
    }
}
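
# Illustrative sketch only (not wired into the pipeline): one way the deb step
# listed above could look, assuming `dpkg-deb` is available on the build host.
# The staging layout and control-file fields here are assumptions, not the
# project's final packaging spec.
def create_deb_package_sketch [installer_config: record, version: string] {
    let staging = ($installer_config.output_dir | path join "deb" "provisioning")
    mkdir ($staging | path join "DEBIAN")

    # Minimal control file; Architecture/Maintainer values are placeholders
    $"Package: provisioning
Version: ($version)
Section: admin
Priority: optional
Architecture: amd64
Maintainer: Provisioning Team <team@example.com>
Description: Provisioning System
" | save --force ($staging | path join "DEBIAN" "control")

    # Build the package and report the outcome via the do/complete pattern
    let build = (do { ^dpkg-deb --build $staging } | complete)
    if $build.exit_code == 0 { { status: "success" } } else { { status: "failed", reason: $build.stderr } }
}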

# Create GUI installers
export def create_gui_installers [
    installer_config: record
    analysis_result: record
] {
    log info "Creating GUI installers..."

    let start_time = (date now)

    # GUI installer creation would involve:
    # 1. Creating installer definition files
    # 2. Using platform-specific tools (NSIS, InstallShield, etc.)
    # 3. Including custom installation wizards

    log warning "GUI installer creation not fully implemented"

    {
        status: "skipped"
        reason: "GUI installers not fully implemented"
        installers_created: 0
        duration: ((date now) - $start_time)
    }
}
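
# Sketch of the NSIS route for step 2 above, assuming `makensis` is installed.
# The .nsi fields shown are illustrative defaults, not project-approved values.
def create_nsis_installer_sketch [installer_config: record, version: string] {
    let nsi_file = ($installer_config.output_dir | path join "provisioning.nsi")
    $"Name \"Provisioning System\"
OutFile \"provisioning-setup-($version).exe\"
InstallDir \"$PROGRAMFILES64\\Provisioning\"
Section
  SetOutPath $INSTDIR
  File /r platform\\*
SectionEnd
" | save --force $nsi_file

    let build = (do { ^makensis $nsi_file } | complete)
    if $build.exit_code == 0 { { status: "success" } } else { { status: "failed", reason: $build.stderr } }
}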

# Create uninstall scripts
export def create_uninstall_scripts [
    installer_config: record
    analysis_result: record
] {
    log info "Creating uninstall scripts..."

    let start_time = (date now)

    let result = (do {
        mut created_uninstallers = []

        for platform in $installer_config.platforms {
            match $platform {
                "linux" | "macos" => {
                    let uninstaller_result = create_unix_uninstaller $platform $installer_config $analysis_result

                    if $uninstaller_result.status == "success" {
                        $created_uninstallers = ($created_uninstallers | append $uninstaller_result)
                    }
                }
                "windows" => {
                    let uninstaller_result = create_windows_uninstaller $installer_config $analysis_result

                    if $uninstaller_result.status == "success" {
                        $created_uninstallers = ($created_uninstallers | append $uninstaller_result)
                    }
                }
                _ => {
                    log warning $"Unsupported platform for uninstaller: ($platform)"
                }
            }
        }

        {
            status: "success"
            uninstallers_created: ($created_uninstallers | length)
            created_uninstallers: $created_uninstallers
            duration: ((date now) - $start_time)
        }
    } | complete)

    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

# Create Unix uninstaller
export def create_unix_uninstaller [
    platform: string
    installer_config: record
    analysis_result: record
] {
    let version = $analysis_result.version_info.version
    let requirements = $analysis_result.requirements

    let uninstaller_content = $"#!/bin/bash
# Provisioning System Uninstaller
# Platform: ($platform)
# Version: ($version)

set -e

# Colors for output
RED='\\033[0;31m'
GREEN='\\033[0;32m'
YELLOW='\\033[1;33m'
NC='\\033[0m'

log_info() {
    echo -e \"${GREEN}[INFO]${NC} $1\"
}

log_warn() {
    echo -e \"${YELLOW}[WARN]${NC} $1\"
}

log_error() {
    echo -e \"${RED}[ERROR]${NC} $1\"
}

check_root() {
    if [[ $EUID -ne 0 ]]; then
        log_error \"This script must be run as root (use sudo)\"
        exit 1
    fi
}

confirm_uninstall() {
    echo \"This will completely remove the Provisioning System from your system.\"
    echo \"This includes:\"
    echo \"  - All binaries and libraries\"
    echo \"  - Configuration files\"
    echo \"  - Service definitions\"
    echo \"  - Log files and data\"
    echo \"\"
    read -p \"Are you sure you want to continue? (y/N): \" -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        log_info \"Uninstallation cancelled\"
        exit 0
    fi
}

stop_services() {
    log_info \"Stopping services...\"
    if command -v systemctl >/dev/null 2>&1; then
        systemctl stop provisioning* 2>/dev/null || true
        systemctl disable provisioning* 2>/dev/null || true
    fi
}

remove_binaries() {
    log_info \"Removing binaries...\"
    rm -f /usr/local/bin/provisioning*
}

remove_libraries() {
    log_info \"Removing libraries...\"
    rm -rf /usr/local/lib/provisioning
}

remove_configuration() {
    log_info \"Removing configuration...\"
    rm -rf /etc/provisioning
}

remove_data() {
    if [[ \"$1\" == \"--keep-data\" ]]; then
        log_info \"Keeping data directory (--keep-data specified)\"
    else
        log_info \"Removing data and logs...\"
        rm -rf /var/lib/provisioning
        rm -rf /var/log/provisioning
    fi
}

remove_services() {
    log_info \"Removing service definitions...\"
    rm -f /etc/systemd/system/provisioning*.service
    if command -v systemctl >/dev/null 2>&1; then
        systemctl daemon-reload
    fi
}

remove_environment() {
    log_info \"Removing environment configuration...\"
    rm -f /etc/environment.d/provisioning.conf
    rm -f /etc/profile.d/provisioning.sh
    rm -f /etc/logrotate.d/provisioning
}

remove_user() {
    if [[ \"$1\" == \"--keep-user\" ]]; then
        log_info \"Keeping system user (--keep-user specified)\"
    else
        log_info \"Removing system user...\"
        userdel provisioning 2>/dev/null || true
    fi
}

main() {
    log_info \"Starting Provisioning System uninstallation...\"

    check_root
    confirm_uninstall

    stop_services
    remove_services
    remove_binaries
    remove_libraries
    remove_configuration
    remove_data \"$1\"
    remove_environment
    remove_user \"$1\"

    log_info \"Uninstallation completed successfully!\"
    log_info \"\"
    log_info \"Thank you for using the Provisioning System.\"
}

case \"$1\" in
    --help|-h)
        echo \"Provisioning System Uninstaller ($version)\"
        echo \"\"
        echo \"Usage: $0 [OPTIONS]\"
        echo \"\"
        echo \"Options:\"
        echo \"  --keep-data   Keep data directory and logs\"
        echo \"  --keep-user   Keep system user account\"
        echo \"  --help, -h    Show this help message\"
        exit 0
        ;;
    *)
        main \"$@\"
        ;;
esac
"

    let uninstaller_file = ($installer_config.output_dir | path join $"uninstall-($platform).sh")
    $uninstaller_content | save $uninstaller_file
    chmod +x $uninstaller_file

    {
        platform: $platform
        status: "success"
        uninstaller_type: "shell"
        uninstaller_file: $uninstaller_file
        size: ($uninstaller_content | str length)
    }
}

# Create Windows uninstaller
export def create_windows_uninstaller [
    installer_config: record
    analysis_result: record
] {
    let version = $analysis_result.version_info.version

    let uninstaller_content = $"@echo off
REM Provisioning System Uninstaller
REM Platform: Windows
REM Version: ($version)

setlocal EnableDelayedExpansion

echo.
echo ==========================================
echo Provisioning System Uninstaller ($version)
echo ==========================================
echo.

REM Check for administrator privileges
net session >nul 2>&1
if %errorLevel% neq 0 (
    echo ERROR: This script must be run as Administrator
    pause
    exit /b 1
)

echo This will completely remove the Provisioning System from your system.
echo.
set /p \"confirm=Are you sure you want to continue? (y/N): \"
if /I not \"!confirm!\"==\"y\" (
    echo Uninstallation cancelled
    exit /b 0
)

echo.
echo Stopping services...
REM Stop any running services here

echo Removing installation directory...
if exist \"C:\\Program Files\\Provisioning\" (
    rmdir /S /Q \"C:\\Program Files\\Provisioning\"
)

echo Removing configuration...
if exist \"C:\\ProgramData\\Provisioning\" (
    rmdir /S /Q \"C:\\ProgramData\\Provisioning\"
)

echo Removing environment variables...
reg delete HKCU\\Environment /v PROVISIONING_HOME /f 2>nul
reg delete HKCU\\Environment /v PROVISIONING_CONFIG /f 2>nul

echo Removing from PATH...
for /f \"usebackq tokens=2,*\" %%A in (\`reg query HKCU\\Environment /v PATH 2^>nul\`) do (
    set \"current_path=%%B\"
    set \"new_path=!current_path:C:\\Program Files\\Provisioning\\bin;=!\"
    set \"new_path=!new_path:;C:\\Program Files\\Provisioning\\bin=!\"
    if not \"!new_path!\"==\"!current_path!\" (
        setx PATH \"!new_path!\"
        echo Removed from PATH
    )
)

echo.
echo Uninstallation completed successfully!
echo Thank you for using the Provisioning System.
echo.
pause
"

    let uninstaller_file = ($installer_config.output_dir | path join "uninstall-windows.bat")
    $uninstaller_content | save $uninstaller_file

    {
        platform: "windows"
        status: "success"
        uninstaller_type: "batch"
        uninstaller_file: $uninstaller_file
        size: ($uninstaller_content | str length)
    }
}
163
tools/distribution/installer_metadata.nu
Normal file
@ -0,0 +1,163 @@
# Module: Installer Metadata Analysis
# Purpose: Analyzes distribution structure, components, versions, and installation requirements
# Dependencies: None (standalone utilities)

# Extract distribution archive for analysis
export def extract_distribution_for_analysis [dist_path: string, installer_config: record] {
    let temp_dir = ($installer_config.output_dir | path join "tmp" "analysis")
    mkdir $temp_dir

    let dist_name = ($dist_path | path basename)

    if ($dist_name | str ends-with ".tar.gz") or ($dist_name | str ends-with ".tgz") {
        cd $temp_dir
        tar -xzf $dist_path
    } else if ($dist_name | str ends-with ".zip") {
        cd $temp_dir
        unzip $dist_path
    } else {
        return { status: "error", reason: $"Unsupported archive format: ($dist_name)" }
    }

    # Find the extracted directory (usually a single top-level directory)
    let extracted_contents = (ls $temp_dir)
    if ($extracted_contents | length) == 1 and (($extracted_contents | get 0.type) == "dir") {
        return ($extracted_contents | get 0.name)
    } else {
        return $temp_dir
    }
}
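
# Usage sketch (illustrative paths, not part of the module):
# let root = (extract_distribution_for_analysis "/tmp/provisioning-1.2.3.tar.gz" { output_dir: "/tmp/out" })
# The return value is either the extracted top-level directory or the temp dir
# itself; callers should also handle the error record returned for unknown formats.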

# Analyze distribution components
export def analyze_distribution_components [analysis_path: string] {
    let components = {
        has_platform: (($analysis_path | path join "platform") | path exists)
        has_core: (($analysis_path | path join "core") | path exists)
        has_config: (($analysis_path | path join "config") | path exists)
        has_docs: (($analysis_path | path join "docs") | path exists)
        has_services: (($analysis_path | path join "services") | path exists)
    }

    # Find executables
    let executables = if $components.has_platform {
        ls ($analysis_path | path join "platform") | where type == file | get name
    } else if $components.has_core and (($analysis_path | path join "core" "bin") | path exists) {
        ls ($analysis_path | path join "core" "bin") | where type == file | get name
    } else {
        []
    }

    # Find configuration files
    let config_files = if $components.has_config {
        ls -la ($analysis_path | path join "config")
        | where name =~ "\\.(toml|yaml|json)$"
        | get name
    } else {
        []
    }

    # Find service definitions
    let service_files = if $components.has_services {
        ls -la ($analysis_path | path join "services")
        | where name =~ "\\.(service|yml|yaml)$"
        | get name
    } else {
        []
    }

    let components = ($components
        | insert executables $executables
        | insert config_files $config_files
        | insert service_files $service_files
        | insert total_size (get_directory_size $analysis_path))

    return $components
}

# Detect distribution version
export def detect_distribution_version [analysis_path: string] {
    # Try to find version from metadata
    let metadata_files = [
        ($analysis_path | path join "core-metadata.json")
        ($analysis_path | path join "platform-metadata.json")
        ($analysis_path | path join "metadata.json")
        ($analysis_path | path join "VERSION")
    ]

    for metadata_file in $metadata_files {
        if ($metadata_file | path exists) {
            let version = if ($metadata_file | str ends-with ".json") {
                let data = (open $metadata_file)
                $data.version? | default "unknown"
            } else {
                open $metadata_file --raw | str trim
            }

            if $version != "unknown" {
                return { version: $version, source: ($metadata_file | path basename) }
            }
        }
    }

    # Fallback: try to extract from directory name
    let dir_name = ($analysis_path | path basename)
    let version_match = ($dir_name | parse --regex ".*-([0-9]+\\.[0-9]+\\.[0-9]+)")

    if ($version_match | length) > 0 {
        return { version: ($version_match | get 0.capture0), source: "directory_name" }
    }

    return { version: "unknown", source: "none" }
}
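
# Example of the directory-name fallback (hypothetical input):
# "provisioning-1.4.2" | parse --regex ".*-([0-9]+\\.[0-9]+\\.[0-9]+)"
# yields [[capture0]; ["1.4.2"]], so the function returns
# { version: "1.4.2", source: "directory_name" }.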

# Analyze installation requirements
export def analyze_installation_requirements [analysis_path: string, components: record] {
    mut requirements = {
        system_user: "provisioning"
        system_group: "provisioning"
        install_dirs: []
        config_dirs: []
        data_dirs: []
        log_dirs: []
        service_dependencies: []
        port_requirements: []
    }

    # Standard installation directories
    if $components.has_platform or ($components.executables | length) > 0 {
        $requirements.install_dirs = ($requirements.install_dirs | append "/usr/local/bin")
    }

    if $components.has_core {
        $requirements.install_dirs = ($requirements.install_dirs | append "/usr/local/lib/provisioning")
    }

    if $components.has_config {
        $requirements.config_dirs = ($requirements.config_dirs | append "/etc/provisioning")
    }

    # Data and log directories
    $requirements.data_dirs = ($requirements.data_dirs | append "/var/lib/provisioning")
    $requirements.log_dirs = ($requirements.log_dirs | append "/var/log/provisioning")

    # Service dependencies (would analyze service files to determine)
    if $components.has_services {
        $requirements.service_dependencies = ($requirements.service_dependencies | append "network.target")
    }

    # Port requirements (would analyze configuration to determine)
    $requirements.port_requirements = ($requirements.port_requirements | append { port: 8080, description: "Main API" })

    return $requirements
}

# Get directory size helper
# Note: `stat` is not a Nushell builtin; file sizes come from `ls`, and the
# empty-directory case is handled explicitly since `math sum` rejects empty input.
def get_directory_size [dir: string] {
    if not ($dir | path exists) {
        return 0
    }

    let sizes = (glob $"($dir)/**/*"
        | where {|it| ($it | path type) == "file" }
        | each {|file| ls $file | get 0.size })

    if ($sizes | is-empty) { 0 } else { $sizes | math sum }
}
47
tools/distribution/installer_utilities.nu
Normal file
@ -0,0 +1,47 @@
# Module: Installer Utilities
# Purpose: Helper functions for installer creation (metrics, size calculation, etc.)
# Dependencies: None (standalone utilities)

# Count created installers from results
# Optional cell-path access (`get 0?...`) with `default 0` avoids both the
# error `first` raises on an empty list and `//`, which is floor division in
# Nushell rather than a null-coalescing operator.
export def count_created_installers [creation_results: list] {
    let shell_count = (
        $creation_results
        | where phase == "shell"
        | get 0?.result?.installers_created?
        | default 0
    )

    let package_count = (
        $creation_results
        | where phase == "package"
        | get 0?.result?.installers_created?
        | default 0
    )

    let gui_count = (
        $creation_results
        | where phase == "gui"
        | get 0?.result?.installers_created?
        | default 0
    )

    let uninstall_count = (
        $creation_results
        | where phase == "uninstall"
        | get 0?.result?.uninstallers_created?
        | default 0
    )

    return ($shell_count + $package_count + $gui_count + $uninstall_count)
}
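
# Usage sketch with a hypothetical results list:
# count_created_installers [
#     { phase: "shell", result: { installers_created: 2 } }
#     { phase: "uninstall", result: { uninstallers_created: 2 } }
# ]
# => 4 (phases that are absent or lack a count contribute 0)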

# Get directory size helper
# As above: sizes come from `ls` (no `stat` builtin), with an empty-list guard.
export def get_directory_size [dir: string] {
    if not ($dir | path exists) {
        return 0
    }

    let sizes = (glob $"($dir)/**/*"
        | where {|it| ($it | path type) == "file" }
        | each {|file| ls $file | get 0.size })

    if ($sizes | is-empty) { 0 } else { $sizes | math sum }
}
29
tools/distribution/installer_validator.nu
Normal file
@ -0,0 +1,29 @@
# Module: Installer Validation
# Purpose: Validates generated installers for correctness and completeness
# Dependencies: std log

use std log

# Validate generated installers
export def validate_installers [
    installer_config: record
    creation_results: list
] {
    log info "Validating installers..."

    let start_time = (date now)

    # Installer validation would involve:
    # 1. Syntax checking of shell scripts
    # 2. Testing installation in clean environments
    # 3. Verifying uninstaller functionality

    log warning "Installer validation not fully implemented"

    {
        status: "skipped"
        reason: "installer validation not fully implemented"
        validated_installers: 0
        duration: ((date now) - $start_time)
    }
}
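
# Sketch for step 1 above: syntax-check a generated shell installer with
# `bash -n` (parse only, no execution), assuming bash exists on the host.
def check_shell_syntax_sketch [installer_file: string] {
    let check = (do { ^bash -n $installer_file } | complete)
    if $check.exit_code == 0 {
        { file: $installer_file, syntax: "ok" }
    } else {
        { file: $installer_file, syntax: "error", detail: $check.stderr }
    }
}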
135
tools/distribution/platform_compiler.nu
Normal file
@ -0,0 +1,135 @@
#!/usr/bin/env nu

# Platform compilation module for distribution system
#
# Handles all platform-specific compilation logic including:
# - Rust target triple detection
# - Single platform compilation
# - Parallel and sequential multi-platform compilation
# - Compilation result validation

use std log

# Main compilation orchestrator - delegates to parallel or sequential based on config
export def compile-platforms [config: record]: nothing -> list {
    if $config.parallel_builds {
        compile-platforms-parallel $config
    } else {
        compile-platforms-sequential $config
    }
}

# Compile platforms in parallel
# Note: Current implementation uses sequential compilation
# Future: Can be enhanced with background processes for true parallelization
def compile-platforms-parallel [config: record]: nothing -> list {
    log info "Compiling platforms in parallel mode..."
    compile-platforms-sequential $config
}

# Compile platforms sequentially
# Iterates through each platform and compiles for that target
def compile-platforms-sequential [config: record]: nothing -> list {
    $config.platforms | each {|platform|
        compile-platform $platform $config
    }
}

# Compile platform components for a single platform
# Invokes cargo compilation for the target triple and collects results
export def compile-platform [platform: string, config: record]: nothing -> record {
    log info $"Compiling platform: ($platform)"

    let start_time = (date now)
    let target_triple = get-target-triple $platform

    try {
        # Invoke cargo compilation for the target
        let compile_result = compile-with-cargo $target_triple $config

        # Validate compilation results
        let validation = validate-compilation $compile_result

        {
            platform: $platform
            target: $target_triple
            status: (if $compile_result.failed > 0 { "failed" } else { "success" })
            compiled_components: $compile_result.successful
            total_components: $compile_result.total
            compile_result: $compile_result
            validation: $validation
            duration: ((date now) - $start_time)
        }

    } catch {|err|
        {
            platform: $platform
            target: $target_triple
            status: "failed"
            reason: $err.msg
            duration: ((date now) - $start_time)
        }
    }
}

# Get Rust target triple for a platform name
# Maps common platform names to official Rust target triples
export def get-target-triple [platform: string]: nothing -> string {
    match $platform {
        "linux" => "x86_64-unknown-linux-gnu"
        "macos" => "x86_64-apple-darwin"
        "windows" => "x86_64-pc-windows-gnu"
        _ => $platform # Assume it's already a target triple
    }
}
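
# Example mapping (and the pass-through for an explicit triple):
# get-target-triple "macos"                => "x86_64-apple-darwin"
# get-target-triple "aarch64-apple-darwin" => "aarch64-apple-darwin"
# Cross-compiling still requires the target to be installed beforehand, e.g.
# `rustup target add x86_64-pc-windows-gnu`.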

# Compile with cargo for a target triple
# Invokes the build compilation tool with appropriate flags
def compile-with-cargo [target: string, config: record]: nothing -> record {
    let start_time = (date now)

    try {
        # Keep the invocation on a single line: inside parentheses each newline
        # starts a new statement, so splitting the flags across lines would not parse.
        let compile_result = (nu ($config.repo_root | path join "src" "tools" "build" "compile-platform.nu") --target $target --release --output-dir ($config.output_dir | path join "platform") --verbose:$config.verbose --clean:$config.build_clean)

        return $compile_result

    } catch {|err|
        {
            failed: 1
            successful: 0
            total: 0
            error: $err.msg
            duration: ((date now) - $start_time)
        }
    }
}

# Validate compilation result structure
# Ensures the result contains expected fields and that compilation succeeded
def validate-compilation [result: record]: nothing -> record {
    # `has-key` is not a Nushell builtin; test membership in the column list.
    let cols = ($result | columns)
    let has_required_fields = (
        ("successful" in $cols) and
        ("total" in $cols) and
        ("failed" in $cols)
    )

    # `//` is floor division in Nushell, not null-coalescing; use `default`.
    let is_valid = (
        $has_required_fields and
        (($result.successful? | default 0) >= 0) and
        (($result.total? | default 0) >= 0) and
        (($result.failed? | default 0) >= 0)
    )

    {
        is_valid: $is_valid
        has_required_fields: $has_required_fields
        compilation_successful: (($result.failed? | default 1) == 0)
        components_count: ($result.successful? | default 0)
        total_expected: ($result.total? | default 0)
    }
}
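
# Usage sketch with a hypothetical cargo result:
# validate-compilation { successful: 3, total: 3, failed: 0 }
# => { is_valid: true, has_required_fields: true, compilation_successful: true,
#      components_count: 3, total_expected: 3 }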
@ -46,13 +46,13 @@ def main [
    # Ensure output directory exists
    mkdir ($core_config.output_dir)

    let preparation_results = []
    let result = (do {
        mut preparation_results = []

    try {
        # Phase 1: Discover and validate core components
        let discovery_result = discover_core_components $core_config

        let preparation_results = ($preparation_results | append { phase: "discovery", result: $discovery_result })
        $preparation_results = ($preparation_results | append { phase: "discovery", result: $discovery_result })

        if $discovery_result.status != "success" {
            log error $"Core component discovery failed: ($discovery_result.reason)"
@ -62,17 +62,17 @@ def main [
        # Phase 2: Prepare core libraries
        let libraries_result = prepare_core_libraries $core_config $discovery_result

        let preparation_results = ($preparation_results | append { phase: "libraries", result: $libraries_result })
        $preparation_results = ($preparation_results | append { phase: "libraries", result: $libraries_result })

        # Phase 3: Prepare CLI components
        let cli_result = prepare_cli_components $core_config $discovery_result

        let preparation_results = ($preparation_results | append { phase: "cli", result: $cli_result })
        $preparation_results = ($preparation_results | append { phase: "cli", result: $cli_result })

        # Phase 4: Prepare configuration system
        let config_result = prepare_configuration_system $core_config $discovery_result

        let preparation_results = ($preparation_results | append { phase: "configuration", result: $config_result })
        $preparation_results = ($preparation_results | append { phase: "configuration", result: $config_result })

        # Phase 5: Bundle plugins and extensions
        let plugins_result = if $core_config.bundle_plugins {
@ -81,7 +81,7 @@ def main [
            { status: "skipped", reason: "plugin bundling disabled" }
        }

        let preparation_results = ($preparation_results | append { phase: "plugins", result: $plugins_result })
        $preparation_results = ($preparation_results | append { phase: "plugins", result: $plugins_result })

        # Phase 6: Generate indexes and metadata
        let index_result = if $core_config.generate_index {
@ -90,12 +90,12 @@ def main [
            { status: "skipped", reason: "index generation disabled" }
        }

        let preparation_results = ($preparation_results | append { phase: "indexes", result: $index_result })
        $preparation_results = ($preparation_results | append { phase: "indexes", result: $index_result })

        # Phase 7: Create distribution metadata
        let metadata_result = create_core_metadata $core_config $preparation_results

        let preparation_results = ($preparation_results | append { phase: "metadata", result: $metadata_result })
        $preparation_results = ($preparation_results | append { phase: "metadata", result: $metadata_result })

        let summary = {
            source_root: $core_config.source_root
@ -110,22 +110,24 @@ def main [

        log info $"Core distribution preparation completed successfully - ($summary.core_files_prepared) files prepared"

        return $summary
        $summary
    } | complete)

    } catch {|err|
        log error $"Core distribution preparation failed: ($err.msg)"
    if $result.exit_code != 0 {
        log error $"Core distribution preparation failed: ($result.stderr)"
        exit 1
    } else {
        return $result.stdout
    }
}
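
# Minimal sketch of the do/complete error-handling pattern applied above,
# with a hypothetical external command (`complete` returns exit_code/stdout/stderr):
# let result = (do { ^some-tool --flag } | complete)
# if $result.exit_code != 0 { log error $result.stderr } else { $result.stdout }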

# Discover core components in the source tree
def discover_core_components [core_config: record]
{
def discover_core_components [core_config: record] {
    log info "Discovering core components..."

    let start_time = (date now)

    try {
    let result = (do {
        # Define core component locations
        let core_locations = {
            provisioning_cli: ($core_config.source_root | path join "provisioning" "core" "nulib" "provisioning")
@ -137,12 +139,12 @@ def discover_core_components [core_config: record]
        }

        # Discover Nushell files
        let nu_files = []
        mut nu_files = []
        for location_name in ($core_locations | columns) {
            let location_path = ($core_locations | get $location_name)
            if ($location_path | path exists) {
                let found_files = (find $location_path -name "*.nu" -type f)
                let nu_files = ($nu_files | append ($found_files | each {|file|
                let found_files = (glob ($location_path | path join "**" "*.nu"))
                $nu_files = ($nu_files | append ($found_files | each {|file|
                    {
                        path: $file
                        component: $location_name
@ -154,18 +156,18 @@ def discover_core_components [core_config: record]
        }

        # Discover configuration files
        let config_files = (find ($core_config.source_root | path join "provisioning") -name "*.toml" -type f)
        let config_files = (glob (($core_config.source_root | path join "provisioning") | path join "**" "*.toml"))

        # Discover template files
        let template_files = (find ($core_config.source_root | path join "provisioning") -name "*.j2" -o -name "*.template" -type f)
        let template_files = (glob (($core_config.source_root | path join "provisioning") | path join "**" "*.j2")) | append (glob (($core_config.source_root | path join "provisioning") | path join "**" "*.template"))

        # Validate critical components exist
        let missing_components = []
        mut missing_components = []
        let critical_components = ["provisioning_cli", "core_libraries"]
        for component in $critical_components {
            let component_path = ($core_locations | get $component)
            if not ($component_path | path exists) {
                let missing_components = ($missing_components | append $component)
                $missing_components = ($missing_components | append $component)
            }
        }

@ -188,13 +190,16 @@ def discover_core_components [core_config: record]
            total_template_files: ($template_files | length)
            duration: ((date now) - $start_time)
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

@ -202,17 +207,16 @@ def discover_core_components [core_config: record]
def prepare_core_libraries [
    core_config: record
    discovery_result: record
]
{
] {
    log info "Preparing core libraries..."

    let start_time = (date now)
    let lib_output_dir = ($core_config.output_dir | path join "lib")
    mkdir $lib_output_dir

    try {
        let prepared_files = []
        let validation_errors = []
    let result = (do {
        mut prepared_files = []
        mut validation_errors = []

        # Process core library files
        let core_lib_files = ($discovery_result.nu_files | where component == "core_libraries")
@ -264,13 +268,16 @@ def prepare_core_libraries [
            lib_output_dir: $lib_output_dir
            duration: ((date now) - $start_time)
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

@ -278,15 +285,14 @@ def prepare_core_libraries [
def prepare_cli_components [
    core_config: record
    discovery_result: record
]
{
] {
    log info "Preparing CLI components..."

    let start_time = (date now)
    let cli_output_dir = ($core_config.output_dir | path join "bin")
    mkdir $cli_output_dir

    try {
    let result = (do {
        # Process main provisioning CLI
        let cli_location = ($discovery_result.core_locations.provisioning_cli)

@ -307,7 +313,7 @@ def prepare_cli_components [
        {
            status: "success"
            cli_prepared: $target_cli
            wrappers_created: 3 # Unix, Windows, Development
            wrappers_created: 3
            files_processed: 4
            duration: ((date now) - $start_time)
        }
@ -318,65 +324,68 @@ def prepare_cli_components [
            duration: ((date now) - $start_time)
        }
    }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

# Create CLI wrapper scripts
def create_cli_wrappers [cli_output_dir: string, core_config: record] {
    # Unix shell wrapper
    let unix_wrapper = $"#!/bin/bash
    let unix_wrapper = '#!/bin/bash
# Provisioning System CLI Wrapper
# This wrapper sets up the environment and executes the main CLI

# Set environment variables
export PROVISIONING_HOME=\"$(dirname \"$(readlink -f \"$0\")\")/../\"
export PROVISIONING_LIB=\"$PROVISIONING_HOME/lib\"
export PROVISIONING_CONFIG=\"$PROVISIONING_HOME/../config\"
export PROVISIONING_HOME="$(dirname "$(readlink -f "$0")")/../"
export PROVISIONING_LIB="$PROVISIONING_HOME/lib"
export PROVISIONING_CONFIG="$PROVISIONING_HOME/../config"

# Execute the main CLI with proper library path
exec nu \"$PROVISIONING_HOME/bin/provisioning\" \"$@\"
"
exec nu "$PROVISIONING_HOME/bin/provisioning" "$@"
'

    $unix_wrapper | save ($cli_output_dir | path join "provisioning.sh")
    chmod +x ($cli_output_dir | path join "provisioning.sh")

    # Windows batch wrapper
    let windows_wrapper = $"@echo off
    let windows_wrapper = '@echo off
REM Provisioning System CLI Wrapper
REM This wrapper sets up the environment and executes the main CLI

REM Set environment variables
set \"PROVISIONING_HOME=%~dp0..\\\"
set \"PROVISIONING_LIB=%PROVISIONING_HOME%lib\"
set \"PROVISIONING_CONFIG=%PROVISIONING_HOME%..\\config\"
set "PROVISIONING_HOME=%~dp0..\"
set "PROVISIONING_LIB=%PROVISIONING_HOME%lib"
set "PROVISIONING_CONFIG=%PROVISIONING_HOME%..\\config"

REM Execute the main CLI
nu \"%PROVISIONING_HOME%bin\\provisioning\" %*
"
nu "%PROVISIONING_HOME%bin\\provisioning" %*
'

    $windows_wrapper | save ($cli_output_dir | path join "provisioning.bat")

    # Development wrapper (preserves source paths)
    let dev_wrapper = $"#!/usr/bin/env nu
    let dev_wrapper = '#!/usr/bin/env nu
# Provisioning Development CLI Wrapper
# This wrapper is used during development with source paths

# Set development paths
$env.PROVISIONING_HOME = ($env.PWD | path dirname)
$env.PROVISIONING_LIB = ($env.PROVISIONING_HOME | path join \"lib\")
$env.PROVISIONING_CONFIG = ($env.PROVISIONING_HOME | path join \"../config\")
$env.PROVISIONING_LIB = ($env.PROVISIONING_HOME | path join "lib")
$env.PROVISIONING_CONFIG = ($env.PROVISIONING_HOME | path join "../config")
$env.PROVISIONING_DEV = true

# Execute the main CLI
source ($env.PROVISIONING_HOME | path join \"bin\" \"provisioning\")
"
source ($env.PROVISIONING_HOME | path join "bin" "provisioning")
'

    $dev_wrapper | save ($cli_output_dir | path join "provisioning-dev.nu")
    chmod +x ($cli_output_dir | path join "provisioning-dev.nu")
@ -386,16 +395,15 @@ source ($env.PROVISIONING_HOME | path join \"bin\" \"provisioning\")
def prepare_configuration_system [
    core_config: record
    discovery_result: record
]
{
] {
    log info "Preparing configuration system..."

    let start_time = (date now)
    let config_output_dir = ($core_config.output_dir | path join "config")
    mkdir $config_output_dir

    try {
        let processed_configs = []
    let result = (do {
        mut processed_configs = []

        # Process configuration files
        for config_file in $discovery_result.config_files {
@ -417,17 +425,20 @@ def prepare_configuration_system [
            status: "success"
            files_processed: ($processed_configs | length)
            config_files: $processed_configs
            templates_created: 3 # user, dev, prod
            templates_created: 3
            config_output_dir: $config_output_dir
            duration: ((date now) - $start_time)
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

@ -436,15 +447,14 @@ def process_nushell_file [
    file_info: record
    core_config: record
    output_dir: string
]
{
    let relative_path = ($file_info.relative_path | str trim-left "/")
] {
    let relative_path = ($file_info.relative_path | str trim --left --char "/")
    let target_file = ($output_dir | path join ($file_info.path | path basename))

    # Ensure target directory exists
    mkdir ($target_file | path dirname)

    try {
    let result = (do {
        let content = (open $file_info.path --raw)

        # Validate syntax if requested
@ -485,21 +495,23 @@ def process_nushell_file [
            size: ($final_content | str length)
            component: $file_info.component
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            file: $file_info.path
            target: $target_file
            errors: [$err.msg]
            errors: [$result.stderr]
        }
    } else {
        $result.stdout
    }
}

# Validate Nushell syntax
def validate_nu_syntax [file_path: string, content: string]
{
    try {
def validate_nu_syntax [file_path: string, content: string] {
    let result = (do {
        # Use Nushell's built-in syntax checking
        nu --check $file_path

@ -507,19 +519,21 @@ def validate_nu_syntax [file_path: string, content: string]
            status: "success"
            file: $file_path
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            file: $file_path
            errors: [$err.msg]
            errors: [$result.stderr]
        }
    } else {
        $result.stdout
    }
}

# Minify Nushell script by removing comments and extra whitespace
def minify_nushell_script [content: string, core_config: record]
{
def minify_nushell_script [content: string, core_config: record] {
    if not $core_config.minify_scripts {
        return $content
    }
@ -544,11 +558,10 @@ def minify_nushell_script [content: string, core_config: record]
}

# Filter out development-specific code
def filter_development_code [content: string]
{
def filter_development_code [content: string] {
    let lines = ($content | lines)
    let filtered_lines = []
    let in_dev_block = false
    mut filtered_lines = []
    mut in_dev_block = false

    for line in $lines {
        # Check for development block markers
@ -579,9 +592,8 @@ def filter_development_code [content: string]
}

# Process configuration file
def process_config_file [source_file: string, target_file: string, core_config: record]
{
    try {
def process_config_file [source_file: string, target_file: string, core_config: record] {
    let result = (do {
        # Validate TOML syntax
        let config_data = (open $source_file)

@ -594,68 +606,71 @@ def process_config_file [source_file: string, target_file: string, core_config:
            target: $target_file
            validated: true
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            source: $source_file
            target: $target_file
            error: $err.msg
            error: $result.stderr
        }
    } else {
        $result.stdout
    }
}

# Create configuration templates
def create_config_templates [config_output_dir: string, core_config: record] {
    # User configuration template
    let user_template = $"# User Configuration Template
    let user_template = '# User Configuration Template
# Copy this file to config.user.toml and customize as needed

# User-specific paths and preferences
[paths]
# Override default paths if needed
# home = \"/custom/path\"
# home = "/custom/path"

[user]
name = \"Your Name\"
email = \"your.email@example.com\"
name = "Your Name"
email = "your.email@example.com"

# Development settings
[dev]
debug = false
verbose = false
"
'

    $user_template | save ($config_output_dir | path join "config.user.toml.template")

    # Development configuration template
    let dev_template = $"# Development Configuration Template
    let dev_template = '# Development Configuration Template
# Copy this file to config.dev.toml for development environment

[general]
environment = \"development\"
environment = "development"
debug = true
log_level = \"debug\"
log_level = "debug"

[paths]
cache_ttl = 60 # Short cache for development
"
'

    $dev_template | save ($config_output_dir | path join "config.dev.toml.template")

    # Production configuration template
    let prod_template = $"# Production Configuration Template
    let prod_template = '# Production Configuration Template
# Copy this file to config.prod.toml for production environment

[general]
environment = \"production\"
environment = "production"
debug = false
log_level = \"info\"
log_level = "info"

[security]
# Enable security features for production
strict_mode = true
"
'

    $prod_template | save ($config_output_dir | path join "config.prod.toml.template")
}
@ -664,18 +679,17 @@ strict_mode = true
def prepare_plugin_system [
    core_config: record
    discovery_result: record
]
{
] {
    log info "Preparing plugin system..."

    let start_time = (date now)
    let plugins_output_dir = ($core_config.output_dir | path join "plugins")
    mkdir $plugins_output_dir

    try {
    let result = (do {
        # Process extension files
        let extension_files = ($discovery_result.nu_files | where component == "extensions")
        let processed_extensions = []
        mut processed_extensions = []

        for file_info in $extension_files {
            let processing_result = process_nushell_file $file_info $core_config $plugins_output_dir
@ -696,19 +710,21 @@ def prepare_plugin_system [
            plugins_output_dir: $plugins_output_dir
            duration: ((date now) - $start_time)
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

# Create plugin registry
def create_plugin_registry [processed_extensions: list, core_config: record]
{
def create_plugin_registry [processed_extensions: list, core_config: record] {
    let plugin_registry = {
        version: "1.0.0"
        plugins: ($processed_extensions | each {|ext|
@ -732,13 +748,12 @@ def create_plugin_registry [processed_extensions: list, core_config: record]
def generate_core_indexes [
    core_config: record
    preparation_results: list
]
{
] {
    log info "Generating core indexes..."

    let start_time = (date now)

    try {
    let result = (do {
        # Generate library index
        let lib_result = ($preparation_results | where {|r| $r.phase == "libraries"} | get 0.result)
        let lib_index = generate_library_index $lib_result.prepared_files $core_config
@ -758,19 +773,21 @@ def generate_core_indexes [
            main_index: $main_index
            duration: ((date now) - $start_time)
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

# Generate library index
def generate_library_index [prepared_files: list, core_config: record]
{
def generate_library_index [prepared_files: list, core_config: record] {
    let index_content = $"# Core Library Index

This file provides an index of all core library modules included in this distribution.
@ -781,7 +798,7 @@ This file provides an index of all core library modules included in this distrib

    let modules = ($prepared_files | group-by component)

    let full_content = [$index_content]
    mut full_content = [$index_content]

    for component in ($modules | columns) {
        let component_files = ($modules | get $component)
@ -803,8 +820,7 @@ This file provides an index of all core library modules included in this distrib
}

# Generate CLI index
def generate_cli_index [cli_result: record, core_config: record]
{
def generate_cli_index [cli_result: record, core_config: record] {
    let cli_index = $"# CLI Components Index

## Main CLI
@ -829,8 +845,7 @@ Generated: (date now | format date '%Y-%m-%d %H:%M:%S')
}

# Generate main index
def generate_main_index [preparation_results: list, core_config: record]
{
def generate_main_index [preparation_results: list, core_config: record] {
    let successful_phases = ($preparation_results | where {|r| $r.result.status == "success"})
    let total_files = ($successful_phases | get result.files_processed | math sum)

@ -867,13 +882,12 @@ For more information, see the documentation in each subdirectory.
def create_core_metadata [
    core_config: record
    preparation_results: list
]
{
] {
    log info "Creating core metadata..."

    let start_time = (date now)

    try {
    let result = (do {
        let metadata = {
            name: "provisioning-core"
            version: (detect_version $core_config.source_root)
@ -907,43 +921,59 @@ def create_core_metadata [
            metadata: $metadata
            duration: ((date now) - $start_time)
        }
    } | complete)

    } catch {|err|
    if $result.exit_code != 0 {
        {
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
            duration: ((date now) - $start_time)
        }
    } else {
        $result.stdout
    }
}

# Detect version from git or other sources
def detect_version [repo_root: string]
{
def detect_version [repo_root: string] {
    cd $repo_root

    try {
    let result = (do {
        # `complete` already captures stderr, so no `2>/dev/null` is needed
        # (Nushell would pass it to git as a literal argument); nu 0.110's
        # `str replace` also needs --regex for pattern matching.
        let git_version = (git describe --tags --always --dirty | str trim)
        if $git_version != "" {
            return ($git_version | str replace --regex "^v" "")
        }

        return $"dev-(date now | format date '%Y%m%d')"
    } catch {
        return "unknown"
    } | complete)

    if $result.exit_code != 0 {
        "unknown"
    } else {
        $result.stdout
    }
}

# Get directory size helper
def get_directory_size [dir: string] -> int {
def get_directory_size [dir: string] {
    if not ($dir | path exists) {
        return 0
    }

    try {
        find $dir -type f | each {|file| ls $file | get 0.size } | math sum | if $in == null { 0 } else { $in }
    } catch {
    let result = (do {
        (glob ($dir | path join "**" "*")) | each {|file|
            if ($file | path exists) and (($file | path type) == file) {
                ls $file | get 0.size
            } else {
                0
            }
        } | math sum | if $in == null { 0 } else { $in }
    } | complete)

    if $result.exit_code != 0 {
        0
    } else {
        $result.stdout
    }
}

@ -956,6 +986,20 @@ def "main status" [] {
    let provisioning_cli = ($repo_root | path join "provisioning" "core" "nulib" "provisioning")
    let core_libraries = ($repo_root | path join "provisioning" "core" "nulib" "lib_provisioning")

    # Count Nu files
    let nu_count_result = (do {
        (glob ($repo_root | path join "**" "*.nu")) | length
    } | complete)

    let nu_files_found = if $nu_count_result.exit_code == 0 { $nu_count_result.stdout } else { 0 }

    # Count config files
    let config_count_result = (do {
        (glob (($repo_root | path join "provisioning") | path join "**" "*.toml")) | length
    } | complete)

    let config_files_found = if $config_count_result.exit_code == 0 { $config_count_result.stdout } else { 0 }

    {
        repository: $repo_root
        version: $version
@ -963,8 +1007,8 @@ def "main status" [] {
            provisioning_cli: ($provisioning_cli | path exists)
            core_libraries: ($core_libraries | path exists)
        }
        nu_files_found: (try { find $repo_root -name "*.nu" -type f | length } catch { 0 })
        config_files_found: (try { find ($repo_root | path join "provisioning") -name "*.toml" -type f | length } catch { 0 })
        nu_files_found: $nu_files_found
        config_files_found: $config_files_found
        ready_for_distribution: (($provisioning_cli | path exists) and ($core_libraries | path exists))
    }
}

@ -54,7 +54,7 @@ def main [

    let preparation_results = []

    try {
    let result = (do {
        # Phase 1: Discover platform components
        let discovery_result = discover_platform_components $platform_config

@ -117,22 +117,24 @@ def main [

        log info $"Platform distribution preparation completed - ($summary.binaries_built) binaries built for ($summary.target_platforms) platforms"

        return $summary
        $summary
    } | complete)

    } catch {|err|
        log error $"Platform distribution preparation failed: ($err.msg)"
    if $result.exit_code != 0 {
        log error $"Platform distribution preparation failed: ($result.stderr)"
        exit 1
    }

    $result.stdout
}

# Discover platform components in the source tree
|
||||
def discover_platform_components [platform_config: record]
|
||||
{
|
||||
def discover_platform_components [platform_config: record] {
|
||||
log info "Discovering platform components..."
|
||||
|
||||
let start_time = (date now)
|
||||
|
||||
try {
|
||||
let result = (do {
|
||||
# Define platform project locations
|
||||
let rust_projects = [
|
||||
{
|
||||
@ -166,8 +168,8 @@ def discover_platform_components [platform_config: record]
|
||||
]
|
||||
|
||||
# Validate project existence and Cargo.toml files
|
||||
let validated_projects = []
|
||||
let missing_projects = []
|
||||
mut validated_projects = []
|
||||
mut missing_projects = []
|
||||
|
||||
for project in $rust_projects {
|
||||
let cargo_file = ($project.path | path join "Cargo.toml")
|
||||
@ -203,69 +205,53 @@ def discover_platform_components [platform_config: record]
|
||||
build_environment: $build_env
|
||||
duration: ((date now) - $start_time)
|
||||
}
|
||||
} | complete)
|
||||
|
||||
} catch {|err|
|
||||
if $result.exit_code != 0 {
|
||||
{
|
||||
status: "failed"
|
||||
reason: $err.msg
|
||||
reason: $result.stderr
|
||||
duration: ((date now) - $start_time)
|
||||
}
|
||||
} else {
|
||||
$result.stdout
|
||||
}
|
||||
}
|
||||
|
||||
# Validate build environment
|
||||
def validate_build_environment [platform_config: record]
|
||||
{
|
||||
let build_tools = {}
|
||||
def validate_build_environment [platform_config: record] {
|
||||
mut build_tools = {}
|
||||
|
||||
# Check Rust toolchain
|
||||
let rust_version = try {
|
||||
rustc --version | str trim
|
||||
} catch {
|
||||
"not available"
|
||||
}
|
||||
let rust_result = (do { rustc --version | str trim } | complete)
|
||||
let rust_version = (if $rust_result.exit_code == 0 { $rust_result.stdout } else { "not available" })
|
||||
|
||||
let cargo_version = try {
|
||||
cargo --version | str trim
|
||||
} catch {
|
||||
"not available"
|
||||
}
|
||||
let cargo_result = (do { cargo --version | str trim } | complete)
|
||||
let cargo_version = (if $cargo_result.exit_code == 0 { $cargo_result.stdout } else { "not available" })
|
||||
|
||||
$build_tools = ($build_tools | insert rust $rust_version | insert cargo $cargo_version)
|
||||
|
||||
# Check for target platforms
|
||||
let target_availability = {}
|
||||
mut target_availability = {}
|
||||
|
||||
for platform in $platform_config.target_platforms {
|
||||
let target_triple = get_rust_target_triple $platform
|
||||
|
||||
let target_installed = try {
|
||||
rustup target list --installed | lines | any {|line| $line | str contains $target_triple}
|
||||
} catch {
|
||||
false
|
||||
}
|
||||
let target_result = (do { rustup target list --installed | lines | any {|line| $line | str contains $target_triple} } | complete)
|
||||
let target_installed = (if $target_result.exit_code == 0 { $target_result.stdout } else { false })
|
||||
|
||||
$target_availability = ($target_availability | insert $platform $target_installed)
|
||||
}
|
||||
|
||||
# Check optional tools
|
||||
let upx_available = try {
|
||||
upx --version | complete | get exit_code | $in == 0
|
||||
} catch {
|
||||
false
|
||||
}
|
||||
let upx_result = (do { upx --version } | complete)
|
||||
let upx_available = ($upx_result.exit_code == 0)
|
||||
|
||||
let strip_available = try {
|
||||
strip --version | complete | get exit_code | $in == 0
|
||||
} catch {
|
||||
false
|
||||
}
|
||||
let strip_result = (do { strip --version } | complete)
|
||||
let strip_available = ($strip_result.exit_code == 0)
|
||||
|
||||
let docker_available = try {
|
||||
docker --version | complete | get exit_code | $in == 0
|
||||
} catch {
|
||||
false
|
||||
}
|
||||
let docker_result = (do { docker --version } | complete)
|
||||
let docker_available = ($docker_result.exit_code == 0)
|
||||
|
||||
{
|
||||
build_tools: $build_tools
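
An alternative sketch for the availability probes above, checking the PATH with the builtin `which` instead of spawning each tool (`tool-available` is a hypothetical helper, not part of this commit):

def tool-available [cmd: string] {
    # which returns an empty table when the command is not found
    (which $cmd | length) > 0
}

let upx_available = (tool-available "upx")
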
@ -283,13 +269,12 @@ def validate_build_environment [platform_config: record]
def build_platform_binaries [
platform_config: record
discovery_result: record
]
{
] {
log info "Building platform binaries..."

let start_time = (date now)

try {
let result = (do {
# Build binaries for each platform
let build_results = if $platform_config.parallel_builds {
build_platforms_parallel $platform_config $discovery_result
@ -317,13 +302,16 @@ def build_platform_binaries [
build_results: $build_results
duration: ((date now) - $start_time)
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
} else {
$result.stdout
}
}

@ -331,8 +319,7 @@ def build_platform_binaries [
def build_platforms_parallel [
platform_config: record
discovery_result: record
]
{
] {
build_platforms_sequential $platform_config $discovery_result
}

@ -340,9 +327,8 @@ def build_platforms_parallel [
def build_platforms_sequential [
platform_config: record
discovery_result: record
]
{
let all_build_results = []
] {
mut all_build_results = []

for platform in $platform_config.target_platforms {
for project in $discovery_result.rust_projects {
@ -360,18 +346,17 @@ def build_single_binary [
platform: string
project: record
platform_config: record
]
{
] {
log info $"Building ($project.name) for ($platform)..."

let start_time = (date now)
let target_triple = get_rust_target_triple $platform

try {
let result = (do {
cd $project.path

# Build cargo command
let cargo_cmd = ["cargo", "build"]
mut cargo_cmd = ["cargo", "build"]

# Add build mode
if $platform_config.build_mode == "release" or $platform_config.build_mode == "optimized" {
@ -391,7 +376,7 @@ def build_single_binary [
log info $"Running: ($cargo_cmd | str join ' ')"
}

let build_result = (run-external --redirect-combine $cargo_cmd.0 ...$cargo_cmd.1.. | complete)
let build_result = (^$cargo_cmd.0 ...($cargo_cmd | skip 1) | complete)

if $build_result.exit_code == 0 {
# Determine binary path
@ -443,22 +428,24 @@ def build_single_binary [
duration: ((date now) - $start_time)
}
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
project: $project.name
platform: $platform
target: $target_triple
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
} else {
$result.stdout
}
}
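
For reference, a command list built with `mut` can be handed to an external by spreading everything after the program name; a minimal sketch of the invocation style used above (flags are illustrative, not the commit's exact command):

let cmd = ["cargo", "build", "--release"]
# Spread the remaining arguments and capture the outcome
let result = (run-external $cmd.0 ...($cmd | skip 1) | complete)
print $result.exit_code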

# Get Rust target triple for platform
def get_rust_target_triple [platform: string]
{
def get_rust_target_triple [platform: string] {
match $platform {
"linux-amd64" => "x86_64-unknown-linux-gnu"
"linux-arm64" => "aarch64-unknown-linux-gnu"
@ -474,15 +461,14 @@ def get_rust_target_triple [platform: string]
def post_process_binaries [
platform_config: record
build_result: record
]
{
] {
log info "Post-processing binaries..."

let start_time = (date now)

try {
let result = (do {
let successful_builds = ($build_result.build_results | where status == "success")
let processing_results = []
mut processing_results = []

for build in $successful_builds {
let processing_result = process_single_binary $build $platform_config
@ -500,13 +486,16 @@ def post_process_binaries [
processing_results: $processing_results
duration: ((date now) - $start_time)
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
} else {
$result.stdout
}
}

@ -514,39 +503,43 @@ def post_process_binaries [
def process_single_binary [
build: record
platform_config: record
]
{
let processing_steps = []
] {
let binary_path = $build.binary_path

try {
let result = (do {
let original_size = $build.binary_size
let mut_processing_steps = []

# Strip debug symbols if requested
if $platform_config.strip_symbols and not ($build.platform | str contains "windows") {
let processing_steps = if $platform_config.strip_symbols and not ($build.platform | str contains "windows") {
let strip_result = strip_binary_symbols $binary_path $platform_config

$processing_steps = ($processing_steps | append { step: "strip", result: $strip_result })
($mut_processing_steps | append { step: "strip", result: $strip_result })
} else {
$mut_processing_steps
}

# UPX compress if requested
let final_size = $original_size

if $platform_config.upx_compress {
let processing_steps = if $platform_config.upx_compress {
let upx_result = upx_compress_binary $binary_path $platform_config

$processing_steps = ($processing_steps | append { step: "upx", result: $upx_result })
let upx_steps = ($processing_steps | append { step: "upx", result: $upx_result })

if $upx_result.status == "success" {
$final_size = $upx_result.compressed_size
{ final_size: $upx_result.compressed_size, steps: $upx_steps }
} else {
{ final_size: $final_size, steps: $upx_steps }
}
} else {
{ final_size: $final_size, steps: $processing_steps }
}

# Sign binary if requested
if $platform_config.sign_binaries {
let processing_steps = if $platform_config.sign_binaries {
let sign_result = sign_binary $binary_path $platform_config

$processing_steps = ($processing_steps | append { step: "sign", result: $sign_result })
($processing_steps.steps | append { step: "sign", result: $sign_result })
} else {
$processing_steps.steps
}

# Update final size
@ -563,23 +556,25 @@ def process_single_binary [
compression_ratio: $compression_ratio
processing_steps: $processing_steps
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
project: $build.project
platform: $build.platform
binary_path: $binary_path
status: "failed"
reason: $err.msg
processing_steps: $processing_steps
reason: $result.stderr
processing_steps: []
}
} else {
$result.stdout
}
}

# Strip debug symbols from binary
def strip_binary_symbols [binary_path: string, platform_config: record]
{
try {
def strip_binary_symbols [binary_path: string, platform_config: record] {
let result = (do {
if $platform_config.verbose {
log info $"Stripping symbols from: ($binary_path)"
}
@ -591,16 +586,18 @@ def strip_binary_symbols [binary_path: string, platform_config: record]
} else {
{ status: "failed", reason: $strip_result.stderr }
}
} | complete)

} catch {|err|
{ status: "failed", reason: $err.msg }
if $result.exit_code != 0 {
{ status: "failed", reason: $result.stderr }
} else {
$result.stdout
}
}

# Compress binary with UPX
def upx_compress_binary [binary_path: string, platform_config: record]
{
try {
def upx_compress_binary [binary_path: string, platform_config: record] {
let result = (do {
if $platform_config.verbose {
log info $"UPX compressing: ($binary_path)"
}
@ -623,15 +620,17 @@ def upx_compress_binary [binary_path: string, platform_config: record]
} else {
{ status: "failed", reason: $upx_result.stderr }
}
} | complete)

} catch {|err|
{ status: "failed", reason: $err.msg }
if $result.exit_code != 0 {
{ status: "failed", reason: $result.stderr }
} else {
$result.stdout
}
}

# Sign binary (placeholder - would need actual signing implementation)
def sign_binary [binary_path: string, platform_config: record]
{
def sign_binary [binary_path: string, platform_config: record] {
log warning "Binary signing not implemented - skipping"
{ status: "skipped", reason: "signing not implemented" }
}
@ -640,22 +639,21 @@ def sign_binary [binary_path: string, platform_config: record]
def generate_service_definitions [
platform_config: record
build_result: record
]
{
] {
log info "Generating service definitions..."

let start_time = (date now)

try {
let result = (do {
let services_dir = ($platform_config.output_dir | path join "services")
mkdir $services_dir

let successful_builds = ($build_result.build_results | where status == "success")
let generated_services = []
mut generated_services = []

# Generate systemd service files
for build in $successful_builds {
if $build.platform | str starts-with "linux" {
if ($build.platform | str starts-with "linux") {
let systemd_service = generate_systemd_service $build $platform_config

let service_file = ($services_dir | path join $"($build.project).service")
@ -684,19 +682,21 @@ def generate_service_definitions [
docker_compose: $compose_file
duration: ((date now) - $start_time)
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
} else {
$result.stdout
}
}

# Generate systemd service file
def generate_systemd_service [build: record, platform_config: record]
{
def generate_systemd_service [build: record, platform_config: record] {
$"[Unit]
Description=Provisioning ($build.project | str title-case) Service
After=network.target
@ -726,9 +726,8 @@ WantedBy=multi-user.target
}

# Generate Docker Compose file
def generate_docker_compose [builds: list, platform_config: record]
{
let services = []
def generate_docker_compose [builds: list, platform_config: record] {
mut services = []

for build in $builds {
let service_def = $" ($build.project | str replace "_" "-"):
@ -771,32 +770,33 @@ networks:
def create_container_images [
platform_config: record
build_result: record
]
{
] {
log info "Creating container images..."

let start_time = (date now)

# Container creation would use the build-containers.nu tool
let containers_result = try {
nu ($platform_config.source_root | path join "src" "tools" "package" "build-containers.nu")
--dist-dir ($platform_config.output_dir | path dirname)
--tag-prefix "provisioning"
--platforms "linux/amd64"
--verbose:$platform_config.verbose
let script_path = ($platform_config.source_root | path join "src" "tools" "package" "build-containers.nu")
let dist_dir = ($platform_config.output_dir | path dirname)
let containers_result = (do {
nu $script_path --dist-dir $dist_dir --tag-prefix "provisioning" --platforms "linux/amd64" --verbose:$platform_config.verbose
} | complete)

} catch {|err|
if $containers_result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $containers_result.stderr
containers_created: 0
container_results: { status: "failed", reason: $containers_result.stderr }
duration: ((date now) - $start_time)
}
} else {
{
status: ($containers_result.stdout.status? // "success")
containers_created: (if "successful_builds" in ($containers_result.stdout | columns) { $containers_result.stdout.successful_builds } else { 0 })
container_results: $containers_result.stdout
duration: ((date now) - $start_time)
}
}

{
status: $containers_result.status
containers_created: (if "successful_builds" in ($containers_result | columns) { $containers_result.successful_builds } else { 0 })
container_results: $containers_result
duration: ((date now) - $start_time)
}
}
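
Because `complete` captures the child script's stdout as plain text, structured output has to be parsed explicitly; a hedged sketch of that step, assuming the script emits JSON (which this commit does not guarantee):

let run = (do { ^nu $script_path --dist-dir $dist_dir } | complete)
# Parse only on success; fall back to an empty record otherwise
let summary = if $run.exit_code == 0 { $run.stdout | from json } else { {} }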

@ -804,13 +804,12 @@ def create_container_images [
def generate_platform_metadata [
platform_config: record
preparation_results: list
]
{
] {
log info "Generating platform metadata..."

let start_time = (date now)

try {
let result = (do {
let metadata = {
name: "provisioning-platform"
version: (detect_version $platform_config.source_root)
@ -842,43 +841,53 @@ def generate_platform_metadata [
metadata: $metadata
duration: ((date now) - $start_time)
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
} else {
$result.stdout
}
}

# Detect version from git or other sources
def detect_version [repo_root: string]
{
def detect_version [repo_root: string] {
cd $repo_root

try {
let result = (do {
let git_version = (git describe --tags --always --dirty 2>/dev/null | str trim)
if $git_version != "" {
return ($git_version | str replace "^v" "")
}

return $"dev-(date now | format date '%Y%m%d')"
} catch {
return "unknown"
} | complete)

if $result.exit_code != 0 {
"unknown"
} else {
$result.stdout
}
}

# Get directory size helper
def get_directory_size [dir: string] -> int {
def get_directory_size [dir: string] {
if not ($dir | path exists) {
return 0
}

try {
find $dir -type f | each {|file| ls $file | get 0.size } | math sum | if $in == null { 0 } else { $in }
} catch {
let result = (do {
glob ($dir + "/**/*") | where { |p| ($p | path type) == "file" } | each {|file| ls $file | get 0.size } | math sum | if $in == null { 0 } else { $in }
} | complete)

if $result.exit_code != 0 {
0
} else {
$result.stdout
}
}

@ -913,5 +922,5 @@ def "main quick" [
--platform: string = "linux-amd64" # Single platform to build
--output-dir: string = "dist/platform" # Output directory
] {
main --target-platforms $platform --output-dir $output_dir --parallel-builds:false
main --target-platforms $platform --output-dir $output_dir
}
6
tools/distribution/test_min2.nu
Normal file
@ -0,0 +1,6 @@
use std log
use ./docs_discovery.nu

def main [--verbose] { "test" }

main
File diff suppressed because it is too large
@ -14,16 +14,16 @@ def find-all-docs []: nothing -> list<string> {
mut all_docs = []

# Find docs in main docs directory
try {
let result1 = (do {
let main_docs = (glob ($docs_root + "/**/*.md"))
$all_docs = ($all_docs | append $main_docs)
}
} | complete)

# Find docs in provisioning/docs directory
try {
let result2 = (do {
let prov_docs = (glob ($prov_docs_root + "/**/*.md"))
$all_docs = ($all_docs | append $prov_docs)
}
} | complete)

$all_docs | uniq
}
@ -31,7 +31,7 @@ def find-all-docs []: nothing -> list<string> {
# Extract all markdown links from a file
def extract-links [file: string]: nothing -> table {
let content = (open $file)
let lines = ($content | split row "\n")
let lines = ($content | lines)

mut results = []

@ -128,11 +128,13 @@ def main [
mut all_results = []

for doc in $docs {
try {
let result = (do {
let results = (validate-file-links $doc)
$all_results = ($all_results | append $results)
} catch {|err|
print $"⚠️ Error processing ($doc): ($err.msg)"
} | complete)

if $result.exit_code != 0 {
print $"⚠️ Error processing ($doc): ($result.stderr)"
}
}


@ -10,25 +10,34 @@ def main [
] {
print "🔍 Searching for files with try-catch blocks..."

let files = (
find provisioning -name "*.nu" -type f
| lines
| where $it != ""
| each { |file|
if ($file | path exists) {
let content = (open $file)
let has_try = ($content | str contains "try {")
let find_result = (do {
^find provisioning -name "*.nu" -type f
} | complete)

if $has_try {
$file
let files = (
if $find_result.exit_code == 0 {
$find_result.stdout
| lines
| where $it != ""
| each { |file|
if ($file | path exists) {
let content = (open $file)
let has_try = ($content | str contains "try {")

if $has_try {
$file
} else {
null
}
} else {
null
}
} else {
null
}
| compact
} else {
print $"❌ Error finding files: ($find_result.stderr)"
[]
}
| compact
)

let total = ($files | length)
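
A pure-Nushell alternative to shelling out to `find` here would be the builtin glob; a sketch, not what the commit does:

# List .nu files under provisioning and keep those containing a try block
let files = (glob "provisioning/**/*.nu" | where {|f| open $f | str contains "try {" })
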
@ -131,7 +140,7 @@ def main [
}

# Fix try-catch in a single file
def fix-file [file: path] -> nothing {
def fix-file [file: path] {
let content = (open $file)

# Pattern 1: Simple try-catch without error parameter
@ -160,30 +169,39 @@ def stats [] {
print "📊 Try-Catch Usage Statistics"
print ""

let files = (
find provisioning -name "*.nu" -type f
| lines
| where $it != ""
| each { |file|
if ($file | path exists) {
let content = (open $file)
let count = (
$content
| split row "try {"
| length
| $in - 1
)
let find_result = (do {
^find provisioning -name "*.nu" -type f
} | complete)

if $count > 0 {
{ file: $file, count: $count }
let files = (
if $find_result.exit_code == 0 {
$find_result.stdout
| lines
| where $it != ""
| each { |file|
if ($file | path exists) {
let content = (open $file)
let count = (
$content
| split row "try {"
| length
| $in - 1
)

if $count > 0 {
{ file: $file, count: $count }
} else {
null
}
} else {
null
}
} else {
null
}
| compact
} else {
print $"❌ Error finding files: ($find_result.stderr)"
[]
}
| compact
)

let total_files = ($files | length)

@ -142,10 +142,14 @@ def load-manifest [
]
{
if ($manifest_path | path exists) {
try {
let result = (do {
open $manifest_path | from yaml
} catch {
} | complete)

if $result.exit_code != 0 {
{}
} else {
$result.stdout
}
} else {
{}
@ -163,7 +167,7 @@ def publish-extensions [
for ext in $extensions {
log info $"Publishing ($ext.type)/($ext.name):($ext.version)"

try {
let push_result = (do {
let result = (push-artifact $ext.path $registry $namespace $ext.name $ext.version)

if $result {
@ -177,8 +181,10 @@ def publish-extensions [
} else {
log error $" ✗ Failed to publish ($ext.name):($ext.version)"
}
} catch { |err|
log error $" ✗ Error publishing ($ext.name): ($err.msg)"
} | complete)

if $push_result.exit_code != 0 {
log error $" ✗ Error publishing ($ext.name): ($push_result.stderr)"
}
}


@ -123,8 +123,16 @@ export def validate-extension [
$errors = ($errors | append "Missing manifest.yaml")
} else {
# Validate manifest content
try {
let manifest = (open $manifest_path | from yaml)
let parse_result = (do {
open $manifest_path | from yaml
} | complete)

let manifest_result = if $parse_result.exit_code == 0 { $parse_result.stdout } else { null }

if ($manifest_result == null) {
$errors = ($errors | append "Invalid manifest.yaml: Failed to parse YAML")
} else {
let manifest = $manifest_result

# Required fields
let required_fields = ["name", "type", "version"]
@ -146,8 +154,6 @@ export def validate-extension [
$warnings = ($warnings | append "Version should follow semver format (x.y.z)")
}
}
} catch { |err|
$errors = ($errors | append $"Invalid manifest.yaml: ($err.msg)")
}
}
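
The parse-or-null step above generalizes; a minimal sketch (hypothetical helper, using `open --raw` so the YAML is parsed exactly once):

def parse-yaml-or-null [path: string] {
    # Read raw text, parse it, and capture any failure instead of catching
    let result = (do { open --raw $path | from yaml } | complete)
    if $result.exit_code == 0 { $result.stdout } else { null }
}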


@ -133,29 +133,26 @@ def main [
}

# Detect version from git or other sources
def detect_version [repo_root: string]
{
try {
def detect_version [repo_root: string] {
let result = (do {
cd $repo_root
let git_version = (git describe --tags --always --dirty 2>/dev/null | complete)
if $git_version.exit_code == 0 and ($git_version.stdout | str trim) != "" {
return ($git_version.stdout | str trim)
}
return $"dev-(date now | format date "%Y%m%d")"
} catch {
return "dev-unknown"
git describe --tags --always --dirty 2>/dev/null
} | complete)

if $result.exit_code == 0 and ($result.stdout | str trim) != "" {
return ($result.stdout | str trim)
}

return $"dev-(date now | format date "%Y%m%d")"
}

# Check if Docker is available
def check_docker_availability []
{
try {
let docker_check = (docker --version | complete)
return ($docker_check.exit_code == 0)
} catch {
return false
}
def check_docker_availability [] {
let result = (do {
docker --version
} | complete)

return ($result.exit_code == 0)
}
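
Usage is straightforward; a short sketch (illustrative only):

if (check_docker_availability) {
    log info "Docker is available"
} else {
    log warning "Docker not found - container builds will be skipped"
}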

# Ensure Dockerfiles exist, create them if they don't
@ -197,8 +194,7 @@ def create_dockerfile [
}

# Create orchestrator Dockerfile
def create_orchestrator_dockerfile [config: record]
{
def create_orchestrator_dockerfile [config: record] {
$"# Provisioning Orchestrator Container
# Version: ($config.version)

@ -241,8 +237,7 @@ CMD [\"/usr/local/bin/provisioning-orchestrator\", \"--host\", \"0.0.0.0\", \"--
}

# Create control center Dockerfile
def create_control_center_dockerfile [config: record]
{
def create_control_center_dockerfile [config: record] {
$"# Provisioning Control Center Container
# Version: ($config.version)

@ -285,8 +280,7 @@ CMD [\"/usr/local/bin/control-center\", \"--host\", \"0.0.0.0\", \"--port\", \"9
}

# Create web UI Dockerfile
def create_web_ui_dockerfile [config: record]
{
def create_web_ui_dockerfile [config: record] {
$"# Provisioning Web UI Container
# Version: ($config.version)

@ -311,8 +305,7 @@ CMD [\"nginx\", \"-g\", \"daemon off;\"]
}

# Create all-in-one Dockerfile
def create_all_in_one_dockerfile [config: record]
{
def create_all_in_one_dockerfile [config: record] {
$"# Provisioning All-in-One Container
# Version: ($config.version)

@ -361,8 +354,7 @@ CMD [\"/usr/bin/supervisord\", \"-c\", \"/etc/supervisor/conf.d/supervisord.conf
}

# Create generic Dockerfile
def create_generic_dockerfile [container: record, config: record]
{
def create_generic_dockerfile [container: record, config: record] {
$"# Generic Provisioning Service Container
# Service: ($container.name)
# Version: ($config.version)
@ -405,8 +397,7 @@ CMD [\"sh\", \"-c\", \"echo 'Container for ($container.name) - configure as need
def build_containers_sequential [
container_definitions: list
container_config: record
]
{
] {
$container_definitions | each {|container|
build_single_container $container $container_config
}
@ -416,8 +407,7 @@ def build_containers_sequential [
def build_containers_parallel [
container_definitions: list
container_config: record
]
{
] {
# For simplicity, using sequential for now
# In a real implementation, you might use background processes
build_containers_sequential $container_definitions $container_config
@ -427,91 +417,86 @@ def build_containers_parallel [
def build_single_container [
container: record
container_config: record
]
{
] {
log info $"Building container: ($container.name)"

let start_time = (date now)
let dockerfile_path = ($container_config.repo_root | path join "docker" $container.dockerfile)
let image_tag = $"($container_config.tag_prefix)/($container.name):($container_config.version)"

try {
# Check if required binaries exist
let missing_deps = check_container_dependencies $container $container_config
# Check if required binaries exist
let missing_deps = check_container_dependencies $container $container_config

if ($missing_deps | length) > 0 {
return {
container: $container.name
status: "failed"
reason: $"Missing dependencies: ($missing_deps | str join ', ')"
duration: ((date now) - $start_time)
}
if ($missing_deps | length) > 0 {
return {
container: $container.name
status: "failed"
reason: $"Missing dependencies: ($missing_deps | str join ', ')"
duration: ((date now) - $start_time)
}
}

# Build Docker command
let docker_cmd = ["docker", "build"]
# Build Docker command
let docker_cmd_base = ["docker", "build"]

# Add build arguments
for arg in $container_config.build_args {
$docker_cmd = ($docker_cmd | append ["--build-arg", $arg])
# Build arguments list
let build_args = if ($container_config.build_args | length) > 0 {
$container_config.build_args | reduce --fold [] {|arg, acc|
$acc | append ["--build-arg", $arg]
}
} else {
[]
}

# Add cache options
if not $container_config.cache {
$docker_cmd = ($docker_cmd | append "--no-cache")
# Cache options
let cache_args = if not $container_config.cache { ["--no-cache"] } else { [] }

# Platform support for multi-arch
let platform_args = if ($container_config.platforms | length) > 1 {
["--platform", ($container_config.platforms | str join ",")]
} else {
["--platform", ($container_config.platforms | get 0)]
}

# Tag arguments
let tag_args = ["-t", $image_tag]

# Dockerfile and context arguments
let context_args = ["-f", $dockerfile_path, $container.context]

# Combine all arguments
let docker_cmd = $docker_cmd_base | append $build_args | append $cache_args | append $platform_args | append $tag_args | append $context_args

# Execute build
cd ($container_config.repo_root)

if $container_config.verbose {
log info $"Running: ($docker_cmd | str join ' ')"
}

let build_result = (do {
run-external $docker_cmd.0 ...($docker_cmd | skip 1)
} | complete)

if $build_result.exit_code == 0 {
# Get image size
let image_info = get_image_info $image_tag

log info $"Successfully built container: ($container.name) -> ($image_tag)"

{
container: $container.name
status: "success"
image_tag: $image_tag
image_size: $image_info.size
duration: ((date now) - $start_time)
}

# Add platform support for multi-arch
if ($container_config.platforms | length) > 1 {
$docker_cmd = ($docker_cmd | append ["--platform", ($container_config.platforms | str join ",")])
} else {
$docker_cmd = ($docker_cmd | append ["--platform", ($container_config.platforms | get 0)])
}

# Add tags
$docker_cmd = ($docker_cmd | append ["-t", $image_tag])

# Add dockerfile and context
$docker_cmd = ($docker_cmd | append ["-f", $dockerfile_path, $container.context])

# Execute build
cd ($container_config.repo_root)

if $container_config.verbose {
log info $"Running: ($docker_cmd | str join ' ')"
}

let build_result = (run-external --redirect-combine $docker_cmd.0 ...$docker_cmd.1.. | complete)

if $build_result.exit_code == 0 {
# Get image size
let image_info = get_image_info $image_tag

log info $"Successfully built container: ($container.name) -> ($image_tag)"

{
container: $container.name
status: "success"
image_tag: $image_tag
image_size: $image_info.size
duration: ((date now) - $start_time)
}
} else {
log error $"Failed to build container ($container.name): ($build_result.stderr)"
{
container: $container.name
status: "failed"
reason: $build_result.stderr
duration: ((date now) - $start_time)
}
}

} catch {|err|
log error $"Failed to build container ($container.name): ($err.msg)"
} else {
log error $"Failed to build container ($container.name): ($build_result.stderr)"
{
container: $container.name
status: "failed"
reason: $err.msg
reason: $build_result.stderr
duration: ((date now) - $start_time)
}
}
@ -521,15 +506,14 @@ def build_single_container [
def check_container_dependencies [
container: record
container_config: record
]
{
let missing_deps = []
] {
mut missing_deps = []

for dep in $container.dependencies {
let binary_pattern = $"($dep)*"
let found_binaries = (find $container.binary_path -name $binary_pattern -type f)
let binary_pattern = $"($container.binary_path)/($dep)*"
let found_binaries = (glob $binary_pattern | length)

if ($found_binaries | length) == 0 {
if $found_binaries == 0 {
$missing_deps = ($missing_deps | append $dep)
}
}
@ -538,16 +522,19 @@ def check_container_dependencies [
}

# Get image information
def get_image_info [image_tag: string]
{
try {
let inspect_result = (docker inspect $image_tag | from json | get 0)
def get_image_info [image_tag: string] {
let result = (do {
docker inspect $image_tag
} | complete)

if $result.exit_code == 0 {
let inspect_result = ($result.stdout | from json | get 0)
{
size: $inspect_result.Size
created: $inspect_result.Created
architecture: $inspect_result.Architecture
}
} catch {
} else {
{ size: 0, created: "", architecture: "unknown" }
}
}
@ -556,39 +543,30 @@ def get_image_info [image_tag: string]
def push_containers [
build_results: list
container_config: record
]
{
] {
log info $"Pushing containers to registry: ($container_config.output_registry)"

let successful_builds = ($build_results | where status == "success")
let push_results = []
mut push_results = []

for build in $successful_builds {
try {
log info $"Pushing: ($build.image_tag)"
let push_result = (docker push $build.image_tag | complete)
log info $"Pushing: ($build.image_tag)"
let push_result = (do {
docker push $build.image_tag
} | complete)

if $push_result.exit_code == 0 {
log info $"Successfully pushed: ($build.image_tag)"
$push_results = ($push_results | append {
image: $build.image_tag
status: "success"
})
} else {
log error $"Failed to push ($build.image_tag): ($push_result.stderr)"
$push_results = ($push_results | append {
image: $build.image_tag
status: "failed"
reason: $push_result.stderr
})
}

} catch {|err|
log error $"Failed to push ($build.image_tag): ($err.msg)"
if $push_result.exit_code == 0 {
log info $"Successfully pushed: ($build.image_tag)"
$push_results = ($push_results | append {
image: $build.image_tag
status: "success"
})
} else {
log error $"Failed to push ($build.image_tag): ($push_result.stderr)"
$push_results = ($push_results | append {
image: $build.image_tag
status: "failed"
reason: $err.msg
reason: $push_result.stderr
})
}
}
@ -610,15 +588,23 @@ def "main info" [] {
}

if $docker_available {
let docker_info = try {
let docker_result = (do {
docker version --format json | from json
} catch {
} | complete)

let docker_info = if $docker_result.exit_code == 0 {
($docker_result.stdout | from json)
} else {
{}
}

let images = try {
let images_result = (do {
docker images --filter "reference=provisioning/*" --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}" | lines | skip 1
} catch {
} | complete)

let images = if $images_result.exit_code == 0 {
($images_result.stdout | lines | skip 1)
} else {
[]
}

@ -638,15 +624,18 @@ def "main list" [--all = false] {
return { error: "Docker not available" }
}

try {
let images = if $all {
let images_result = (do {
if $all {
docker images --format json | lines | each { from json }
} else {
docker images --filter "reference=provisioning/*" --format json | lines | each { from json }
}
} | complete)

if $images_result.exit_code == 0 {
let images = ($images_result.stdout | lines | each { from json })
$images | select Repository Tag Size CreatedAt
} catch {
} else {
[]
}
}

@ -21,8 +21,7 @@ def main [
--exclude: string = "" # Comma-separated patterns to exclude
--verbose # Enable verbose logging
--checksum # Generate checksums for archives
]
{
] {

let repo_root = ($env.PWD | path dirname | path dirname | path dirname)
let dist_root = ($dist_dir | path expand)
@ -100,29 +99,26 @@ def main [
}

# Detect version from git or other sources
def detect_version [repo_root: string]
{
try {
cd $repo_root

# Try git describe first
let git_version = (git describe --tags --always --dirty 2>/dev/null | complete)
if $git_version.exit_code == 0 and ($git_version.stdout | str trim) != "" {
return ($git_version.stdout | str trim)
}

# Try git rev-parse for short hash
let git_hash = (git rev-parse --short HEAD 2>/dev/null | complete)
if $git_hash.exit_code == 0 {
return $"dev-($git_hash.stdout | str trim)"
}

# Fallback to date-based version
return $"dev-(date now | format date "%Y%m%d")"

} catch {
def detect_version [repo_root: string] {
let cd_result = (do { cd $repo_root } | complete)
if $cd_result.exit_code != 0 {
return "dev-unknown"
}

# Try git describe first
let git_version = (do { cd $repo_root; git describe --tags --always --dirty 2>/dev/null } | complete)
if $git_version.exit_code == 0 and ($git_version.stdout | str trim) != "" {
return ($git_version.stdout | str trim)
}

# Try git rev-parse for short hash
let git_hash = (do { cd $repo_root; git rev-parse --short HEAD 2>/dev/null } | complete)
if $git_hash.exit_code == 0 {
return $"dev-($git_hash.stdout | str trim)"
}

# Fallback to date-based version
return $"dev-(date now | format date "%Y%m%d")"
}
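
One caveat behind this rewrite: `cd` inside a `do` closure only changes the directory for that closure, which is why each git call above pairs the `cd` with the command. A runnable sketch of the behavior:

let before = (pwd)
do { cd /tmp } | complete
let after = (pwd)
print ($before == $after)  # true: the caller's directory is unchanged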

# Create package for a specific platform and format
@ -131,14 +127,13 @@ def create_platform_package [
format: string
package_config: record
repo_root: string
]
{
] {
log info $"Creating ($format) package for ($platform)..."

let start_time = (date now)
let package_name = $"provisioning-($package_config.version)-($platform)-($package_config.variant)"

try {
let result = (do {
# Prepare package directory
let package_dir = prepare_package_directory $platform $package_config $package_name

@ -176,17 +171,20 @@ def create_platform_package [
duration: ((date now) - $start_time)
}
}
} | complete)

} catch {|err|
log error $"Failed to create package for ($platform) ($format): ($err.msg)"
if $result.exit_code != 0 {
log error $"Failed to create package for ($platform) ($format): ($result.stderr)"
{
platform: $platform
format: $format
package_name: $package_name
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
} else {
$result.stdout
}
}

@ -195,8 +193,7 @@ def prepare_package_directory [
platform: string
package_config: record
package_name: string
]
{
] {
let temp_package_dir = ($package_config.output_dir | path join "tmp" $package_name)

# Clean and create package directory
@ -428,14 +425,13 @@ def create_tar_archive [
package_dir: string
package_config: record
package_name: string
]
{
] {
let archive_name = $"($package_name).tar.gz"
let archive_path = ($package_config.output_dir | path join $archive_name)
let package_parent = ($package_dir | path dirname)
let package_basename = ($package_dir | path basename)

try {
let result = (do {
cd $package_parent

# Create tar with specified compression level
@ -461,12 +457,15 @@ def create_tar_archive [
original_size: $original_size
compression_ratio: $compression_ratio
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
}
} else {
$result.stdout
}
}
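
The tar invocation itself is elided by the hunk above; a hedged sketch of what such a step could look like (assuming GNU tar; the flags are illustrative, not the commit's exact command):

let tar_result = (do {
    # Create the archive from the package directory's parent
    ^tar -czf $archive_path -C $package_parent $package_basename
} | complete)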

@ -475,18 +474,17 @@ def create_zip_archive [
package_dir: string
package_config: record
package_name: string
]
{
] {
let archive_name = $"($package_name).zip"
let archive_path = ($package_config.output_dir | path join $archive_name)
let package_parent = ($package_dir | path dirname)
let package_basename = ($package_dir | path basename)

try {
let result = (do {
cd $package_parent

# Create zip archive
zip -r -$package_config.compression_level $archive_path $package_basename
^zip -r $"-($package_config.compression_level)" $archive_path $package_basename

# Clean up temporary directory
rm -rf $package_dir
@ -507,12 +505,15 @@ def create_zip_archive [
original_size: $original_size
compression_ratio: $compression_ratio
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
}
} else {
$result.stdout
}
}

@ -520,27 +521,29 @@ def create_zip_archive [
def generate_checksums [
package_results: list
package_config: record
]
{
] {
log info "Generating checksums..."

let successful_packages = ($package_results | where status == "success")
let checksums = []
mut checksums = []

for package in $successful_packages {
try {
let result = (do {
let sha256_hash = (shasum -a 256 $package.archive_path | awk '{print $1}')
let md5_hash = (md5sum $package.archive_path | awk '{print $1}')

$checksums = ($checksums | append {
{
file: ($package.archive_path | path basename)
sha256: $sha256_hash
md5: $md5_hash
size: $package.archive_size
})
}
} | complete)

} catch {|err|
log warning $"Failed to generate checksum for ($package.archive_path): ($err.msg)"
if $result.exit_code == 0 {
$checksums = ($checksums | append $result.stdout)
} else {
log warning $"Failed to generate checksum for ($package.archive_path): ($result.stderr)"
}
}

@ -568,12 +571,26 @@ def create_package_manifest [
package_config: record
repo_root: string
] {
let source_commit = (do { cd $repo_root; git rev-parse HEAD } | complete)
let commit_value = if $source_commit.exit_code == 0 {
($source_commit.stdout | str trim)
} else {
"unknown"
}

let source_branch = (do { cd $repo_root; git branch --show-current } | complete)
let branch_value = if $source_branch.exit_code == 0 {
($source_branch.stdout | str trim)
} else {
"unknown"
}

let manifest = {
version: $package_config.version
created_at: (date now | format date "%Y-%m-%d %H:%M:%S")
created_by: "provisioning-package-system"
source_commit: (try { cd $repo_root; git rev-parse HEAD } catch { "unknown" })
source_branch: (try { cd $repo_root; git branch --show-current } catch { "unknown" })
source_commit: $commit_value
source_branch: $branch_value
package_config: $package_config
packages: $package_results
checksums: $checksum_result.checksums
@ -588,22 +605,33 @@ def create_package_manifest [
}

# Utility functions
def should_exclude [path: string, patterns: list]
{
def should_exclude [path: string, patterns: list] {
if ($patterns | length) == 0 { return false }
return ($patterns | any {|pattern| $path =~ $pattern })
}

def get_directory_size [dir: string] -> int {
def get_directory_size [dir: string] {
if not ($dir | path exists) { return 0 }
try {
find $dir -type f | each {|file| ls $file | get 0.size } | math sum | if $in == null { 0 } else { $in }
} catch { 0 }

let result = (do {
glob ($dir | path join "**/*") | each {|file|
if ($file | path type) == "file" {
ls $file | get 0.size
} else {
0
}
} | math sum | if $in == null { 0 } else { $in }
} | complete)

if $result.exit_code == 0 {
$result.stdout
} else {
0
}
}

# Create installation script templates
def create_linux_install_script [config: record]
{
def create_linux_install_script [config: record] {
$"#!/bin/bash
# Provisioning System Installation Script
# Version: ($config.version)
@ -619,7 +647,7 @@ echo \"Installing Provisioning System ($config.version) for Linux...\"

# Check for root privileges
if [[ $EUID -ne 0 ]]; then
echo \"This script must be run as root (use sudo)\"
echo \"This script must be run as root\"
exit 1
fi

@ -651,8 +679,7 @@ echo \"Run 'provisioning help' to get started.\"
"
}

def create_macos_install_script [config: record]
{
def create_macos_install_script [config: record] {
$"#!/bin/bash
# Provisioning System Installation Script
# Version: ($config.version)
@ -695,8 +722,7 @@ echo \"Note: You may need to add /usr/local/bin to your PATH\"
"
}

def create_windows_install_script [config: record]
{
def create_windows_install_script [config: record] {
$"@echo off
REM Provisioning System Installation Script
REM Version: ($config.version)
@ -705,38 +731,37 @@ REM Platform: Windows
echo Installing Provisioning System ($config.version) for Windows...

REM Create directories
mkdir \"C:\\Program Files\\Provisioning\\bin\" 2>NUL
mkdir \"C:\\Program Files\\Provisioning\\lib\" 2>NUL
mkdir \"C:\\ProgramData\\Provisioning\" 2>NUL
mkdir C:\\Program Files\\Provisioning\\bin 2>NUL
mkdir C:\\Program Files\\Provisioning\\lib 2>NUL
mkdir C:\\ProgramData\\Provisioning 2>NUL

REM Install binaries
echo Installing binaries...
xcopy platform\\* \"C:\\Program Files\\Provisioning\\bin\\\" /Y /Q
xcopy platform\\* C:\\Program Files\\Provisioning\\bin\\ /Y /Q

REM Install libraries
echo Installing libraries...
xcopy core\\* \"C:\\Program Files\\Provisioning\\lib\\\" /Y /Q /S
xcopy core\\* C:\\Program Files\\Provisioning\\lib\\ /Y /Q /S

REM Install configuration
echo Installing configuration...
xcopy config\\* \"C:\\ProgramData\\Provisioning\\\" /Y /Q
xcopy config\\* C:\\ProgramData\\Provisioning\\ /Y /Q

REM Install KCL schemas
if exist kcl\\ (
echo Installing KCL schemas...
mkdir \"C:\\Program Files\\Provisioning\\lib\\kcl\" 2>NUL
xcopy kcl\\* \"C:\\Program Files\\Provisioning\\lib\\kcl\\\" /Y /Q /S
mkdir C:\\Program Files\\Provisioning\\lib\\kcl 2>NUL
xcopy kcl\\* C:\\Program Files\\Provisioning\\lib\\kcl\\ /Y /Q /S
)

echo Installation complete!
echo Add \"C:\\Program Files\\Provisioning\\bin\" to your PATH
echo Run 'provisioning-orchestrator --help' to get started.
echo Add C:\\Program Files\\Provisioning\\bin to your PATH
echo Run provisioning-orchestrator --help to get started.
pause
"
}

def create_generic_install_script [config: record]
{
def create_generic_install_script [config: record] {
$"#!/bin/sh
# Generic Installation Instructions
# Version: ($config.version)
@ -746,8 +771,7 @@ echo \"Please follow the instructions in README.md\"
"
}

def create_package_readme [config: record]
{
def create_package_readme [config: record] {
$"# Provisioning System ($config.version)

Cloud-native infrastructure provisioning and management system.
@ -802,7 +826,7 @@ def "main info" [packages_dir: string = "packages"] {
if ($manifest_file | path exists) {
open $manifest_file
} else {
let packages = (ls $packages_root | where name =~ "\.(tar\.gz|zip)$")
let packages = (ls $packages_root | where name =~ r".*\.tar\.gz$|.*\.zip$")
{
directory: $packages_root
packages: ($packages | length)

@ -11,16 +11,15 @@
use std log

def main [
--input-dir: string = "packages" # Directory containing packages to checksum
--output-file: string = "" # Output file for checksums (auto-generated if empty)
--algorithms: string = "sha256,md5" # Hash algorithms: sha256, md5, sha512, all
--format: string = "standard" # Output format: standard, json, csv
--verify: string = "" # Verify checksums from existing file
--recursive # Process directories recursively
--pattern: string = "*" # File pattern to match
--verbose # Enable verbose logging
]
{
--input-dir: string = "packages"
--output-file: string = ""
--algorithms: string = "sha256,md5"
--format: string = "standard"
--verify: string = ""
--recursive
--pattern: string = "*"
--verbose
] {

let input_root = ($input_dir | path expand)
let algorithms_list = if $algorithms == "all" {
@ -54,7 +53,8 @@ def main [

# If verifying, run verification instead
if $checksum_config.verify_file != "" {
return verify_checksums $checksum_config
verify_checksums $checksum_config
return
}

# Find files to checksum
@ -99,8 +99,7 @@ def main [
}

# Find files to generate checksums for
def find_checksum_files [checksum_config: record]
{
def find_checksum_files [checksum_config: record] {
let find_command = if $checksum_config.recursive {
$"find ($checksum_config.input_dir) -name \"($checksum_config.pattern)\" -type f"
} else {
@ -111,7 +110,7 @@ def find_checksum_files [checksum_config: record]

# Filter out checksum files themselves
$found_files | where {|file|
not ($file =~ "checksum" or $file =~ "\.sha256$" or $file =~ "\.md5$" or $file =~ "\.sha512$")
not ($file =~ r"checksum" or $file =~ r"\.sha256$" or $file =~ r"\.md5$" or $file =~ r"\.sha512$")
}
}
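
Since the pattern here is still assembled as a `find` command string, a builtin-glob alternative is possible; a sketch under the same inputs (illustrative only, not the commit's approach):

let found_files = if $checksum_config.recursive {
    glob ($checksum_config.input_dir | path join "**" $checksum_config.pattern)
} else {
    glob ($checksum_config.input_dir | path join $checksum_config.pattern)
}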

@ -119,8 +118,7 @@ def find_checksum_files [checksum_config: record]
def generate_checksums_for_files [
files: list
checksum_config: record
]
{
] {
$files | each {|file|
generate_checksums_for_file $file $checksum_config
}
@ -130,41 +128,45 @@ def generate_checksums_for_files [
def generate_checksums_for_file [
file: string
checksum_config: record
]
{
] {
if $checksum_config.verbose {
log info $"Generating checksums for: ($file)"
}

let start_time = (date now)
let checksums = {}
let errors = []

# Get file info
let file_info = (ls $file | get 0)
let relative_path = ($file | str replace $checksum_config.input_dir "" | str trim-left "/")
let relative_path = ($file | str replace $checksum_config.input_dir "" | str trim --left "/")
|
||||
|
||||
# Generate each requested algorithm
|
||||
for algorithm in $checksum_config.algorithms {
|
||||
try {
|
||||
let checksum = match $algorithm {
|
||||
# Generate checksums for each algorithm
|
||||
let checksums_list = ($checksum_config.algorithms | each {|algorithm|
|
||||
let result = (do {
|
||||
match $algorithm {
|
||||
"sha256" => { generate_sha256 $file }
|
||||
"md5" => { generate_md5 $file }
|
||||
"sha512" => { generate_sha512 $file }
|
||||
_ => {
|
||||
$errors = ($errors | append $"Unknown algorithm: ($algorithm)")
|
||||
""
|
||||
error make {msg: $"Unknown algorithm: ($algorithm)"}
|
||||
}
|
||||
}
|
||||
} | complete)
|
||||
|
||||
if $checksum != "" {
|
||||
$checksums = ($checksums | insert $algorithm $checksum)
|
||||
}
|
||||
|
||||
} catch {|err|
|
||||
$errors = ($errors | append $"Failed to generate ($algorithm): ($err.msg)")
|
||||
if $result.exit_code == 0 {
|
||||
{algorithm: $algorithm, checksum: $result.stdout}
|
||||
} else {
|
||||
{algorithm: $algorithm, error: $result.stderr}
|
||||
}
|
||||
}
|
||||
})
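# NOTE: the `do { ... } | complete` idiom above is the try/catch replacement:
# an `error make` raised inside `do` surfaces as a nonzero exit_code instead
# of aborting the script. Minimal sketch (the message is illustrative):
#   let probe = (do { error make {msg: "unknown algorithm"} } | complete)
#   if $probe.exit_code != 0 { print $probe.stderr }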

# Build checksums record from successful results
let checksums_record = ($checksums_list
| where {|item| "checksum" in ($item | columns)}
| each {|item| {($item.algorithm): $item.checksum}}
| reduce --fold {} {|item, acc| $acc | merge $item})

# Collect errors from failed results
let errors = ($checksums_list | where {|item| "error" in ($item | columns)} | each {|item| $item.error})

{
file: $file
@@ -172,65 +174,62 @@ def generate_checksums_for_file [
status: (if ($errors | length) > 0 { "failed" } else { "success" })
size: $file_info.size
modified: $file_info.modified
checksums: $checksums
checksums: $checksums_record
errors: $errors
duration: ((date now) - $start_time)
}
}

# Generate SHA256 checksum
def generate_sha256 [file: string]
{
let result = (run-external --redirect-combine "shasum" "-a" "256" $file | complete)
def generate_sha256 [file: string] {
let result = (bash -c $"shasum -a 256 \"($file)\" 2>&1" | complete)
if $result.exit_code == 0 {
($result.stdout | split row " " | get 0)
} else {
# Fallback to other SHA256 tools
let openssl_result = (run-external --redirect-combine "openssl" "sha256" $file | complete)
let openssl_result = (bash -c $"openssl sha256 \"($file)\" 2>&1" | complete)
if $openssl_result.exit_code == 0 {
($openssl_result.stdout | str replace $"SHA256\\(.*\\)= " "" | str trim)
} else {
error $"Failed to generate SHA256 for ($file)"
error make {msg: $"Failed to generate SHA256 for ($file)"}
}
}
}
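# Hypothetical usage of the helper above (the path is illustrative):
#   let digest = (generate_sha256 "packages/provisioning-1.0.0.tar.gz")
#   print $"sha256: ($digest)"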

# Generate MD5 checksum
def generate_md5 [file: string]
{
let result = (run-external --redirect-combine "md5sum" $file | complete)
def generate_md5 [file: string] {
let result = (bash -c $"md5sum \"($file)\" 2>&1" | complete)
if $result.exit_code == 0 {
($result.stdout | split row " " | get 0)
} else {
# Fallback for macOS
let md5_result = (run-external --redirect-combine "md5" "-r" $file | complete)
let md5_result = (bash -c $"md5 -r \"($file)\" 2>&1" | complete)
if $md5_result.exit_code == 0 {
($md5_result.stdout | split row " " | get 0)
} else {
# Fallback to openssl
let openssl_result = (run-external --redirect-combine "openssl" "md5" $file | complete)
let openssl_result = (bash -c $"openssl md5 \"($file)\" 2>&1" | complete)
if $openssl_result.exit_code == 0 {
($openssl_result.stdout | str replace $"MD5\\(.*\\)= " "" | str trim)
} else {
error $"Failed to generate MD5 for ($file)"
error make {msg: $"Failed to generate MD5 for ($file)"}
}
}
}
}

# Generate SHA512 checksum
def generate_sha512 [file: string]
{
let result = (run-external --redirect-combine "shasum" "-a" "512" $file | complete)
def generate_sha512 [file: string] {
let result = (bash -c $"shasum -a 512 \"($file)\" 2>&1" | complete)
if $result.exit_code == 0 {
($result.stdout | split row " " | get 0)
} else {
# Fallback to openssl
let openssl_result = (run-external --redirect-combine "openssl" "sha512" $file | complete)
let openssl_result = (bash -c $"openssl sha512 \"($file)\" 2>&1" | complete)
if $openssl_result.exit_code == 0 {
($openssl_result.stdout | str replace $"SHA512\\(.*\\)= " "" | str trim)
} else {
error $"Failed to generate SHA512 for ($file)"
error make {msg: $"Failed to generate SHA512 for ($file)"}
}
}
}
@@ -239,13 +238,12 @@ def generate_sha512 [file: string]
def save_checksums [
checksum_results: list
checksum_config: record
]
{
] {
log info $"Saving checksums to: ($checksum_config.output_file)"

let successful_results = ($checksum_results | where status == "success")

try {
let result = (do {
match $checksum_config.format {
"standard" => {
save_standard_format $successful_results $checksum_config
@@ -257,7 +255,7 @@ def save_checksums [
save_csv_format $successful_results $checksum_config
}
_ => {
error $"Unknown format: ($checksum_config.format)"
error make {msg: $"Unknown format: ($checksum_config.format)"}
}
}

@@ -267,12 +265,15 @@ def save_checksums [
file: $checksum_config.output_file
entries: ($successful_results | length)
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
}
} else {
$result.stdout
}
}

@@ -281,28 +282,27 @@ def save_standard_format [
results: list
checksum_config: record
] {
let output_lines = []
# Build header
let header = [
$"# Checksums generated on (date now)"
$"# Algorithms: ($checksum_config.algorithms | str join ', ')"
""
]

# Add header
$output_lines = ($output_lines | append $"# Checksums generated on (date now)")
$output_lines = ($output_lines | append $"# Algorithms: ($checksum_config.algorithms | str join ', ')")
$output_lines = ($output_lines | append "")

# Add checksums for each algorithm
for algorithm in $checksum_config.algorithms {
$output_lines = ($output_lines | append $"# ($algorithm | str upcase) checksums")

for result in $results {
if ($algorithm in ($result.checksums | columns)) {
# Build checksums sections for each algorithm
let checksums_sections = ($checksum_config.algorithms | each {|algorithm|
([$"# ($algorithm | str upcase) checksums"] +
($results
| where {|result| $algorithm in ($result.checksums | columns)}
| each {|result|
let checksum = ($result.checksums | get $algorithm)
$output_lines = ($output_lines | append $"($checksum) ($result.relative_path)")
}
}
$"($checksum) ($result.relative_path)"
})
+ [""])
} | flatten)

$output_lines = ($output_lines | append "")
}

($output_lines | str join "\n") | save $checksum_config.output_file
# Combine all lines
(($header + $checksums_sections) | str join "\n") | save $checksum_config.output_file
}
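# The standard format written above looks roughly like this (digest and date
# are made up):
#   # Checksums generated on Mon, 15 Jan 2024 10:00:00
#   # Algorithms: sha256, md5
#
#   # SHA256 checksums
#   2c26b4...7a8fb  provisioning-1.0.0.tar.gz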

# Save in JSON format
@@ -332,35 +332,34 @@ def save_csv_format [
results: list
checksum_config: record
] {
let csv_data = []
# Create header row
let header = (["file", "size", "modified"] + $checksum_config.algorithms | str join ",")

# Create header
let header = ["file", "size", "modified"]
$header = ($header | append $checksum_config.algorithms)
$csv_data = ($csv_data | append ($header | str join ","))
# Build data rows
let data_rows = ($results | each {|result|
let base_row = [
$result.relative_path
($result.size | into string)
($result.modified | format date "%Y-%m-%d %H:%M:%S")
]

# Add data rows
for result in $results {
let row = [$result.relative_path, ($result.size | into string), ($result.modified | format date "%Y-%m-%d %H:%M:%S")]

for algorithm in $checksum_config.algorithms {
let checksum = if ($algorithm in ($result.checksums | columns)) {
let checksum_row = ($checksum_config.algorithms | each {|algorithm|
if ($algorithm in ($result.checksums | columns)) {
($result.checksums | get $algorithm)
} else {
""
}
$row = ($row | append $checksum)
}
})

$csv_data = ($csv_data | append ($row | str join ","))
}
(($base_row + $checksum_row) | str join ",")
})

($csv_data | str join "\n") | save $checksum_config.output_file
# Combine header and data
([$header] + $data_rows | str join "\n") | save $checksum_config.output_file
}
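# Illustrative CSV output of the function above (values are made up):
#   file,size,modified,sha256,md5
#   provisioning-1.0.0.tar.gz,10485760,2024-01-15 10:00:00,2c26b4...,d41d8c...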

# Verify checksums from existing file
def verify_checksums [checksum_config: record]
{
def verify_checksums [checksum_config: record] {
log info $"Verifying checksums from: ($checksum_config.verify_file)"

if not ($checksum_config.verify_file | path exists) {
@@ -394,8 +393,7 @@ def verify_checksums [checksum_config: record]
def parse_and_verify_checksums [
content: string
checksum_config: record
]
{
] {
let lines = ($content | lines | where {|line|
not ($line | str starts-with "#") and ($line | str trim) != ""
})
@@ -423,8 +421,7 @@ def verify_single_checksum [
expected_checksum: string
full_path: string
relative_path: string
]
{
] {
if not ($full_path | path exists) {
return {
file: $relative_path
@@ -435,15 +432,15 @@ def verify_single_checksum [
}
}

try {
# Determine algorithm by checksum length
let algorithm = match ($expected_checksum | str length) {
32 => "md5"
64 => "sha256"
128 => "sha512"
_ => "unknown"
}
# Determine algorithm by checksum length
let algorithm = match ($expected_checksum | str length) {
32 => "md5"
64 => "sha256"
128 => "sha512"
_ => "unknown"
}

let result = (do {
let actual_checksum = match $algorithm {
"md5" => { generate_md5 $full_path }
"sha256" => { generate_sha256 $full_path }
@@ -469,15 +466,18 @@ def verify_single_checksum [
reason: "checksum mismatch"
}
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
file: $relative_path
status: "failed"
expected: $expected_checksum
actual: ""
reason: $err.msg
reason: $result.stderr
}
} else {
$result.stdout
}
}

@@ -485,7 +485,7 @@ def verify_single_checksum [
def "main info" [checksum_file: string = ""] {
if $checksum_file == "" {
# Look for checksum files in current directory
let checksum_files = (find . -maxdepth 1 -name "*checksum*" -o -name "*.sha256" -o -name "*.md5" -o -name "*.sha512" | lines | where $it != "")
let checksum_files = (bash -c "find . -maxdepth 1 \\( -name '*checksum*' -o -name '*.sha256' -o -name '*.md5' -o -name '*.sha512' \\) -type f" | lines | where $it != "")

return {
current_directory: $env.PWD
@@ -534,8 +534,8 @@ def "main info" [checksum_file: string = ""] {

# Quick verify command
def "main verify" [
checksum_file: string # Checksum file to verify against
--input-dir: string = "." # Directory containing files to verify
checksum_file: string
--input-dir: string = "."
] {
main --verify $checksum_file --input-dir $input_dir
}
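# Assumed invocation of the subcommand above (the script name is hypothetical):
#   nu generate-checksums.nu verify checksums.txt --input-dir packages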

@@ -65,11 +65,11 @@ def main [
log info $"Found ($available_binaries | length) binaries to package"

# Package binaries for each platform
let packaging_results = []
mut packaging_results = []

for platform in $packaging_config.platforms {
let platform_result = package_platform_binaries $platform $available_binaries $packaging_config
let packaging_results = ($packaging_results | append $platform_result)
$packaging_results = ($packaging_results | append $platform_result)
}
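# nu 0.110 requires `mut` for reassignment inside a loop; a `let` binding is
# immutable. Self-contained sketch of the pattern used above:
#   mut acc = []
#   for x in [1 2 3] { $acc = ($acc | append ($x * 2)) }
#   # $acc == [2, 4, 6]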

let summary = {
@@ -96,10 +96,9 @@ def main [
def find_available_binaries [
source_dir: string
packaging_config: record
]
{
# Find all executable files
let executables = (find $source_dir -type f -executable)
] {
# Find all regular files using glob (the old find -executable filter is gone)
let executables = (glob ($source_dir | path join "**" "*") | where { |path| ($path | path exists) and ((ls $path | get 0.type) == "file") })

$executables | each {|binary|
let binary_info = analyze_binary $binary $packaging_config
@@ -119,59 +118,60 @@ def find_available_binaries [
def analyze_binary [
binary_path: string
packaging_config: record
]
{
try {
# Use file command to get binary information
let file_info = (file $binary_path)
] {
let result = (do {
file $binary_path
} | complete)

let architecture = if ($file_info =~ "x86-64") or ($file_info =~ "x86_64") {
"amd64"
} else if ($file_info =~ "ARM64") or ($file_info =~ "aarch64") {
"arm64"
} else if ($file_info =~ "i386") {
"i386"
} else {
"unknown"
}

let platform = if ($file_info =~ "Linux") {
"linux"
} else if ($file_info =~ "Mach-O") {
"macos"
} else if ($file_info =~ "PE32") {
"windows"
} else {
"unknown"
}

let format = if ($file_info =~ "ELF") {
"elf"
} else if ($file_info =~ "Mach-O") {
"macho"
} else if ($file_info =~ "PE32") {
"pe"
} else {
"unknown"
}

let stripped = ($file_info =~ "stripped")

{
architecture: $architecture
platform: $platform
format: $format
stripped: $stripped
}

} catch {
{
if $result.exit_code != 0 {
return {
architecture: "unknown"
platform: "unknown"
format: "unknown"
stripped: false
}
}

let file_info = $result.stdout

let architecture = if ($file_info =~ "x86-64") or ($file_info =~ "x86_64") {
"amd64"
} else if ($file_info =~ "ARM64") or ($file_info =~ "aarch64") {
"arm64"
} else if ($file_info =~ "i386") {
"i386"
} else {
"unknown"
}

let platform = if ($file_info =~ "Linux") {
"linux"
} else if ($file_info =~ "Mach-O") {
"macos"
} else if ($file_info =~ "PE32") {
"windows"
} else {
"unknown"
}

let format = if ($file_info =~ "ELF") {
"elf"
} else if ($file_info =~ "Mach-O") {
"macho"
} else if ($file_info =~ "PE32") {
"pe"
} else {
"unknown"
}

let stripped = ($file_info =~ "stripped")

{
architecture: $architecture
platform: $platform
format: $format
stripped: $stripped
}
}
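# Hypothetical call, assuming `file` prints something like
# "ELF 64-bit LSB executable, x86-64, ... stripped":
#   let info = (analyze_binary "dist/provisioning" $packaging_config)
#   # => {architecture: amd64, platform: linux, format: elf, stripped: true}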

# Package binaries for a specific platform
@@ -179,8 +179,7 @@ def package_platform_binaries [
platform: string
available_binaries: list
packaging_config: record
]
{
] {
log info $"Packaging binaries for platform: ($platform)"

let start_time = (date now)
@@ -205,9 +204,9 @@ def package_platform_binaries [
}
}

let packaging_errors = []
let processed_binaries = []
let total_package_size = 0
mut packaging_errors = []
mut processed_binaries = []
mut total_package_size = 0

# Process each binary
for binary in $platform_binaries {
@@ -247,8 +246,7 @@ def process_single_binary [
binary: record
platform: string
packaging_config: record
]
{
] {
if $packaging_config.verbose {
log info $"Processing binary: ($binary.name) for ($platform)"
}
@@ -257,7 +255,7 @@ def process_single_binary [
let output_name = $"($binary.name)-($platform)"
let temp_binary = ($packaging_config.output_dir | path join "tmp" $output_name)

try {
let result = (do {
# Ensure temp directory exists
mkdir ($temp_binary | path dirname)

@@ -328,15 +326,18 @@ def process_single_binary [
duration: ((date now) - $start_time)
}
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
binary: $binary.name
status: "failed"
reason: $err.msg
errors: [{ error: $err.msg }]
reason: $result.stderr
errors: [{ error: $result.stderr }]
duration: ((date now) - $start_time)
}
} else {
$result.stdout
}
}

@@ -346,11 +347,12 @@ def strip_binary [binary_path: string, packaging_config: record] {
log info $"Stripping debug symbols: ($binary_path)"
}

try {
# Use strip command (available on most Unix systems)
let result = (do {
strip $binary_path
} catch {|err|
log warning $"Failed to strip binary ($binary_path): ($err.msg)"
} | complete)

if $result.exit_code != 0 {
log warning $"Failed to strip binary ($binary_path): ($result.stderr)"
}
}

@@ -360,62 +362,72 @@ def upx_compress_binary [binary_path: string, packaging_config: record] {
log info $"UPX compressing: ($binary_path)"
}

try {
# Check if UPX is available
let upx_check = (which upx | complete)
if $upx_check.exit_code != 0 {
log warning "UPX not available, skipping compression"
return
}
# Check if UPX is available
let upx_check = (do {
which upx
} | complete)

# Apply UPX compression
if $upx_check.exit_code != 0 {
log warning "UPX not available, skipping compression"
return
}

# Apply UPX compression
let result = (do {
upx --best $binary_path
} | complete)

} catch {|err|
log warning $"Failed to UPX compress binary ($binary_path): ($err.msg)"
if $result.exit_code != 0 {
log warning $"Failed to UPX compress binary ($binary_path): ($result.stderr)"
}
}

# Verify binary integrity
def verify_binary_integrity [binary_path: string, packaging_config: record]
{
try {
# Check if binary is still executable
let file_info = (file $binary_path)
if not ($file_info =~ "executable") {
return {
status: "failed"
reason: "binary is not executable after processing"
}
}
def verify_binary_integrity [binary_path: string, packaging_config: record] {
# Check if binary is still executable
let file_result = (do {
file $binary_path
} | complete)

# Try to run the binary with --help or --version
let help_test = (run-external --redirect-combine $binary_path --help | complete)
let version_test = (run-external --redirect-combine $binary_path --version | complete)

if ($help_test.exit_code == 0) or ($version_test.exit_code == 0) {
return {
status: "success"
verified: true
}
} else {
return {
status: "failed"
reason: "binary does not respond to --help or --version"
}
}

} catch {|err|
if $file_result.exit_code != 0 {
return {
status: "failed"
reason: $err.msg
reason: "unable to read binary file"
}
}

let file_info = $file_result.stdout
if not ($file_info =~ "executable") {
return {
status: "failed"
reason: "binary is not executable after processing"
}
}

# Try to run the binary with --help or --version (complete captures stderr)
let help_test = (do {
^$binary_path --help
} | complete)

let version_test = (do {
^$binary_path --version
} | complete)

if ($help_test.exit_code == 0) or ($version_test.exit_code == 0) {
{
status: "success"
verified: true
}
} else {
{
status: "failed"
reason: "binary does not respond to --help or --version"
}
}
}
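# Expected usage of the verifier above (paths are illustrative):
#   let check = (verify_binary_integrity "dist/provisioning-linux" $packaging_config)
#   if $check.status != "success" { log warning $"integrity: ($check.reason)" }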

# Sign binary (placeholder - would need actual signing implementation)
def sign_binary [binary_path: string, packaging_config: record]
{
def sign_binary [binary_path: string, packaging_config: record] {
log warning "Binary signing not implemented - skipping"
return {
status: "success"
@@ -429,8 +441,7 @@ def package_binary [
binary_path: string
output_name: string
packaging_config: record
]
{
] {
match $packaging_config.format {
"archive" => { create_archive_package $binary_path $output_name $packaging_config }
"installer" => { create_installer_package $binary_path $output_name $packaging_config }
@@ -449,12 +460,11 @@ def create_archive_package [
binary_path: string
output_name: string
packaging_config: record
]
{
] {
let archive_name = $"($output_name).tar.gz"
let archive_path = ($packaging_config.output_dir | path join $archive_name)

try {
let result = (do {
# Create tar.gz archive
let binary_dir = ($binary_path | path dirname)
let binary_name = ($binary_path | path basename)
@@ -472,12 +482,15 @@ def create_archive_package [
package_size: $archive_size
compression_ratio: $compression_ratio
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
}
} else {
$result.stdout
}
}

@@ -486,8 +499,7 @@ def create_installer_package [
binary_path: string
output_name: string
packaging_config: record
]
{
] {
# Placeholder - would create platform-specific installers
log warning "Installer packages not implemented - using archive format"
create_archive_package $binary_path $output_name $packaging_config
@@ -498,11 +510,10 @@ def create_standalone_package [
binary_path: string
output_name: string
packaging_config: record
]
{
] {
let standalone_path = ($packaging_config.output_dir | path join $output_name)

try {
let result = (do {
# Just copy the binary as standalone
cp $binary_path $standalone_path

@@ -514,12 +525,15 @@ def create_standalone_package [
package_size: $package_size
compression_ratio: 100.0
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
}
} else {
$result.stdout
}
}

@@ -528,8 +542,7 @@ def create_platform_package [
platform: string
processed_binaries: list
packaging_config: record
]
{
] {
if ($processed_binaries | length) == 0 {
return {
status: "skipped"
@@ -540,7 +553,7 @@ def create_platform_package [
let platform_package_name = $"provisioning-binaries-($platform).tar.gz"
let platform_package_path = ($packaging_config.output_dir | path join $platform_package_name)

try {
let result = (do {
# Create temporary directory for platform package
let temp_platform_dir = ($packaging_config.output_dir | path join "tmp" $"platform-($platform)")
mkdir $temp_platform_dir
@@ -571,12 +584,15 @@ def create_platform_package [
package_size: $package_size
binaries_included: ($processed_binaries | length)
}
} | complete)

} catch {|err|
if $result.exit_code != 0 {
{
status: "failed"
reason: $err.msg
reason: $result.stderr
}
} else {
$result.stdout
}
}

@@ -609,7 +625,7 @@ def "main list" [packages_dir: string = "packages/binaries"] {
return { error: "packages directory not found", directory: $packages_root }
}

let binary_packages = (find $packages_root -name "*.tar.gz" -o -name "provisioning-*" -type f)
let binary_packages = (glob ($packages_root | path join "*.tar.gz") | append (glob ($packages_root | path join "provisioning-*")))

$binary_packages | each {|package|
let package_info = (ls $package | get 0)

@@ -13,17 +13,16 @@
use std log

def main [
--channels: string = "slack" # Notification channels: slack,discord,twitter,email,rss,website,all
--release-version: string = "" # Release version (auto-detected if empty)
--message-template: string = "" # Custom message template file
--notification-config: string = "" # Notification configuration file
--recipient-list: string = "" # Recipient list file (for email)
--dry-run = false # Show what would be sent without sending
--urgent # Mark notifications as urgent/high priority
--schedule: string = "" # Schedule notifications (e.g., "+1h", "2024-01-15T10:00:00")
--verbose # Enable verbose logging
]
{
--channels: string = "slack"
--release-version: string = ""
--message-template: string = ""
--notification-config: string = ""
--recipient-list: string = ""
--dry-run = false
--urgent = false
--schedule: string = ""
--verbose = false
] {

let repo_root = ($env.PWD | path dirname | path dirname | path dirname)

@@ -74,7 +73,7 @@ def main [

# Check if notifications should be scheduled
if $notification_config.schedule != "" {
return schedule_notifications $notification_config $message_templates $config_data $release_info
return (schedule_notifications $notification_config $message_templates $config_data $release_info)
}

# Send notifications to each channel
@@ -107,48 +106,57 @@ def main [
}

# Detect release version from git
def detect_release_version [repo_root: string]
{
def detect_release_version [repo_root: string] {
cd $repo_root

try {
# Try to get exact tag for current commit
let exact_tag = (git describe --tags --exact-match HEAD 2>/dev/null | str trim)
# Try to get exact tag for current commit (complete captures stderr)
let exact_result = (do {
git describe --tags --exact-match HEAD
} | complete)

if $exact_result.exit_code == 0 {
let exact_tag = ($exact_result.stdout | str trim)
if $exact_tag != "" {
return ($exact_tag | str replace --regex "^v" "")
}
}

# Fallback to latest tag
let latest_tag = (git describe --tags --abbrev=0 2>/dev/null | str trim)
# Fallback to latest tag
let latest_result = (do {
git describe --tags --abbrev=0
} | complete)

if $latest_result.exit_code == 0 {
let latest_tag = ($latest_result.stdout | str trim)
if $latest_tag != "" {
return ($latest_tag | str replace --regex "^v" "")
}

return "unknown"
} catch {
return "unknown"
}

return "unknown"
}
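# Sketch of the two-probe flow above (the repo path is illustrative):
#   let version = (detect_release_version "/path/to/repo")
#   # "1.2.3" when HEAD is tagged v1.2.3, else the latest tag, else "unknown"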

# Load notification configuration from file
def load_notification_config [config_file: string]
{
def load_notification_config [config_file: string] {
if not ($config_file | path exists) {
log warning $"Notification config file not found: ($config_file)"
return (get_default_notification_config)
}

try {
let result = (do {
open $config_file
} catch {|err|
log warning $"Failed to load notification config: ($err.msg)"
} | complete)

if $result.exit_code != 0 {
log warning $"Failed to load notification config: ($result.stderr)"
return (get_default_notification_config)
}

$result.stdout
}

# Get default notification configuration
def get_default_notification_config []
{
def get_default_notification_config [] {
{
slack: {
webhook_url: ""
@@ -190,26 +198,25 @@ def get_default_notification_config []
}

# Generate release information
def generate_release_info [notification_config: record, repo_root: string]
{
def generate_release_info [notification_config: record, repo_root: string] {
cd $repo_root

let version = $notification_config.release_version
let tag_name = $"v($version)"

# Get release date
let release_date = try {
git log -1 --format=%cd --date=short $tag_name 2>/dev/null | str trim
} catch {
let date_result = (do {
git log -1 --format=%cd --date=short $tag_name
} | complete)

let release_date = if $date_result.exit_code == 0 {
$date_result.stdout | str trim
} else {
date now | format date "%Y-%m-%d"
}

# Get changelog
let changelog = try {
get_changelog_summary $repo_root $tag_name
} catch {
"Bug fixes and improvements"
}
let changelog = get_changelog_summary $repo_root $tag_name

# Get download URLs
let download_base_url = $"https://github.com/your-org/provisioning/releases/download/($tag_name)"
@@ -235,12 +242,15 @@ def generate_release_info [notification_config: record, repo_root: string]
}

# Get changelog summary for a specific tag
def get_changelog_summary [repo_root: string, tag_name: string]
{
def get_changelog_summary [repo_root: string, tag_name: string] {
# Get previous tag
let previous_tag = try {
git describe --tags --abbrev=0 $"($tag_name)^" 2>/dev/null | str trim
} catch {
let prev_tag_result = (do {
git describe --tags --abbrev=0 $"($tag_name)^"
} | complete)

let previous_tag = if $prev_tag_result.exit_code == 0 {
$prev_tag_result.stdout | str trim
} else {
""
}

@@ -251,16 +261,20 @@ def get_changelog_summary [repo_root: string, tag_name: string]
$tag_name
}

let commits = try {
let commits_result = (do {
git log $commit_range --pretty=format:"%s" --no-merges
} catch {
} | complete)

let commits = if $commits_result.exit_code == 0 {
$commits_result.stdout | lines | where $it != ""
} else {
[]
}

# Summarize changes
let features = ($commits | where ($it =~ "^feat"))
let fixes = ($commits | where ($it =~ "^fix"))
let summary_parts = []
mut summary_parts = []

if ($features | length) > 0 {
$summary_parts = ($summary_parts | append $"($features | length) new features")
@@ -271,15 +285,14 @@ def get_changelog_summary [repo_root: string, tag_name: string]
}

if ($summary_parts | length) > 0 {
return ($summary_parts | str join ", ")
$summary_parts | str join ", "
} else {
return "Bug fixes and improvements"
"Bug fixes and improvements"
}
}

# Check if version is a major release
def is_major_version [version: string]
{
def is_major_version [version: string] {
let parts = ($version | split row ".")
if ($parts | length) >= 3 {
let minor = ($parts | get 1)
@@ -290,32 +303,33 @@ def is_major_version [version: string]
}

# Check if release contains security fixes
def is_security_release [changelog: string]
{
($changelog | str downcase | str contains "security") or
def is_security_release [changelog: string] {
(($changelog | str downcase | str contains "security") or
($changelog | str downcase | str contains "vulnerability") or
($changelog | str downcase | str contains "cve")
($changelog | str downcase | str contains "cve"))
}
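# Quick check of the predicate above (strings are illustrative):
#   is_security_release "Fixes CVE-2024-1234 in the TLS stack"  # => true
#   is_security_release "Adds new provider flags"               # => false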

# Load message templates from file
def load_message_templates [template_file: string]
{
def load_message_templates [template_file: string] {
if not ($template_file | path exists) {
log warning $"Template file not found: ($template_file)"
return {}
}

try {
let result = (do {
open $template_file
} catch {|err|
log warning $"Failed to load templates: ($err.msg)"
} | complete)

if $result.exit_code != 0 {
log warning $"Failed to load templates: ($result.stderr)"
return {}
}

$result.stdout
}

# Generate default message templates
def generate_default_templates [release_info: record]
{
def generate_default_templates [release_info: record] {
let urgency_text = if $release_info.is_security { "🚨 Security Update " } else if $release_info.is_major { "🎉 Major Release " } else { "" }
let emoji = if $release_info.is_security { "🔒" } else if $release_info.is_major { "🎉" } else { "🚀" }

@@ -343,10 +357,10 @@ def generate_default_templates [release_info: record]
{
title: $"Release v($release_info.version)"
description: $release_info.changelog
color: (if $release_info.is_security { 15158332 } else { 3066993 }) # Red or Green
color: (if $release_info.is_security { 15158332 } else { 3066993 })
fields: [
{ name: "Release Date", value: $release_info.release_date, inline: true }
{ name: "Downloads", value: $"[Linux]((\"($release_info.download_urls.linux)\")) | [macOS]((\"($release_info.download_urls.macos)\")) | [Windows]((\"($release_info.download_urls.windows)\"))", inline: false }
{ name: "Downloads", value: $"[Linux](($release_info.download_urls.linux)) | [macOS](($release_info.download_urls.macos)) | [Windows](($release_info.download_urls.windows))", inline: false }
]
url: $release_info.release_url
timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S.000Z")
@@ -388,8 +402,7 @@ def schedule_notifications [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info $"Scheduling notifications for: ($notification_config.schedule)"

# In a real implementation, this would use a job scheduler like cron
@@ -411,8 +424,7 @@ def send_notification [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info $"Sending notification to: ($channel)"

let start_time = (date now)
@@ -442,8 +454,7 @@ def send_slack_notification [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info "Sending Slack notification..."

let start_time = (date now)
@@ -468,7 +479,7 @@ def send_slack_notification [
}
}

try {
let result = (do {
let payload = {
channel: $slack_config.channel
username: $slack_config.username
@@ -477,29 +488,21 @@ def send_slack_notification [
attachments: $message_templates.slack.attachments
}

let curl_result = (curl -X POST -H "Content-type: application/json" --data ($payload | to json) $slack_config.webhook_url | complete)
curl -X POST -H "Content-type: application/json" --data ($payload | to json) $slack_config.webhook_url
} | complete)

if $curl_result.exit_code == 0 {
{
channel: "slack"
status: "success"
webhook_url: $slack_config.webhook_url
duration: ((date now) - $start_time)
}
} else {
{
channel: "slack"
status: "failed"
reason: $curl_result.stderr
duration: ((date now) - $start_time)
}
if $result.exit_code == 0 {
{
channel: "slack"
status: "success"
webhook_url: $slack_config.webhook_url
duration: ((date now) - $start_time)
}

} catch {|err|
} else {
{
channel: "slack"
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
}
@@ -511,8 +514,7 @@ def send_discord_notification [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info "Sending Discord notification..."

let start_time = (date now)
@@ -537,7 +539,7 @@ def send_discord_notification [
}
}

try {
let result = (do {
let payload = {
username: $discord_config.username
avatar_url: $discord_config.avatar_url
@@ -545,29 +547,21 @@ def send_discord_notification [
embeds: $message_templates.discord.embeds
}

let curl_result = (curl -X POST -H "Content-type: application/json" --data ($payload | to json) $discord_config.webhook_url | complete)
curl -X POST -H "Content-type: application/json" --data ($payload | to json) $discord_config.webhook_url
} | complete)

if $curl_result.exit_code == 0 {
{
channel: "discord"
status: "success"
webhook_url: $discord_config.webhook_url
duration: ((date now) - $start_time)
}
} else {
{
channel: "discord"
status: "failed"
reason: $curl_result.stderr
duration: ((date now) - $start_time)
}
if $result.exit_code == 0 {
{
channel: "discord"
status: "success"
webhook_url: $discord_config.webhook_url
duration: ((date now) - $start_time)
}

} catch {|err|
} else {
{
channel: "discord"
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
}
@@ -579,8 +573,7 @@ def send_twitter_notification [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info "Sending Twitter notification..."

let start_time = (date now)
@@ -612,8 +605,7 @@ def send_email_notification [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info "Sending email notification..."

let start_time = (date now)
@@ -645,8 +637,7 @@ def update_rss_feed [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info "Updating RSS feed..."

let start_time = (date now)
@@ -662,27 +653,29 @@ def update_rss_feed [
}
}

try {
let result = (do {
let rss_item = generate_rss_item $release_info
# RSS feed update logic would be implemented here
log warning "RSS feed update not fully implemented"
null
} | complete)

{
channel: "rss"
status: "skipped"
reason: "not fully implemented"
feed_file: $rss_config.feed_file
duration: ((date now) - $start_time)
}

} catch {|err|
{
if $result.exit_code != 0 {
return {
channel: "rss"
status: "failed"
reason: $err.msg
reason: $result.stderr
duration: ((date now) - $start_time)
}
}

{
channel: "rss"
status: "skipped"
reason: "not fully implemented"
feed_file: $rss_config.feed_file
duration: ((date now) - $start_time)
}
}

# Update website banner
@@ -691,8 +684,7 @@ def update_website_banner [
message_templates: record
config_data: record
release_info: record
]
{
] {
log info "Updating website banner..."

let start_time = (date now)
@@ -718,20 +710,20 @@ def update_website_banner [
}

# Generate RSS item for release
def generate_rss_item [release_info: record]
{
def generate_rss_item [release_info: record] {
let pub_date = (date now | format date "%a, %d %b %Y %H:%M:%S %z")
$"<item>
<title>Provisioning v($release_info.version) Released</title>
<description>($release_info.changelog)</description>
<link>($release_info.release_url)</link>
<guid>($release_info.release_url)</guid>
<pubDate>(date now | format date "%a, %d %b %Y %H:%M:%S %z")</pubDate>
<pubDate>($pub_date)</pubDate>
</item>"
}
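# The generated item renders roughly like this (values are made up):
#   <item>
#   <title>Provisioning v1.2.3 Released</title>
#   <description>2 new features, 3 bug fixes</description>
#   <pubDate>Mon, 15 Jan 2024 10:00:00 +0000</pubDate>
#   </item>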

# Show notification status
def "main status" [] {
let curl_available = (try { curl --version | complete } catch { { exit_code: 1 } }).exit_code == 0
let curl_available = (do { curl --version } | complete).exit_code == 0

let repo_root = ($env.PWD | path dirname | path dirname | path dirname)
let current_version = (detect_release_version $repo_root)
@@ -742,7 +734,7 @@ def "main status" [] {
curl: $curl_available
}
supported_channels: ["slack", "discord", "twitter", "email", "rss", "website"]
implemented_channels: ["slack", "discord"] # Only these are fully implemented
implemented_channels: ["slack", "discord"]
}
}

@@ -751,13 +743,13 @@ def "main init-config" [output_file: string = "notification-config.toml"] {
let config_template = $"# Notification Configuration

[slack]
webhook_url = \"\" # Your Slack webhook URL
webhook_url = \"\"
channel = \"#general\"
username = \"Provisioning Bot\"
icon_emoji = \":rocket:\"

[discord]
webhook_url = \"\" # Your Discord webhook URL
webhook_url = \"\"
username = \"Provisioning Bot\"
avatar_url = \"\"

@@ -799,8 +791,8 @@ api_key = \"\"

# Test notification to specific channel
def "main test" [
channel: string = "slack" # Channel to test
--config: string = "" # Configuration file
channel: string = "slack"
--config: string = ""
] {
log info $"Testing notification to: ($channel)"

@@ -127,22 +127,16 @@ def validate_rollback_request [rollback_config: record]

# Check if release version exists
let tag_name = $"v($rollback_config.release_version)"
let tag_exists = try {
git tag -l $tag_name | str trim
} catch {
""
}
let tag_result = (do { ^git tag -l $tag_name } | complete)
let tag_exists = if $tag_result.exit_code == 0 { $tag_result.stdout | str trim } else { "" }

if $tag_exists == "" {
$errors = ($errors | append $"Release tag ($tag_name) does not exist")
}

# Check if this is the latest release
let latest_tag = try {
git describe --tags --abbrev=0 2>/dev/null | str trim
} catch {
""
}
let latest_result = (do { ^git describe --tags --abbrev=0 } | complete)
let latest_tag = if $latest_result.exit_code == 0 { $latest_result.stdout | str trim } else { "" }

if $latest_tag != $tag_name {
$warnings = ($warnings | append $"Rolling back ($tag_name) which is not the latest release (latest: ($latest_tag))")
@@ -156,7 +150,7 @@ def validate_rollback_request [rollback_config: record]

# Check GitHub CLI availability for GitHub scope
if "github" in $rollback_config.scopes {
let gh_check = try { gh --version | complete } catch { { exit_code: 1 } }
let gh_check = (do { ^gh --version } | complete)
if $gh_check.exit_code != 0 {
$warnings = ($warnings | append "GitHub CLI not available - GitHub rollback will be skipped")
}
@@ -227,40 +221,32 @@ def rollback_git_release [rollback_config: record]
let tag_name = $"v($rollback_config.release_version)"
let actions_taken = []

try {
# Delete local tag
let delete_local = (git tag -d $tag_name | complete)
if $delete_local.exit_code == 0 {
$actions_taken = ($actions_taken | append "deleted local tag")
}
# Delete local tag
let delete_local = (do { git tag -d $tag_name } | complete)
let actions_taken = if $delete_local.exit_code == 0 {
($actions_taken | append "deleted local tag")
} else {
$actions_taken
}

# Delete remote tag
let delete_remote = (git push --delete origin $tag_name | complete)
if $delete_remote.exit_code == 0 {
$actions_taken = ($actions_taken | append "deleted remote tag")
} else {
log warning $"Failed to delete remote tag: ($delete_remote.stderr)"
}
# Delete remote tag
let delete_remote = (do { git push --delete origin $tag_name } | complete)
let actions_taken = if $delete_remote.exit_code == 0 {
($actions_taken | append "deleted remote tag")
} else {
log warning $"Failed to delete remote tag: ($delete_remote.stderr)"
$actions_taken
}

# Note: We don't automatically reset commits as that could be destructive
# Users should manually handle commit rollbacks if needed
# Note: We don't automatically reset commits as that could be destructive
# Users should manually handle commit rollbacks if needed

{
scope: "git"
status: "success"
tag_name: $tag_name
actions_taken: $actions_taken
duration: ((date now) - $start_time)
}

} catch {|err|
{
scope: "git"
status: "failed"
reason: $err.msg
tag_name: $tag_name
duration: ((date now) - $start_time)
}
{
scope: "git"
status: "success"
tag_name: $tag_name
actions_taken: $actions_taken
duration: ((date now) - $start_time)
}
}

@@ -272,7 +258,7 @@ def rollback_github_release [rollback_config: record]
let start_time = (date now)

# Check GitHub CLI availability
let gh_check = try { gh --version | complete } catch { { exit_code: 1 } }
let gh_check = (do { ^gh --version } | complete)
if $gh_check.exit_code != 0 {
return {
scope: "github"
@@ -294,47 +280,36 @@ def rollback_github_release [rollback_config: record]

let tag_name = $"v($rollback_config.release_version)"

try {
cd $rollback_config.repo_root
cd $rollback_config.repo_root

# Check if GitHub release exists
let release_check = (gh release view $tag_name | complete)
if $release_check.exit_code != 0 {
return {
scope: "github"
status: "skipped"
reason: $"GitHub release ($tag_name) does not exist"
duration: ((date now) - $start_time)
}
# Check if GitHub release exists
let release_check = (do { gh release view $tag_name } | complete)
if $release_check.exit_code != 0 {
return {
scope: "github"
status: "skipped"
reason: $"GitHub release ($tag_name) does not exist"
duration: ((date now) - $start_time)
}
}

# Delete GitHub release
let delete_result = (gh release delete $tag_name --yes | complete)
# Delete GitHub release
let delete_result = (do { gh release delete $tag_name --yes } | complete)

if $delete_result.exit_code == 0 {
log info $"Successfully deleted GitHub release: ($tag_name)"
if $delete_result.exit_code == 0 {
log info $"Successfully deleted GitHub release: ($tag_name)"

{
scope: "github"
status: "success"
release_tag: $tag_name
duration: ((date now) - $start_time)
}
} else {
{
scope: "github"
status: "failed"
reason: $delete_result.stderr
release_tag: $tag_name
duration: ((date now) - $start_time)
}
{
scope: "github"
status: "success"
release_tag: $tag_name
duration: ((date now) - $start_time)
}

} catch {|err|
} else {
{
scope: "github"
status: "failed"
reason: $err.msg
reason: $delete_result.stderr
release_tag: $tag_name
duration: ((date now) - $start_time)
}
@@ -523,17 +498,13 @@ def "main status" [release_version: string = ""] {

if $release_version == "" {
# Show general rollback status
let latest_tag = try {
git describe --tags --abbrev=0 2>/dev/null | str trim
} catch {
"none"
}
let latest_result = (do { ^git describe --tags --abbrev=0 } | complete)
let latest_tag = if $latest_result.exit_code == 0 { $latest_result.stdout | str trim } else { "none" }

let recent_tags = try {
git tag -l --sort=-version:refname | head -5 | lines
} catch {
[]
}
let recent_result = (do { ^git tag -l --sort=-version:refname | head -5 } | complete)
let recent_tags = if $recent_result.exit_code == 0 { $recent_result.stdout | lines } else { [] }

let gh_avail = (do { ^gh --version } | complete)

return {
repository: $repo_root
@@ -542,20 +513,19 @@ def "main status" [release_version: string = ""] {
rollback_scopes: ["git", "github", "packages", "containers", "notifications"]
tools_available: {
git: true
github_cli: (try { gh --version | complete } catch { { exit_code: 1 } }).exit_code == 0
github_cli: ($gh_avail.exit_code == 0)
}
}
} else {
# Show status for specific release
let tag_name = $"v($release_version)"
let tag_exists = try {
git tag -l $tag_name | str trim
} catch {
""
}
let tag_result = (do { ^git tag -l $tag_name } | complete)
let tag_exists = if $tag_result.exit_code == 0 { $tag_result.stdout | str trim } else { "" }

let github_release_exists = if (try { gh --version | complete } catch { { exit_code: 1 } }).exit_code == 0 {
(try { gh release view $tag_name | complete } catch { { exit_code: 1 } }).exit_code == 0
let gh_check = (do { ^gh --version } | complete)
let github_release_exists = if $gh_check.exit_code == 0 {
let release_check = (do { ^gh release view $tag_name } | complete)
$release_check.exit_code == 0
} else {
false
}
@@ -575,26 +545,22 @@ def "main list" [] {
let repo_root = ($env.PWD | path dirname | path dirname | path dirname)
cd $repo_root

let tags = try {
git tag -l --sort=-version:refname | head -10 | lines
} catch {
[]
}
let tags_result = (do { ^git tag -l --sort=-version:refname | head -10 } | complete)
let tags = if $tags_result.exit_code == 0 { $tags_result.stdout | lines } else { [] }

let gh_available = (try { gh --version | complete } catch { { exit_code: 1 } }).exit_code == 0
let gh_avail_check = (do { ^gh --version } | complete)
let gh_available = ($gh_avail_check.exit_code == 0)

$tags | each {|tag|
let release_exists = if $gh_available {
(try { gh release view $tag | complete } catch { { exit_code: 1 } }).exit_code == 0
let release_check = (do { ^gh release view $tag } | complete)
$release_check.exit_code == 0
} else {
false
}

let tag_date = try {
git log -1 --format=%cd --date=short $tag | str trim
} catch {
"unknown"
}
let tag_date_result = (do { ^git log -1 --format=%cd --date=short $tag } | complete)
let tag_date = if $tag_date_result.exit_code == 0 { $tag_date_result.stdout | str trim } else { "unknown" }

{
tag: $tag

@@ -22,8 +22,7 @@ def main [
--verify-uploads = true # Verify uploads after completion
--dry-run = false # Show what would be uploaded without doing it
--verbose # Enable verbose logging
]
{
] {

let artifacts_root = ($artifacts_dir | path expand)
let upload_targets = if $targets == "all" {
@@ -120,9 +119,8 @@ def main [
}

# Detect current release tag
def detect_current_release_tag []
{
try {
def detect_current_release_tag [] {
let result = (do {
let latest_tag = (git describe --tags --exact-match HEAD | str trim)
if $latest_tag != "" {
return $latest_tag
@@ -130,15 +128,18 @@ def detect_current_release_tag []

# Fallback to latest tag
git describe --tags --abbrev=0 | str trim
} catch {
} | complete)

if $result.exit_code != 0 {
log warning "No release tag found, using 'latest'"
return "latest"
}

$result.stdout
}

# Find available artifacts to upload
def find_available_artifacts [upload_config: record]
{
def find_available_artifacts [upload_config: record] {
# Define artifact patterns by type
let artifact_patterns = {
archives: ["*.tar.gz", "*.zip"]
@@ -148,13 +149,13 @@ def find_available_artifacts [upload_config: record]
metadata: ["checksums.txt", "manifest.json", "*.sig"]
}

let all_artifacts = []
mut all_artifacts = []

# Find artifacts by pattern
for category in ($artifact_patterns | columns) {
let patterns = ($artifact_patterns | get $category)
for pattern in $patterns {
let found_files = (find $upload_config.artifacts_dir -name $pattern -type f)
let found_files = (glob ($upload_config.artifacts_dir + "/" + $pattern) | where { |p| ($p | path type) == "file" })
for file in $found_files {
$all_artifacts = ($all_artifacts | append {
path: $file
@@ -171,18 +172,21 @@ def find_available_artifacts [upload_config: record]
}

# Load credentials from configuration file
def load_credentials [credentials_file: string]
{
def load_credentials [credentials_file: string] {
if not ($credentials_file | path exists) {
log warning $"Credentials file not found: ($credentials_file)"
return {}
}

try {
let result = (do {
open $credentials_file
} catch {|err|
log warning $"Failed to load credentials: ($err.msg)"
return {}
} | complete)

if $result.exit_code != 0 {
log warning $"Failed to load credentials: ($result.stderr)"
{}
} else {
$result.stdout
}
}
|
||||
|
||||
@ -191,8 +195,7 @@ def upload_parallel [
|
||||
upload_config: record
|
||||
artifacts: list
|
||||
credentials: record
|
||||
]
|
||||
{
|
||||
] {
|
||||
# For simplicity, using sequential for now
|
||||
# In a real implementation, you might use background processes
|
||||
upload_sequential $upload_config $artifacts $credentials
|
||||
@ -203,8 +206,7 @@ def upload_sequential [
|
||||
upload_config: record
|
||||
artifacts: list
|
||||
credentials: record
|
||||
]
|
||||
{
|
||||
] {
|
||||
$upload_config.targets | each {|target|
|
||||
upload_to_target $target $artifacts $upload_config $credentials
|
||||
}
|
||||
@ -216,8 +218,7 @@ def upload_to_target [
|
||||
artifacts: list
|
||||
upload_config: record
|
||||
credentials: record
|
||||
]
|
||||
{
|
||||
] {
|
||||
log info $"Uploading to target: ($target)"
|
||||
|
||||
let start_time = (date now)
|
||||
@ -246,8 +247,7 @@ def upload_to_github [
|
||||
artifacts: list
|
||||
upload_config: record
|
||||
credentials: record
|
||||
]
|
||||
{
|
||||
] {
|
||||
log info $"Uploading to GitHub releases..."
|
||||
|
||||
let start_time = (date now)
|
||||
@ -263,7 +263,8 @@ def upload_to_github [
|
||||
}
|
||||
|
||||
# Check GitHub CLI availability
|
||||
let gh_check = try { gh --version | complete } catch { { exit_code: 1 } }
|
||||
    let gh_check = (do { ^gh --version } | complete)
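    # A single `do { ^gh --version } | complete` probe matches the docker/npm/
    # cargo availability checks later in this commit; only exit_code is used.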
    if $gh_check.exit_code != 0 {
        return {
            target: "github"
@ -274,36 +275,37 @@ def upload_to_github [
        }
    }

    let upload_errors = []
    let uploaded_count = 0

    # Filter artifacts suitable for GitHub releases
    let github_artifacts = ($artifacts | where category in ["archives", "packages", "metadata"])

    for artifact in $github_artifacts {
        try {
            if $upload_config.verbose {
                log info $"Uploading to GitHub: ($artifact.name)"
            }
    let upload_result = (do {
        mut upload_errors = []
        mut uploaded_count = 0

            let upload_result = (gh release upload $upload_config.release_tag $artifact.path | complete)
        for artifact in $github_artifacts {
            let result = (do {
                if $upload_config.verbose {
                    log info $"Uploading to GitHub: ($artifact.name)"
                }

            if $upload_result.exit_code == 0 {
                $uploaded_count = $uploaded_count + 1
            } else {
                gh release upload $upload_config.release_tag $artifact.path | complete
            } | complete)

            if $result.exit_code != 0 {
                $upload_errors = ($upload_errors | append {
                    artifact: $artifact.name
                    error: $upload_result.stderr
                    error: $result.stderr
                })
            } else {
                $uploaded_count = ($uploaded_count + 1)
            }

        } catch {|err|
            $upload_errors = ($upload_errors | append {
                artifact: $artifact.name
                error: $err.msg
            })
        }
    }

        { errors: $upload_errors, count: $uploaded_count }
    } | complete)

    let upload_errors = (if $upload_result.exit_code != 0 { [] } else { $upload_result.stdout.errors })
    let uploaded_count = (if $upload_result.exit_code != 0 { 0 } else { $upload_result.stdout.count })

    let status = if ($upload_errors | length) > 0 { "partial" } else { "success" }

@ -322,14 +324,13 @@ def upload_to_docker [
    artifacts: list
    upload_config: record
    credentials: record
]
{
] {
    log info $"Uploading to Docker registry..."

    let start_time = (date now)

    # Check Docker availability
    let docker_check = try { docker --version | complete } catch { { exit_code: 1 } }
    let docker_check = (do { docker --version } | complete)
    if $docker_check.exit_code != 0 {
        return {
            target: "docker"
@ -351,32 +352,42 @@ def upload_to_docker [
        }
    }

    let upload_errors = []
    let uploaded_count = 0

    # Find container artifacts
    let container_artifacts = ($artifacts | where category == "containers")

    for artifact in $container_artifacts {
        try {
            if $upload_config.verbose {
                log info $"Loading Docker image: ($artifact.name)"
    let upload_result = (do {
        mut upload_errors = []
        mut uploaded_count = 0

        for artifact in $container_artifacts {
            let result = (do {
                if $upload_config.verbose {
                    log info $"Loading Docker image: ($artifact.name)"
                }

                # Load container image
                docker load -i $artifact.path

                # Tag and push (would need proper registry configuration)
                log warning "Docker registry push not fully implemented - container loaded locally"
                { success: true }
            } | complete)

            if $result.exit_code != 0 {
                $upload_errors = ($upload_errors | append {
                    artifact: $artifact.name
                    error: $result.stderr
                })
            } else {
                $uploaded_count = ($uploaded_count + 1)
            }

            # Load container image
            docker load -i $artifact.path

            # Tag and push (would need proper registry configuration)
            log warning "Docker registry push not fully implemented - container loaded locally"
            $uploaded_count = $uploaded_count + 1

        } catch {|err|
            $upload_errors = ($upload_errors | append {
                artifact: $artifact.name
                error: $err.msg
            })
        }
    }

        { errors: $upload_errors, count: $uploaded_count }
    } | complete)

    let upload_errors = (if $upload_result.exit_code != 0 { [] } else { $upload_result.stdout.errors })
    let uploaded_count = (if $upload_result.exit_code != 0 { 0 } else { $upload_result.stdout.count })

    let status = if ($upload_errors | length) > 0 { "partial" } else { "success" }

@ -395,14 +406,13 @@ def upload_to_npm [
    artifacts: list
    upload_config: record
    credentials: record
]
{
] {
    log info $"Uploading to npm registry..."

    let start_time = (date now)

    # Check for npm packages
    let npm_artifacts = ($artifacts | where name =~ "\.tgz$")
    let npm_artifacts = ($artifacts | where name =~ r#'\.tgz$'#)
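    # Double-quoted strings process backslash escapes (so "\.tgz$" is rejected
    # by newer Nushell); a raw string passes the regex through verbatim.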

    if ($npm_artifacts | length) == 0 {
        return {
@ -440,8 +450,7 @@ def upload_to_cargo [
    artifacts: list
    upload_config: record
    credentials: record
]
{
] {
    log info $"Uploading to Cargo registry..."

    let start_time = (date now)
@ -463,8 +472,7 @@ def upload_to_homebrew [
    artifacts: list
    upload_config: record
    credentials: record
]
{
] {
    log info $"Uploading to Homebrew..."

    let start_time = (date now)
@ -494,12 +502,11 @@ def upload_to_homebrew [
def verify_uploads [
    upload_results: list
    upload_config: record
]
{
] {
    log info "Verifying uploads..."

    let verified = []
    let failed_verifications = []
    mut verified = []
    mut failed_verifications = []

    for result in $upload_results {
        if $result.status in ["success", "partial"] {
@ -522,8 +529,7 @@ def verify_uploads [
    }

# Verify upload to specific target
def verify_target_upload [target: string, upload_config: record]
{
def verify_target_upload [target: string, upload_config: record] {
    match $target {
        "github" => { verify_github_upload $upload_config }
        "docker" => { verify_docker_upload $upload_config }
@ -538,9 +544,8 @@ def verify_target_upload [target: string, upload_config: record]
}

# Verify GitHub upload
def verify_github_upload [upload_config: record]
{
    try {
def verify_github_upload [upload_config: record] {
    let result = (do {
        let release_info = (gh release view $upload_config.release_tag --json assets | from json)
        let asset_count = ($release_info.assets | length)

@ -549,18 +554,21 @@ def verify_github_upload [upload_config: record]
            status: "success"
            verified_assets: $asset_count
        }
    } catch {|err|
    } | complete)

    if $result.exit_code != 0 {
        {
            target: "github"
            status: "failed"
            reason: $err.msg
            reason: $result.stderr
        }
    } else {
        $result.stdout
    }
}

# Verify Docker upload
def verify_docker_upload [upload_config: record]
{
def verify_docker_upload [upload_config: record] {
    # Docker verification would check registry
    {
        target: "docker"
@ -571,10 +579,10 @@ def verify_docker_upload [upload_config: record]

# Show upload targets and their status
def "main info" [] {
    let github_available = (try { gh --version | complete } catch { { exit_code: 1 } }).exit_code == 0
    let docker_available = (try { docker --version | complete } catch { { exit_code: 1 } }).exit_code == 0
    let npm_available = (try { npm --version | complete } catch { { exit_code: 1 } }).exit_code == 0
    let cargo_available = (try { cargo --version | complete } catch { { exit_code: 1 } }).exit_code == 0
    let github_available = ((do { gh --version } | complete).exit_code == 0)
    let docker_available = ((do { docker --version } | complete).exit_code == 0)
    let npm_available = ((do { npm --version } | complete).exit_code == 0)
    let cargo_available = ((do { cargo --version } | complete).exit_code == 0)
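    # The four probes above share one shape; a helper along these lines could
    # fold them (hypothetical, not part of this commit):
    #
    #     def tool-available [cmd: string]: nothing -> bool {
    #         (do { ^$cmd --version } | complete).exit_code == 0
    #     }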

    {
        available_targets: {

@ -1,104 +0,0 @@
#!/bin/bash
# Setup iTerm2 splits for Claude Code agent monitoring
# Usage: ./provisioning/tools/setup-iterm-monitoring.sh

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

echo "🚀 Setting up iTerm2 monitoring layout for Claude Code..."
echo ""

# AppleScript to create iTerm splits
osascript <<EOF
tell application "iTerm"
    activate

    -- Create new window
    create window with default profile

    tell current session of current window
        -- Main pane (Claude Code runs here)
        set name to "Claude Code"
        write text "cd $PROJECT_ROOT"
        write text "echo ''"
        write text "echo '✅ Main Panel: Run Claude Code here'"
        write text "echo ' Commands like: provisioning/core/cli/provisioning <command>'"
        write text "echo ''"
    end tell

    -- Split horizontally (monitoring dashboard on bottom)
    tell current session of current window
        -- Split horizontally (top/bottom)
        set monitoring_pane to (split horizontally with default profile)
    end tell

    tell monitoring_pane
        set name to "Agent Monitor"
        write text "cd $PROJECT_ROOT"
        write text "echo ''"
        write text "echo '📊 Monitoring Panel: Agent activity'"
        write text "echo ' Running: nu provisioning/tools/monitor-agents.nu --mode dashboard'"
        write text "echo ''"
        write text "sleep 2"
        write text "nu provisioning/tools/monitor-agents.nu --mode dashboard"
    end tell

    -- Split the top pane vertically (reports on right)
    tell first session of current tab of current window
        set reports_pane to (split vertically with default profile)
    end tell

    tell reports_pane
        set name to "Reports"
        write text "cd $PROJECT_ROOT"
        write text "echo ''"
        write text "echo '📄 Reports Panel: Real-time report viewer'"
        write text "echo ' Monitoring /tmp for new reports...'"
        write text "echo ''"
        write text "echo 'Available commands:'"
        write text "echo ' - bat /tmp/<report>.md # View specific report'"
        write text "echo ' - nu provisioning/tools/monitor-agents.nu --mode reports'"
        write text "echo ''"
    end tell

    -- Resize panes for better visibility
    tell current window
        -- Make bottom pane smaller (monitoring dashboard)
        tell monitoring_pane
            -- Set height to about 30% of window
        end tell
    end tell

    -- Select the main Claude Code pane
    tell current window
        select first session of current tab
    end tell

end tell
EOF

echo ""
echo "✅ iTerm2 monitoring layout created!"
echo ""
echo "📋 Panel Layout:"
echo "   ┌─────────────────┬──────────────────┐"
echo "   │                 │                  │"
echo "   │   Claude Code   │     Reports      │"
echo "   │     (Main)      │     (Viewer)     │"
echo "   │                 │                  │"
echo "   ├─────────────────┴──────────────────┤"
echo "   │                                    │"
echo "   │    Agent Monitor (Dashboard)       │"
echo "   │                                    │"
echo "   └────────────────────────────────────┘"
echo ""
echo "💡 Tips:"
echo " • Top left: Run your Claude Code commands here"
echo " • Top right: View reports with 'bat /tmp/<file>.md'"
echo " • Bottom: Auto-refreshing agent activity monitor"
echo ""
echo "🎯 Navigation:"
echo " • Cmd+Option+Arrow Keys: Switch between panes"
echo " • Cmd+D: New vertical split"
echo " • Cmd+Shift+D: New horizontal split"
echo ""
@ -1,120 +0,0 @@
#!/bin/bash
# Setup tmux layout for Claude Code agent monitoring
# Usage: ./provisioning/tools/setup-tmux-monitoring.sh

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SESSION_NAME="claude-code-monitor"

echo "🚀 Setting up tmux monitoring layout for Claude Code..."
echo ""

# Check if tmux is installed
if ! command -v tmux &> /dev/null; then
    echo "❌ tmux is not installed. Installing via Homebrew..."
    brew install tmux
fi

# Kill existing session if it exists
tmux kill-session -t $SESSION_NAME 2>/dev/null

# Create new session
tmux new-session -d -s $SESSION_NAME -c "$PROJECT_ROOT"

# Rename first window
tmux rename-window -t $SESSION_NAME:0 'Claude-Monitor'

# Main pane (Claude Code)
tmux send-keys -t $SESSION_NAME:0 "cd $PROJECT_ROOT" C-m
tmux send-keys -t $SESSION_NAME:0 "clear" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0 "echo '╔══════════════════════════════════════════════════════════════╗'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo '║                    Claude Code Main Panel                    ║'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo '╚══════════════════════════════════════════════════════════════╝'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0 "echo '✅ Run your Claude Code commands here:'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ' provisioning/core/cli/provisioning <command>'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0 "echo '📊 Monitors running in other panes:'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ' • Right pane: Report viewer'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ' • Bottom pane: Agent activity dashboard'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0 "echo '🔑 Tmux shortcuts:'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ' Ctrl+B then h/j/k/l → Navigate panes (vim style)'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ' Ctrl+B then [ → Enter scroll/copy mode'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ' Ctrl+B then d → Detach session'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ' Mouse: Click and scroll works!'" C-m
tmux send-keys -t $SESSION_NAME:0 "echo ''" C-m

# Split vertically (reports pane on right) - 70% / 30%
tmux split-window -h -t $SESSION_NAME:0 -c "$PROJECT_ROOT" -p 30
tmux send-keys -t $SESSION_NAME:0.1 "cd $PROJECT_ROOT" C-m
tmux send-keys -t $SESSION_NAME:0.1 "clear" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo '╔════════════════════════════════╗'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo '║         Reports Viewer         ║'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo '╚════════════════════════════════╝'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo '📄 View reports with:'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo 'bat /tmp/<report>.md'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo 'Or run monitor:'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo 'nu provisioning/tools/monitor-agents.nu --mode reports'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo ''" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo '───────────────────────────────'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "echo 'Recent reports:'" C-m
tmux send-keys -t $SESSION_NAME:0.1 "ls /tmp | grep -E '(report|summary|verification)' | tail -5" C-m

# Split the main pane horizontally (agent monitor on bottom) - 70% / 30%
tmux select-pane -t $SESSION_NAME:0.0
tmux split-window -v -t $SESSION_NAME:0.0 -c "$PROJECT_ROOT" -p 35
tmux send-keys -t $SESSION_NAME:0.1 "cd $PROJECT_ROOT" C-m
tmux send-keys -t $SESSION_NAME:0.1 "sleep 1" C-m
tmux send-keys -t $SESSION_NAME:0.1 "nu provisioning/tools/monitor-agents.nu --mode dashboard --refresh 3" C-m

# Select the main pane
tmux select-pane -t $SESSION_NAME:0.0

echo "✅ Tmux session '$SESSION_NAME' created!"
echo ""
echo "📋 Panel Layout:"
echo "   ┌─────────────────────────┬──────────────┐"
echo "   │                         │              │"
echo "   │   Claude Code (Main)    │   Reports    │"
echo "   │                         │   (Viewer)   │"
echo "   │                         │              │"
echo "   ├─────────────────────────┴──────────────┤"
echo "   │                                        │"
echo "   │     Agent Monitor (Auto-refresh)       │"
echo "   │                                        │"
echo "   └────────────────────────────────────────┘"
echo ""
echo "🚀 To attach to the session, run:"
echo " tmux attach -t $SESSION_NAME"
echo ""
echo "💡 Tmux Quick Reference:"
echo " Ctrl+B then: "
echo " • h/j/k/l → Navigate panes (vim-style)"
echo " • Arrow keys → Navigate panes"
echo " • [ → Scroll mode (q to exit, use vi keys)"
echo " • d → Detach (session keeps running)"
echo " • z → Zoom current pane (toggle fullscreen)"
echo " • x → Close current pane"
echo ""
echo "🖱️ Mouse Support:"
echo " • Click to select pane"
echo " • Scroll wheel works in all panes"
echo " • Drag border to resize panes"
echo ""
echo "📚 Full tmux config:"
echo " provisioning/tools/claude-code-tmux.conf"
echo ""

# Auto-attach if run interactively
if [ -t 0 ]; then
    echo "🔗 Attaching to session in 2 seconds... (Ctrl+C to cancel)"
    sleep 2
    tmux attach -t $SESSION_NAME
fi
@ -4,9 +4,6 @@

use std log

# Test results accumulator
mut $test_results = []

# Test 1: Check if OCI tools are available
export def "test oci-tools" []: nothing -> bool {
    log info "Test 1: Checking OCI tools availability..."
@ -34,15 +31,17 @@ export def "test oci-tools" []: nothing -> bool {
export def "test kcl-schemas" []: nothing -> bool {
    log info "Test 2: Validating KCL schemas..."

    try {
    let result = (do {
        cd provisioning/kcl
        let result = (kcl run dependencies.k)
        kcl run dependencies.k
    } | complete)

    if $result.exit_code != 0 {
        log error $" ✗ KCL schema compilation failed: ($result.stderr)"
        false
    } else {
        log info " ✓ KCL schemas compile successfully"
        true
    } catch { |err|
        log error $" ✗ KCL schema compilation failed: ($err.msg)"
        false
    }
}

@ -58,19 +57,22 @@ export def "test nushell-modules" []: nothing -> bool {
    "provisioning/tools/migrate-to-oci.nu"
]

    mut all_valid = true

    for module in $modules {
        try {
    let results = $modules | each {|module|
        let basename = ($module | path basename)
        let result = (do {
            nu --commands $"use ($module)"
            log info $" ✓ ($module | path basename) - Valid syntax"
        } catch { |err|
            log error $" ✗ ($module | path basename) - Syntax error"
            $all_valid = false
        } | complete)

        if $result.exit_code != 0 {
            log error $" ✗ ($basename) - Syntax error"
            false
        } else {
            log info $" ✓ ($basename) - Valid syntax"
            true
        }
    }

    $all_valid
    $results | all {|it| $it == true}
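    # `each` + `all` replaces the `mut all_valid` flag because Nushell closures
    # cannot capture mutable variables; each iteration yields its own bool and
    # the aggregate is folded at the end.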
}

# Test 4: Validate directory structure
@ -159,20 +161,21 @@ export def "test implementation-size" []: nothing -> bool {
export def "test manifest-template" []: nothing -> bool {
    log info "Test 7: Testing manifest generation..."

    try {
        # This would require loading the oci-package module
        # For now, just check file existence
    let result = (do {
        let package_file = "provisioning/tools/oci-package.nu"
        if ($package_file | path exists) {
            log info " ✓ OCI package tool available"
            true
            "available"
        } else {
            log error " ✗ OCI package tool missing"
            false
            error make {msg: "OCI package tool missing"}
        }
    } catch { |err|
        log error $" ✗ Manifest template test failed: ($err.msg)"
    } | complete)

    if $result.exit_code != 0 {
        log error $" ✗ Manifest template test failed: ($result.stderr)"
        false
    } else {
        log info " ✓ OCI package tool available"
        true
    }
}


@ -42,10 +42,14 @@ export def test-provider-loading []: nothing -> nothing {
    for provider_name in ["aws", "upcloud", "local"] {
        log-info $"Testing load of provider: ($provider_name)" "test"

        let provider = try {
        let load_result = (do {
            load-provider $provider_name
        } catch {|err|
            log-error $"Failed to load ($provider_name): ($err.msg)" "test"
        } | complete)

        let provider = if $load_result.exit_code == 0 {
            $load_result.stdout
        } else {
            log-error $"Failed to load ($provider_name): ($load_result.stderr)" "test"
            {}
        }
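        # Falling back to an empty record keeps the remaining providers in the
        # loop testable even when one fails to load.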

@ -123,11 +127,14 @@ export def test-middleware []: nothing -> nothing {

    # Test query servers (this will show the dynamic dispatch in action)
    log-info "Testing server query with dynamic provider dispatch..." "test"
    try {
        let query_result = (mw_query_servers $mock_settings)
        log-info $"Query completed, returned ($query_result | length) results" "test"
    } catch {|err|
        log-warning $"Query test failed (expected for mock data): ($err.msg)" "test"
    let query_result = (do {
        mw_query_servers $mock_settings
    } | complete)

    if $query_result.exit_code == 0 {
        log-info $"Query completed, returned ($query_result.stdout | length) results" "test"
    } else {
        log-warning $"Query test failed (expected for mock data): ($query_result.stderr)" "test"
    }
}


@ -130,7 +130,7 @@ def install-workspace-provisioning [workspace_path: string]: nothing -> nothing
    # Check if source exists
    if not ($kcl_source | path exists) {
        # Try alternative: check if PROVISIONING env var is set
        let alt_source = try {
        let alt_source_result = (do {
            let config = (get-config)
            let base_path = ($config.paths.base? | default "")
            if ($base_path | is-not-empty) {
@ -138,8 +142,12 @@
            } else {
                ""
            }
        } catch {
        } | complete)

        let alt_source = if $alt_source_result.exit_code != 0 {
            ""
        } else {
            $alt_source_result.stdout
        }

        if ($alt_source | is-not-empty) and ($alt_source | path exists) {
@ -184,7 +188,7 @@ def install-home-provisioning []: nothing -> nothing {
    # Check if source exists
    if not ($kcl_source | path exists) {
        # Try alternative: check if PROVISIONING env var is set
        let alt_source = try {
        let alt_source_result = (do {
            let config = (get-config)
            let base_path = ($config.paths.base? | default "")
            if ($base_path | is-not-empty) {
@ -192,8 +196,12 @@
            } else {
                ""
            }
        } catch {
        } | complete)

        let alt_source = if $alt_source_result.exit_code != 0 {
            ""
        } else {
            $alt_source_result.stdout
        }

        if ($alt_source | is-not-empty) and ($alt_source | path exists) {
@ -259,11 +267,15 @@ def get-kcl-version [kcl_path: string]: nothing -> string {
        return "unknown"
    }

    try {
    let result = (do {
        let mod_content = (open $kcl_mod | from toml)
        $mod_content.package?.version? | default "unknown"
    } catch {
    } | complete)

    if $result.exit_code != 0 {
        "unknown"
    } else {
        $result.stdout
    }
}

@ -277,7 +289,7 @@ def build-distribution-package [provisioning_root: string, kcl_source: string]:
    mkdir $registry_dir

    # Generate package info
    let version = try {
    let version_result = (do {
        let kcl_mod = ($kcl_source | path join "kcl.mod")
        if ($kcl_mod | path exists) {
            let mod_content = (open $kcl_mod | from toml)
@ -285,8 +297,12 @@
        } else {
            "0.0.1"
        }
    } catch {
    } | complete)

    let version = if $version_result.exit_code != 0 {
        "0.0.1"
    } else {
        $version_result.stdout
    }

    let package_name = $"provisioning-kcl-($version).tar.gz"
@ -364,10 +380,17 @@ For more information, visit the provisioning documentation.
    $readme_content | save ($staging_dir | path join "README.md")

    # Create package archive
    try {
    let tar_result = (do {
        cd $staging_dir
        tar czf $package_path provisioning README.md
        cd $dist_dir # Go back before cleanup
    } | complete)

    if $tar_result.exit_code != 0 {
        print $" ⚠️ Warning: Could not create distribution package: ($tar_result.stderr)"
        # Try cleanup anyway
        let cleanup_result = (do { rm -rf $staging_dir } | complete)
    } else {
        print $" ✅ Created distribution package: ($package_path)"

        # Update registry
@ -375,10 +398,6 @@ For more information, visit the provisioning documentation.

        # Clean up staging
        rm -rf $staging_dir
    } catch {
        print $" ⚠️ Warning: Could not create distribution package: ($in)"
        # Try cleanup anyway
        try { rm -rf $staging_dir } catch { }
    }
}

@ -441,12 +460,14 @@ def load-default-modules [workspace_path: string, infra_path: string]: nothing -
    }

    # Load os taskserv to infrastructure layer
    try {
    let taskserv_result = (do {
        print $" 📦 Loading os taskserv to infrastructure layer..."
        ^nu $module_loader "load" "taskservs" $infra_abs "os"
        print $" ✅ Loaded os taskserv"
    } catch {
        print $" ⚠️ Warning: Could not load os taskserv: ($in)"
    } | complete)

    if $taskserv_result.exit_code != 0 {
        print $" ⚠️ Warning: Could not load os taskserv: ($taskserv_result.stderr)"
        print $" 💡 You can load it manually: cd ($infra_path) && provisioning mod load taskservs . os"
    }
}
@ -907,17 +928,32 @@ export def "main info" [workspace_path: string]: nothing -> record {
        error make { msg: $"Workspace does not exist: ($workspace_abs)" }
    }

    let taskservs_manifest = try {
    let taskservs_result = (do {
        open ($workspace_abs | path join ".manifest" | path join "taskservs.yaml")
    } catch { { loaded_taskservs: [] } }
    } | complete)
    let taskservs_manifest = if $taskservs_result.exit_code != 0 {
        { loaded_taskservs: [] }
    } else {
        $taskservs_result.stdout
    }

    let providers_manifest = try {
    let providers_result = (do {
        open ($workspace_abs | path join ".manifest" | path join "providers.yaml")
    } catch { { loaded_providers: [] } }
    } | complete)
    let providers_manifest = if $providers_result.exit_code != 0 {
        { loaded_providers: [] }
    } else {
        $providers_result.stdout
    }

    let clusters_manifest = try {
    let clusters_result = (do {
        open ($workspace_abs | path join ".manifest" | path join "clusters.yaml")
    } catch { { loaded_clusters: [] } }
    } | complete)
    let clusters_manifest = if $clusters_result.exit_code != 0 {
        { loaded_clusters: [] }
    } else {
        $clusters_result.stdout
    }
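    # The three manifest loads above repeat one pattern; a helper along these
    # lines could collapse them (hypothetical, not part of this commit):
    #
    #     def load-manifest-or-default [path: string, default: record] {
    #         let r = (do { open --raw $path } | complete)
    #         if $r.exit_code == 0 { $r.stdout | from yaml } else { $default }
    #     }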

    {
        workspace: $workspace_abs

@ -1,390 +0,0 @@
#!/usr/bin/env nu

# Workspace Migration Tool
# Migrates existing workspaces to new KCL package and module loader structure

use ../core/nulib/lib_provisioning/config/accessor.nu *

# Main migration command
def main [workspace_path: string] {
    print $"Migrating workspace: ($workspace_path)"

    # Validate workspace exists
    if not ($workspace_path | path exists) {
        error make { msg: $"Workspace not found: ($workspace_path)" }
    }

    # Analyze current workspace
    let analysis = analyze-workspace $workspace_path

    print $"📊 Workspace Analysis:"
    print $" Type: ($analysis.type)"
    print $" KCL files: ($analysis.kcl_files | length)"
    print $" Has kcl.mod: ($analysis.has_kcl_mod)"
    print $" Extension references: ($analysis.extension_refs | length)"

    # Perform migration steps
    let migration_result = migrate-workspace $workspace_path $analysis

    print $"✅ Migration completed"
    print $"📁 New structure created"
    print $"📄 Configuration files updated"
    print $"🔄 Import paths converted"

    if not ($migration_result.warnings | is-empty) {
        print ""
        print "⚠️ Warnings:"
        for $warning in $migration_result.warnings {
            print $" - ($warning)"
        }
    }

    print ""
    print "Next steps:"
    print " 1. Review updated kcl.mod file"
    print " 2. Load required modules: module-loader load taskservs . [modules...]"
    print " 3. Test configuration: kcl run servers.k"
    print " 4. Deploy: provisioning server create --infra . --check"
}

# Analyze existing workspace structure
def analyze-workspace [workspace_path: string]: nothing -> record {
    let workspace_abs = ($workspace_path | path expand)

    # Find KCL files
    let kcl_files = try {
        glob ($workspace_abs | path join "**/*.k") | where { |path| $path | path exists }
    } catch { [] }

    # Check for kcl.mod
    let kcl_mod_path = ($workspace_abs | path join "kcl.mod")
    let has_kcl_mod = ($kcl_mod_path | path exists)

    # Find extension references in KCL files
    let extension_refs = ($kcl_files | each { |file|
        let content = try { open $file } catch { "" }
        # Look for relative imports pointing to extensions
        let imports = ($content | lines | where { |line| ($line | str contains "import") and ($line | str contains "../") })
        $imports | each { |import| { file: $file, import: $import } }
    } | flatten)

    # Detect workspace type
    let workspace_type = if ($extension_refs | where { |ref| $ref.import | str contains "taskservs" } | length) > 0 {
        "legacy-with-taskservs"
    } else if ($kcl_files | length) > 0 {
        "kcl-workspace"
    } else {
        "basic"
    }

    {
        path: $workspace_abs,
        type: $workspace_type,
        kcl_files: $kcl_files,
        has_kcl_mod: $has_kcl_mod,
        extension_refs: $extension_refs,
        needs_migration: ((not $has_kcl_mod) or (($extension_refs | length) > 0))
    }
}

# Perform workspace migration
def migrate-workspace [workspace_path: string, analysis: record]: nothing -> record {
    let workspace_abs = ($workspace_path | path expand)
    let warnings = []

    # Step 1: Create new directory structure
    print "🔧 Creating new directory structure..."
    mkdir ($workspace_abs | path join ".taskservs")
    mkdir ($workspace_abs | path join ".providers")
    mkdir ($workspace_abs | path join ".clusters")
    mkdir ($workspace_abs | path join ".manifest")

    # Ensure other standard directories exist
    mkdir ($workspace_abs | path join "data")
    mkdir ($workspace_abs | path join "tmp")
    mkdir ($workspace_abs | path join "resources")
    mkdir ($workspace_abs | path join "clusters")

    # Step 2: Create or update kcl.mod
    print "🔧 Creating/updating kcl.mod..."
    let kcl_mod_content = if $analysis.has_kcl_mod {
        # Update existing kcl.mod
        let existing = (open ($workspace_abs | path join "kcl.mod"))
        update-kcl-mod $existing
    } else {
        # Create new kcl.mod
        create-new-kcl-mod ($workspace_path | path basename)
    }
    $kcl_mod_content | save -f ($workspace_abs | path join "kcl.mod")

    # Step 3: Convert import paths in KCL files
    print "🔧 Converting import paths..."
    let conversion_results = ($analysis.kcl_files | each { |file|
        convert-imports-in-file $file
    })

    # Step 4: Create empty manifest files
    print "🔧 Creating manifest files..."
    let empty_manifest = {
        last_updated: (date now | format date "%Y-%m-%d %H:%M:%S"),
        workspace: $workspace_abs
    }

    ($empty_manifest | merge { loaded_taskservs: [] }) | to yaml | save -f ($workspace_abs | path join ".manifest" | path join "taskservs.yaml")
    ($empty_manifest | merge { loaded_providers: [] }) | to yaml | save -f ($workspace_abs | path join ".manifest" | path join "providers.yaml")
    ($empty_manifest | merge { loaded_clusters: [] }) | to yaml | save -f ($workspace_abs | path join ".manifest" | path join "clusters.yaml")

    # Step 5: Create .gitignore if it doesn't exist
    let gitignore_path = ($workspace_abs | path join ".gitignore")
    if not ($gitignore_path | path exists) {
        print "🔧 Creating .gitignore..."
        let gitignore_content = "# Workspace runtime data
.manifest/
data/
tmp/
*.log

# Module directories (managed by module-loader)
.taskservs/
.providers/
.clusters/

# Generated files
taskservs.k
providers.k
clusters.k

# Secrets and sensitive data
*.age
*.enc
*.key
"
        $gitignore_content | save $gitignore_path
    }

    # Step 6: Create migration summary
    let migration_info = {
        migrated_at: (date now | format date "%Y-%m-%d %H:%M:%S"),
        original_type: $analysis.type,
        files_converted: ($conversion_results | length),
        extension_refs_converted: ($analysis.extension_refs | length)
    }

    $migration_info | to yaml | save ($workspace_abs | path join ".migration-info.yaml")

    {
        success: true,
        workspace: $workspace_abs,
        files_converted: ($conversion_results | length),
        warnings: $warnings
    }
}

# Update existing kcl.mod to add provisioning dependency
def update-kcl-mod [existing_content: string]: nothing -> string {
    let lines = ($existing_content | lines)

    # Check if provisioning dependency already exists
    let has_provisioning_dep = ($lines | any { |line| $line | str contains "provisioning" })

    if $has_provisioning_dep {
        return $existing_content
    }

    # Find [dependencies] section or add it
    let deps_line_idx = ($lines | enumerate | where { |item| $item.item | str contains "[dependencies]" } | get index | first)

    if ($deps_line_idx | is-empty) {
        # Add dependencies section
        $existing_content + "\n\n[dependencies]\nprovisioning = { path = \"~/.kcl/packages/provisioning\", version = \"0.0.1\" }\n"
    } else {
        # Insert after dependencies line
        let before = ($lines | first ($deps_line_idx + 1))
        let after = ($lines | skip ($deps_line_idx + 1))
        ($before | append ["provisioning = { path = \"~/.kcl/packages/provisioning\", version = \"0.0.1\" }"] | append $after | str join "\n")
    }
}

# Create new kcl.mod file
def create-new-kcl-mod [workspace_name: string]: nothing -> string {
    $"[package]
name = \"($workspace_name)\"
edition = \"v0.11.3\"
version = \"0.0.1\"

[dependencies]
provisioning = { path = \"~/.kcl/packages/provisioning\", version = \"0.0.1\" }
"
}

# Convert import paths in a KCL file
def convert-imports-in-file [file_path: string]: nothing -> record {
    let content = try { open $file_path } catch { return { file: $file_path, status: "error", message: "Could not read file" } }

    # Define import conversion patterns
    let conversions = [
        # Convert relative imports to package imports
        { pattern: "import ../../../kcl/", replacement: "import provisioning." }
        { pattern: "import ../../kcl/", replacement: "import provisioning." }
        { pattern: "import ../kcl/", replacement: "import provisioning." }

        # Convert extension imports (these will need to be loaded via module-loader)
        { pattern: "import ../../../extensions/taskservs/", replacement: "# import .taskservs." }
        { pattern: "import ../../extensions/taskservs/", replacement: "# import .taskservs." }
        { pattern: "import ../extensions/taskservs/", replacement: "# import .taskservs." }

        { pattern: "import ../../../extensions/providers/", replacement: "# import .providers." }
        { pattern: "import ../../extensions/providers/", replacement: "# import .providers." }
        { pattern: "import ../extensions/providers/", replacement: "# import .providers." }

        { pattern: "import ../../../extensions/clusters/", replacement: "# import .clusters." }
        { pattern: "import ../../extensions/clusters/", replacement: "# import .clusters." }
        { pattern: "import ../extensions/clusters/", replacement: "# import .clusters." }
    ]

    # Apply conversions
    let updated_content = ($conversions | reduce -f $content { |acc, conv|
        $acc | str replace -a $conv.pattern $conv.replacement
    })

    # Check if any changes were made
    if $content != $updated_content {
        # Backup original file
        let backup_path = ($file_path + ".bak")
        $content | save $backup_path

        # Save updated content
        $updated_content | save -f $file_path

        {
            file: $file_path,
            status: "converted",
            backup: $backup_path,
            changes: true
        }
    } else {
        {
            file: $file_path,
            status: "no-changes",
            changes: false
        }
    }
}

# Dry run migration (analyze what would be changed)
export def "main dry-run" [workspace_path: string]: nothing -> record {
    print $"Analyzing workspace for migration: ($workspace_path)"

    let analysis = analyze-workspace $workspace_path

    print $"📊 Migration Analysis Report:"
    print $" Workspace: ($analysis.path)"
    print $" Type: ($analysis.type)"
    print $" Needs migration: ($analysis.needs_migration)"
    print ""
    print $" KCL files found: ($analysis.kcl_files | length)"
    for $file in $analysis.kcl_files {
        print $" - ($file | str replace ($analysis.path + '/') '')"
    }
    print ""
    print $" Extension references: ($analysis.extension_refs | length)"
    for $ref in $analysis.extension_refs {
        let file_rel = ($ref.file | str replace ($analysis.path + '/') '')
        print $" - ($file_rel): ($ref.import | str trim)"
    }

    if $analysis.needs_migration {
        print ""
        print "🔧 Migration steps that would be performed:"
        print " 1. Create .taskservs/, .providers/, .clusters/, .manifest/ directories"
        print " 2. Create/update kcl.mod with provisioning dependency"
        print " 3. Convert import paths from relative to package-based"
        print " 4. Comment out extension imports (load via module-loader)"
        print " 5. Create manifest files for module tracking"
        print " 6. Create .gitignore if missing"
    } else {
        print ""
        print "✅ Workspace appears to already be migrated or needs no migration"
    }

    $analysis
}

# Rollback migration (restore from backups)
export def "main rollback" [workspace_path: string]: nothing -> nothing {
    print $"Rolling back migration for: ($workspace_path)"

    let workspace_abs = ($workspace_path | path expand)

    # Find backup files
    let backup_files = try {
        glob ($workspace_abs | path join "**/*.bak")
    } catch { [] }

    if ($backup_files | is-empty) {
        print "⚠️ No backup files found. Cannot rollback."
        return
    }

    print $"Found ($backup_files | length) backup files"

    # Restore each backup
    for $backup in $backup_files {
        let original = ($backup | str replace ".bak" "")
        print $"Restoring: ($original | str replace ($workspace_abs + '/') '')"

        # Copy backup back to original
        cp $backup $original

        # Remove backup
        rm $backup
    }

    # Remove migration artifacts
    let migration_artifacts = [
        ".taskservs"
        ".providers"
        ".clusters"
        ".manifest"
        ".migration-info.yaml"
    ]

    for $artifact in $migration_artifacts {
        let artifact_path = ($workspace_abs | path join $artifact)
        if ($artifact_path | path exists) {
            rm -rf $artifact_path
        }
    }

    print "✅ Migration rolled back successfully"
    print " Original files restored from backups"
    print " Migration artifacts removed"
}

# Show migration status
export def "main status" [workspace_path: string]: nothing -> record {
    let workspace_abs = ($workspace_path | path expand)

    let has_new_structure = [".taskservs", ".providers", ".clusters", ".manifest"] | all { |dir|
        ($workspace_abs | path join $dir) | path exists
    }

    let migration_info_path = ($workspace_abs | path join ".migration-info.yaml")
    let migration_info = if ($migration_info_path | path exists) {
        open $migration_info_path
    } else {
        null
    }

    let backup_files = try {
        glob ($workspace_abs | path join "**/*.bak") | length
    } catch { 0 }

    {
        workspace: $workspace_abs,
        has_new_structure: $has_new_structure,
        migration_info: $migration_info,
        backup_files: $backup_files,
        is_migrated: ($has_new_structure and ($migration_info != null))
    }
}
@ -18,9 +18,8 @@ export def fmt [
    }

    if $check {
        try {
            ^cargo fmt --all -- --check
        } catch {
        let result = (do { ^cargo fmt --all -- --check } | complete)
        if $result.exit_code != 0 {
            error make --unspanned {
                msg: $"\nplease run ('toolkit fmt' | pretty-format-command) to fix formatting!"
            }
@ -42,7 +41,7 @@ export def clippy [
    }

    # If changing these settings also change CI settings in .github/workflows/ci.yml
    try {(
    let result1 = (do {
        ^cargo clippy
            --workspace
            --exclude nu_plugin_*
@ -51,13 +50,19 @@ export def clippy [
            -D warnings
            -D clippy::unwrap_used
            -D clippy::unchecked_duration_subtraction
    )
    } | complete)

    if $result1.exit_code != 0 {
        error make --unspanned {
            msg: $"\nplease fix the above ('clippy' | pretty-format-command) errors before continuing!"
        }
    }

    if $verbose {
        print $"running ('toolkit clippy' | pretty-format-command) on tests"
    }
    # In tests we don't have to deny unwrap
    (
    let result2 = (do {
        ^cargo clippy
            --tests
            --workspace
@ -65,21 +70,27 @@ export def clippy [
            --features ($features | default [] | str join ",")
            --
            -D warnings
    )
    } | complete)

    if $result2.exit_code != 0 {
        error make --unspanned {
            msg: $"\nplease fix the above ('clippy' | pretty-format-command) errors before continuing!"
        }
    }

    if $verbose {
        print $"running ('toolkit clippy' | pretty-format-command) on plugins"
    }
    (
    let result3 = (do {
        ^cargo clippy
            --package nu_plugin_*
            --
            -D warnings
            -D clippy::unwrap_used
            -D clippy::unchecked_duration_subtraction
    )
    } | complete)

    } catch {
    if $result3.exit_code != 0 {
        error make --unspanned {
            msg: $"\nplease fix the above ('clippy' | pretty-format-command) errors before continuing!"
        }
@ -262,20 +273,18 @@ export def "check pr" [
    $env.LANG = 'en_US.UTF-8'
    $env.LANGUAGE = 'en'

    try {
        fmt --check --verbose
    } catch {
    let fmt_result = (do { fmt --check --verbose } | complete)
    if $fmt_result.exit_code != 0 {
        return (report --fail-fmt)
    }

    try {
        clippy --features $features --verbose
    } catch {
    let clippy_result = (do { clippy --features $features --verbose } | complete)
    if $clippy_result.exit_code != 0 {
        return (report --fail-clippy)
    }

    print $"running ('toolkit test' | pretty-format-command)"
    try {
    let test_result = (do {
        if $fast {
            if ($features | is-empty) {
                test --workspace --fast
@ -289,14 +298,15 @@ export def "check pr" [
                test --features $features
            }
        }
    } catch {
    } | complete)

    if $test_result.exit_code != 0 {
        return (report --fail-test)
    }

    print $"running ('toolkit test stdlib' | pretty-format-command)"
    try {
        test stdlib
    } catch {
    let stdlib_result = (do { test stdlib } | complete)
    if $stdlib_result.exit_code != 0 {
        return (report --fail-test-stdlib)
    }

@ -425,11 +435,12 @@ export def "add plugins" [] {
    }

    for plugin in $plugins {
        try {
        let plugin_result = (do {
            print $"> plugin add ($plugin)"
            plugin add $plugin
        } catch { |err|
            print -e $"(ansi rb)Failed to add ($plugin):\n($err.msg)(ansi reset)"
        } | complete)
        if $plugin_result.exit_code != 0 {
            print -e $"(ansi rb)Failed to add ($plugin):\n($plugin_result.stderr)(ansi reset)"
        }
    }


@ -24,13 +24,12 @@ def test_layer_core [module_name: string] {
    print " 🔍 Layer 1 (Core): Priority 100"

    # Use discovery system to find taskserv in grouped structure
    let taskserv_exists = try {
    let discovery_result = (do {
        use ../../core/nulib/taskservs/discover.nu *
        let taskserv_info = get-taskserv-info $module_name
        ($taskserv_info.name == $module_name)
    } catch {
        false
    }
    } | complete)
    let taskserv_exists = if $discovery_result.exit_code == 0 { $discovery_result.stdout } else { false }
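    # `complete` returns the block's value through stdout as text, so the
    # captured stdout stands in for the boolean here; any failure inside the
    # discovery import falls back to `false`.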

    let provider_path = $"provisioning/extensions/providers/($module_name)"
    let provider_exists = ($provider_path | path exists)
@ -189,13 +188,12 @@ def summarize_resolution [module_name: string, infra: string] {
    }

    # Check core layer using discovery system (Layer 1)
    let core_exists = try {
    let core_discovery_result = (do {
        use ../../core/nulib/taskservs/discover.nu *
        let taskserv_info = get-taskserv-info $module_name
        ($taskserv_info.name == $module_name)
    } catch {
        false
    }
    } | complete)
    let core_exists = if $core_discovery_result.exit_code == 0 { $core_discovery_result.stdout } else { false }

    if $core_exists {
        $resolution_chain = ($resolution_chain | append "system")
@ -217,12 +215,11 @@ export def get_layer_modules [layer: string] {
    match $layer {
        "system" | "core" => {
            # Use the discovery system to get all modules from system extensions
            try {
            let discover_result = (do {
                use ../../core/nulib/taskservs/discover.nu *
                discover-taskservs | get name
            } catch {
                []
            }
            } | complete)
            if $discover_result.exit_code == 0 { $discover_result.stdout } else { [] }
        }
        "workspace" => {
            # Get loaded modules from workspace layer