From fc1c6997955a82d37a3aec25c445c7fe53ba82a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jesu=CC=81s=20Pe=CC=81rez?=
Date: Wed, 4 Feb 2026 01:02:18 +0000
Subject: [PATCH] chore: update crates, names and clippy fixes

---
 .gitignore | 2 +
 .gitmodules | 9 +
 Cargo.toml | 177 +++-
 crates/ai-service/Cargo.toml | 19 +-
 crates/ai-service/src/main.rs | 16 +-
 crates/control-center-ui/Cargo.toml | 96 +--
 crates/control-center/Cargo.toml | 14 +-
 crates/control-center/Dockerfile | 65 --
 crates/control-center/src/main.rs | 33 +-
 .../Cargo.toml | 8 +-
 crates/daemon/Dockerfile.runtime | 38 +
 .../src/config.rs | 0
 .../src/main.rs | 14 +-
 crates/extension-registry/Cargo.toml | 13 +-
 crates/extension-registry/Dockerfile | 115 ++-
 crates/extension-registry/src/handlers.rs | 1 +
 crates/extension-registry/src/main.rs | 12 +
 crates/mcp-server/Cargo.toml | 24 +-
 crates/mcp-server/Dockerfile | 63 --
 .../src/{main.rs => main.rs.disabled} | 795 ++++++++++++------
 crates/orchestrator/Cargo.toml | 42 +-
 crates/orchestrator/Dockerfile | 101 ++-
 ...s_14043518-e459-4316-aadd-6ee6d221e644.txt | 20 -
 ...s_1e9b4914-f290-4bec-80f2-35128250f9fd.txt | 20 -
 ...s_21c8a4af-2562-4304-b5ec-90fb1b5fd0ab.txt | 20 -
 ...s_317e31fa-b549-49c9-a212-1f13445d913f.txt | 20 -
 ...s_5da5d888-527e-4aac-ab53-93e9a30014cc.txt | 20 -
 ...s_7c16746f-24b0-4bcc-8a49-b5dc6bc1f0c7.txt | 20 -
 ...s_cb3ced5a-ab49-4754-ba90-c815ab0948ba.txt | 20 -
 crates/orchestrator/src/container_manager.rs | 111 ++-
 crates/orchestrator/src/lib.rs | 28 +-
 crates/orchestrator/src/main.rs | 1 +
 .../237315de-8a7f-430a-8804-65d050f3bfb0.json | 0
 .../7ff31593-cb5f-4a52-88ff-3a3d9bfbf931.json | 0
 .../b14f9a93-318b-4d56-aa73-a5c1e38a2a9b.json | 0
 .../c2050e55-46d9-47bc-abcd-8b137a6ee459.json | 0
 .../orchestrator/{ => wrks}/_data/status.json | 0
 .../999c70f4-3fa4-4879-bbd2-e85f5d0027f3.json | 0
 .../{ => wrks}/_data/tasks/task-001.json | 0
 .../{ => wrks}/_data/tasks/task-002.json | 0
 .../{ => wrks}/_data/tasks/task-003.json | 0
 crates/platform-config/src/hierarchy.rs | 17 +-
 crates/platform-config/src/lib.rs | 2 +
 crates/platform-config/src/resolver.rs | 212 +++++
 crates/rag/Cargo.toml | 22 +-
 crates/rag/docker/Dockerfile | 59 --
 crates/rag/src/embeddings.rs | 403 +++------
 crates/rag/src/llm.rs | 242 ++----
 crates/rag/src/main.rs | 22 +
 crates/service-clients/Cargo.toml | 14 +-
 crates/vault-service/Cargo.toml | 23 +-
 secretumvault | 1 +
 stratumiops | 1 +
 syntaxis | 1 +
 54 files changed, 1635 insertions(+), 1321 deletions(-)
 create mode 100644 .gitmodules
 delete mode 100644 crates/control-center/Dockerfile
 rename crates/{provisioning-daemon => daemon}/Cargo.toml (91%)
 create mode 100644 crates/daemon/Dockerfile.runtime
 rename crates/{provisioning-daemon => daemon}/src/config.rs (100%)
 rename crates/{provisioning-daemon => daemon}/src/main.rs (90%)
 delete mode 100644 crates/mcp-server/Dockerfile
 rename crates/mcp-server/src/{main.rs => main.rs.disabled} (67%)
 delete mode 100644 crates/orchestrator/rollback_instructions_14043518-e459-4316-aadd-6ee6d221e644.txt
 delete mode 100644 crates/orchestrator/rollback_instructions_1e9b4914-f290-4bec-80f2-35128250f9fd.txt
 delete mode 100644 crates/orchestrator/rollback_instructions_21c8a4af-2562-4304-b5ec-90fb1b5fd0ab.txt
 delete mode 100644 crates/orchestrator/rollback_instructions_317e31fa-b549-49c9-a212-1f13445d913f.txt
 delete mode 100644 crates/orchestrator/rollback_instructions_5da5d888-527e-4aac-ab53-93e9a30014cc.txt
 delete mode 100644 crates/orchestrator/rollback_instructions_7c16746f-24b0-4bcc-8a49-b5dc6bc1f0c7.txt
 delete mode 100644 
crates/orchestrator/rollback_instructions_cb3ced5a-ab49-4754-ba90-c815ab0948ba.txt rename crates/orchestrator/{ => wrks}/_data/queue.rkvs/tasks/237315de-8a7f-430a-8804-65d050f3bfb0.json (100%) rename crates/orchestrator/{ => wrks}/_data/queue.rkvs/tasks/7ff31593-cb5f-4a52-88ff-3a3d9bfbf931.json (100%) rename crates/orchestrator/{ => wrks}/_data/queue.rkvs/tasks/b14f9a93-318b-4d56-aa73-a5c1e38a2a9b.json (100%) rename crates/orchestrator/{ => wrks}/_data/queue.rkvs/tasks/c2050e55-46d9-47bc-abcd-8b137a6ee459.json (100%) rename crates/orchestrator/{ => wrks}/_data/status.json (100%) rename crates/orchestrator/{ => wrks}/_data/tasks/999c70f4-3fa4-4879-bbd2-e85f5d0027f3.json (100%) rename crates/orchestrator/{ => wrks}/_data/tasks/task-001.json (100%) rename crates/orchestrator/{ => wrks}/_data/tasks/task-002.json (100%) rename crates/orchestrator/{ => wrks}/_data/tasks/task-003.json (100%) create mode 100644 crates/platform-config/src/resolver.rs delete mode 100644 crates/rag/docker/Dockerfile create mode 160000 secretumvault create mode 160000 stratumiops create mode 160000 syntaxis diff --git a/.gitignore b/.gitignore index a05a0db..e18cb67 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,7 @@ .p .claude +.opencode +AGENTS.md .vscode .shellcheckrc .coder diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..324a826 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,9 @@ +[submodule "secretumvault"] + path = secretumvault + url = ssh://git@repo.jesusperez.pro:32225/jesus/secretumvault.git +[submodule "stratumiops"] + path = stratumiops + url = ssh://git@repo.jesusperez.pro:32225/jesus/stratumiops.git +[submodule "syntaxis"] + path = syntaxis + url = ssh://git@repo.jesusperez.pro:32225/jesus/syntaxis.git diff --git a/Cargo.toml b/Cargo.toml index e517408..e0d6030 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,11 +9,26 @@ members = [ "crates/control-center", "crates/control-center-ui", "crates/vault-service", - "crates/rag", "crates/detector", "crates/mcp-server", - "crates/provisioning-daemon", + "crates/daemon", + "prov-ecosystem/crates/daemon-cli", + "prov-ecosystem/crates/machines", + "prov-ecosystem/crates/encrypt", + "prov-ecosystem/crates/backup", + "prov-ecosystem/crates/observability", ] + +exclude = [ + "syntaxis", + "syntaxis/core", + "prov-ecosystem/crates/syntaxis-integration", + "prov-ecosystem/crates/audit", + "prov-ecosystem/crates/valida", + "prov-ecosystem/crates/runtime", + "prov-ecosystem/crates/gitops", +] + resolver = "2" [workspace.package] @@ -39,7 +54,7 @@ resolver = "2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" toml = "0.9" - uuid = { version = "1.19", features = ["v4", "serde"] } + uuid = { version = "1.20", features = ["v4", "serde"] } # ============================================================================ # ERROR HANDLING @@ -80,7 +95,7 @@ resolver = "2" # DATABASE AND STORAGE # ============================================================================ sqlx = { version = "0.8", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] } - surrealdb = { version = "2.4", features = ["kv-mem", "protocol-ws", "protocol-http"] } + surrealdb = { version = "2.6", features = ["kv-mem", "protocol-ws", "protocol-http"] } # ============================================================================ # SECURITY AND CRYPTOGRAPHY @@ -89,7 +104,7 @@ resolver = "2" argon2 = "0.5" base64 = "0.22" hmac = "0.12" - jsonwebtoken = { version = "10.2", features = ["rust_crypto"] } + jsonwebtoken = { version = "10.3", features = 
["rust_crypto"] } rand = { version = "0.9", features = ["std_rng", "os_rng"] } ring = "0.17" sha2 = "0.10" @@ -127,7 +142,7 @@ resolver = "2" # Additional cryptography hkdf = "0.12" - rsa = "0.9.9" + rsa = "0.9.10" zeroize = { version = "1.8", features = ["derive"] } # Additional security @@ -186,7 +201,7 @@ resolver = "2" wasm-bindgen-futures = "0.4" # Random number generation - getrandom = { version = "0.3" } + getrandom = { version = "0.4" } # ============================================================================ # TUI (Terminal User Interface) @@ -216,28 +231,164 @@ resolver = "2" parking_lot = "0.12" which = "8" yaml-rust = "0.4" + humantime-serde = "1.1" + + # Metrics + prometheus = "0.14" + + approx = "0.5" + + # Utilities + xxhash-rust = { version = "0.8", features = ["xxh3"] } # ============================================================================ # RAG FRAMEWORK DEPENDENCIES (Rig) # ============================================================================ - rig-core = "0.27" + rig-core = "0.30" rig-surrealdb = "0.1" tokenizers = "0.22" # ============================================================================ - # PROV-ECOSYSTEM DAEMON (replaces cli-daemon) + # STRATUM ECOSYSTEM DEPENDENCIES (for RAG embeddings & LLM) # ============================================================================ - daemon-cli = { path = "../../submodules/prov-ecosystem/crates/daemon-cli" } + moka = { version = "0.12", features = ["future"] } + sled = "0.34" + fastembed = "5.8" + lancedb = "0.23" + arrow = "=56" # ============================================================================ - # SECRETUMVAULT (Enterprise Secrets Management) + # INTERNAL WORKSPACE CRATES (Local path dependencies) # ============================================================================ - secretumvault = { path = "../../submodules/secretumvault" } + platform-config = { path = "./crates/platform-config" } + service-clients = { path = "./crates/service-clients" } + rag = { path = "./crates/rag" } + mcp-server = { path = "./crates/mcp-server" } + ai-service = { path = "./crates/ai-service" } + + # ============================================================================ + # PROV-ECOSYSTEM (Now members of workspace) + # ============================================================================ + daemon-cli = { path = "./prov-ecosystem/crates/daemon-cli" } + machines = { path = "./prov-ecosystem/crates/machines" } + encrypt = { path = "./prov-ecosystem/crates/encrypt" } + backup = { path = "./prov-ecosystem/crates/backup" } + observability = { path = "./prov-ecosystem/crates/observability" } + init-servs = { path = "./prov-ecosystem/crates/init-servs" } + + # stratum-embeddings and stratum-llm are built in isolated Docker context for RAG + # See: crates/rag/docker/Dockerfile + stratum-embeddings = { path = "./stratumiops/crates/stratum-embeddings", features = ["openai-provider", "ollama-provider", "fastembed-provider", "memory-cache"] } + stratum-llm = { path = "./stratumiops/crates/stratum-llm", features = ["anthropic", "openai", "ollama"] } + + # ============================================================================ + # SECRETUMVAULT (Enterprise Secrets Management - optional) + # ============================================================================ + secretumvault = { path = "./secretumvault" } + + # ============================================================================ + # WASM/WEB-SPECIFIC DEPENDENCIES + # 
============================================================================ + web-sys = { version = "0.3", features = [ + "console", + "Window", + "Document", + "Element", + "HtmlElement", + "HtmlCanvasElement", + "CanvasRenderingContext2d", + "EventTarget", + "Event", + "DragEvent", + "DataTransfer", + "HtmlInputElement", + "HtmlSelectElement", + "HtmlTextAreaElement", + "HtmlButtonElement", + "HtmlDivElement", + "Storage", + "Location", + "History", + "Navigator", + "ServiceWorkerRegistration", + "ServiceWorker", + "NotificationPermission", + "Notification", + "Headers", + "Request", + "RequestInit", + "RequestMode", + "Response", + "AbortController", + "AbortSignal", + "WebSocket", + "MessageEvent", + "CloseEvent", + "ErrorEvent", + "Blob", + "Url", + "FileReader", + "File", + "HtmlAnchorElement", + "MouseEvent", + "TouchEvent", + "KeyboardEvent", + "ResizeObserver", + "ResizeObserverEntry", + "IntersectionObserver", + "IntersectionObserverEntry", + "MediaQueryList", + "MediaQueryListEvent", + "CredentialsContainer", + "PublicKeyCredential", + "PublicKeyCredentialCreationOptions", + "PublicKeyCredentialRequestOptions", + "AuthenticatorResponse", + "AuthenticatorAttestationResponse", + "AuthenticatorAssertionResponse", + "Crypto", + "SubtleCrypto", + "CryptoKey", + ] } + + # ============================================================================ + # ADDITIONAL MISSING DEPENDENCIES (Not in original workspace) + # ============================================================================ + ed25519-dalek = "2.2" + http-body-util = "0.1" # ============================================================================ # BYTES MANIPULATION # ============================================================================ - bytes = "1.5" + bytes = "1.11" + + # ============================================================================ + # HTTP AND PROTOCOL UTILITIES + # ============================================================================ + http = "1" + + # ============================================================================ + # CONTAINER MANAGEMENT AND SSH + # ============================================================================ + bollard = "0.20" + russh = "0.57" + russh-keys = "0.49" + + # ============================================================================ + # SECRETS MANAGEMENT + # ============================================================================ + age = "0.11" + rusty_vault = "0.2.1" + + # ============================================================================ + # ADDITIONAL DATA FORMAT SERIALIZATION + # ============================================================================ + serde_yaml = "0.9" + + # ============================================================================ + # PATH AND SHELL UTILITIES + # ============================================================================ + shellexpand = "3.1" [workspace.metadata] description = "Provisioning Platform - Rust workspace for cloud infrastructure automation tools" diff --git a/crates/ai-service/Cargo.toml b/crates/ai-service/Cargo.toml index 3464be8..c5f28cd 100644 --- a/crates/ai-service/Cargo.toml +++ b/crates/ai-service/Cargo.toml @@ -5,6 +5,10 @@ edition.workspace = true name = "ai-service" version.workspace = true +[[bin]] +name = "provisioning-ai-service" +path = "src/main.rs" + [dependencies] # Workspace dependencies async-trait = { workspace = true } @@ -22,7 +26,7 @@ serde_json = { workspace = true } toml = { workspace = true } # Platform configuration -platform-config 
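The bollard and russh entries above support the orchestrator's container management and SSH paths; the matching changes in crates/orchestrator/src/container_manager.rs are not shown in this excerpt. A minimal connectivity sketch with bollard, assuming nothing about the real container manager beyond the dependency itself:

use bollard::Docker;

/// Minimal connectivity check against the local Docker daemon.
/// Error handling is simplified; a real container manager would keep the
/// `Docker` handle around and drive create/start/stop calls with it.
async fn docker_healthcheck() -> Result<(), bollard::errors::Error> {
    // Honors DOCKER_HOST or the platform's default socket.
    let docker = Docker::connect_with_local_defaults()?;
    let _pong = docker.ping().await?;
    Ok(())
}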
= { path = "../platform-config" } +platform-config = { workspace = true } # Error handling anyhow = { workspace = true } @@ -40,14 +44,18 @@ uuid = { workspace = true, features = ["v4", "serde"] } clap = { workspace = true, features = ["derive"] } # RAG crate for AI capabilities -provisioning-rag = { path = "../rag" } +rag = { workspace = true } # MCP server tools for real implementations -provisioning-mcp-server = { path = "../mcp-server" } +mcp-server = { workspace = true } # Graph operations for DAG petgraph = { workspace = true } +# Stratum ecosystem - embeddings and LLM abstraction (optional - requires external setup) +stratum-embeddings = { workspace = true } +stratum-llm = { workspace = true } + [dev-dependencies] tempfile = { workspace = true } tokio-test = { workspace = true } @@ -56,8 +64,3 @@ tokio-test = { workspace = true } [lib] name = "ai_service" path = "src/lib.rs" - -# Binary target -[[bin]] -name = "ai-service" -path = "src/main.rs" diff --git a/crates/ai-service/src/main.rs b/crates/ai-service/src/main.rs index b1f663f..6c20de4 100644 --- a/crates/ai-service/src/main.rs +++ b/crates/ai-service/src/main.rs @@ -13,12 +13,24 @@ use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; #[command(name = "ai-service")] #[command(about = "HTTP service for AI capabilities including RAG, MCP tool invocation, DAG operations, and knowledge graphs", long_about = None)] struct Args { + /// Configuration file path (highest priority) + #[arg(short = 'c', long, env = "AI_SERVICE_CONFIG")] + config: Option, + + /// Configuration directory (searches for ai-service.ncl|toml|json) + #[arg(long, env = "PROVISIONING_CONFIG_DIR")] + config_dir: Option, + + /// Deployment mode (solo, multiuser, cicd, enterprise) + #[arg(short = 'm', long, env = "AI_SERVICE_MODE")] + mode: Option, + /// Service bind address - #[arg(short, long, default_value = "127.0.0.1")] + #[arg(short = 'H', long, default_value = "127.0.0.1")] host: String, /// Service bind port - #[arg(short, long, default_value_t = DEFAULT_PORT)] + #[arg(short = 'p', long, default_value_t = DEFAULT_PORT)] port: u16, } diff --git a/crates/control-center-ui/Cargo.toml b/crates/control-center-ui/Cargo.toml index 5b270e5..904fac1 100644 --- a/crates/control-center-ui/Cargo.toml +++ b/crates/control-center-ui/Cargo.toml @@ -1,9 +1,11 @@ [package] -authors = ["Control Center Team"] -autobins = false # Disable auto-detection of binary targets +authors.workspace = true +autobins = false description = "Control Center UI - Leptos CSR App for Cloud Infrastructure Management" edition.workspace = true +license.workspace = true name = "control-center-ui" +repository.workspace = true version.workspace = true [lib] @@ -87,89 +89,21 @@ js-sys = { workspace = true } wasm-bindgen-futures = { workspace = true } # Random number generation (WASM-specific override with js feature) -getrandom = { version = "0.3.4", features = ["wasm_js"] } +getrandom = { workspace = true, features = ["wasm_js"] } -# ============================================================================ -# PROJECT-SPECIFIC DEPENDENCIES (not in workspace) -# ============================================================================ +# HTTP client +reqwest = { workspace = true, features = ["json"] } -# Web APIs -web-sys = { version = "0.3", features = [ - "console", - "Window", - "Document", - "Element", - "HtmlElement", - "HtmlCanvasElement", - "CanvasRenderingContext2d", - "EventTarget", - "Event", - "DragEvent", - "DataTransfer", - "HtmlInputElement", - "HtmlSelectElement", 
- "HtmlTextAreaElement", - "HtmlButtonElement", - "HtmlDivElement", - "Storage", - "Location", - "History", - "Navigator", - "ServiceWorkerRegistration", - "ServiceWorker", - "NotificationPermission", - "Notification", - "Headers", - "Request", - "RequestInit", - "RequestMode", - "Response", - "AbortController", - "AbortSignal", - "WebSocket", - "MessageEvent", - "CloseEvent", - "ErrorEvent", - "Blob", - "Url", - "FileReader", - "File", - "HtmlAnchorElement", - "MouseEvent", - "TouchEvent", - "KeyboardEvent", - "ResizeObserver", - "ResizeObserverEntry", - "IntersectionObserver", - "IntersectionObserverEntry", - # Media Query APIs - "MediaQueryList", - "MediaQueryListEvent", - # WebAuthn APIs - "CredentialsContainer", - "PublicKeyCredential", - "PublicKeyCredentialCreationOptions", - "PublicKeyCredentialRequestOptions", - "AuthenticatorResponse", - "AuthenticatorAttestationResponse", - "AuthenticatorAssertionResponse", - # Crypto APIs - "Crypto", - "SubtleCrypto", - "CryptoKey", -] } +# Tokio with time features +tokio = { workspace = true, features = ["time"] } -# HTTP client (project-specific for WASM features) -reqwest = { version = "0.13", features = ["json"] } - -# Tokio with time features for WASM (project-specific version) -tokio = { version = "1.49", features = ["time"] } +# Web APIs (WASM browser APIs) +web-sys = { workspace = true } # Profile configurations moved to workspace root - # WASM pack settings - [package.metadata.wasm-pack.profile.release] - wasm-opt = ['-Oz', '--enable-mutable-globals'] +[package.metadata.wasm-pack.profile.release] +wasm-opt = ['-Oz', '--enable-mutable-globals'] - [package.metadata.wasm-pack.profile.dev] - wasm-opt = false +[package.metadata.wasm-pack.profile.dev] +wasm-opt = false diff --git a/crates/control-center/Cargo.toml b/crates/control-center/Cargo.toml index e530568..c1e6eea 100644 --- a/crates/control-center/Cargo.toml +++ b/crates/control-center/Cargo.toml @@ -52,10 +52,10 @@ validator = { workspace = true } reqwest = { workspace = true } # HTTP service clients (machines, init, AI) - enables remote service calls -service-clients = { path = "../service-clients" } +service-clients = { workspace = true } # Platform configuration management -platform-config = { path = "../platform-config" } +platform-config = { workspace = true } # Security and cryptography aes-gcm = { workspace = true } @@ -153,9 +153,8 @@ compliance = ["core"] # Modules: anomaly (detection) experimental = ["core"] -# Default: Recommended for standard deployments -# Includes auth, KMS, audit - the essentials -default = ["core", "kms", "audit"] +# Default: All features enabled +default = ["core", "kms", "audit", "mfa", "compliance", "experimental"] # Full: All features enabled (development and testing) all = ["core", "kms", "audit", "mfa", "compliance", "experimental"] @@ -165,8 +164,7 @@ all = ["core", "kms", "audit", "mfa", "compliance", "experimental"] name = "control_center" path = "src/lib.rs" -# Binary target (uses all features) +# Binary target (uses all features by default) [[bin]] -name = "control-center" +name = "provisioning-control-center" path = "src/main.rs" -required-features = ["all"] diff --git a/crates/control-center/Dockerfile b/crates/control-center/Dockerfile deleted file mode 100644 index a35a741..0000000 --- a/crates/control-center/Dockerfile +++ /dev/null @@ -1,65 +0,0 @@ -# Multi-stage build for Control-Center -# Builds from platform workspace root - -# Build stage - Using nightly for edition2024 support (required by async-graphql 7.x) -FROM 
rustlang/rust:nightly-bookworm AS builder - -WORKDIR /workspace - -# Install build dependencies -RUN apt-get update && apt-get install -y \ - pkg-config \ - libssl-dev \ - && rm -rf /var/lib/apt/lists/* - -# Copy entire platform workspace (required for workspace dependencies) -COPY Cargo.toml Cargo.lock ./ -COPY orchestrator ./orchestrator -COPY control-center ./control-center -COPY control-center-ui ./control-center-ui -COPY mcp-server ./mcp-server -COPY installer ./installer - -# Build control-center (workspace-aware) -WORKDIR /workspace -RUN cargo build --release --package control-center - -# Runtime stage -FROM debian:bookworm-slim - -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - curl \ - && rm -rf /var/lib/apt/lists/* - -# Create non-root user -RUN useradd -m -u 1000 provisioning && \ - mkdir -p /data /var/log/control-center && \ - chown -R provisioning:provisioning /data /var/log/control-center - -# Copy binary from builder -COPY --from=builder /workspace/target/release/control-center /usr/local/bin/control-center -RUN chmod +x /usr/local/bin/control-center - -# Copy default configuration -COPY control-center/config.defaults.toml /etc/provisioning/config.defaults.toml - -# Switch to non-root user -USER provisioning -WORKDIR /app - -# Expose port -EXPOSE 8081 - -# Set environment variables -ENV RUST_LOG=info -ENV DATA_DIR=/data -ENV CONTROL_CENTER_DATABASE_URL=rocksdb:///data/control-center.db - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ - CMD curl -f http://localhost:8081/health || exit 1 - -# Run the binary with config path -CMD ["control-center", "--config", "/etc/provisioning/config.defaults.toml"] diff --git a/crates/control-center/src/main.rs b/crates/control-center/src/main.rs index f95a94e..3a5ae57 100644 --- a/crates/control-center/src/main.rs +++ b/crates/control-center/src/main.rs @@ -47,13 +47,30 @@ use tracing_subscriber::EnvFilter; #[command(name = "control-center")] #[command(about = "Control Center - JWT Authentication & User Management Service")] #[command(version = env!("CARGO_PKG_VERSION"))] +#[command(after_help = "CONFIGURATION HIERARCHY (highest to lowest priority):\n 1. CLI: -c/--config (explicit file)\n 2. CLI: --config-dir --mode (directory + mode)\n 3. CLI: --config-dir (searches for control-center.ncl|toml|json)\n 4. CLI: --mode (searches in provisioning/platform/config/)\n 5. ENV: CONTROL_CENTER_CONFIG (explicit file)\n 6. ENV: PROVISIONING_CONFIG_DIR (searches for control-center.ncl|toml|json)\n 7. ENV: CONTROL_CENTER_MODE (mode-based in default path)\n 8. Built-in defaults")] struct Cli { - /// Configuration file path - #[arg(short, long, default_value = "config.toml")] + /// Configuration file path (highest priority) + /// + /// Accepts absolute or relative path. Supports .ncl, .toml, and .json formats. + #[arg(short = 'c', long, env = "CONTROL_CENTER_CONFIG")] config: Option, + /// Configuration directory (searches for control-center.ncl|toml|json) + /// + /// Searches for configuration files in order of preference: .ncl > .toml > .json + /// Can also search for mode-specific files: control-center.{mode}.{ncl|toml|json} + #[arg(long, env = "PROVISIONING_CONFIG_DIR")] + config_dir: Option, + + /// Deployment mode (solo, multiuser, cicd, enterprise) + /// + /// Determines which configuration profile to use. 
Searches in: + /// provisioning/platform/config/control-center.{mode}.{ncl|toml} + #[arg(short = 'm', long, env = "CONTROL_CENTER_MODE")] + mode: Option, + /// Server port (overrides config file) - #[arg(short, long)] + #[arg(short = 'p', long)] port: Option, /// Server host (overrides config file) @@ -90,9 +107,15 @@ async fn main() -> Result<()> { .with_target(false) .init(); + // Resolve config file path using new resolver + let resolver = platform_config::ConfigResolver::new() + .with_cli_config(cli.config.clone()) + .with_cli_config_dir(cli.config_dir.clone()) + .with_cli_mode(cli.mode.clone()); + // Load configuration - let mut config = if let Some(config_path) = cli.config { - Config::load_from_file(config_path)? + let mut config = if let Some(path) = resolver.resolve("control-center") { + Config::load_from_file(path)? } else { Config::load()? }; diff --git a/crates/provisioning-daemon/Cargo.toml b/crates/daemon/Cargo.toml similarity index 91% rename from crates/provisioning-daemon/Cargo.toml rename to crates/daemon/Cargo.toml index 33685f0..4a0276b 100644 --- a/crates/provisioning-daemon/Cargo.toml +++ b/crates/daemon/Cargo.toml @@ -2,10 +2,14 @@ authors.workspace = true edition.workspace = true license.workspace = true -name = "provisioning-daemon" +name = "daemon" repository.workspace = true version.workspace = true +[[bin]] +name = "provisioning-daemon" +path = "src/main.rs" + [dependencies] # Core daemon library from prov-ecosystem daemon-cli = { workspace = true } @@ -22,7 +26,7 @@ serde_json = { workspace = true } toml = { workspace = true } # Platform configuration -platform-config = { path = "../platform-config" } +platform-config = { workspace = true } # Error handling anyhow = { workspace = true } diff --git a/crates/daemon/Dockerfile.runtime b/crates/daemon/Dockerfile.runtime new file mode 100644 index 0000000..2480ae6 --- /dev/null +++ b/crates/daemon/Dockerfile.runtime @@ -0,0 +1,38 @@ +FROM debian:bookworm-slim + +# Install runtime dependencies +RUN apt-get update && apt-get install -y \ + ca-certificates \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Create user +RUN useradd -m -u 1000 provisioning && \ + mkdir -p /data /var/log/provisioning-daemon /etc/provisioning && \ + chown -R provisioning:provisioning /data /var/log/provisioning-daemon /etc/provisioning + +# Copy pre-built binary +COPY target/release/provisioning-daemon /usr/local/bin/provisioning-daemon +RUN chmod +x /usr/local/bin/provisioning-daemon + +# Copy default configuration files (assumes they're available at build time) +COPY provisioning/platform/config/runtime/generated/provisioning-daemon.*.toml /etc/provisioning/ + +USER provisioning +WORKDIR /app + +EXPOSE 8079 + +ENV RUST_LOG=info +ENV DATA_DIR=/data +ENV PROVISIONING_DAEMON_MODE=solo +ENV PROVISIONING_CONFIG_DIR=/etc/provisioning + +HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ + CMD curl -f http://localhost:8079/api/v1/health || exit 1 + +# Configuration precedence: +# 1. PROVISIONING_DAEMON_CONFIG (explicit path) +# 2. PROVISIONING_DAEMON_MODE (mode-specific file) +# 3. 
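The -c/--config, --config-dir and -m/--mode flags added to control-center above (and to the daemon, ai-service, extension-registry and mcp-server elsewhere in this patch) all feed the new platform_config::ConfigResolver; the environment-variable levels of the hierarchy arrive through clap's env attributes on the same fields. crates/platform-config/src/resolver.rs is not shown in this excerpt, so the sketch below only illustrates the documented precedence (explicit file, then directory search, then mode-based lookup) with hypothetical names.

use std::path::{Path, PathBuf};

// Hypothetical shape, for illustration only; the real type lives in
// crates/platform-config/src/resolver.rs, which this hunk does not show.
struct Resolver {
    cli_config: Option<PathBuf>,
    cli_config_dir: Option<PathBuf>,
    cli_mode: Option<String>,
}

impl Resolver {
    /// Returns the first configuration file that exists, following the
    /// precedence documented in the --help text above.
    fn resolve(&self, service: &str) -> Option<PathBuf> {
        // 1. Explicit file always wins.
        if let Some(path) = &self.cli_config {
            return Some(path.clone());
        }
        // 2./3. Directory search: <dir>/<service>.{mode.}{ncl,toml,json}
        if let Some(dir) = &self.cli_config_dir {
            if let Some(found) = self.search_dir(dir, service) {
                return Some(found);
            }
        }
        // 4. Mode-based lookup in the default platform config path.
        if self.cli_mode.is_some() {
            return self.search_dir(Path::new("provisioning/platform/config"), service);
        }
        None // fall back to built-in defaults
    }

    fn search_dir(&self, dir: &Path, service: &str) -> Option<PathBuf> {
        let mut names = Vec::new();
        if let Some(mode) = &self.cli_mode {
            names.extend(["ncl", "toml", "json"].map(|ext| format!("{service}.{mode}.{ext}")));
        }
        names.extend(["ncl", "toml", "json"].map(|ext| format!("{service}.{ext}")));
        names.into_iter().map(|n| dir.join(n)).find(|p| p.exists())
    }
}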
Default fallback +CMD ["provisioning-daemon"] diff --git a/crates/provisioning-daemon/src/config.rs b/crates/daemon/src/config.rs similarity index 100% rename from crates/provisioning-daemon/src/config.rs rename to crates/daemon/src/config.rs diff --git a/crates/provisioning-daemon/src/main.rs b/crates/daemon/src/main.rs similarity index 90% rename from crates/provisioning-daemon/src/main.rs rename to crates/daemon/src/main.rs index e9138c0..7b4c322 100644 --- a/crates/provisioning-daemon/src/main.rs +++ b/crates/daemon/src/main.rs @@ -30,12 +30,20 @@ use tracing_subscriber::EnvFilter; #[command(about = "Provisioning platform daemon with Nushell execution and config rendering")] #[command(version = env!("CARGO_PKG_VERSION"))] struct Args { - /// Configuration file path - #[arg(short, long)] + /// Configuration file path (highest priority) + #[arg(short = 'c', long, env = "PROVISIONING_DAEMON_CONFIG")] config: Option, + /// Configuration directory (searches for provisioning-daemon.ncl|toml|json) + #[arg(long, env = "PROVISIONING_CONFIG_DIR")] + config_dir: Option, + + /// Deployment mode (solo, multiuser, cicd, enterprise) + #[arg(short = 'm', long, env = "PROVISIONING_DAEMON_MODE")] + mode: Option, + /// Enable verbose logging - #[arg(short, long)] + #[arg(short = 'v', long)] verbose: bool, /// Validate configuration and exit diff --git a/crates/extension-registry/Cargo.toml b/crates/extension-registry/Cargo.toml index a4cafb5..4e77ac5 100644 --- a/crates/extension-registry/Cargo.toml +++ b/crates/extension-registry/Cargo.toml @@ -5,6 +5,10 @@ edition.workspace = true name = "extension-registry" version.workspace = true +[[bin]] +name = "provisioning-extension-registry" +path = "src/main.rs" + [dependencies] # Workspace dependencies async-trait = { workspace = true } @@ -21,7 +25,7 @@ serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } # Platform configuration -platform-config = { path = "../platform-config" } +platform-config = { workspace = true } # Error handling anyhow = { workspace = true } @@ -61,7 +65,7 @@ parking_lot = { workspace = true } toml = { workspace = true } [dev-dependencies] -http-body-util = "0.1" +http-body-util = { workspace = true } hyper = { workspace = true } tempfile = { workspace = true } tokio-test = { workspace = true } @@ -70,8 +74,3 @@ tokio-test = { workspace = true } [lib] name = "extension_registry" path = "src/lib.rs" - -# Binary target -[[bin]] -name = "extension-registry" -path = "src/main.rs" diff --git a/crates/extension-registry/Dockerfile b/crates/extension-registry/Dockerfile index d25280b..4526056 100644 --- a/crates/extension-registry/Dockerfile +++ b/crates/extension-registry/Dockerfile @@ -1,7 +1,34 @@ -# Build stage -FROM rust:1.75-slim as builder +# Multi-stage build for extension-registry +# Generated from Nickel template - DO NOT EDIT DIRECTLY +# Source: provisioning/schemas/platform/templates/docker/Dockerfile.chef.ncl -WORKDIR /app +# ============================================================================ +# Stage 1: PLANNER - Generate dependency recipe +# ============================================================================ +FROM rust:1.82-trixie AS planner + +WORKDIR /workspace + +# Install cargo-chef +RUN cargo install cargo-chef --version 0.1.67 + +# Copy workspace manifests +COPY Cargo.toml Cargo.lock ./ +COPY crates ./crates +COPY daemon-cli ./daemon-cli +COPY secretumvault ./secretumvault +COPY prov-ecosystem ./prov-ecosystem +COPY stratumiops ./stratumiops + +# Generate recipe.json 
(dependency graph) +RUN cargo chef prepare --recipe-path recipe.json --bin extension-registry + +# ============================================================================ +# Stage 2: CACHER - Build dependencies only +# ============================================================================ +FROM rust:1.82-trixie AS cacher + +WORKDIR /workspace # Install build dependencies RUN apt-get update && apt-get install -y \ @@ -9,40 +36,84 @@ RUN apt-get update && apt-get install -y \ libssl-dev \ && rm -rf /var/lib/apt/lists/* -# Copy manifests -COPY Cargo.toml Cargo.lock ./ +# Install cargo-chef +RUN cargo install cargo-chef --version 0.1.67 + +# sccache disabled + +# Copy recipe from planner +COPY --from=planner /workspace/recipe.json recipe.json + +# Build dependencies - This layer will be cached +RUN cargo chef cook --release --recipe-path recipe.json + +# ============================================================================ +# Stage 3: BUILDER - Build source code +# ============================================================================ +FROM rust:1.82-trixie AS builder + +WORKDIR /workspace + +# Install build dependencies +RUN apt-get update && apt-get install -y \ + pkg-config \ + libssl-dev \ + && rm -rf /var/lib/apt/lists/* + +# sccache disabled + +# Copy cached dependencies from cacher stage +COPY --from=cacher /workspace/target target +COPY --from=cacher /usr/local/cargo /usr/local/cargo # Copy source code -COPY src ./src +COPY Cargo.toml Cargo.lock ./ +COPY crates ./crates +COPY daemon-cli ./daemon-cli +COPY secretumvault ./secretumvault +COPY prov-ecosystem ./prov-ecosystem +COPY stratumiops ./stratumiops -# Build release binary -RUN cargo build --release +# Build release binary with parallelism +ENV CARGO_BUILD_JOBS=4 +RUN cargo build --release --package extension-registry -# Runtime stage -FROM debian:bookworm-slim +# ============================================================================ +# Stage 4: RUNTIME - Minimal runtime image +# ============================================================================ +FROM debian:trixie-slim # Install runtime dependencies RUN apt-get update && apt-get install -y \ ca-certificates \ + curl \ && rm -rf /var/lib/apt/lists/* # Create non-root user -RUN useradd -m -u 1000 registry && \ - mkdir -p /app/data && \ - chown -R registry:registry /app - -USER registry -WORKDIR /app +RUN useradd -m -u 1000 provisioning && \ + mkdir -p /data /var/log/extension-registry && \ + chown -R provisioning:provisioning /data /var/log/extension-registry # Copy binary from builder -COPY --from=builder /app/target/release/extension-registry /usr/local/bin/ +COPY --from=builder /workspace/target/release/extension-registry /usr/local/bin/extension-registry +RUN chmod +x /usr/local/bin/extension-registry -# Expose port -EXPOSE 8082 +# No config file to copy + +# Switch to non-root user +USER provisioning +WORKDIR /app + +# Expose service port +EXPOSE 9093 + +# Environment variables +ENV RUST_LOG=info +ENV DATA_DIR=/data # Health check -HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:8082/api/v1/health || exit 1 +HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ + CMD curl -f http://localhost:9093/health || exit 1 -# Run service +# Run the binary CMD ["extension-registry"] diff --git a/crates/extension-registry/src/handlers.rs b/crates/extension-registry/src/handlers.rs index 9f4e29d..1305e68 100644 --- a/crates/extension-registry/src/handlers.rs +++ 
b/crates/extension-registry/src/handlers.rs @@ -287,5 +287,6 @@ pub fn routes(state: AppState) -> Router { .route("/extensions/:name", get(get_extension)) // Health .route("/health", get(health)) + .route("/api/v1/health", get(health)) .with_state(state) } diff --git a/crates/extension-registry/src/main.rs b/crates/extension-registry/src/main.rs index b13520d..882728b 100644 --- a/crates/extension-registry/src/main.rs +++ b/crates/extension-registry/src/main.rs @@ -13,6 +13,18 @@ use handlers::{routes, AppState}; #[command(name = "extension-registry")] #[command(about = "OCI-compliant extension registry proxy", long_about = None)] struct Cli { + /// Configuration file path (highest priority) + #[arg(short = 'c', long, env = "EXTENSION_REGISTRY_CONFIG")] + config: Option, + + /// Configuration directory (searches for extension-registry.ncl|toml|json) + #[arg(long, env = "PROVISIONING_CONFIG_DIR")] + config_dir: Option, + + /// Deployment mode (solo, multiuser, cicd, enterprise) + #[arg(short = 'm', long, env = "EXTENSION_REGISTRY_MODE")] + mode: Option, + /// Host to bind to #[arg(long, default_value = "127.0.0.1")] host: String, diff --git a/crates/mcp-server/Cargo.toml b/crates/mcp-server/Cargo.toml index 87fae59..b71dcbb 100644 --- a/crates/mcp-server/Cargo.toml +++ b/crates/mcp-server/Cargo.toml @@ -1,14 +1,18 @@ [package] -authors = ["Jesús Pérez Lorenzo "] +authors.workspace = true categories = ["command-line-utilities", "development-tools"] description = "Rust-native MCP server for Infrastructure Automation system" edition.workspace = true keywords = ["mcp", "rust", "infrastructure", "provisioning", "ai"] license.workspace = true -name = "provisioning-mcp-server" +name = "mcp-server" repository.workspace = true version.workspace = true +[[bin]] +name = "provisioning-mcp-server" +path = "src/simple_main.rs" + [dependencies] # ============================================================================ # WORKSPACE DEPENDENCIES @@ -23,7 +27,7 @@ serde_json = { workspace = true } toml = { workspace = true } # Platform configuration -platform-config = { path = "../platform-config" } +platform-config = { workspace = true } # Error handling anyhow = { workspace = true } @@ -63,13 +67,13 @@ walkdir = { workspace = true } # rust-mcp-sdk = "0.7.0" # RAG System (from provisioning-rag crate) -provisioning-rag = { path = "../rag", features = [] } +rag = { path = "../rag", features = [] } # Date/time utilities chrono = { workspace = true } # YAML parsing -serde_yaml = "0.9" +serde_yaml = { workspace = true } # Directory utilities dirs = { workspace = true } @@ -83,14 +87,8 @@ tokio-test = { workspace = true } debug = ["tracing-subscriber/json"] default = [] -[[bin]] -name = "provisioning-mcp-server" -path = "src/simple_main.rs" - -# Disabled: main.rs uses incompatible rust_mcp_sdk v0.7.0 API -# [[bin]] -# name = "provisioning-mcp-server-full" -# path = "src/main.rs" +# Note: simple_main.rs is the active entry point +# main.rs uses incompatible rust_mcp_sdk v0.7.0 API and is disabled [lib] name = "provisioning_mcp_server" diff --git a/crates/mcp-server/Dockerfile b/crates/mcp-server/Dockerfile deleted file mode 100644 index 1e0f90e..0000000 --- a/crates/mcp-server/Dockerfile +++ /dev/null @@ -1,63 +0,0 @@ -# Build stage -FROM rust:1.75-slim as builder - -WORKDIR /app - -# Install build dependencies -RUN apt-get update && apt-get install -y \ - pkg-config \ - libssl-dev \ - && rm -rf /var/lib/apt/lists/* - -# Copy manifests -COPY Cargo.toml Cargo.lock ./ - -# Create dummy source to cache 
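The handlers.rs hunk above exposes the same health handler under both /health and /api/v1/health, which is what the container HEALTHCHECK curls. A minimal axum-style equivalent, with the application state elided and an illustrative response body:

use axum::{routing::get, Json, Router};
use serde_json::{json, Value};

// Illustrative payload; the real handler's response shape is defined in handlers.rs.
async fn health() -> Json<Value> {
    Json(json!({ "status": "ok" }))
}

fn health_routes() -> Router {
    // Same handler exposed under both paths so older probes keep working.
    Router::new()
        .route("/health", get(health))
        .route("/api/v1/health", get(health))
}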
dependencies -RUN mkdir -p src && \ - echo "fn main() {}" > src/main.rs && \ - cargo build --release && \ - rm -rf src - -# Copy actual source code -COPY src ./src - -# Build release binary -RUN cargo build --release --bin mcp-server - -# Runtime stage -FROM debian:bookworm-slim - -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - curl \ - && rm -rf /var/lib/apt/lists/* - -# Create non-root user -RUN useradd -m -u 1000 provisioning && \ - mkdir -p /data /var/log/mcp-server && \ - chown -R provisioning:provisioning /data /var/log/mcp-server - -# Copy binary from builder -COPY --from=builder /app/target/release/mcp-server /usr/local/bin/ - -# Copy default configuration -COPY config.defaults.toml /etc/provisioning/mcp-config.defaults.toml - -# Switch to non-root user -USER provisioning -WORKDIR /app - -# Expose port -EXPOSE 8084 - -# Set environment variables -ENV RUST_LOG=info -ENV MCP_PROTOCOL=http - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ - CMD curl -f http://localhost:8084/health || exit 1 - -# Run the binary -CMD ["mcp-server"] diff --git a/crates/mcp-server/src/main.rs b/crates/mcp-server/src/main.rs.disabled similarity index 67% rename from crates/mcp-server/src/main.rs rename to crates/mcp-server/src/main.rs.disabled index 7439943..507f3e1 100644 --- a/crates/mcp-server/src/main.rs +++ b/crates/mcp-server/src/main.rs.disabled @@ -1,5 +1,11 @@ #!/usr/bin/env -S cargo run -- -#![allow(dead_code, unused_imports, unused_variables, unused_assignments, unused)] +#![allow( + dead_code, + unused_imports, + unused_variables, + unused_assignments, + unused +)] #![allow( clippy::excessive_nesting, clippy::upper_case_acronyms, @@ -15,25 +21,25 @@ use anyhow::{Context, Result}; use clap::Parser; -use rust_mcp_sdk::Server; -use rust_mcp_sdk::schema::{ - CallToolRequest, CallToolResult, ServerCapabilities, TextContent, Tool -}; +// use rust_mcp_sdk::Server; +// use rust_mcp_sdk::schema::{ +// CallToolRequest, CallToolResult, ServerCapabilities, TextContent, Tool +// }; use serde_json::json; use std::path::PathBuf; -use tracing::{info, debug}; +use tracing::{debug, info}; mod config; +mod errors; mod provisioning; mod tools; -mod errors; use config::Config; use errors::ProvisioningError; use provisioning::ProvisioningEngine; use tools::{ - ProvisioningTools, SettingsTools, SystemStatusTool, NextActionTool, - DocFinderTool, TroubleshooterTool, ConfigValidatorTool, + ConfigValidatorTool, DocFinderTool, NextActionTool, ProvisioningTools, SettingsTools, + SystemStatusTool, TroubleshooterTool, }; #[derive(Parser, Debug)] @@ -41,20 +47,33 @@ use tools::{ #[command(about = "Rust-native MCP server for Cloud Native Provisioning")] #[command(version)] struct Args { - /// Configuration file path - #[arg(short, long, env = "PROVISIONING_MCP_CONFIG")] + /// Configuration file path (highest priority) + #[arg(short = 'c', long, env = "PROVISIONING_MCP_CONFIG")] config: Option, + /// Configuration directory (searches for mcp-server.ncl|toml|json) + #[arg(long, env = "PROVISIONING_CONFIG_DIR")] + config_dir: Option, + + /// Deployment mode (solo, multiuser, cicd, enterprise) + #[arg(short = 'm', long, env = "PROVISIONING_MCP_MODE")] + mode: Option, + /// Provisioning system path - #[arg(short, long, env = "PROVISIONING_PATH")] + #[arg(short = 'p', long, env = "PROVISIONING_PATH")] provisioning_path: Option, /// Debug mode - #[arg(short, long, env = "PROVISIONING_DEBUG")] + #[arg(short = 'd', long, env = 
"PROVISIONING_DEBUG")] debug: bool, /// Log level - #[arg(short, long, env = "PROVISIONING_LOG_LEVEL", default_value = "info")] + #[arg( + short = 'l', + long, + env = "PROVISIONING_LOG_LEVEL", + default_value = "info" + )] log_level: String, } @@ -77,18 +96,18 @@ impl ProvisioningMCPServer { pub fn new(config: Config) -> Result { let engine = ProvisioningEngine::new(&config)?; let tools = ProvisioningTools::new(&config); - let api_tools = tools::provisioning_tools::ProvisioningTools::new( - Some("http://localhost:3000".to_string()) - ); + let api_tools = tools::provisioning_tools::ProvisioningTools::new(Some( + "http://localhost:3000".to_string(), + )); let settings_tools = std::sync::Arc::new(tokio::sync::Mutex::new(SettingsTools::new())); // Initialize guidance tools let provisioning_root = config.provisioning_path.clone(); let system_status_tool = SystemStatusTool::new(provisioning_root.clone()); let next_action_tool = NextActionTool::new(provisioning_root.clone()); - let doc_finder_tool = std::sync::Arc::new(tokio::sync::Mutex::new( - DocFinderTool::new(provisioning_root.clone()) - )); + let doc_finder_tool = std::sync::Arc::new(tokio::sync::Mutex::new(DocFinderTool::new( + provisioning_root.clone(), + ))); let troubleshooter_tool = TroubleshooterTool::new(provisioning_root.clone()); let config_validator_tool = ConfigValidatorTool::new(provisioning_root); @@ -111,8 +130,8 @@ impl ProvisioningMCPServer { info!("Starting Enhanced Systems Provisioning MCP Server v2.0"); info!("New features: API integration, AI agents, dashboards, advanced analytics"); - let mut server = Server::new("enhanced-provisioning-server".to_string()) - .with_capabilities(ServerCapabilities { + let mut server = Server::new("enhanced-provisioning-server".to_string()).with_capabilities( + ServerCapabilities { tools: Some(json!({ "listChanged": true })), @@ -121,7 +140,8 @@ impl ProvisioningMCPServer { "listChanged": true })), ..Default::default() - }); + }, + ); // Register legacy tools server = server @@ -136,7 +156,10 @@ impl ProvisioningMCPServer { // Register enhanced API tools server = server .tool("ai_query", self.handle_enhanced_ai_query()) - .tool("get_infrastructure_status", self.handle_get_infrastructure_status()) + .tool( + "get_infrastructure_status", + self.handle_get_infrastructure_status(), + ) .tool("get_system_metrics", self.handle_get_system_metrics()) .tool("get_logs", self.handle_get_logs()) .tool("start_api_server", self.handle_start_api_server()) @@ -148,10 +171,16 @@ impl ProvisioningMCPServer { .tool("list_servers", self.handle_list_servers()) .tool("delete_servers", self.handle_delete_servers()) .tool("create_cluster", self.handle_create_cluster_enhanced()) - .tool("generate_infrastructure", self.handle_generate_infrastructure()) + .tool( + "generate_infrastructure", + self.handle_generate_infrastructure(), + ) .tool("get_cost_optimization", self.handle_get_cost_optimization()) .tool("get_security_analysis", self.handle_get_security_analysis()) - .tool("get_performance_analysis", self.handle_get_performance_analysis()) + .tool( + "get_performance_analysis", + self.handle_get_performance_analysis(), + ) .tool("predict_issues", self.handle_predict_issues()); // Register installer settings tools @@ -160,17 +189,35 @@ impl ProvisioningMCPServer { .tool("installer_complete_config", self.handle_complete_config()) .tool("installer_validate_config", self.handle_validate_config()) .tool("installer_get_defaults", self.handle_get_defaults()) - .tool("installer_platform_recommendations", 
self.handle_platform_recommendations()) - .tool("installer_service_recommendations", self.handle_service_recommendations()) - .tool("installer_resource_recommendations", self.handle_resource_recommendations()); + .tool( + "installer_platform_recommendations", + self.handle_platform_recommendations(), + ) + .tool( + "installer_service_recommendations", + self.handle_service_recommendations(), + ) + .tool( + "installer_resource_recommendations", + self.handle_resource_recommendations(), + ); // Register guidance tools server = server - .tool("guidance_check_system_status", self.handle_check_system_status()) - .tool("guidance_suggest_next_action", self.handle_suggest_next_action()) + .tool( + "guidance_check_system_status", + self.handle_check_system_status(), + ) + .tool( + "guidance_suggest_next_action", + self.handle_suggest_next_action(), + ) .tool("guidance_find_docs", self.handle_find_docs()) .tool("guidance_diagnose_issue", self.handle_diagnose_issue()) - .tool("guidance_validate_config_file", self.handle_validate_config_file()); + .tool( + "guidance_validate_config_file", + self.handle_validate_config_file(), + ); server .run() @@ -186,7 +233,9 @@ impl ProvisioningMCPServer { Tool { name: "provision_create_server".to_string(), title: Some("provision_create_server".to_string()), - description: "Create infrastructure servers using natural language or specific configuration".to_string(), + description: + "Create infrastructure servers using natural language or specific configuration" + .to_string(), input_schema: json!({ "type": "object", "properties": { @@ -213,7 +262,8 @@ impl ProvisioningMCPServer { }), annotations: None, meta: None, - output_schema: None, }, + output_schema: None, + }, Tool { name: "provision_ai_template".to_string(), title: Some("provision_ai_template".to_string()), @@ -241,11 +291,13 @@ impl ProvisioningMCPServer { }), annotations: None, meta: None, - output_schema: None, }, + output_schema: None, + }, Tool { name: "provision_query".to_string(), title: Some("provision_query".to_string()), - description: "Query infrastructure state and configuration using natural language".to_string(), + description: "Query infrastructure state and configuration using natural language" + .to_string(), input_schema: json!({ "type": "object", "properties": { @@ -268,11 +320,14 @@ impl ProvisioningMCPServer { }), annotations: None, meta: None, - output_schema: None, }, + output_schema: None, + }, Tool { name: "provision_deploy_taskserv".to_string(), title: Some("provision_deploy_taskserv".to_string()), - description: "Deploy specific infrastructure services (databases, networking, etc.)".to_string(), + description: + "Deploy specific infrastructure services (databases, networking, etc.)" + .to_string(), input_schema: json!({ "type": "object", "properties": { @@ -298,7 +353,8 @@ impl ProvisioningMCPServer { }), annotations: None, meta: None, - output_schema: None, }, + output_schema: None, + }, Tool { name: "provision_cluster_create".to_string(), title: Some("provision_cluster_create".to_string()), @@ -329,7 +385,8 @@ impl ProvisioningMCPServer { }), annotations: None, meta: None, - output_schema: None, }, + output_schema: None, + }, Tool { name: "provision_status".to_string(), title: Some("provision_status".to_string()), @@ -350,11 +407,13 @@ impl ProvisioningMCPServer { }), annotations: None, meta: None, - output_schema: None, }, + output_schema: None, + }, Tool { name: "provision_ai_config".to_string(), title: Some("provision_ai_config".to_string()), - description: "Configure and 
verify AI capabilities for the provisioning system".to_string(), + description: "Configure and verify AI capabilities for the provisioning system" + .to_string(), input_schema: json!({ "type": "object", "properties": { @@ -373,7 +432,8 @@ impl ProvisioningMCPServer { }), annotations: None, meta: None, - output_schema: None, }, + output_schema: None, + }, ]) } @@ -382,46 +442,60 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let description = arguments.get("description") + let description = arguments + .get("description") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("description is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("description is required".to_string()) + })?; - let infra_name = arguments.get("infra_name") + let infra_name = arguments + .get("infra_name") .and_then(|v| v.as_str()) .unwrap_or("ai-generated"); - let provider = arguments.get("provider") + let provider = arguments + .get("provider") .and_then(|v| v.as_str()) .unwrap_or("aws"); - let check_mode = arguments.get("check_mode") + let check_mode = arguments + .get("check_mode") .and_then(|v| v.as_bool()) .unwrap_or(true); info!("Creating server with description: {}", description); - debug!("Parameters: infra={}, provider={}, check={}", infra_name, provider, check_mode); + debug!( + "Parameters: infra={}, provider={}, check={}", + infra_name, provider, check_mode + ); // Use AI to parse the natural language description let parsed_json = self.tools.parse_server_description(description)?; // Convert JSON to ServerConfig let parsed_config = provisioning::ServerConfig { - hostname: parsed_json.get("hostname") + hostname: parsed_json + .get("hostname") .and_then(|v| v.as_str()) .unwrap_or(infra_name) .to_string(), - instance_type: parsed_json.get("instance_type") + instance_type: parsed_json + .get("instance_type") .and_then(|v| v.as_str()) .unwrap_or("t3.medium") .to_string(), - count: parsed_json.get("count") + count: parsed_json + .get("count") .and_then(|v| v.as_u64()) .unwrap_or(1) as u32, provider: provider.to_string(), - region: parsed_json.get("region") + region: parsed_json + .get("region") .and_then(|v| v.as_str()) .map(|s| s.to_string()), - purpose: parsed_json.get("purpose") + purpose: parsed_json + .get("purpose") .and_then(|v| v.as_str()) .map(|s| s.to_string()), }; @@ -430,7 +504,12 @@ impl ProvisioningMCPServer { let result = self.engine.create_server(&parsed_config, check_mode)?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("✅ Server creation completed:\n\n{}", result)))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "✅ Server creation completed:\n\n{}", + result + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -443,24 +522,38 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let description = arguments.get("description") + let description = arguments + .get("description") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("description is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("description is required".to_string()) + })?; - let template_type = arguments.get("template_type") + let template_type = arguments + .get("template_type") 
.and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("template_type is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("template_type is required".to_string()) + })?; - let complexity = arguments.get("complexity") + let complexity = arguments + .get("complexity") .and_then(|v| v.as_str()) .unwrap_or("medium"); info!("Generating {} template: {}", template_type, description); - let template = self.tools.generate_ai_template(description, template_type, complexity)?; + let template = + self.tools + .generate_ai_template(description, template_type, complexity)?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🤖 Generated {} template:\n\n```kcl\n{}\n```", template_type, template)))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🤖 Generated {} template:\n\n```kcl\n{}\n```", + template_type, template + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -473,23 +566,31 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let query = arguments.get("query") + let query = arguments + .get("query") .and_then(|v| v.as_str()) .ok_or_else(|| ProvisioningError::InvalidInput("query is required".to_string()))?; - let infra_name = arguments.get("infra_name") - .and_then(|v| v.as_str()); + let infra_name = arguments.get("infra_name").and_then(|v| v.as_str()); - let output_format = arguments.get("output_format") + let output_format = arguments + .get("output_format") .and_then(|v| v.as_str()) .unwrap_or("human"); info!("Processing query: {}", query); - let result = self.engine.process_query(query, infra_name, output_format)?; + let result = self + .engine + .process_query(query, infra_name, output_format)?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🔍 Query result:\n\n{}", result)))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🔍 Query result:\n\n{}", + result + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -502,28 +603,46 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let service_name = arguments.get("service_name") + let service_name = arguments + .get("service_name") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("service_name is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("service_name is required".to_string()) + })?; - let infra_name = arguments.get("infra_name") + let infra_name = arguments + .get("infra_name") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("infra_name is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("infra_name is required".to_string()) + })?; - let configuration = arguments.get("configuration") - .cloned() - .unwrap_or_default(); + let configuration = arguments.get("configuration").cloned().unwrap_or_default(); - let check_mode = arguments.get("check_mode") + let check_mode = arguments + .get("check_mode") .and_then(|v| v.as_bool()) .unwrap_or(true); - info!("Deploying service {} to infrastructure {}", service_name, infra_name); + info!( + "Deploying service {} to infrastructure 
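The tool handlers in this file repeat the same arguments.get(..).and_then(|v| v.as_str()) pattern for required and optional parameters. Two small helpers capture that pattern; they are illustrative only, since the patch inlines the logic and maps failures to ProvisioningError::InvalidInput rather than a plain String:

use serde_json::{Map, Value};

// Illustrative helpers; not part of the patch.
fn required_str<'a>(args: &'a Map<String, Value>, key: &str) -> Result<&'a str, String> {
    args.get(key)
        .and_then(|v| v.as_str())
        .ok_or_else(|| format!("{key} is required"))
}

fn optional_str<'a>(args: &'a Map<String, Value>, key: &str, default: &'a str) -> &'a str {
    args.get(key).and_then(|v| v.as_str()).unwrap_or(default)
}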
{}", + service_name, infra_name + ); - let result = self.engine.deploy_taskserv(service_name, infra_name, &configuration, check_mode)?; + let result = self.engine.deploy_taskserv( + service_name, + infra_name, + &configuration, + check_mode, + )?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("⚙️ Service deployment result:\n\n{}", result)))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "⚙️ Service deployment result:\n\n{}", + result + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -536,29 +655,47 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let cluster_description = arguments.get("cluster_description") + let cluster_description = arguments + .get("cluster_description") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("cluster_description is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("cluster_description is required".to_string()) + })?; - let cluster_type = arguments.get("cluster_type") + let cluster_type = arguments + .get("cluster_type") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("cluster_type is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("cluster_type is required".to_string()) + })?; - let services = arguments.get("services") + let services = arguments + .get("services") .and_then(|v| v.as_array()) .map(|arr| arr.iter().filter_map(|v| v.as_str()).collect::>()) .unwrap_or_default(); - let infra_name = arguments.get("infra_name") + let infra_name = arguments + .get("infra_name") .and_then(|v| v.as_str()) .unwrap_or("ai-cluster"); info!("Creating {} cluster: {}", cluster_type, cluster_description); - let result = self.engine.create_cluster(cluster_description, cluster_type, &services, infra_name)?; + let result = self.engine.create_cluster( + cluster_description, + cluster_type, + &services, + infra_name, + )?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🚢 Cluster creation result:\n\n{}", result)))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🚢 Cluster creation result:\n\n{}", + result + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -571,10 +708,10 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let infra_name = arguments.get("infra_name") - .and_then(|v| v.as_str()); + let infra_name = arguments.get("infra_name").and_then(|v| v.as_str()); - let detailed = arguments.get("detailed") + let detailed = arguments + .get("detailed") .and_then(|v| v.as_bool()) .unwrap_or(false); @@ -583,7 +720,12 @@ impl ProvisioningMCPServer { let result = self.engine.get_status(infra_name, detailed)?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("📊 Infrastructure status:\n\n{}", result)))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "📊 Infrastructure status:\n\n{}", + result + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -596,29 +738,42 @@ impl 
ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let action = arguments.get("action") + let action = arguments + .get("action") .and_then(|v| v.as_str()) .ok_or_else(|| ProvisioningError::InvalidInput("action is required".to_string()))?; - let provider = arguments.get("provider") - .and_then(|v| v.as_str()); + let provider = arguments.get("provider").and_then(|v| v.as_str()); info!("AI config action: {}", action); let result = match action { "status" => self.tools.get_ai_status()?, "configure" => { - let provider = provider.ok_or_else(|| - ProvisioningError::InvalidInput("provider is required for configure action".to_string()) - )?; + let provider = provider.ok_or_else(|| { + ProvisioningError::InvalidInput( + "provider is required for configure action".to_string(), + ) + })?; self.tools.configure_ai(provider)? - }, + } "test" => self.tools.test_ai_connection()?, - _ => return Err(ProvisioningError::InvalidInput(format!("Unknown action: {}", action)).into()), + _ => { + return Err(ProvisioningError::InvalidInput(format!( + "Unknown action: {}", + action + )) + .into()) + } }; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🤖 AI configuration result:\n\n{}", result)))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🤖 AI configuration result:\n\n{}", + result + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -631,7 +786,8 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let query = arguments.get("query") + let query = arguments + .get("query") .and_then(|v| v.as_str()) .ok_or_else(|| ProvisioningError::InvalidInput("query is required".to_string()))?; @@ -640,13 +796,16 @@ impl ProvisioningMCPServer { info!("Enhanced AI query: {}", query); // This would be async in real implementation - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.ai_query(query, context) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.ai_query(query, context))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🤖 AI Analysis:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🤖 AI Analysis:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -654,17 +813,22 @@ impl ProvisioningMCPServer { } } - fn handle_get_infrastructure_status(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_get_infrastructure_status( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { info!("Getting infrastructure status via API"); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.get_infrastructure_status() - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.get_infrastructure_status())?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🏗️ Infrastructure Status:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🏗️ Infrastructure Status:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -679,13 +843,16 @@ impl ProvisioningMCPServer { info!("Getting system metrics for timerange: {:?}", timerange); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.get_system_metrics(timerange) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.get_system_metrics(timerange))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("📊 System Metrics:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "📊 System Metrics:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -701,13 +868,16 @@ impl ProvisioningMCPServer { info!("Getting logs - level: {:?}, since: {:?}", level, since); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.get_logs(level, since) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.get_logs(level, since))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("📋 System Logs:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "📋 System Logs:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -718,17 +888,23 @@ impl ProvisioningMCPServer { fn handle_start_api_server(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let port = arguments.get("port").and_then(|v| v.as_u64()).map(|p| p as u16); + let port = arguments + .get("port") + .and_then(|v| v.as_u64()) + .map(|p| p as u16); info!("Starting API server on port: {:?}", port); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.start_api_server(port) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.start_api_server(port))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🚀 API Server:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🚀 API Server:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -742,15 +918,21 @@ impl ProvisioningMCPServer { let template = arguments.get("template").and_then(|v| v.as_str()); let name = arguments.get("name").and_then(|v| v.as_str()); - info!("Creating dashboard - template: {:?}, name: {:?}", template, name); + info!( + "Creating dashboard - template: {:?}, name: {:?}", + template, name + ); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.create_dashboard(template, name) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.create_dashboard(template, name))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("📊 Dashboard Creation:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "📊 Dashboard Creation:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -761,20 +943,27 @@ impl ProvisioningMCPServer { fn handle_start_dashboard(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let name = arguments.get("name") + let name = arguments + .get("name") .and_then(|v| v.as_str()) .ok_or_else(|| ProvisioningError::InvalidInput("name is required".to_string()))?; - let port = arguments.get("port").and_then(|v| v.as_u64()).map(|p| p as u16); + let port = arguments + .get("port") + .and_then(|v| v.as_u64()) + .map(|p| p as u16); info!("Starting dashboard: {} on port: {:?}", name, port); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.start_dashboard(name, port) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.start_dashboard(name, port))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🌐 Dashboard Started:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🌐 Dashboard Started:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -785,19 +974,23 @@ impl ProvisioningMCPServer { fn handle_start_ai_agents(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let agents = arguments.get("agents") + let agents = arguments + .get("agents") .and_then(|v| v.as_array()) .map(|arr| arr.iter().filter_map(|v| v.as_str()).collect::>()); info!("Starting AI agents: {:?}", agents); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.start_ai_agents(agents) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.start_ai_agents(agents))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🤖 AI Agents Started:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🤖 AI Agents Started:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -809,13 +1002,16 @@ impl ProvisioningMCPServer { |request: CallToolRequest| -> Result { info!("Getting AI agents status"); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.get_agents_status() - )?; + let result = + tokio::runtime::Handle::current().block_on(self.api_tools.get_agents_status())?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🤖 Agents Status:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🤖 Agents Status:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -826,18 +1022,22 @@ impl ProvisioningMCPServer { fn handle_create_servers(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let config = arguments.get("config") + let config = arguments + .get("config") .ok_or_else(|| ProvisioningError::InvalidInput("config is required".to_string()))?; info!("Creating servers with config: {:?}", config); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.create_servers(config) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.create_servers(config))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🖥️ Servers Created:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🖥️ Servers Created:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -852,13 +1052,16 @@ impl ProvisioningMCPServer { info!("Listing servers for provider: {:?}", provider); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.list_servers(provider) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.list_servers(provider))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("📋 Server List:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "📋 Server List:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -869,20 +1072,26 @@ impl ProvisioningMCPServer { fn handle_delete_servers(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let server_names = arguments.get("server_names") + let server_names = arguments + .get("server_names") .and_then(|v| v.as_array()) .map(|arr| arr.iter().filter_map(|v| v.as_str()).collect::>()) - .ok_or_else(|| ProvisioningError::InvalidInput("server_names is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("server_names is required".to_string()) + })?; info!("Deleting servers: {:?}", server_names); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.delete_servers(&server_names) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.delete_servers(&server_names))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🗑️ Servers Deleted:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🗑️ Servers Deleted:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -890,21 +1099,27 @@ impl ProvisioningMCPServer { } } - fn handle_create_cluster_enhanced(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_create_cluster_enhanced( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let config = arguments.get("config") + let config = arguments + .get("config") .ok_or_else(|| ProvisioningError::InvalidInput("config is required".to_string()))?; info!("Creating cluster with config: {:?}", config); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.create_cluster(config) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.create_cluster(config))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🚢 Cluster Created:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🚢 Cluster Created:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -912,23 +1127,36 @@ impl ProvisioningMCPServer { } } - fn handle_generate_infrastructure(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_generate_infrastructure( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let description = arguments.get("description") + let description = arguments + .get("description") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("description is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("description is required".to_string()) + })?; let output_type = arguments.get("output_type").and_then(|v| v.as_str()); - info!("Generating infrastructure from description: {}", description); + info!( + "Generating infrastructure from description: {}", + description + ); let result = tokio::runtime::Handle::current().block_on( - self.api_tools.generate_infrastructure(description, output_type) + self.api_tools + .generate_infrastructure(description, output_type), )?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🏗️ Generated Infrastructure:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🏗️ Generated Infrastructure:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -936,17 +1164,22 @@ impl ProvisioningMCPServer { } } - fn handle_get_cost_optimization(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_get_cost_optimization( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { info!("Getting cost optimization recommendations"); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.get_cost_optimization() - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.get_cost_optimization())?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("💰 Cost Optimization:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "💰 Cost Optimization:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -954,17 +1187,22 @@ impl ProvisioningMCPServer { } } - fn handle_get_security_analysis(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_get_security_analysis( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { info!("Getting security analysis"); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.get_security_analysis() - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.get_security_analysis())?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🛡️ Security Analysis:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🛡️ Security Analysis:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -972,17 +1210,22 @@ impl ProvisioningMCPServer { } } - fn handle_get_performance_analysis(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_get_performance_analysis( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { info!("Getting performance analysis"); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.get_performance_analysis() - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.get_performance_analysis())?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("⚡ Performance Analysis:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "⚡ Performance Analysis:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -997,13 +1240,16 @@ impl ProvisioningMCPServer { info!("Predicting issues for timeframe: {:?}", timeframe); - let result = tokio::runtime::Handle::current().block_on( - self.api_tools.predict_issues(timeframe) - )?; + let result = tokio::runtime::Handle::current() + .block_on(self.api_tools.predict_issues(timeframe))?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🔮 Issue Predictions:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🔮 Issue Predictions:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -1027,8 +1273,12 @@ impl ProvisioningMCPServer { })?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("⚙️ Installer Settings:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "⚙️ Installer Settings:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -1039,9 +1289,7 @@ impl ProvisioningMCPServer { fn handle_complete_config(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let partial_config = arguments.get("config") - .cloned() - .unwrap_or(json!({})); + let partial_config = arguments.get("config").cloned().unwrap_or(json!({})); info!("Completing partial configuration"); @@ -1052,8 +1300,12 @@ impl ProvisioningMCPServer { })?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("✅ Completed Configuration:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "✅ Completed Configuration:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -1064,7 +1316,8 @@ impl ProvisioningMCPServer { fn handle_validate_config(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let config = arguments.get("config") + let config = arguments + .get("config") .ok_or_else(|| ProvisioningError::InvalidInput("config is required".to_string()))? .clone(); @@ -1076,12 +1329,20 @@ impl ProvisioningMCPServer { tools.validate_config(config) })?; - let is_valid = result.get("valid").and_then(|v| v.as_bool()).unwrap_or(false); + let is_valid = result + .get("valid") + .and_then(|v| v.as_bool()) + .unwrap_or(false); let icon = if is_valid { "✅" } else { "❌" }; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("{} Configuration Validation:\n\n{}", icon, serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "{} Configuration Validation:\n\n{}", + icon, + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(!is_valid), meta: None, structured_content: None, @@ -1092,7 +1353,8 @@ impl ProvisioningMCPServer { fn handle_get_defaults(&self) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let mode = arguments.get("mode") + let mode = arguments + .get("mode") .and_then(|v| v.as_str()) .ok_or_else(|| ProvisioningError::InvalidInput("mode is required".to_string()))?; @@ -1105,8 +1367,12 @@ impl ProvisioningMCPServer { })?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("📋 Mode Defaults:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "📋 Mode Defaults:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -1114,7 +1380,9 @@ impl ProvisioningMCPServer { } } - fn handle_platform_recommendations(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_platform_recommendations( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { info!("Getting platform recommendations"); @@ -1125,8 +1393,12 @@ impl ProvisioningMCPServer { })?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🎯 Platform Recommendations:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🎯 Platform Recommendations:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -1134,10 +1406,13 @@ impl ProvisioningMCPServer { } } - fn handle_service_recommendations(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_service_recommendations( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let mode_str = arguments.get("mode") + let mode_str = arguments + .get("mode") .and_then(|v| v.as_str()) .ok_or_else(|| ProvisioningError::InvalidInput("mode is required".to_string()))?; @@ -1146,14 +1421,20 @@ impl ProvisioningMCPServer { let settings_tools = self.settings_tools.clone(); let result = tokio::runtime::Handle::current().block_on(async move { let tools = settings_tools.lock().await; - let mode = tools::settings::DeploymentMode::from_str(mode_str) - .ok_or_else(|| ProvisioningError::invalid_input(format!("Invalid mode: {}", mode_str)))?; + let mode = + tools::settings::DeploymentMode::from_str(mode_str).ok_or_else(|| { + ProvisioningError::invalid_input(format!("Invalid mode: {}", mode_str)) + })?; Ok::<_, ProvisioningError>(tools.get_service_recommendations(&mode)) })?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("🔧 Service Recommendations:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "🔧 Service Recommendations:\n\n{}", + serde_json::to_string_pretty(&result)? 
+ )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -1161,10 +1442,13 @@ impl ProvisioningMCPServer { } } - fn handle_resource_recommendations(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_resource_recommendations( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); - let mode_str = arguments.get("mode") + let mode_str = arguments + .get("mode") .and_then(|v| v.as_str()) .ok_or_else(|| ProvisioningError::InvalidInput("mode is required".to_string()))?; @@ -1173,14 +1457,20 @@ impl ProvisioningMCPServer { let settings_tools = self.settings_tools.clone(); let result = tokio::runtime::Handle::current().block_on(async move { let tools = settings_tools.lock().await; - let mode = tools::settings::DeploymentMode::from_str(mode_str) - .ok_or_else(|| ProvisioningError::invalid_input(format!("Invalid mode: {}", mode_str)))?; + let mode = + tools::settings::DeploymentMode::from_str(mode_str).ok_or_else(|| { + ProvisioningError::invalid_input(format!("Invalid mode: {}", mode_str)) + })?; Ok::<_, ProvisioningError>(tools.get_resource_recommendations(&mode)) })?; Ok(CallToolResult { - content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent(rust_mcp_sdk::schema::TextContent::from(format!("💾 Resource Recommendations:\n\n{}", serde_json::to_string_pretty(&result)?), - ))], + content: vec![rust_mcp_sdk::schema::ContentBlock::TextContent( + rust_mcp_sdk::schema::TextContent::from(format!( + "💾 Resource Recommendations:\n\n{}", + serde_json::to_string_pretty(&result)? + )), + )], is_error: Some(false), meta: None, structured_content: None, @@ -1192,7 +1482,9 @@ impl ProvisioningMCPServer { // Guidance Tool Handlers // ============================================================================ - fn handle_check_system_status(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_check_system_status( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |_request: CallToolRequest| -> Result { info!("Checking system status"); @@ -1212,7 +1504,9 @@ impl ProvisioningMCPServer { } } - fn handle_suggest_next_action(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_suggest_next_action( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |_request: CallToolRequest| -> Result { info!("Suggesting next action"); @@ -1303,16 +1597,27 @@ impl ProvisioningMCPServer { let mut output = String::from("🔧 Issue Diagnosis:\n\n"); output.push_str(&format!("**Error:** {}\n\n", diagnosis.error)); output.push_str(&format!("**Root Cause:** {}\n\n", diagnosis.root_cause)); - output.push_str(&format!("**Confidence:** {:.1}%\n\n", diagnosis.confidence * 100.0)); + output.push_str(&format!( + "**Confidence:** {:.1}%\n\n", + diagnosis.confidence * 100.0 + )); output.push_str("**Suggested Fixes:**\n\n"); for (idx, fix) in diagnosis.fixes.iter().enumerate() { - output.push_str(&format!("{}. {} (Risk: {:?})\n", idx + 1, fix.description, fix.risk)); + output.push_str(&format!( + "{}. 
{} (Risk: {:?})\n", + idx + 1, + fix.description, + fix.risk + )); output.push_str(" Commands:\n"); for cmd in &fix.commands { output.push_str(&format!(" ```\n {}\n ```\n", cmd)); } - output.push_str(&format!(" Expected outcome: {}\n\n", fix.expected_outcome)); + output.push_str(&format!( + " Expected outcome: {}\n\n", + fix.expected_outcome + )); } if !diagnosis.related_docs.is_empty() { @@ -1333,13 +1638,17 @@ impl ProvisioningMCPServer { } } - fn handle_validate_config_file(&self) -> impl Fn(CallToolRequest) -> Result + '_ { + fn handle_validate_config_file( + &self, + ) -> impl Fn(CallToolRequest) -> Result + '_ { |request: CallToolRequest| -> Result { let arguments = request.params.arguments.unwrap_or_default(); let config_path = arguments .get("config_path") .and_then(|v| v.as_str()) - .ok_or_else(|| ProvisioningError::InvalidInput("config_path is required".to_string()))?; + .ok_or_else(|| { + ProvisioningError::InvalidInput("config_path is required".to_string()) + })?; info!("Validating configuration file: {}", config_path); diff --git a/crates/orchestrator/Cargo.toml b/crates/orchestrator/Cargo.toml index 4543038..f26c298 100644 --- a/crates/orchestrator/Cargo.toml +++ b/crates/orchestrator/Cargo.toml @@ -2,7 +2,7 @@ authors.workspace = true description = "Cloud-native infrastructure orchestrator with Nushell integration" edition.workspace = true -name = "provisioning-orchestrator" +name = "orchestrator" version.workspace = true [dependencies] @@ -45,28 +45,28 @@ clap = { workspace = true } tracing-subscriber = { workspace = true } # Docker/Container management -bollard = "0.17" +bollard = { workspace = true } # HTTP client for DNS/OCI/services reqwest = { workspace = true } # HTTP service clients (machines, init, AI) - enables remote service calls -service-clients = { path = "../service-clients" } +service-clients = { workspace = true } # Platform configuration management -platform-config = { path = "../platform-config" } +platform-config = { workspace = true } # LRU cache for OCI manifests -lru = "0.12" +lru = { workspace = true } # Authorization policy engine -cedar-policy = "4.2" +cedar-policy = { workspace = true } # File system watcher for hot reload -notify = "6.1" +notify = { workspace = true } # Base64 encoding/decoding -base64 = "0.22" +base64 = { workspace = true } # JWT token validation jsonwebtoken = { workspace = true } @@ -78,14 +78,14 @@ rsa = { workspace = true } sha2 = { workspace = true } # SSH key management -ed25519-dalek = "2.1" +ed25519-dalek = { workspace = true } # SSH client library (pure Rust, async-first) -russh = "0.44" -russh-keys = "0.44" +russh = { workspace = true } +russh-keys = { workspace = true } # Path expansion for tilde (~) handling -shellexpand = "3.1" +shellexpand = { workspace = true } # ============================================================================ # FEATURE-GATED OPTIONAL DEPENDENCIES @@ -141,9 +141,18 @@ http-api = ["core"] # SurrealDB: Optional storage backend surrealdb = ["dep:surrealdb"] -# Default: Recommended for standard deployments -# Includes core, audit, compliance, platform, ssh, workflow -default = ["core", "audit", "compliance", "platform", "ssh", "workflow", "http-api"] +# Default: All features enabled +default = [ + "core", + "audit", + "compliance", + "platform", + "ssh", + "workflow", + "testing", + "http-api", + "surrealdb", +] # Full: All features enabled (development and testing) all = [ @@ -170,11 +179,10 @@ tower = { workspace = true, features = ["util"] } name = "provisioning_orchestrator" path = 
"src/lib.rs" -# Binary target (requires testing feature for test environment API) +# Binary target (uses all features by default) [[bin]] name = "provisioning-orchestrator" path = "src/main.rs" -required-features = ["all"] [[bench]] harness = false diff --git a/crates/orchestrator/Dockerfile b/crates/orchestrator/Dockerfile index 29ead8d..930119c 100644 --- a/crates/orchestrator/Dockerfile +++ b/crates/orchestrator/Dockerfile @@ -1,8 +1,32 @@ -# Multi-stage build for Orchestrator -# Builds from platform workspace root +# Multi-stage build for provisioning-orchestrator +# Generated from Nickel template - DO NOT EDIT DIRECTLY +# Source: provisioning/schemas/platform/templates/docker/Dockerfile.chef.ncl -# Build stage - Using nightly for consistency with control-center -FROM rustlang/rust:nightly-bookworm AS builder +# ============================================================================ +# Stage 1: PLANNER - Generate dependency recipe +# ============================================================================ +FROM rust:1.82-trixie AS planner + +WORKDIR /workspace + +# Install cargo-chef +RUN cargo install cargo-chef --version 0.1.67 + +# Copy workspace manifests +COPY Cargo.toml Cargo.lock ./ +COPY crates ./crates +COPY daemon-cli ./daemon-cli +COPY secretumvault ./secretumvault +COPY prov-ecosystem ./prov-ecosystem +COPY stratumiops ./stratumiops + +# Generate recipe.json (dependency graph) +RUN cargo chef prepare --recipe-path recipe.json --bin provisioning-orchestrator + +# ============================================================================ +# Stage 2: CACHER - Build dependencies only +# ============================================================================ +FROM rust:1.82-trixie AS cacher WORKDIR /workspace @@ -12,20 +36,52 @@ RUN apt-get update && apt-get install -y \ libssl-dev \ && rm -rf /var/lib/apt/lists/* -# Copy entire platform workspace (required for workspace dependencies) -COPY Cargo.toml Cargo.lock ./ -COPY orchestrator ./orchestrator -COPY control-center ./control-center -COPY control-center-ui ./control-center-ui -COPY mcp-server ./mcp-server -COPY installer ./installer +# Install cargo-chef +RUN cargo install cargo-chef --version 0.1.67 + +# sccache disabled + +# Copy recipe from planner +COPY --from=planner /workspace/recipe.json recipe.json + +# Build dependencies - This layer will be cached +RUN cargo chef cook --release --recipe-path recipe.json + +# ============================================================================ +# Stage 3: BUILDER - Build source code +# ============================================================================ +FROM rust:1.82-trixie AS builder -# Build orchestrator (workspace-aware) WORKDIR /workspace -RUN cargo build --release --package provisioning-orchestrator -# Runtime stage -FROM debian:bookworm-slim +# Install build dependencies +RUN apt-get update && apt-get install -y \ + pkg-config \ + libssl-dev \ + && rm -rf /var/lib/apt/lists/* + +# sccache disabled + +# Copy cached dependencies from cacher stage +COPY --from=cacher /workspace/target target +COPY --from=cacher /usr/local/cargo /usr/local/cargo + +# Copy source code +COPY Cargo.toml Cargo.lock ./ +COPY crates ./crates +COPY daemon-cli ./daemon-cli +COPY secretumvault ./secretumvault +COPY prov-ecosystem ./prov-ecosystem +COPY stratumiops ./stratumiops + +# Build release binary with parallelism +ENV CARGO_BUILD_JOBS=4 +RUN cargo build --release --package provisioning-orchestrator + +# 
============================================================================ +# Stage 4: RUNTIME - Minimal runtime image +# ============================================================================ +FROM debian:trixie-slim # Install runtime dependencies RUN apt-get update && apt-get install -y \ @@ -35,30 +91,29 @@ RUN apt-get update && apt-get install -y \ # Create non-root user RUN useradd -m -u 1000 provisioning && \ - mkdir -p /data /var/log/orchestrator && \ - chown -R provisioning:provisioning /data /var/log/orchestrator + mkdir -p /data /var/log/provisioning-orchestrator && \ + chown -R provisioning:provisioning /data /var/log/provisioning-orchestrator # Copy binary from builder COPY --from=builder /workspace/target/release/provisioning-orchestrator /usr/local/bin/provisioning-orchestrator RUN chmod +x /usr/local/bin/provisioning-orchestrator -# Copy default configuration -COPY orchestrator/config.defaults.toml /etc/provisioning/config.defaults.toml +COPY crates/provisioning-orchestrator/config.defaults.toml /etc/provisioning/config.defaults.toml # Switch to non-root user USER provisioning WORKDIR /app -# Expose port -EXPOSE 8080 +# Expose service port +EXPOSE 9090 -# Set environment variables +# Environment variables ENV RUST_LOG=info ENV DATA_DIR=/data # Health check HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ - CMD curl -f http://localhost:8080/health || exit 1 + CMD curl -f http://localhost:9090/health || exit 1 # Run the binary CMD ["provisioning-orchestrator"] diff --git a/crates/orchestrator/rollback_instructions_14043518-e459-4316-aadd-6ee6d221e644.txt b/crates/orchestrator/rollback_instructions_14043518-e459-4316-aadd-6ee6d221e644.txt deleted file mode 100644 index b4f040b..0000000 --- a/crates/orchestrator/rollback_instructions_14043518-e459-4316-aadd-6ee6d221e644.txt +++ /dev/null @@ -1,20 +0,0 @@ -Manual Rollback Instructions for Migration migration-123 -============================================= - -Migration ID: migration-123 -Rollback ID: 14043518-e459-4316-aadd-6ee6d221e644 -Timestamp: 2026-01-06 12:47:28 UTC - -Steps to rollback: -1. Stop the orchestrator service -2. If backup exists at '/var/folders/my/jvl5hd5x6rgbhks6yszk213c0000gn/T/.tmpdtjZLt/backup.json', restore it to target storage -3. Verify target storage contains original data -4. Remove any partially migrated data -5. Restart orchestrator service - -Storage-specific cleanup commands: -- For filesystem: Remove or rename target directory -- For SurrealDB embedded: Delete database files in target directory -- For SurrealDB server: Connect and drop/recreate database - -IMPORTANT: Test data integrity before resuming operations! diff --git a/crates/orchestrator/rollback_instructions_1e9b4914-f290-4bec-80f2-35128250f9fd.txt b/crates/orchestrator/rollback_instructions_1e9b4914-f290-4bec-80f2-35128250f9fd.txt deleted file mode 100644 index c35baf2..0000000 --- a/crates/orchestrator/rollback_instructions_1e9b4914-f290-4bec-80f2-35128250f9fd.txt +++ /dev/null @@ -1,20 +0,0 @@ -Manual Rollback Instructions for Migration migration-123 -============================================= - -Migration ID: migration-123 -Rollback ID: 1e9b4914-f290-4bec-80f2-35128250f9fd -Timestamp: 2026-01-06 12:50:41 UTC - -Steps to rollback: -1. Stop the orchestrator service -2. If backup exists at '/var/folders/my/jvl5hd5x6rgbhks6yszk213c0000gn/T/.tmp9wM3YA/backup.json', restore it to target storage -3. Verify target storage contains original data -4. Remove any partially migrated data -5. 
Restart orchestrator service - -Storage-specific cleanup commands: -- For filesystem: Remove or rename target directory -- For SurrealDB embedded: Delete database files in target directory -- For SurrealDB server: Connect and drop/recreate database - -IMPORTANT: Test data integrity before resuming operations! diff --git a/crates/orchestrator/rollback_instructions_21c8a4af-2562-4304-b5ec-90fb1b5fd0ab.txt b/crates/orchestrator/rollback_instructions_21c8a4af-2562-4304-b5ec-90fb1b5fd0ab.txt deleted file mode 100644 index f64d3f8..0000000 --- a/crates/orchestrator/rollback_instructions_21c8a4af-2562-4304-b5ec-90fb1b5fd0ab.txt +++ /dev/null @@ -1,20 +0,0 @@ -Manual Rollback Instructions for Migration migration-123 -============================================= - -Migration ID: migration-123 -Rollback ID: 21c8a4af-2562-4304-b5ec-90fb1b5fd0ab -Timestamp: 2026-01-06 13:10:16 UTC - -Steps to rollback: -1. Stop the orchestrator service -2. If backup exists at '/var/folders/my/jvl5hd5x6rgbhks6yszk213c0000gn/T/.tmpnoxnXR/backup.json', restore it to target storage -3. Verify target storage contains original data -4. Remove any partially migrated data -5. Restart orchestrator service - -Storage-specific cleanup commands: -- For filesystem: Remove or rename target directory -- For SurrealDB embedded: Delete database files in target directory -- For SurrealDB server: Connect and drop/recreate database - -IMPORTANT: Test data integrity before resuming operations! diff --git a/crates/orchestrator/rollback_instructions_317e31fa-b549-49c9-a212-1f13445d913f.txt b/crates/orchestrator/rollback_instructions_317e31fa-b549-49c9-a212-1f13445d913f.txt deleted file mode 100644 index 753a2f7..0000000 --- a/crates/orchestrator/rollback_instructions_317e31fa-b549-49c9-a212-1f13445d913f.txt +++ /dev/null @@ -1,20 +0,0 @@ -Manual Rollback Instructions for Migration migration-123 -============================================= - -Migration ID: migration-123 -Rollback ID: 317e31fa-b549-49c9-a212-1f13445d913f -Timestamp: 2026-01-06 12:49:14 UTC - -Steps to rollback: -1. Stop the orchestrator service -2. If backup exists at '/var/folders/my/jvl5hd5x6rgbhks6yszk213c0000gn/T/.tmpe5B6TH/backup.json', restore it to target storage -3. Verify target storage contains original data -4. Remove any partially migrated data -5. Restart orchestrator service - -Storage-specific cleanup commands: -- For filesystem: Remove or rename target directory -- For SurrealDB embedded: Delete database files in target directory -- For SurrealDB server: Connect and drop/recreate database - -IMPORTANT: Test data integrity before resuming operations! diff --git a/crates/orchestrator/rollback_instructions_5da5d888-527e-4aac-ab53-93e9a30014cc.txt b/crates/orchestrator/rollback_instructions_5da5d888-527e-4aac-ab53-93e9a30014cc.txt deleted file mode 100644 index 4b46dee..0000000 --- a/crates/orchestrator/rollback_instructions_5da5d888-527e-4aac-ab53-93e9a30014cc.txt +++ /dev/null @@ -1,20 +0,0 @@ -Manual Rollback Instructions for Migration migration-123 -============================================= - -Migration ID: migration-123 -Rollback ID: 5da5d888-527e-4aac-ab53-93e9a30014cc -Timestamp: 2026-01-06 12:53:52 UTC - -Steps to rollback: -1. Stop the orchestrator service -2. If backup exists at '/var/folders/my/jvl5hd5x6rgbhks6yszk213c0000gn/T/.tmpOI6ga7/backup.json', restore it to target storage -3. Verify target storage contains original data -4. Remove any partially migrated data -5. 
Restart orchestrator service - -Storage-specific cleanup commands: -- For filesystem: Remove or rename target directory -- For SurrealDB embedded: Delete database files in target directory -- For SurrealDB server: Connect and drop/recreate database - -IMPORTANT: Test data integrity before resuming operations! diff --git a/crates/orchestrator/rollback_instructions_7c16746f-24b0-4bcc-8a49-b5dc6bc1f0c7.txt b/crates/orchestrator/rollback_instructions_7c16746f-24b0-4bcc-8a49-b5dc6bc1f0c7.txt deleted file mode 100644 index c6b869f..0000000 --- a/crates/orchestrator/rollback_instructions_7c16746f-24b0-4bcc-8a49-b5dc6bc1f0c7.txt +++ /dev/null @@ -1,20 +0,0 @@ -Manual Rollback Instructions for Migration migration-123 -============================================= - -Migration ID: migration-123 -Rollback ID: 7c16746f-24b0-4bcc-8a49-b5dc6bc1f0c7 -Timestamp: 2026-01-06 12:59:15 UTC - -Steps to rollback: -1. Stop the orchestrator service -2. If backup exists at '/var/folders/my/jvl5hd5x6rgbhks6yszk213c0000gn/T/.tmp8JuU01/backup.json', restore it to target storage -3. Verify target storage contains original data -4. Remove any partially migrated data -5. Restart orchestrator service - -Storage-specific cleanup commands: -- For filesystem: Remove or rename target directory -- For SurrealDB embedded: Delete database files in target directory -- For SurrealDB server: Connect and drop/recreate database - -IMPORTANT: Test data integrity before resuming operations! diff --git a/crates/orchestrator/rollback_instructions_cb3ced5a-ab49-4754-ba90-c815ab0948ba.txt b/crates/orchestrator/rollback_instructions_cb3ced5a-ab49-4754-ba90-c815ab0948ba.txt deleted file mode 100644 index 22a0440..0000000 --- a/crates/orchestrator/rollback_instructions_cb3ced5a-ab49-4754-ba90-c815ab0948ba.txt +++ /dev/null @@ -1,20 +0,0 @@ -Manual Rollback Instructions for Migration migration-123 -============================================= - -Migration ID: migration-123 -Rollback ID: cb3ced5a-ab49-4754-ba90-c815ab0948ba -Timestamp: 2026-01-06 12:53:37 UTC - -Steps to rollback: -1. Stop the orchestrator service -2. If backup exists at '/var/folders/my/jvl5hd5x6rgbhks6yszk213c0000gn/T/.tmpI1vYTj/backup.json', restore it to target storage -3. Verify target storage contains original data -4. Remove any partially migrated data -5. Restart orchestrator service - -Storage-specific cleanup commands: -- For filesystem: Remove or rename target directory -- For SurrealDB embedded: Delete database files in target directory -- For SurrealDB server: Connect and drop/recreate database - -IMPORTANT: Test data integrity before resuming operations! 
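// The container_manager.rs hunk below migrates from the bollard 0.17 types
// (container::Config, image::CreateImageOptions, typed option structs) to the
// newer models::* (request bodies) and query_parameters::* (call options)
// modules used by the workspace-pinned bollard. A minimal sketch of the
// new-style calls, assuming a bollard release that exposes these modules and a
// local Docker daemon; the image name, label value, and function name here are
// illustrative only and not part of the patch.
use std::collections::HashMap;

use bollard::{
    query_parameters::{CreateImageOptions, ListContainersOptions},
    Docker,
};
use futures_util::StreamExt;

async fn bollard_new_api_sketch() -> anyhow::Result<()> {
    let docker = Docker::connect_with_local_defaults()?;

    // Image pull: option fields are now owned Option<String> values rather than &str.
    let pull = CreateImageOptions {
        from_image: Some("debian:trixie-slim".to_string()),
        ..Default::default()
    };
    let mut stream = docker.create_image(Some(pull), None, None);
    while let Some(progress) = stream.next().await {
        progress?; // surface pull errors as they stream in
    }

    // Listing: the filters map is wrapped in Option, mirroring the patch's
    // list_containers_by_label changes.
    let mut filters = HashMap::new();
    filters.insert(
        "label".to_string(),
        vec!["managed_by=provisioning_orchestrator".to_string()],
    );
    let list = ListContainersOptions {
        all: true,
        filters: Some(filters),
        ..Default::default()
    };
    let _containers = docker.list_containers(Some(list)).await?;

    Ok(())
}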
diff --git a/crates/orchestrator/src/container_manager.rs b/crates/orchestrator/src/container_manager.rs index b7b63c9..2e7706d 100644 --- a/crates/orchestrator/src/container_manager.rs +++ b/crates/orchestrator/src/container_manager.rs @@ -8,16 +8,14 @@ use std::default::Default; use anyhow::{anyhow, Context, Result}; use bollard::{ - container::{ - Config, CreateContainerOptions, ListContainersOptions, LogsOptions, NetworkingConfig, - RemoveContainerOptions, StartContainerOptions, StopContainerOptions, - }, exec::{CreateExecOptions, StartExecResults}, - image::CreateImageOptions, - network::CreateNetworkOptions, - service::{ - ContainerSummary, EndpointSettings, HostConfig, Ipam, IpamConfig, RestartPolicy, - RestartPolicyNameEnum, + models::{ + ContainerCreateBody, ContainerSummary, EndpointSettings, HostConfig, Ipam, IpamConfig, + NetworkingConfig, RestartPolicy, RestartPolicyNameEnum, + }, + query_parameters::{ + CreateContainerOptions, CreateImageOptions, ListContainersOptions, LogsOptions, + RemoveContainerOptions, StartContainerOptions, StopContainerOptions, }, Docker, }; @@ -58,6 +56,8 @@ impl ContainerManager { /// Create network for test environment pub async fn create_network(&self, config: &NetworkConfig) -> Result { + use bollard::models::NetworkCreateRequest; + let ipam_config = IpamConfig { subnet: Some(config.subnet.clone()), gateway: None, @@ -71,36 +71,32 @@ impl ContainerManager { options: None, }; - let create_options = CreateNetworkOptions { + let mut labels = HashMap::new(); + labels.insert( + "managed_by".to_string(), + "provisioning_orchestrator".to_string(), + ); + labels.insert("type".to_string(), "test_environment".to_string()); + + let create_request = NetworkCreateRequest { name: config.name.clone(), - check_duplicate: true, - driver: "bridge".to_string(), - internal: false, - attachable: true, - ingress: false, - ipam, - enable_ipv6: false, - options: HashMap::new(), - labels: { - let mut labels = HashMap::new(); - labels.insert( - "managed_by".to_string(), - "provisioning_orchestrator".to_string(), - ); - labels.insert("type".to_string(), "test_environment".to_string()); - labels - }, + driver: Some("bridge".to_string()), + internal: Some(false), + attachable: Some(true), + ipam: Some(ipam), + enable_ipv4: Some(true), + options: Some(HashMap::new()), + labels: Some(labels), + ..Default::default() }; let response = self .docker - .create_network(create_options) + .create_network(create_request) .await .context("Failed to create Docker network")?; - let network_id = response - .id - .ok_or_else(|| anyhow!("Network ID not returned"))?; + let network_id = response.id; info!("Created network {} ({})", config.name, network_id); Ok(network_id) @@ -119,12 +115,12 @@ impl ContainerManager { /// Pull image if not exists pub async fn ensure_image(&self, image: &str) -> Result<()> { - let options = Some(CreateImageOptions { - from_image: image, + let options = CreateImageOptions { + from_image: Some(image.to_string()), ..Default::default() - }); + }; - let mut stream = self.docker.create_image(options, None, None); + let mut stream = self.docker.create_image(Some(options), None, None); while let Some(result) = stream.next().await { match result { @@ -176,7 +172,6 @@ impl ContainerManager { endpoint_config.insert( net_id.to_string(), EndpointSettings { - network_id: Some(net_id.to_string()), ..Default::default() }, ); @@ -190,31 +185,30 @@ impl ContainerManager { .collect(); // Container configuration - let config = Config { + let mut labels = HashMap::new(); + 
labels.insert( + "managed_by".to_string(), + "provisioning_orchestrator".to_string(), + ); + labels.insert("type".to_string(), "test_container".to_string()); + labels.insert("test_name".to_string(), name.to_string()); + + let config = ContainerCreateBody { image: Some(image.to_string()), hostname: Some(name.to_string()), - env: Some(env), + env: if env.is_empty() { None } else { Some(env) }, cmd: command, host_config: Some(host_config), networking_config: Some(NetworkingConfig { - endpoints_config: endpoint_config, - }), - labels: Some({ - let mut labels = HashMap::new(); - labels.insert( - "managed_by".to_string(), - "provisioning_orchestrator".to_string(), - ); - labels.insert("type".to_string(), "test_container".to_string()); - labels.insert("test_name".to_string(), name.to_string()); - labels + endpoints_config: Some(endpoint_config), }), + labels: Some(labels), ..Default::default() }; let options = CreateContainerOptions { - name: name.to_string(), - platform: None, + name: Some(name.to_string()), + platform: String::new(), }; let response = self @@ -240,7 +234,7 @@ impl ContainerManager { /// Start container pub async fn start_container(&self, container_id: &str) -> Result<()> { self.docker - .start_container(container_id, None::>) + .start_container(container_id, None::) .await .context(format!("Failed to start container {}", container_id))?; @@ -251,7 +245,8 @@ impl ContainerManager { /// Stop container pub async fn stop_container(&self, container_id: &str, timeout: Option) -> Result<()> { let options = StopContainerOptions { - t: timeout.unwrap_or(10), + t: Some(timeout.unwrap_or(10) as i32), + signal: None, }; self.docker @@ -329,7 +324,7 @@ impl ContainerManager { container_id: &str, tail: Option<&str>, ) -> Result { - let options = LogsOptions:: { + let options = LogsOptions { stdout: true, stderr: true, tail: tail.unwrap_or("100").to_string(), @@ -386,15 +381,15 @@ impl ContainerManager { let mut filters = HashMap::new(); filters.insert("label".to_string(), vec![format!("{}={}", label, value)]); - let options = Some(ListContainersOptions { + let options = ListContainersOptions { all: true, - filters, + filters: Some(filters), ..Default::default() - }); + }; let containers = self .docker - .list_containers(options) + .list_containers(Some(options)) .await .context("Failed to list containers")?; diff --git a/crates/orchestrator/src/lib.rs b/crates/orchestrator/src/lib.rs index c585525..f7d66ce 100644 --- a/crates/orchestrator/src/lib.rs +++ b/crates/orchestrator/src/lib.rs @@ -85,14 +85,36 @@ pub fn validate_storage_type(s: &str) -> Result { // CLI arguments structure #[derive(clap::Parser, Clone)] -#[command(author, version, about, long_about = None)] +#[command(author, version, about = "Multi-service task orchestration and batch workflow engine")] +#[command(long_about = "Orchestrator - Manages distributed task execution, batch workflows, and cluster provisioning with state management and rollback recovery")] +#[command(after_help = "CONFIGURATION HIERARCHY (highest to lowest priority):\n 1. CLI: -c/--config (explicit file)\n 2. CLI: --config-dir --mode (directory + mode)\n 3. CLI: --config-dir (searches for orchestrator.ncl|toml|json)\n 4. CLI: --mode (searches in provisioning/platform/config/)\n 5. ENV: ORCHESTRATOR_CONFIG (explicit file)\n 6. ENV: PROVISIONING_CONFIG_DIR (searches for orchestrator.ncl|toml|json)\n 7. ENV: ORCHESTRATOR_MODE (mode-based in default path)\n 8. 
Built-in defaults\n\nEXAMPLES:\n # Explicit config file\n orchestrator -c ~/my-config.toml\n\n # Config directory with mode\n orchestrator --config-dir ~/configs --mode enterprise\n\n # Config directory (auto-discover file)\n orchestrator --config-dir ~/.config/provisioning\n\n # Via environment variables\n export ORCHESTRATOR_CONFIG=~/.config/orchestrator.toml\n orchestrator\n\n # Mode-based configuration\n orchestrator --mode solo")] pub struct Args { + /// Configuration file path (highest priority) + /// + /// Accepts absolute or relative path. Supports .ncl, .toml, and .json formats. + #[arg(short = 'c', long, env = "ORCHESTRATOR_CONFIG")] + pub config: Option, + + /// Configuration directory (searches for orchestrator.ncl|toml|json) + /// + /// Searches for configuration files in order of preference: .ncl > .toml > .json + /// Can also search for mode-specific files: orchestrator.{mode}.{ncl|toml|json} + #[arg(long, env = "PROVISIONING_CONFIG_DIR")] + pub config_dir: Option, + + /// Deployment mode (solo, multiuser, cicd, enterprise) + /// + /// Determines which configuration profile to use. Searches in: + /// provisioning/platform/config/orchestrator.{mode}.{ncl|toml} + #[arg(short = 'm', long, env = "ORCHESTRATOR_MODE")] + pub mode: Option, + /// Port to listen on - #[arg(short, long, default_value = "9090")] + #[arg(short = 'p', long, default_value = "9090")] pub port: u16, /// Data directory for storage - #[arg(short, long, default_value = "./data")] + #[arg(short = 'd', long, default_value = "./data")] pub data_dir: String, /// Storage backend type diff --git a/crates/orchestrator/src/main.rs b/crates/orchestrator/src/main.rs index 94cca7a..78f992d 100644 --- a/crates/orchestrator/src/main.rs +++ b/crates/orchestrator/src/main.rs @@ -1007,6 +1007,7 @@ async fn main() -> Result<()> { let app = Router::new() .route("/health", get(health_check)) + .route("/api/v1/health", get(health_check)) .route("/tasks", get(list_tasks)) .route("/tasks/{id}", get(get_task_status)) .route("/workflows/servers/create", post(create_server_workflow)) diff --git a/crates/orchestrator/_data/queue.rkvs/tasks/237315de-8a7f-430a-8804-65d050f3bfb0.json b/crates/orchestrator/wrks/_data/queue.rkvs/tasks/237315de-8a7f-430a-8804-65d050f3bfb0.json similarity index 100% rename from crates/orchestrator/_data/queue.rkvs/tasks/237315de-8a7f-430a-8804-65d050f3bfb0.json rename to crates/orchestrator/wrks/_data/queue.rkvs/tasks/237315de-8a7f-430a-8804-65d050f3bfb0.json diff --git a/crates/orchestrator/_data/queue.rkvs/tasks/7ff31593-cb5f-4a52-88ff-3a3d9bfbf931.json b/crates/orchestrator/wrks/_data/queue.rkvs/tasks/7ff31593-cb5f-4a52-88ff-3a3d9bfbf931.json similarity index 100% rename from crates/orchestrator/_data/queue.rkvs/tasks/7ff31593-cb5f-4a52-88ff-3a3d9bfbf931.json rename to crates/orchestrator/wrks/_data/queue.rkvs/tasks/7ff31593-cb5f-4a52-88ff-3a3d9bfbf931.json diff --git a/crates/orchestrator/_data/queue.rkvs/tasks/b14f9a93-318b-4d56-aa73-a5c1e38a2a9b.json b/crates/orchestrator/wrks/_data/queue.rkvs/tasks/b14f9a93-318b-4d56-aa73-a5c1e38a2a9b.json similarity index 100% rename from crates/orchestrator/_data/queue.rkvs/tasks/b14f9a93-318b-4d56-aa73-a5c1e38a2a9b.json rename to crates/orchestrator/wrks/_data/queue.rkvs/tasks/b14f9a93-318b-4d56-aa73-a5c1e38a2a9b.json diff --git a/crates/orchestrator/_data/queue.rkvs/tasks/c2050e55-46d9-47bc-abcd-8b137a6ee459.json b/crates/orchestrator/wrks/_data/queue.rkvs/tasks/c2050e55-46d9-47bc-abcd-8b137a6ee459.json similarity index 100% rename from 
crates/orchestrator/_data/queue.rkvs/tasks/c2050e55-46d9-47bc-abcd-8b137a6ee459.json rename to crates/orchestrator/wrks/_data/queue.rkvs/tasks/c2050e55-46d9-47bc-abcd-8b137a6ee459.json diff --git a/crates/orchestrator/_data/status.json b/crates/orchestrator/wrks/_data/status.json similarity index 100% rename from crates/orchestrator/_data/status.json rename to crates/orchestrator/wrks/_data/status.json diff --git a/crates/orchestrator/_data/tasks/999c70f4-3fa4-4879-bbd2-e85f5d0027f3.json b/crates/orchestrator/wrks/_data/tasks/999c70f4-3fa4-4879-bbd2-e85f5d0027f3.json similarity index 100% rename from crates/orchestrator/_data/tasks/999c70f4-3fa4-4879-bbd2-e85f5d0027f3.json rename to crates/orchestrator/wrks/_data/tasks/999c70f4-3fa4-4879-bbd2-e85f5d0027f3.json diff --git a/crates/orchestrator/_data/tasks/task-001.json b/crates/orchestrator/wrks/_data/tasks/task-001.json similarity index 100% rename from crates/orchestrator/_data/tasks/task-001.json rename to crates/orchestrator/wrks/_data/tasks/task-001.json diff --git a/crates/orchestrator/_data/tasks/task-002.json b/crates/orchestrator/wrks/_data/tasks/task-002.json similarity index 100% rename from crates/orchestrator/_data/tasks/task-002.json rename to crates/orchestrator/wrks/_data/tasks/task-002.json diff --git a/crates/orchestrator/_data/tasks/task-003.json b/crates/orchestrator/wrks/_data/tasks/task-003.json similarity index 100% rename from crates/orchestrator/_data/tasks/task-003.json rename to crates/orchestrator/wrks/_data/tasks/task-003.json diff --git a/crates/platform-config/src/hierarchy.rs b/crates/platform-config/src/hierarchy.rs index f785fe6..37fbd15 100644 --- a/crates/platform-config/src/hierarchy.rs +++ b/crates/platform-config/src/hierarchy.rs @@ -8,7 +8,7 @@ const CONFIG_BASE_PATH: &str = "provisioning/platform/config"; /// 2. Variable de entorno {SERVICE}_MODE + búsqueda de archivo /// 3. 
Fallback a defaults pub fn resolve_config_path(service_name: &str) -> Option { - // Paso 1: Check {SERVICE}_CONFIG env var (explicit path) + // Priority 1: Check {SERVICE}_CONFIG env var (explicit path) let env_var = format!("{}_CONFIG", service_name.to_uppercase().replace('-', "_")); if let Ok(path) = env::var(&env_var) { let config_path = PathBuf::from(path); @@ -22,7 +22,18 @@ pub fn resolve_config_path(service_name: &str) -> Option { } } - // Paso 2: Check {SERVICE}_MODE env var + find config file + // Priority 2: Check PROVISIONING_CONFIG_DIR env var + if let Ok(dir) = env::var("PROVISIONING_CONFIG_DIR") { + if let Some(config) = super::resolver::find_config_in_dir(std::path::Path::new(&dir), service_name) { + tracing::debug!( + "Using config from PROVISIONING_CONFIG_DIR: {:?}", + config + ); + return Some(config); + } + } + + // Priority 3: Check {SERVICE}_MODE env var + find config file let mode_var = format!("{}_MODE", service_name.to_uppercase().replace('-', "_")); let mode = env::var(&mode_var).unwrap_or_else(|_| "solo".to_string()); @@ -36,7 +47,7 @@ pub fn resolve_config_path(service_name: &str) -> Option { return Some(path); } - // Paso 3: Fallback - no config file found + // Fallback - no config file found tracing::debug!( "No config file found for {}.{} - using defaults", service_name, diff --git a/crates/platform-config/src/lib.rs b/crates/platform-config/src/lib.rs index 671c83b..6b4c84b 100644 --- a/crates/platform-config/src/lib.rs +++ b/crates/platform-config/src/lib.rs @@ -53,6 +53,7 @@ pub mod format; pub mod hierarchy; pub mod loader; pub mod nickel; +pub mod resolver; // Re-export main types pub use error::{ConfigError, Result}; @@ -60,3 +61,4 @@ pub use format::ConfigLoader; pub use hierarchy::{config_base_path, find_config_file, resolve_config_path}; pub use loader::{ConfigLoaderExt, ConfigValidator}; pub use nickel::is_nickel_available; +pub use resolver::{ConfigResolver, find_config_in_dir, find_config_in_dir_with_mode}; diff --git a/crates/platform-config/src/resolver.rs b/crates/platform-config/src/resolver.rs new file mode 100644 index 0000000..96ce9e9 --- /dev/null +++ b/crates/platform-config/src/resolver.rs @@ -0,0 +1,212 @@ +use std::path::{Path, PathBuf}; + +/// Resolves configuration file paths with CLI flags priority +#[derive(Debug, Clone, Default)] +pub struct ConfigResolver { + cli_config: Option, + cli_config_dir: Option, + cli_mode: Option, +} + +impl ConfigResolver { + /// Create a new ConfigResolver with no CLI overrides + pub fn new() -> Self { + Self { + cli_config: None, + cli_config_dir: None, + cli_mode: None, + } + } + + /// Set explicit config file path (highest priority) + pub fn with_cli_config(mut self, path: Option) -> Self { + self.cli_config = path; + self + } + + /// Set config directory (searches for {service}.{ncl|toml|json}) + pub fn with_cli_config_dir(mut self, dir: Option) -> Self { + self.cli_config_dir = dir; + self + } + + /// Set deployment mode for config resolution + pub fn with_cli_mode(mut self, mode: Option) -> Self { + self.cli_mode = mode; + self + } + + /// Resolve config file path with priority order: + /// 1. CLI flag: -config (explicit file) + /// 2. CLI flag: --config-dir --mode (directory + mode) + /// 3. CLI flag: --config-dir (directory, default naming) + /// 4. CLI flag: --mode (mode in default path) + /// 5. ENV var: {SERVICE}_CONFIG (explicit file) + /// 6. ENV var: PROVISIONING_CONFIG_DIR (directory, default naming) + /// 7. ENV var: {SERVICE}_MODE (mode in default path) + /// 8. 
None (fallback to defaults) + pub fn resolve(&self, service_name: &str) -> Option { + // Priority 1: CLI flag explicit path + if let Some(ref path) = self.cli_config { + tracing::debug!("Using CLI-provided config file: {:?}", path); + return Some(path.clone()); + } + + // Priority 2: CLI config-dir + mode + if let Some(ref dir) = self.cli_config_dir { + if let Some(ref mode) = self.cli_mode { + if let Some(config) = find_config_in_dir_with_mode(dir, service_name, mode) { + tracing::debug!( + "Using config file from CLI config-dir with mode: {:?}", + config + ); + return Some(config); + } + } + } + + // Priority 3: CLI config-dir only + if let Some(ref dir) = self.cli_config_dir { + if let Some(config) = find_config_in_dir(dir, service_name) { + tracing::debug!("Using config file from CLI config-dir: {:?}", config); + return Some(config); + } + } + + // Priority 4: CLI mode only (searches in default path) + if let Some(ref mode) = self.cli_mode { + if let Some(config) = super::hierarchy::find_config_file(service_name, mode) { + tracing::debug!("Using config file with CLI mode: {:?}", config); + return Some(config); + } + } + + // Priority 5-7: Fall back to environment variable resolution + super::hierarchy::resolve_config_path(service_name) + } +} + +/// Search for config file in directory with specific mode +/// Searches in order: {service}.{mode}.ncl, {service}.{mode}.toml, {service}.{mode}.json +pub fn find_config_in_dir_with_mode(dir: &Path, service_name: &str, mode: &str) -> Option { + for ext in &["ncl", "toml", "json"] { + let path = dir.join(format!("{}.{}.{}", service_name, mode, ext)); + if path.exists() { + tracing::trace!("Found config with mode: {:?}", path); + return Some(path); + } + } + None +} + +/// Search for config file in directory with default naming +/// Searches in order: {service}.ncl, {service}.toml, {service}.json +pub fn find_config_in_dir(dir: &Path, service_name: &str) -> Option { + for ext in &["ncl", "toml", "json"] { + let path = dir.join(format!("{}.{}", service_name, ext)); + if path.exists() { + tracing::trace!("Found config in dir: {:?}", path); + return Some(path); + } + } + None +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + use tempfile::TempDir; + + #[test] + fn test_cli_config_highest_priority() { + let resolver = ConfigResolver::new() + .with_cli_config(Some(PathBuf::from("/explicit/path.toml"))); + + let resolved = resolver.resolve("orchestrator"); + assert_eq!(resolved, Some(PathBuf::from("/explicit/path.toml"))); + } + + #[test] + fn test_config_dir_searches_extensions_in_order() { + let temp_dir = TempDir::new().unwrap(); + let ncl_path = temp_dir.path().join("orchestrator.ncl"); + let toml_path = temp_dir.path().join("orchestrator.toml"); + + // Create both files + fs::write(&ncl_path, "{}").unwrap(); + fs::write(&toml_path, "[orchestrator]").unwrap(); + + let resolver = ConfigResolver::new() + .with_cli_config_dir(Some(temp_dir.path().to_path_buf())); + + let resolved = resolver.resolve("orchestrator").unwrap(); + // Should prefer .ncl over .toml + assert_eq!(resolved, ncl_path); + } + + #[test] + fn test_config_dir_with_mode() { + let temp_dir = TempDir::new().unwrap(); + let enterprise_path = temp_dir.path().join("orchestrator.enterprise.toml"); + fs::write(&enterprise_path, "[orchestrator]").unwrap(); + + let resolver = ConfigResolver::new() + .with_cli_config_dir(Some(temp_dir.path().to_path_buf())) + .with_cli_mode(Some("enterprise".to_string())); + + let resolved = resolver.resolve("orchestrator").unwrap(); + 
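// Usage sketch (an aside, not part of the diff): how a service binary such as the
// orchestrator could feed its parsed CLI flags into ConfigResolver before the
// environment-based lookup takes over. The helper function and the external
// `platform_config::` path are illustrative assumptions; the builder methods and
// resolve() are exactly the ones defined above.
fn resolve_service_config(
    cli_config: Option<std::path::PathBuf>,
    cli_config_dir: Option<std::path::PathBuf>,
    cli_mode: Option<String>,
) -> Option<std::path::PathBuf> {
    use platform_config::ConfigResolver;

    ConfigResolver::new()
        .with_cli_config(cli_config)         // priority 1: explicit file
        .with_cli_config_dir(cli_config_dir) // priorities 2-3: directory, with or without mode
        .with_cli_mode(cli_mode)             // priority 4: mode in the default search path
        .resolve("orchestrator")             // priorities 5-7: env vars; None -> built-in defaults
}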
assert_eq!(resolved, enterprise_path); + } + + #[test] + fn test_find_config_in_dir_prefers_ncl() { + let temp_dir = TempDir::new().unwrap(); + let ncl_path = temp_dir.path().join("test-service.ncl"); + let toml_path = temp_dir.path().join("test-service.toml"); + let json_path = temp_dir.path().join("test-service.json"); + + fs::write(&ncl_path, "{}").unwrap(); + fs::write(&toml_path, "[test]").unwrap(); + fs::write(&json_path, "{}").unwrap(); + + let result = find_config_in_dir(temp_dir.path(), "test-service").unwrap(); + assert_eq!(result, ncl_path); + } + + #[test] + fn test_find_config_in_dir_with_mode_json_fallback() { + let temp_dir = TempDir::new().unwrap(); + let json_path = temp_dir.path().join("test-service.solo.json"); + fs::write(&json_path, "{}").unwrap(); + + let result = find_config_in_dir_with_mode(temp_dir.path(), "test-service", "solo").unwrap(); + assert_eq!(result, json_path); + } + + #[test] + fn test_no_config_found_returns_none() { + let temp_dir = TempDir::new().unwrap(); + + let resolver = ConfigResolver::new() + .with_cli_config_dir(Some(temp_dir.path().to_path_buf())); + + let resolved = resolver.resolve("nonexistent-service"); + assert!(resolved.is_none()); + } + + #[test] + fn test_cli_config_dir_overrides_env_var() { + let temp_dir = TempDir::new().unwrap(); + let config_path = temp_dir.path().join("orchestrator.toml"); + fs::write(&config_path, "[orchestrator]").unwrap(); + + // This test just verifies the resolver logic works correctly + // (actual env var override would need temp_env crate) + let resolver = ConfigResolver::new() + .with_cli_config_dir(Some(temp_dir.path().to_path_buf())); + + let resolved = resolver.resolve("orchestrator").unwrap(); + assert_eq!(resolved, config_path); + } +} diff --git a/crates/rag/Cargo.toml b/crates/rag/Cargo.toml index a6d4ef1..09c9089 100644 --- a/crates/rag/Cargo.toml +++ b/crates/rag/Cargo.toml @@ -2,9 +2,13 @@ authors.workspace = true description = "RAG system for provisioning platform with Rig framework and SurrealDB" edition.workspace = true -name = "provisioning-rag" +name = "rag" version.workspace = true +[[bin]] +name = "provisioning-rag" +path = "src/main.rs" + [dependencies] # ============================================================================ # WORKSPACE DEPENDENCIES - Core async runtime and traits @@ -41,7 +45,7 @@ reqwest = { workspace = true } # REST API Framework (Phase 8) # ============================================================================ axum = { workspace = true } -http = "1" +http = { workspace = true } hyper = { workspace = true, features = ["full"] } tower = { workspace = true } tower-http = { workspace = true, features = ["cors", "trace"] } @@ -61,7 +65,11 @@ walkdir = { workspace = true } config = { workspace = true } # Platform configuration management -platform-config = { path = "../platform-config" } +platform-config = { workspace = true } + +# Stratum ecosystem - embeddings and LLM abstraction +stratum-embeddings = { workspace = true, features = ["openai-provider", "ollama-provider", "fastembed-provider"] } +stratum-llm = { workspace = true, features = ["anthropic", "openai", "ollama"] } # Regex for document parsing regex = { workspace = true } @@ -97,13 +105,7 @@ name = "phase8_benchmarks" name = "provisioning_rag" path = "src/lib.rs" -# Binary target (optional CLI tool) -[[bin]] -name = "provisioning-rag" -path = "src/main.rs" -required-features = ["cli"] - # Features [features] cli = [] -default = [] +default = ["cli"] diff --git a/crates/rag/docker/Dockerfile 
b/crates/rag/docker/Dockerfile deleted file mode 100644 index 5f9eed7..0000000 --- a/crates/rag/docker/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -# Multi-stage build for Provisioning RAG Service -# Stage 1: Builder -FROM rust:1.80.1 as builder - -WORKDIR /app - -# Install dependencies -RUN apt-get update && apt-get install -y \ - pkg-config \ - libssl-dev \ - && rm -rf /var/lib/apt/lists/* - -# Copy workspace and source -COPY Cargo.toml Cargo.lock ./ -COPY provisioning/platform/rag ./rag -COPY provisioning/platform/rag/src ./src -COPY provisioning/platform/rag/benches ./benches - -# Build the orchestrator binary in release mode -RUN cd rag && cargo build --release \ - && cp target/release/provisioning-rag /app/provisioning-rag - -# Stage 2: Runtime -FROM debian:bookworm-slim - -WORKDIR /app - -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - curl \ - openssl \ - && rm -rf /var/lib/apt/lists/* - -# Copy binary from builder -COPY --from=builder /app/provisioning-rag /app/ - -# Create non-root user for security -RUN useradd -m -u 1000 provisioning && \ - chown -R provisioning:provisioning /app - -USER provisioning - -# Environment variables -ENV PROVISIONING_LOG_LEVEL=info -ENV PROVISIONING_API_HOST=0.0.0.0 -ENV PROVISIONING_API_PORT=9090 -ENV PROVISIONING_CACHE_SIZE=1000 -ENV PROVISIONING_CACHE_TTL_SECS=3600 - -# Expose API port -EXPOSE 9090 - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:9090/health || exit 1 - -# Start the service -CMD ["/app/provisioning-rag"] diff --git a/crates/rag/src/embeddings.rs b/crates/rag/src/embeddings.rs index 6a027a9..42c87be 100644 --- a/crates/rag/src/embeddings.rs +++ b/crates/rag/src/embeddings.rs @@ -1,197 +1,157 @@ -//! Embeddings module using Rig framework +//! Embeddings module using stratum-embeddings +use std::collections::HashMap; use std::sync::Arc; +use std::time::Duration; use serde::{Deserialize, Serialize}; -use tokio::sync::Mutex; +use stratum_embeddings::{ + EmbeddingOptions, EmbeddingService, FastEmbedProvider, MemoryCache, OllamaModel, + OllamaProvider, OpenAiModel, OpenAiProvider, +}; use crate::config::EmbeddingConfig; use crate::error::Result; -/// Document chunk to be embedded #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DocumentChunk { - /// Unique chunk ID pub id: String, - - /// Source document path pub source_path: String, - - /// Document type (markdown, kcl, nushell, rust) pub doc_type: String, - - /// Chunk content (text to embed) pub content: String, - - /// Document category for filtering pub category: Option, - - /// Metadata (headings, function names, etc.) 
- pub metadata: std::collections::HashMap, + pub metadata: HashMap, } -/// Embedded document with vector #[derive(Debug, Clone, Serialize, Deserialize)] pub struct EmbeddedDocument { - /// Chunk ID pub id: String, - - /// Source document path pub source_path: String, - - /// Document type pub doc_type: String, - - /// Original content pub content: String, - - /// Embedding vector pub embedding: Vec, - - /// Metadata - pub metadata: std::collections::HashMap, + pub metadata: HashMap, } -/// OpenAI HTTP client wrapper for embeddings -#[derive(Clone)] -struct OpenAiClient { - api_key: String, - model: String, - dimension: usize, +enum Service { + OpenAi(EmbeddingService), + Ollama(EmbeddingService), + FastEmbed(EmbeddingService), } -impl OpenAiClient { - /// Create new OpenAI client - fn new(api_key: String, model: String, dimension: usize) -> Self { - Self { - api_key, - model, - dimension, - } - } - - /// Generate embedding via OpenAI API - async fn embed(&self, text: &str) -> Result> { - let client = reqwest::Client::new(); - - #[derive(serde::Serialize)] - struct EmbeddingRequest { - input: String, - model: String, - dimensions: Option, - } - - #[derive(serde::Deserialize)] - struct EmbeddingResponse { - data: Vec, - } - - #[derive(serde::Deserialize)] - struct EmbeddingData { - embedding: Vec, - } - - let request = EmbeddingRequest { - input: text.to_string(), - model: self.model.clone(), - dimensions: if self.dimension == 1536 { - None - } else { - Some(self.dimension) - }, - }; - - let response = client - .post("https://api.openai.com/v1/embeddings") - .header("Authorization", format!("Bearer {}", self.api_key)) - .header("Content-Type", "application/json") - .json(&request) - .send() - .await - .map_err(|e| { - crate::error::RagError::embedding(format!("Failed to call OpenAI API: {}", e)) - })?; - - if !response.status().is_success() { - let error_text = response - .text() - .await - .unwrap_or_else(|_| "Unknown error".to_string()); - return Err(crate::error::RagError::embedding(format!( - "OpenAI API error: {}", - error_text - ))); - } - - let embedding_response: EmbeddingResponse = response.json().await.map_err(|e| { - crate::error::RagError::embedding(format!("Failed to parse OpenAI response: {}", e)) - })?; - - embedding_response - .data - .first() - .map(|d| d.embedding.clone()) - .ok_or_else(|| crate::error::RagError::embedding("No embedding data in response")) - } -} - -/// Embedding engine using OpenAI via HTTP pub struct EmbeddingEngine { config: EmbeddingConfig, - client: Arc>>, + service: Service, } impl EmbeddingEngine { - /// Create a new embedding engine pub fn new(config: EmbeddingConfig) -> Result { - // Validate configuration - match config.provider.as_str() { + let cache = MemoryCache::new(1000, Duration::from_secs(3600)); + + let service = match config.provider.as_str() { "openai" => { - if config.openai_api_key.is_none() && !config.fallback_local { + let api_key = config + .openai_api_key + .clone() + .or_else(|| std::env::var("OPENAI_API_KEY").ok()); + + let api_key_str = if let Some(key) = api_key { + key + } else if config.fallback_local { + "dummy".to_string() // Will fail, but fallback will take over + } else { return Err(crate::error::RagError::config( - "OpenAI API key required for OpenAI provider (or enable fallback_local). \ - Set OPENAI_API_KEY environment variable.", + "OpenAI API key required. 
Set OPENAI_API_KEY or enable fallback_local", )); + }; + + let model = match config.model.as_str() { + "text-embedding-3-small" => OpenAiModel::TextEmbedding3Small, + "text-embedding-3-large" => OpenAiModel::TextEmbedding3Large, + "text-embedding-ada-002" => OpenAiModel::TextEmbeddingAda002, + _ => { + tracing::warn!( + "Unknown model '{}', using text-embedding-3-small", + config.model + ); + OpenAiModel::TextEmbedding3Small + } + }; + + let provider = OpenAiProvider::new(api_key_str, model).map_err(|e| { + crate::error::RagError::embedding(format!("OpenAI provider failed: {}", e)) + })?; + + let mut svc = EmbeddingService::new(provider).with_cache(cache); + + if config.fallback_local { + tracing::info!("Fallback to local embeddings enabled"); + let fallback = Arc::new(FastEmbedProvider::small().map_err(|e| { + crate::error::RagError::embedding(format!("Fallback failed: {}", e)) + })?); + svc = svc.with_fallback(fallback); } + + Service::OpenAi(svc) } "ollama" => { - tracing::info!("Using Ollama for local embeddings (no API costs)"); + use stratum_embeddings::OllamaModel; + + let model = if config.model == "nomic-embed-text" { + OllamaModel::NomicEmbed + } else if config.model == "mxbai-embed-large" { + OllamaModel::MxbaiEmbed + } else if config.model == "all-minilm" { + OllamaModel::AllMiniLm + } else { + OllamaModel::Custom(config.model.clone(), config.dimension) + }; + + let provider = OllamaProvider::new(model).map_err(|e| { + crate::error::RagError::embedding(format!("Ollama provider failed: {}", e)) + })?; + + Service::Ollama(EmbeddingService::new(provider).with_cache(cache)) } "local" => { - tracing::info!("Using local embedding model (no API costs)"); + tracing::info!("Using FastEmbed local embeddings (no API costs)"); + let provider = FastEmbedProvider::small().map_err(|e| { + crate::error::RagError::embedding(format!("FastEmbed provider failed: {}", e)) + })?; + + Service::FastEmbed(EmbeddingService::new(provider).with_cache(cache)) } _ => { return Err(crate::error::RagError::config(format!( - "Unknown embedding provider: {}. Supported: openai, ollama, local", + "Unknown provider: {}. Supported: openai, ollama, local", config.provider ))); } - } + }; tracing::info!( - "Initialized embedding engine: {} (provider: {}, dimension: {})", + "Initialized stratum-embeddings: {} (provider: {}, dim: {})", config.model, config.provider, config.dimension ); - Ok(Self { - config, - client: Arc::new(Mutex::new(None)), - }) + Ok(Self { config, service }) } - /// Get embedding configuration pub fn config(&self) -> &EmbeddingConfig { &self.config } - /// Embed a single chunk using Rig framework - /// - /// This is a stub implementation that creates zero vectors. - /// In production, this would call the Rig embeddings API. 
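// Illustrative batch usage (an aside, not part of the diff): the same engine also
// drives embed_batch(), which zips the returned vectors back onto the chunks in
// order. This assumes it sits inside the embeddings module, where EmbeddingConfig,
// the Result alias, and the chunk types are already imported; the 384 dimension
// mirrors FastEmbed's small model and is an assumption, not a value pinned here.
async fn embed_docs(chunks: &[DocumentChunk]) -> Result<Vec<EmbeddedDocument>> {
    let config = EmbeddingConfig {
        provider: "local".to_string(),
        dimension: 384,
        ..Default::default()
    };
    let engine = EmbeddingEngine::new(config)?;
    engine.embed_batch(chunks).await
}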
pub async fn embed_chunk(&self, chunk: &DocumentChunk) -> Result { - let embedding = self.generate_embedding(&chunk.content).await?; + let options = EmbeddingOptions::default_with_cache(); + + let embedding = match &self.service { + Service::OpenAi(svc) => svc.embed(&chunk.content, &options).await, + Service::Ollama(svc) => svc.embed(&chunk.content, &options).await, + Service::FastEmbed(svc) => svc.embed(&chunk.content, &options).await, + } + .map_err(|e| crate::error::RagError::embedding(format!("Embedding failed: {}", e)))?; Ok(EmbeddedDocument { id: chunk.id.clone(), @@ -203,133 +163,33 @@ impl EmbeddingEngine { }) } - /// Embed multiple chunks in batch pub async fn embed_batch(&self, chunks: &[DocumentChunk]) -> Result> { - let mut results = Vec::new(); + let options = EmbeddingOptions::default_with_cache(); + let texts: Vec = chunks.iter().map(|c| c.content.clone()).collect(); - for (idx, chunk) in chunks.iter().enumerate() { - let embedded = self.embed_chunk(chunk).await?; - results.push(embedded); - - // Log progress - if (idx + 1) % 10 == 0 { - tracing::debug!("Embedded {}/{} chunks", idx + 1, chunks.len()); - } + let result = match &self.service { + Service::OpenAi(svc) => svc.embed_batch(texts, &options).await, + Service::Ollama(svc) => svc.embed_batch(texts, &options).await, + Service::FastEmbed(svc) => svc.embed_batch(texts, &options).await, } + .map_err(|e| crate::error::RagError::embedding(format!("Batch embed failed: {}", e)))?; - tracing::info!("Embedded {} chunks total", chunks.len()); + let results = chunks + .iter() + .zip(result.embeddings) + .map(|(chunk, embedding)| EmbeddedDocument { + id: chunk.id.clone(), + source_path: chunk.source_path.clone(), + doc_type: chunk.doc_type.clone(), + content: chunk.content.clone(), + embedding, + metadata: chunk.metadata.clone(), + }) + .collect(); + tracing::info!("Embedded {} chunks (stratum-embeddings)", chunks.len()); Ok(results) } - - /// Generate embedding for text using configured provider - async fn generate_embedding(&self, text: &str) -> Result> { - if text.is_empty() { - return Err(crate::error::RagError::embedding( - "Empty text cannot be embedded", - )); - } - - tracing::trace!("Generating embedding for text: {} chars", text.len()); - - match self.config.provider.as_str() { - "openai" => self.embed_openai(text).await, - "ollama" => self.embed_ollama(text).await, - "local" => self.embed_local(text).await, - _ => Err(crate::error::RagError::embedding(format!( - "Unknown embedding provider: {}", - self.config.provider - ))), - } - } - - /// Generate embedding via OpenAI API - async fn embed_openai(&self, text: &str) -> Result> { - let api_key = self - .config - .openai_api_key - .clone() - .or_else(|| std::env::var("OPENAI_API_KEY").ok()) - .ok_or_else(|| { - crate::error::RagError::embedding( - "OpenAI API key not found. 
Set OPENAI_API_KEY env var or config.", - ) - })?; - - let mut client_lock = self.client.lock().await; - if client_lock.is_none() { - *client_lock = Some(OpenAiClient::new( - api_key.clone(), - self.config.model.clone(), - self.config.dimension, - )); - } - - let client = client_lock.as_ref().unwrap(); - client.embed(text).await - } - - /// Generate embedding via Ollama (local) - async fn embed_ollama(&self, text: &str) -> Result> { - let client = reqwest::Client::new(); - - #[derive(serde::Serialize)] - struct OllamaRequest { - model: String, - prompt: String, - } - - #[derive(serde::Deserialize)] - struct OllamaResponse { - embedding: Vec, - } - - let request = OllamaRequest { - model: self.config.model.clone(), - prompt: text.to_string(), - }; - - let response = client - .post("http://localhost:11434/api/embeddings") - .json(&request) - .send() - .await - .map_err(|e| { - crate::error::RagError::embedding(format!( - "Failed to call Ollama API (ensure Ollama is running on localhost:11434): {}", - e - )) - })?; - - if !response.status().is_success() { - let error_text = response - .text() - .await - .unwrap_or_else(|_| "Unknown error".to_string()); - return Err(crate::error::RagError::embedding(format!( - "Ollama API error: {}", - error_text - ))); - } - - let embedding_response: OllamaResponse = response.json().await.map_err(|e| { - crate::error::RagError::embedding(format!("Failed to parse Ollama response: {}", e)) - })?; - - Ok(embedding_response.embedding) - } - - /// Generate embedding using local model (stub for future implementation) - async fn embed_local(&self, text: &str) -> Result> { - tracing::warn!( - "Local embeddings not fully implemented. Returning zero vector for now. For \ - production local embeddings, use Ollama or integrate huggingface transformers." 
- ); - - // Return zero vector of correct dimension - // Future: integrate sentence-transformers or ONNX models - Ok(vec![0.0; self.config.dimension]) - } } #[cfg(test)] @@ -337,57 +197,54 @@ mod tests { use super::*; #[test] - fn test_embedding_engine_creation_openai() { + fn test_engine_openai() { let config = EmbeddingConfig { provider: "openai".to_string(), openai_api_key: Some("test-key".to_string()), ..Default::default() }; - - let result = EmbeddingEngine::new(config); - assert!(result.is_ok()); + assert!(EmbeddingEngine::new(config).is_ok()); } #[test] - fn test_embedding_engine_creation_openai_no_key() { + fn test_engine_ollama() { let config = EmbeddingConfig { - provider: "openai".to_string(), - openai_api_key: None, - fallback_local: false, + provider: "ollama".to_string(), + model: "nomic-embed-text".to_string(), ..Default::default() }; - - let result = EmbeddingEngine::new(config); - assert!(result.is_err()); + assert!(EmbeddingEngine::new(config).is_ok()); } #[test] - fn test_local_embedding_engine() { + fn test_engine_local() { let config = EmbeddingConfig { provider: "local".to_string(), + dimension: 384, ..Default::default() }; - - let engine = EmbeddingEngine::new(config); - assert!(engine.is_ok()); + assert!(EmbeddingEngine::new(config).is_ok()); } #[tokio::test] async fn test_embed_chunk() { let config = EmbeddingConfig { provider: "local".to_string(), + dimension: 384, ..Default::default() }; - let engine = EmbeddingEngine::new(config).unwrap(); + let mut metadata = HashMap::new(); + metadata.insert("section".to_string(), "test".to_string()); + let chunk = DocumentChunk { id: "test-1".to_string(), - source_path: "test.md".to_string(), + source_path: "/test/doc.md".to_string(), doc_type: "markdown".to_string(), - content: "This is test content".to_string(), - category: None, - metadata: std::collections::HashMap::new(), + content: "Test document".to_string(), + category: Some("test".to_string()), + metadata, }; let result = engine.embed_chunk(&chunk).await; @@ -395,6 +252,6 @@ mod tests { let embedded = result.unwrap(); assert_eq!(embedded.id, "test-1"); - assert_eq!(embedded.embedding.len(), 1536); // Default dimension + assert_eq!(embedded.embedding.len(), 384); } } diff --git a/crates/rag/src/llm.rs b/crates/rag/src/llm.rs index 48f65d7..67b00d1 100644 --- a/crates/rag/src/llm.rs +++ b/crates/rag/src/llm.rs @@ -1,64 +1,52 @@ -//! LLM (Large Language Model) integration module -//! Provides Claude API integration for RAG-based answer generation +//! 
LLM integration using stratum-llm -use serde::{Deserialize, Serialize}; +use stratum_llm::{ + AnthropicProvider, ConfiguredProvider, CredentialSource, GenerationOptions, Message, + ProviderChain, Role, UnifiedClient, +}; use tracing::info; use crate::error::Result; -/// Claude API request message -#[derive(Debug, Clone, Serialize)] -pub struct ClaudeMessage { - pub role: String, - pub content: String, -} - -/// Claude API response -#[derive(Debug, Clone, Deserialize)] -pub struct ClaudeResponse { - pub content: Vec, - pub stop_reason: String, -} - -/// Claude response content -#[derive(Debug, Clone, Deserialize)] -pub struct ClaudeContent { - #[serde(rename = "type")] - pub content_type: String, - pub text: Option, -} - -/// LLM Client for Claude API pub struct LlmClient { - api_key: String, + client: UnifiedClient, pub model: String, - base_url: String, } impl LlmClient { - /// Create a new Claude LLM client pub fn new(model: String) -> Result { - // Get API key from environment - let api_key = std::env::var("ANTHROPIC_API_KEY").unwrap_or_else(|_| { - tracing::warn!("ANTHROPIC_API_KEY not set - Claude API calls will fail"); - String::new() - }); + let api_key = std::env::var("ANTHROPIC_API_KEY").ok(); - Ok(Self { - api_key, - model, - base_url: "https://api.anthropic.com/v1".to_string(), - }) - } - - /// Generate an answer using Claude - pub async fn generate_answer(&self, query: &str, context: &str) -> Result { - // If no API key, return placeholder - if self.api_key.is_empty() { - return Ok(self.generate_placeholder(query, context)); + if api_key.is_none() { + tracing::warn!("ANTHROPIC_API_KEY not set - LLM calls will fail"); } - // Build the system prompt + let provider = + AnthropicProvider::new(api_key.unwrap_or_default(), model.clone()); + + let configured = ConfiguredProvider { + provider: Box::new(provider), + credential_source: CredentialSource::EnvVar { + name: "ANTHROPIC_API_KEY".to_string(), + }, + priority: 0, + }; + + let chain = ProviderChain::with_providers(vec![configured]); + + let client = UnifiedClient::builder() + .with_chain(chain) + .build() + .map_err(|e| { + crate::error::RagError::LlmError(format!("Failed to build LLM client: {}", e)) + })?; + + info!("Initialized stratum-llm client: {}", model); + + Ok(Self { client, model }) + } + + pub async fn generate_answer(&self, query: &str, context: &str) -> Result { let system_prompt = format!( r#"You are a helpful assistant answering questions about a provisioning platform. You have been provided with relevant documentation context below. @@ -71,118 +59,32 @@ Be concise and accurate. 
context ); - // Build the user message - let user_message = query.to_string(); + let messages = vec![ + Message { + role: Role::System, + content: system_prompt, + }, + Message { + role: Role::User, + content: query.to_string(), + }, + ]; - // Call Claude API - self.call_claude_api(&system_prompt, &user_message).await - } + let options = GenerationOptions { + max_tokens: Some(1024), + ..Default::default() + }; - /// Call Claude API with messages - async fn call_claude_api(&self, system: &str, user_message: &str) -> Result { - let client = reqwest::Client::new(); - - // Build request payload - let payload = serde_json::json!({ - "model": self.model, - "max_tokens": 1024, - "system": system, - "messages": [ - { - "role": "user", - "content": user_message - } - ] - }); - - // Make the API request - let response = client - .post(format!("{}/messages", self.base_url)) - .header("anthropic-version", "2023-06-01") - .header("x-api-key", &self.api_key) - .json(&payload) - .send() + let response = self + .client + .generate(&messages, Some(&options)) .await .map_err(|e| { - crate::error::RagError::LlmError(format!("Claude API request failed: {}", e)) + crate::error::RagError::LlmError(format!("LLM generation failed: {}", e)) })?; - // Check status - if !response.status().is_success() { - let status = response.status(); - let error_text = response - .text() - .await - .unwrap_or_else(|_| "Unknown error".to_string()); - return Err(crate::error::RagError::LlmError(format!( - "Claude API error {}: {}", - status, error_text - ))); - } - - // Parse response - let claude_response: ClaudeResponse = response.json().await.map_err(|e| { - crate::error::RagError::LlmError(format!("Failed to parse Claude response: {}", e)) - })?; - - // Extract text from response - let answer = claude_response - .content - .first() - .and_then(|c| c.text.clone()) - .ok_or_else(|| { - crate::error::RagError::LlmError("No text in Claude response".to_string()) - })?; - - info!( - "Claude API call successful, generated {} characters", - answer.len() - ); - Ok(answer) - } - - /// Generate placeholder answer when API key is missing - fn generate_placeholder(&self, query: &str, context: &str) -> String { - format!( - "Based on the provided context about the provisioning platform:\n\n{}\n\n(Note: This \ - is a placeholder response. Set ANTHROPIC_API_KEY environment variable for full \ - Claude integration.)", - self.format_context_summary(query, context) - ) - } - - /// Format a summary of the context - fn format_context_summary(&self, query: &str, context: &str) -> String { - let context_lines = context.lines().count(); - let query_lower = query.to_lowercase(); - - if query_lower.contains("deploy") || query_lower.contains("create") { - format!( - "Your question about deployment is addressed in {} lines of documentation. The \ - system supports multi-cloud deployment across AWS, UpCloud, and local \ - environments.", - context_lines - ) - } else if query_lower.contains("architecture") || query_lower.contains("design") { - format!( - "The provisioning platform uses a modular architecture as described in {} lines \ - of documentation. Core components include Orchestrator, Control Center, and MCP \ - Server integration.", - context_lines - ) - } else if query_lower.contains("security") || query_lower.contains("auth") { - format!( - "Security features are documented in {} lines. 
The system implements JWT-based \ - authentication, Cedar-based authorization, and dynamic secrets management.", - context_lines - ) - } else { - format!( - "Your question is addressed in the provided {} lines of documentation. Please \ - review the context above for details.", - context_lines - ) - } + info!("Generated answer: {} characters", response.content.len()); + Ok(response.content) } } @@ -192,38 +94,14 @@ mod tests { #[test] fn test_llm_client_creation() { - let client = LlmClient::new("claude-opus-4-1".to_string()); + let client = LlmClient::new("claude-opus-4".to_string()); assert!(client.is_ok()); } #[test] - fn test_placeholder_generation() { - let client = LlmClient { - api_key: String::new(), - model: "claude-opus-4-1".to_string(), - base_url: "https://api.anthropic.com/v1".to_string(), - }; - - let query = "How do I deploy the platform?"; - let context = "Deployment is done using provisioning commands"; - - let answer = client.generate_placeholder(query, context); - assert!(answer.contains("deployment")); - assert!(answer.contains("placeholder")); - } - - #[test] - fn test_context_summary_formatting() { - let client = LlmClient { - api_key: String::new(), - model: "claude-opus-4-1".to_string(), - base_url: "https://api.anthropic.com/v1".to_string(), - }; - - let deployment_query = "How do I deploy?"; - let context = "Line 1\nLine 2\nLine 3"; - let summary = client.format_context_summary(deployment_query, context); - assert!(summary.contains("deployment")); - assert!(summary.contains("3")); + fn test_llm_client_model() { + let client = LlmClient::new("claude-sonnet-4".to_string()); + assert!(client.is_ok()); + assert_eq!(client.unwrap().model, "claude-sonnet-4"); } } diff --git a/crates/rag/src/main.rs b/crates/rag/src/main.rs index 856eeb8..59cafd3 100644 --- a/crates/rag/src/main.rs +++ b/crates/rag/src/main.rs @@ -1,12 +1,34 @@ //! 
RAG system command-line tool +use clap::Parser; use provisioning_rag::config::RagConfig; +use std::path::PathBuf; + +#[derive(Parser, Debug)] +#[command(name = "rag")] +#[command(about = "Retrieval-Augmented Generation system")] +struct Args { + /// Configuration file path (highest priority) + #[arg(short = 'c', long, env = "RAG_CONFIG")] + config: Option, + + /// Configuration directory (searches for rag.ncl|toml|json) + #[arg(long, env = "PROVISIONING_CONFIG_DIR")] + config_dir: Option, + + /// Deployment mode (solo, multiuser, cicd, enterprise) + #[arg(short = 'm', long, env = "RAG_MODE")] + mode: Option, +} #[tokio::main] async fn main() -> anyhow::Result<()> { // Initialize logging tracing_subscriber::fmt::init(); + // Parse CLI arguments + let _args = Args::parse(); + // Load configuration let _config = RagConfig::default(); diff --git a/crates/service-clients/Cargo.toml b/crates/service-clients/Cargo.toml index e88c3d0..224434e 100644 --- a/crates/service-clients/Cargo.toml +++ b/crates/service-clients/Cargo.toml @@ -1,11 +1,11 @@ [package] -authors = { workspace = true } +authors.workspace = true description = "HTTP service client wrappers for provisioning platform services" -edition = { workspace = true } -license = { workspace = true } +edition.workspace = true +license.workspace = true name = "service-clients" -repository = { workspace = true } -version = { workspace = true } +repository.workspace = true +version.workspace = true [dependencies] async-trait = { workspace = true } @@ -15,9 +15,7 @@ serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } - -# Service types (optional - only if not using generic types) -machines = { path = "../../../../submodules/prov-ecosystem/crates/machines" } +machines = { workspace = true } [dev-dependencies] tempfile = { workspace = true } diff --git a/crates/vault-service/Cargo.toml b/crates/vault-service/Cargo.toml index 0142d96..8998005 100644 --- a/crates/vault-service/Cargo.toml +++ b/crates/vault-service/Cargo.toml @@ -1,9 +1,15 @@ [package] -authors = ["Provisioning Team"] +authors.workspace = true description = "Vault Service for Provisioning Platform with secrets and key management (Age dev, Cosmian KMS prod, RustyVault self-hosted)" -edition = "2021" +edition.workspace = true +license.workspace = true name = "vault-service" -version = "0.2.0" +repository.workspace = true +version.workspace = true + +[[bin]] +name = "provisioning-vault-service" +path = "src/main.rs" [dependencies] # Async runtime @@ -23,10 +29,10 @@ toml = { workspace = true } reqwest = { workspace = true } # Age encryption (development) -age = "0.11" +age = { workspace = true } # RustyVault (self-hosted Vault alternative) -rusty_vault = "0.2.1" +rusty_vault = { workspace = true } # Cryptography base64 = { workspace = true } @@ -46,18 +52,15 @@ chrono = { workspace = true, features = ["serde"] } # Configuration config = { workspace = true } -# SecretumVault (Enterprise secrets management) +# SecretumVault (Enterprise secrets management - optional) secretumvault = { workspace = true } [dev-dependencies] +http-body-util = { workspace = true } mockito = { workspace = true } tempfile = { workspace = true } tokio-test = { workspace = true } -[[bin]] -name = "vault-service" -path = "src/main.rs" - [lib] name = "vault_service" path = "src/lib.rs" diff --git a/secretumvault b/secretumvault new file mode 160000 index 0000000..91eefc8 --- /dev/null +++ b/secretumvault 
@@ -0,0 +1 @@
+Subproject commit 91eefc86fa03826997401facee620f3b6dfd65e1
diff --git a/stratumiops b/stratumiops
new file mode 160000
index 0000000..9864f88
--- /dev/null
+++ b/stratumiops
@@ -0,0 +1 @@
+Subproject commit 9864f88c14ac030f0fa914fd46c2cf4c1a412fc0
diff --git a/syntaxis b/syntaxis
new file mode 160000
index 0000000..48d7503
--- /dev/null
+++ b/syntaxis
@@ -0,0 +1 @@
+Subproject commit 48d7503b4817e38dd86f494e09022b77c0652159
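For reference, a minimal sketch of how the reworked LlmClient in crates/rag/src/llm.rs is meant to be called: generate_answer() wraps stratum-llm's UnifiedClient, so a caller only needs a model name plus ANTHROPIC_API_KEY in the environment. The claude-sonnet-4 model string and the crate-relative module paths are illustrative assumptions, not values fixed by this patch.

    // Hypothetical caller inside the rag crate (illustrative only).
    use crate::error::Result;
    use crate::llm::LlmClient;

    async fn answer(query: &str, context: &str) -> Result<String> {
        // The model name is an example; any model accepted by the Anthropic provider works.
        let client = LlmClient::new("claude-sonnet-4".to_string())?;
        client.generate_answer(query, context).await
    }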