feat(platform): control plane — NATS JetStream + SurrealDB + SOLID enforcement
New crates
- platform-nats: async_nats JetStream bridge; pull/push consumers, explicit ACK,
subject prefixing under provisioning.>, 6 stream definitions on startup
- platform-db: SurrealDB pool (embedded RocksDB solo, Surreal<Mem> tests,
WebSocket server multi-user); migrate() with DEFINE TABLE IF NOT EXISTS DDL
Service integrations
- orchestrator: NATS pub on task state transitions, execution_logs → SurrealDB,
webhook handler (HMAC-SHA256), AuditCollector (batch INSERT, 100-event/1s flush)
- control-center: solo_auth_middleware (intentional bypass, --mode solo only),
NATS session events, WebSocket bridge via JetStream subscription (no polling)
- vault-service: NATS lease flow; credentials over HTTPS only (lease_id in NATS);
SurrealDB storage backend with MVCC retry + exponential backoff
- secretumvault: complete SurrealDB backend replacing HashMap; 9 unit + 19 integration tests
- extension-registry: NATS lifecycle events, vault:// credential resolver with TTL cache,
cache invalidation via provisioning.workspace.*.deploy.done
Clippy workspace clean
cargo clippy --workspace -- -D warnings: 0 errors
Patterns fixed: derivable_impls (#[default] on enum variants), excessive_nesting
(let-else, boolean arithmetic in retain, extracted helpers), io_error_other,
redundant_closure, iter_kv_map, manual_range_contains, pathbuf_instead_of_path
This commit is contained in:
parent
fc1c699795
commit
93b0e5225c
@ -41,6 +41,40 @@ repos:
|
|||||||
# pass_filenames: false
|
# pass_filenames: false
|
||||||
# stages: [pre-push]
|
# stages: [pre-push]
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# SOLID Architecture Boundary Enforcement
|
||||||
|
# ============================================================================
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
|
- id: solid-boundary-check
|
||||||
|
name: SOLID Architecture Boundaries
|
||||||
|
entry: bash -c '
|
||||||
|
VIOLATIONS=$(git diff --cached --name-only --diff-filter=ACM |
|
||||||
|
grep -E "\.(nu|rs)$" |
|
||||||
|
grep -v "templates/" |
|
||||||
|
grep -v "extensions/providers/" |
|
||||||
|
grep -v "orchestrator/" |
|
||||||
|
xargs grep -lE "^\^hcloud|^\^aws |^\^doctl|hcloud server" 2>/dev/null |
|
||||||
|
grep -v "^$") ;
|
||||||
|
if [ -n "$VIOLATIONS" ]; then
|
||||||
|
echo "SOLID VIOLATION: Provider API calls outside orchestrator:";
|
||||||
|
echo "$VIOLATIONS";
|
||||||
|
exit 1;
|
||||||
|
fi ;
|
||||||
|
SSH_VIOLATIONS=$(git diff --cached --name-only --diff-filter=ACM |
|
||||||
|
grep -E "\.(rs)$" |
|
||||||
|
grep -E "control-center|vault-service" |
|
||||||
|
xargs grep -lE "ssh2?::|russh::" 2>/dev/null) ;
|
||||||
|
if [ -n "$SSH_VIOLATIONS" ]; then
|
||||||
|
echo "SOLID VIOLATION: SSH code outside orchestrator:";
|
||||||
|
echo "$SSH_VIOLATIONS";
|
||||||
|
exit 1;
|
||||||
|
fi
|
||||||
|
'
|
||||||
|
language: system
|
||||||
|
pass_filenames: false
|
||||||
|
stages: [pre-commit]
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# Nushell Hooks (ACTIVE)
|
# Nushell Hooks (ACTIVE)
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
|
|||||||
14
Cargo.toml
14
Cargo.toml
@ -2,6 +2,8 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
members = [
|
members = [
|
||||||
"crates/platform-config",
|
"crates/platform-config",
|
||||||
|
"crates/platform-nats",
|
||||||
|
"crates/platform-db",
|
||||||
"crates/service-clients",
|
"crates/service-clients",
|
||||||
"crates/ai-service",
|
"crates/ai-service",
|
||||||
"crates/extension-registry",
|
"crates/extension-registry",
|
||||||
@ -97,12 +99,18 @@ resolver = "2"
|
|||||||
sqlx = { version = "0.8", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] }
|
sqlx = { version = "0.8", features = ["runtime-tokio-rustls", "sqlite", "chrono", "uuid"] }
|
||||||
surrealdb = { version = "2.6", features = ["kv-mem", "protocol-ws", "protocol-http"] }
|
surrealdb = { version = "2.6", features = ["kv-mem", "protocol-ws", "protocol-http"] }
|
||||||
|
|
||||||
|
# ============================================================================
|
||||||
|
# MESSAGING (NATS)
|
||||||
|
# ============================================================================
|
||||||
|
async-nats = "0.40"
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# SECURITY AND CRYPTOGRAPHY
|
# SECURITY AND CRYPTOGRAPHY
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
aes-gcm = "0.10"
|
aes-gcm = "0.10"
|
||||||
argon2 = "0.5"
|
argon2 = "0.5"
|
||||||
base64 = "0.22"
|
base64 = "0.22"
|
||||||
|
git2 = { version = "0.20", default-features = false, features = ["https", "ssh"] }
|
||||||
hmac = "0.12"
|
hmac = "0.12"
|
||||||
jsonwebtoken = { version = "10.3", features = ["rust_crypto"] }
|
jsonwebtoken = { version = "10.3", features = ["rust_crypto"] }
|
||||||
rand = { version = "0.9", features = ["std_rng", "os_rng"] }
|
rand = { version = "0.9", features = ["std_rng", "os_rng"] }
|
||||||
@ -261,6 +269,8 @@ resolver = "2"
|
|||||||
# INTERNAL WORKSPACE CRATES (Local path dependencies)
|
# INTERNAL WORKSPACE CRATES (Local path dependencies)
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
platform-config = { path = "./crates/platform-config" }
|
platform-config = { path = "./crates/platform-config" }
|
||||||
|
platform-nats = { path = "./crates/platform-nats" }
|
||||||
|
platform-db = { path = "./crates/platform-db" }
|
||||||
service-clients = { path = "./crates/service-clients" }
|
service-clients = { path = "./crates/service-clients" }
|
||||||
rag = { path = "./crates/rag" }
|
rag = { path = "./crates/rag" }
|
||||||
mcp-server = { path = "./crates/mcp-server" }
|
mcp-server = { path = "./crates/mcp-server" }
|
||||||
@ -282,9 +292,9 @@ resolver = "2"
|
|||||||
stratum-llm = { path = "./stratumiops/crates/stratum-llm", features = ["anthropic", "openai", "ollama"] }
|
stratum-llm = { path = "./stratumiops/crates/stratum-llm", features = ["anthropic", "openai", "ollama"] }
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# SECRETUMVAULT (Enterprise Secrets Management - optional)
|
# SECRETUMVAULT (Enterprise Secrets Management - canonical source)
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
secretumvault = { path = "./secretumvault" }
|
secretumvault = { path = "../../../Development/secretumvault", features = ["surrealdb-storage", "filesystem", "server", "cedar"] }
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# WASM/WEB-SPECIFIC DEPENDENCIES
|
# WASM/WEB-SPECIFIC DEPENDENCIES
|
||||||
|
|||||||
@ -28,6 +28,9 @@ toml = { workspace = true }
|
|||||||
# Platform configuration
|
# Platform configuration
|
||||||
platform-config = { workspace = true }
|
platform-config = { workspace = true }
|
||||||
|
|
||||||
|
# Centralized observability (logging, metrics, health, tracing)
|
||||||
|
observability = { workspace = true, features = ["logging", "metrics-prometheus", "health"] }
|
||||||
|
|
||||||
# Error handling
|
# Error handling
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
|
|||||||
@ -1,240 +1,93 @@
|
|||||||
use std::env;
|
use std::collections::HashMap;
|
||||||
use std::path::Path;
|
use std::path::Path;
|
||||||
|
|
||||||
|
use anyhow::Context;
|
||||||
use platform_config::ConfigLoader;
|
use platform_config::ConfigLoader;
|
||||||
/// AI Service configuration
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// Main AI Service configuration
|
/// AI Service configuration
|
||||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
pub struct AiServiceConfig {
|
pub struct AiServiceConfig {
|
||||||
/// Server configuration
|
pub ai_service: AiServiceSettings,
|
||||||
#[serde(default)]
|
}
|
||||||
pub server: ServerConfig,
|
|
||||||
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
/// RAG integration configuration
|
pub struct AiServiceSettings {
|
||||||
#[serde(default)]
|
pub server: ServerConfig,
|
||||||
pub rag: RagIntegrationConfig,
|
pub rag: RagConfig,
|
||||||
|
pub mcp: McpConfig,
|
||||||
/// MCP integration configuration
|
pub dag: DagConfig,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub mcp: McpIntegrationConfig,
|
pub monitoring: Option<MonitoringConfig>,
|
||||||
|
#[serde(default)]
|
||||||
/// DAG execution configuration
|
pub logging: Option<LoggingConfig>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub dag: DagConfig,
|
pub build: Option<DockerBuildConfig>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Server configuration
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct ServerConfig {
|
pub struct ServerConfig {
|
||||||
/// Server bind address
|
|
||||||
#[serde(default = "default_host")]
|
|
||||||
pub host: String,
|
pub host: String,
|
||||||
|
|
||||||
/// Server port
|
|
||||||
#[serde(default = "default_server_port")]
|
|
||||||
pub port: u16,
|
pub port: u16,
|
||||||
|
|
||||||
/// Number of worker threads
|
|
||||||
#[serde(default = "default_workers")]
|
|
||||||
pub workers: usize,
|
|
||||||
|
|
||||||
/// TCP keep-alive timeout (seconds)
|
|
||||||
#[serde(default = "default_keep_alive")]
|
|
||||||
pub keep_alive: u64,
|
|
||||||
|
|
||||||
/// Request timeout (milliseconds)
|
|
||||||
#[serde(default = "default_request_timeout")]
|
|
||||||
pub request_timeout: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// RAG integration configuration
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct RagIntegrationConfig {
|
|
||||||
/// Enable RAG integration
|
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub enabled: bool,
|
pub workers: Option<usize>,
|
||||||
|
|
||||||
/// RAG service URL
|
|
||||||
#[serde(default = "default_rag_url")]
|
|
||||||
pub rag_service_url: String,
|
|
||||||
|
|
||||||
/// Request timeout (milliseconds)
|
|
||||||
#[serde(default = "default_rag_timeout")]
|
|
||||||
pub timeout: u64,
|
|
||||||
|
|
||||||
/// Max retries for failed requests
|
|
||||||
#[serde(default = "default_max_retries")]
|
|
||||||
pub max_retries: u32,
|
|
||||||
|
|
||||||
/// Enable response caching
|
|
||||||
#[serde(default = "default_true")]
|
|
||||||
pub cache_enabled: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// MCP integration configuration
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct McpIntegrationConfig {
|
|
||||||
/// Enable MCP integration
|
|
||||||
#[serde(default)]
|
|
||||||
pub enabled: bool,
|
|
||||||
|
|
||||||
/// MCP service URL
|
|
||||||
#[serde(default = "default_mcp_url")]
|
|
||||||
pub mcp_service_url: String,
|
|
||||||
|
|
||||||
/// Request timeout (milliseconds)
|
|
||||||
#[serde(default = "default_mcp_timeout")]
|
|
||||||
pub timeout: u64,
|
|
||||||
|
|
||||||
/// Max retries for failed requests
|
|
||||||
#[serde(default = "default_max_retries")]
|
|
||||||
pub max_retries: u32,
|
|
||||||
|
|
||||||
/// MCP protocol version
|
|
||||||
#[serde(default = "default_protocol_version")]
|
|
||||||
pub protocol_version: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// DAG execution configuration
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
|
||||||
pub struct DagConfig {
|
|
||||||
/// Maximum concurrent tasks
|
|
||||||
#[serde(default = "default_max_concurrent_tasks")]
|
|
||||||
pub max_concurrent_tasks: usize,
|
|
||||||
|
|
||||||
/// Task timeout (milliseconds)
|
|
||||||
#[serde(default = "default_task_timeout")]
|
|
||||||
pub task_timeout: u64,
|
|
||||||
|
|
||||||
/// Number of retry attempts
|
|
||||||
#[serde(default = "default_dag_retry_attempts")]
|
|
||||||
pub retry_attempts: u32,
|
|
||||||
|
|
||||||
/// Delay between retries (milliseconds)
|
|
||||||
#[serde(default = "default_retry_delay")]
|
|
||||||
pub retry_delay: u64,
|
|
||||||
|
|
||||||
/// Task queue size
|
|
||||||
#[serde(default = "default_queue_size")]
|
|
||||||
pub queue_size: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default value functions
|
|
||||||
fn default_host() -> String {
|
|
||||||
"127.0.0.1".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_server_port() -> u16 {
|
|
||||||
8082
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_workers() -> usize {
|
|
||||||
4
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_keep_alive() -> u64 {
|
|
||||||
75
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_request_timeout() -> u64 {
|
|
||||||
30000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_rag_url() -> String {
|
|
||||||
"http://localhost:8083".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_rag_timeout() -> u64 {
|
|
||||||
30000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_mcp_url() -> String {
|
|
||||||
"http://localhost:8084".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_mcp_timeout() -> u64 {
|
|
||||||
30000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_max_retries() -> u32 {
|
|
||||||
3
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_true() -> bool {
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_protocol_version() -> String {
|
|
||||||
"1.0".to_string()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_max_concurrent_tasks() -> usize {
|
|
||||||
10
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_task_timeout() -> u64 {
|
|
||||||
600000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_dag_retry_attempts() -> u32 {
|
|
||||||
3
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_retry_delay() -> u64 {
|
|
||||||
1000
|
|
||||||
}
|
|
||||||
|
|
||||||
fn default_queue_size() -> usize {
|
|
||||||
1000
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for ServerConfig {
|
impl Default for ServerConfig {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
host: default_host(),
|
host: "127.0.0.1".to_string(),
|
||||||
port: default_server_port(),
|
port: 8082,
|
||||||
workers: default_workers(),
|
workers: Some(4),
|
||||||
keep_alive: default_keep_alive(),
|
|
||||||
request_timeout: default_request_timeout(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for RagIntegrationConfig {
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
fn default() -> Self {
|
pub struct RagConfig {
|
||||||
Self {
|
pub enabled: bool,
|
||||||
enabled: false,
|
pub rag_service_url: Option<String>,
|
||||||
rag_service_url: default_rag_url(),
|
pub timeout: Option<u64>,
|
||||||
timeout: default_rag_timeout(),
|
|
||||||
max_retries: default_max_retries(),
|
|
||||||
cache_enabled: default_true(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for McpIntegrationConfig {
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
fn default() -> Self {
|
pub struct McpConfig {
|
||||||
Self {
|
pub enabled: bool,
|
||||||
enabled: false,
|
pub mcp_service_url: Option<String>,
|
||||||
mcp_service_url: default_mcp_url(),
|
pub timeout: Option<u64>,
|
||||||
timeout: default_mcp_timeout(),
|
|
||||||
max_retries: default_max_retries(),
|
|
||||||
protocol_version: default_protocol_version(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for DagConfig {
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
fn default() -> Self {
|
pub struct DagConfig {
|
||||||
Self {
|
pub max_concurrent_tasks: Option<usize>,
|
||||||
max_concurrent_tasks: default_max_concurrent_tasks(),
|
pub task_timeout: Option<u64>,
|
||||||
task_timeout: default_task_timeout(),
|
pub retry_attempts: Option<u32>,
|
||||||
retry_attempts: default_dag_retry_attempts(),
|
}
|
||||||
retry_delay: default_retry_delay(),
|
|
||||||
queue_size: default_queue_size(),
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
}
|
pub struct MonitoringConfig {
|
||||||
|
pub enabled: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct LoggingConfig {
|
||||||
|
pub level: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct DockerBuildConfig {
|
||||||
|
pub base_image: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub build_args: HashMap<String, String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AiServiceConfig {
|
||||||
|
pub fn load() -> anyhow::Result<Self> {
|
||||||
|
let config_json = platform_config::load_service_config_from_ncl("ai-service")
|
||||||
|
.context("Failed to load ai-service configuration from Nickel")?;
|
||||||
|
|
||||||
|
serde_json::from_value(config_json)
|
||||||
|
.context("Failed to deserialize ai-service configuration")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -245,20 +98,23 @@ impl ConfigLoader for AiServiceConfig {
|
|||||||
|
|
||||||
fn load_from_hierarchy() -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>>
|
fn load_from_hierarchy() -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>>
|
||||||
{
|
{
|
||||||
let service = Self::service_name();
|
if let Some(path) = platform_config::resolve_config_path(Self::service_name()) {
|
||||||
|
|
||||||
if let Some(path) = platform_config::resolve_config_path(service) {
|
|
||||||
return Self::from_path(&path);
|
return Self::from_path(&path);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback to defaults
|
|
||||||
Ok(Self::default())
|
Ok(Self::default())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn apply_env_overrides(
|
fn apply_env_overrides(
|
||||||
&mut self,
|
&mut self,
|
||||||
) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
Self::apply_env_overrides_internal(self);
|
if let Ok(host) = std::env::var("AI_SERVICE_SERVER_HOST") {
|
||||||
|
self.ai_service.server.host = host;
|
||||||
|
}
|
||||||
|
if let Ok(port) = std::env::var("AI_SERVICE_SERVER_PORT") {
|
||||||
|
if let Ok(p) = port.parse() {
|
||||||
|
self.ai_service.server.port = p;
|
||||||
|
}
|
||||||
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -266,16 +122,11 @@ impl ConfigLoader for AiServiceConfig {
|
|||||||
path: P,
|
path: P,
|
||||||
) -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>> {
|
) -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>> {
|
||||||
let path = path.as_ref();
|
let path = path.as_ref();
|
||||||
let json_value = platform_config::format::load_config(path).map_err(|e| {
|
let json_value = platform_config::format::load_config(path)
|
||||||
let err: Box<dyn std::error::Error + Send + Sync> = Box::new(e);
|
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
serde_json::from_value(json_value).map_err(|e| {
|
serde_json::from_value(json_value).map_err(|e| {
|
||||||
let err_msg = format!(
|
let err_msg = format!("Failed to deserialize ai-service config: {}", e);
|
||||||
"Failed to deserialize AI service config from {:?}: {}",
|
|
||||||
path, e
|
|
||||||
);
|
|
||||||
Box::new(std::io::Error::new(
|
Box::new(std::io::Error::new(
|
||||||
std::io::ErrorKind::InvalidData,
|
std::io::ErrorKind::InvalidData,
|
||||||
err_msg,
|
err_msg,
|
||||||
@ -284,87 +135,6 @@ impl ConfigLoader for AiServiceConfig {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AiServiceConfig {
|
|
||||||
/// Load configuration from hierarchical sources with mode support
|
|
||||||
///
|
|
||||||
/// Priority order:
|
|
||||||
/// 1. AI_SERVICE_CONFIG environment variable (explicit path)
|
|
||||||
/// 2. AI_SERVICE_MODE environment variable (mode-specific file)
|
|
||||||
/// 3. Default configuration
|
|
||||||
///
|
|
||||||
/// After loading, applies environment variable overrides.
|
|
||||||
pub fn load_from_hierarchy() -> Result<Self, Box<dyn std::error::Error>> {
|
|
||||||
<Self as ConfigLoader>::load_from_hierarchy().map_err(|_e| {
|
|
||||||
Box::new(std::io::Error::other("Failed to load AI service config"))
|
|
||||||
as Box<dyn std::error::Error>
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Internal: Apply environment variable overrides (mutable reference)
|
|
||||||
///
|
|
||||||
/// Overrides take precedence over loaded config values.
|
|
||||||
/// Pattern: AI_SERVICE_{SECTION}_{KEY}
|
|
||||||
fn apply_env_overrides_internal(config: &mut Self) {
|
|
||||||
// Server overrides
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_SERVER_HOST") {
|
|
||||||
config.server.host = val;
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_SERVER_PORT") {
|
|
||||||
if let Ok(port) = val.parse() {
|
|
||||||
config.server.port = port;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_SERVER_WORKERS") {
|
|
||||||
if let Ok(workers) = val.parse() {
|
|
||||||
config.server.workers = workers;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RAG integration overrides
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_RAG_ENABLED") {
|
|
||||||
config.rag.enabled = val.parse().unwrap_or(config.rag.enabled);
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_RAG_URL") {
|
|
||||||
config.rag.rag_service_url = val;
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_RAG_TIMEOUT") {
|
|
||||||
if let Ok(timeout) = val.parse() {
|
|
||||||
config.rag.timeout = timeout;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MCP integration overrides
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_MCP_ENABLED") {
|
|
||||||
config.mcp.enabled = val.parse().unwrap_or(config.mcp.enabled);
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_MCP_URL") {
|
|
||||||
config.mcp.mcp_service_url = val;
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_MCP_TIMEOUT") {
|
|
||||||
if let Ok(timeout) = val.parse() {
|
|
||||||
config.mcp.timeout = timeout;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DAG overrides
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_DAG_MAX_CONCURRENT_TASKS") {
|
|
||||||
if let Ok(tasks) = val.parse() {
|
|
||||||
config.dag.max_concurrent_tasks = tasks;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_DAG_TASK_TIMEOUT") {
|
|
||||||
if let Ok(timeout) = val.parse() {
|
|
||||||
config.dag.task_timeout = timeout;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Ok(val) = env::var("AI_SERVICE_DAG_RETRY_ATTEMPTS") {
|
|
||||||
if let Ok(retries) = val.parse() {
|
|
||||||
config.dag.retry_attempts = retries;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
@ -372,26 +142,8 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_default_config() {
|
fn test_default_config() {
|
||||||
let config = AiServiceConfig::default();
|
let config = AiServiceConfig::default();
|
||||||
assert_eq!(config.server.port, 8082);
|
assert_eq!(config.ai_service.server.port, 8082);
|
||||||
assert_eq!(config.server.workers, 4);
|
assert!(!config.ai_service.rag.enabled);
|
||||||
assert!(!config.rag.enabled);
|
assert!(!config.ai_service.mcp.enabled);
|
||||||
assert!(!config.mcp.enabled);
|
|
||||||
assert_eq!(config.dag.max_concurrent_tasks, 10);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_server_config_defaults() {
|
|
||||||
let server = ServerConfig::default();
|
|
||||||
assert_eq!(server.host, "127.0.0.1");
|
|
||||||
assert_eq!(server.port, 8082);
|
|
||||||
assert_eq!(server.workers, 4);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_dag_config_defaults() {
|
|
||||||
let dag = DagConfig::default();
|
|
||||||
assert_eq!(dag.max_concurrent_tasks, 10);
|
|
||||||
assert_eq!(dag.task_timeout, 600000);
|
|
||||||
assert_eq!(dag.retry_attempts, 3);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -7,7 +7,6 @@ use std::sync::Arc;
|
|||||||
use ai_service::{handlers, AiService, DEFAULT_PORT};
|
use ai_service::{handlers, AiService, DEFAULT_PORT};
|
||||||
use axum::Router;
|
use axum::Router;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
|
|
||||||
|
|
||||||
#[derive(Parser, Debug)]
|
#[derive(Parser, Debug)]
|
||||||
#[command(name = "ai-service")]
|
#[command(name = "ai-service")]
|
||||||
@ -36,15 +35,28 @@ struct Args {
|
|||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() -> anyhow::Result<()> {
|
async fn main() -> anyhow::Result<()> {
|
||||||
// Initialize tracing
|
// Parse CLI arguments FIRST (so --help works before any other processing)
|
||||||
tracing_subscriber::registry()
|
|
||||||
.with(tracing_subscriber::EnvFilter::new(
|
|
||||||
std::env::var("RUST_LOG").unwrap_or_else(|_| "ai_service=info,axum=debug".to_string()),
|
|
||||||
))
|
|
||||||
.with(tracing_subscriber::fmt::layer())
|
|
||||||
.init();
|
|
||||||
|
|
||||||
let args = Args::parse();
|
let args = Args::parse();
|
||||||
|
|
||||||
|
// Initialize centralized observability (logging, metrics, health checks)
|
||||||
|
let _guard = observability::init_from_env("ai-service", env!("CARGO_PKG_VERSION"))?;
|
||||||
|
|
||||||
|
// Check if ai-service is enabled in deployment-mode.ncl
|
||||||
|
if let Ok(deployment) = platform_config::load_deployment_mode() {
|
||||||
|
if let Ok(enabled) = deployment.is_service_enabled("ai_service") {
|
||||||
|
if !enabled {
|
||||||
|
tracing::warn!("⚠ AI Service is DISABLED in deployment-mode.ncl");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
tracing::info!("✓ AI Service is ENABLED in deployment-mode.ncl");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to load ai-service.ncl
|
||||||
|
if let Ok(config) = platform_config::load_service_config_from_ncl("ai-service") {
|
||||||
|
tracing::info!("✓ Loaded ai-service configuration from NCL");
|
||||||
|
tracing::debug!("Config: {:?}", config);
|
||||||
|
}
|
||||||
let addr: SocketAddr = format!("{}:{}", args.host, args.port).parse()?;
|
let addr: SocketAddr = format!("{}:{}", args.host, args.port).parse()?;
|
||||||
|
|
||||||
// Create service
|
// Create service
|
||||||
|
|||||||
@ -100,7 +100,10 @@ async fn test_explicit_tool_call_rag_ask() {
|
|||||||
args: json!({"question": "What is Nushell?"}),
|
args: json!({"question": "What is Nushell?"}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
assert_eq!(response.result["tool"], "rag_ask_question");
|
assert_eq!(response.result["tool"], "rag_ask_question");
|
||||||
}
|
}
|
||||||
@ -116,7 +119,10 @@ async fn test_explicit_tool_call_guidance_status() {
|
|||||||
args: json!({}),
|
args: json!({}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("guidance_check_system_status failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("guidance_check_system_status failed");
|
||||||
assert_eq!(response.result["status"], "healthy");
|
assert_eq!(response.result["status"], "healthy");
|
||||||
assert_eq!(response.result["tool"], "guidance_check_system_status");
|
assert_eq!(response.result["tool"], "guidance_check_system_status");
|
||||||
}
|
}
|
||||||
@ -131,7 +137,10 @@ async fn test_explicit_tool_call_settings() {
|
|||||||
args: json!({}),
|
args: json!({}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
// Verify real SettingsTools data is returned (not empty placeholder)
|
// Verify real SettingsTools data is returned (not empty placeholder)
|
||||||
assert!(
|
assert!(
|
||||||
@ -152,7 +161,10 @@ async fn test_settings_tools_platform_recommendations() {
|
|||||||
args: json!({}),
|
args: json!({}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
// Should have real recommendations array from SettingsTools platform detection
|
// Should have real recommendations array from SettingsTools platform detection
|
||||||
assert!(response.result.get("recommendations").is_some());
|
assert!(response.result.get("recommendations").is_some());
|
||||||
@ -168,7 +180,10 @@ async fn test_settings_tools_mode_defaults() {
|
|||||||
args: json!({"mode": "solo"}),
|
args: json!({"mode": "solo"}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
// Verify real mode defaults (resource requirements)
|
// Verify real mode defaults (resource requirements)
|
||||||
assert!(response.result.get("min_cpu_cores").is_some());
|
assert!(response.result.get("min_cpu_cores").is_some());
|
||||||
@ -185,7 +200,10 @@ async fn test_explicit_tool_call_iac() {
|
|||||||
args: json!({"path": "/tmp/infra"}),
|
args: json!({"path": "/tmp/infra"}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
// Verify real technology detection (returns technologies array)
|
// Verify real technology detection (returns technologies array)
|
||||||
assert!(response.result.get("technologies").is_some());
|
assert!(response.result.get("technologies").is_some());
|
||||||
@ -202,7 +220,10 @@ async fn test_iac_detect_technologies_real() {
|
|||||||
args: json!({"path": "../../provisioning"}),
|
args: json!({"path": "../../provisioning"}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
|
|
||||||
// Should detect technologies as an array
|
// Should detect technologies as an array
|
||||||
@ -221,7 +242,10 @@ async fn test_iac_analyze_completeness() {
|
|||||||
args: json!({"path": "/tmp/test-infra"}),
|
args: json!({"path": "/tmp/test-infra"}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
// Verify real analysis data
|
// Verify real analysis data
|
||||||
assert!(response.result.get("complete").is_some());
|
assert!(response.result.get("complete").is_some());
|
||||||
@ -365,7 +389,10 @@ async fn test_tool_execution_with_required_args() {
|
|||||||
args: json!({"query": "kubernetes"}),
|
args: json!({"query": "kubernetes"}),
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = service.call_mcp_tool(req).await.expect("MCP tool call failed");
|
let response = service
|
||||||
|
.call_mcp_tool(req)
|
||||||
|
.await
|
||||||
|
.expect("MCP tool call failed");
|
||||||
assert_eq!(response.result["status"], "success");
|
assert_eq!(response.result["status"], "success");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -35,7 +35,7 @@ pub struct WorkflowTask {
|
|||||||
pub error: Option<String>,
|
pub error: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Server creation workflow request
|
/// Server creation workflow request - Complete auditable unit
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct CreateServerWorkflow {
|
pub struct CreateServerWorkflow {
|
||||||
pub infra: String,
|
pub infra: String,
|
||||||
@ -43,6 +43,23 @@ pub struct CreateServerWorkflow {
|
|||||||
pub servers: Vec<String>,
|
pub servers: Vec<String>,
|
||||||
pub check_mode: bool,
|
pub check_mode: bool,
|
||||||
pub wait: bool,
|
pub wait: bool,
|
||||||
|
// Template and execution context
|
||||||
|
#[serde(default)]
|
||||||
|
pub template_path: Option<String>, // Path to template used: /provisioning/extensions/providers/.../hetzner_servers.j2
|
||||||
|
#[serde(default)]
|
||||||
|
pub template_vars_compressed: Option<String>, // Gzip+Base64 encoded template variables
|
||||||
|
// Generated script (compressed for transmission)
|
||||||
|
#[serde(default)]
|
||||||
|
pub script_compressed: Option<String>, // Gzip+Base64 encoded script
|
||||||
|
#[serde(default)]
|
||||||
|
pub script_encoding: Option<String>, // Encoding type: "gzip+base64"
|
||||||
|
// Compression metrics
|
||||||
|
#[serde(default)]
|
||||||
|
pub compression_ratio: Option<f32>, // Overall compression ratio
|
||||||
|
#[serde(default)]
|
||||||
|
pub original_size: Option<u64>, // Original script size
|
||||||
|
#[serde(default)]
|
||||||
|
pub compressed_size: Option<u64>, // Compressed size
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Task service workflow request
|
/// Task service workflow request
|
||||||
|
|||||||
@ -0,0 +1 @@
|
|||||||
|
|
||||||
@ -32,10 +32,16 @@ uuid = { workspace = true }
|
|||||||
sqlx = { workspace = true }
|
sqlx = { workspace = true }
|
||||||
surrealdb = { workspace = true }
|
surrealdb = { workspace = true }
|
||||||
|
|
||||||
|
# Platform shared crates (optional NATS bridge)
|
||||||
|
platform-nats = { workspace = true, optional = true }
|
||||||
|
|
||||||
# Configuration and CLI
|
# Configuration and CLI
|
||||||
clap = { workspace = true }
|
clap = { workspace = true }
|
||||||
config = { workspace = true }
|
config = { workspace = true }
|
||||||
|
|
||||||
|
# Centralized observability (logging, metrics, health, tracing)
|
||||||
|
observability = { workspace = true, features = ["logging", "metrics-prometheus", "health"] }
|
||||||
|
|
||||||
# Error handling
|
# Error handling
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
@ -153,8 +159,11 @@ compliance = ["core"]
|
|||||||
# Modules: anomaly (detection)
|
# Modules: anomaly (detection)
|
||||||
experimental = ["core"]
|
experimental = ["core"]
|
||||||
|
|
||||||
|
# NATS event bus integration
|
||||||
|
nats = ["dep:platform-nats"]
|
||||||
|
|
||||||
# Default: All features enabled
|
# Default: All features enabled
|
||||||
default = ["core", "kms", "audit", "mfa", "compliance", "experimental"]
|
default = ["core", "kms", "audit", "mfa", "compliance", "experimental", "nats"]
|
||||||
|
|
||||||
# Full: All features enabled (development and testing)
|
# Full: All features enabled (development and testing)
|
||||||
all = ["core", "kms", "audit", "mfa", "compliance", "experimental"]
|
all = ["core", "kms", "audit", "mfa", "compliance", "experimental"]
|
||||||
|
|||||||
@ -56,19 +56,20 @@ pub trait AppStateBuilder: Send + Sync {
|
|||||||
/// Default AppState builder - uses standard initialization
|
/// Default AppState builder - uses standard initialization
|
||||||
pub struct DefaultAppStateBuilder {
|
pub struct DefaultAppStateBuilder {
|
||||||
config: Config,
|
config: Config,
|
||||||
|
solo_mode: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl DefaultAppStateBuilder {
|
impl DefaultAppStateBuilder {
|
||||||
/// Create a new default builder with configuration
|
/// Create a new default builder with configuration
|
||||||
pub fn new(config: Config) -> Self {
|
pub fn new(config: Config, solo_mode: bool) -> Self {
|
||||||
Self { config }
|
Self { config, solo_mode }
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl AppStateBuilder for DefaultAppStateBuilder {
|
impl AppStateBuilder for DefaultAppStateBuilder {
|
||||||
async fn build(&self) -> Result<AppState> {
|
async fn build(&self) -> Result<AppState> {
|
||||||
AppState::new(self.config.clone()).await
|
AppState::new(self.config.clone(), self.solo_mode).await
|
||||||
}
|
}
|
||||||
|
|
||||||
fn name(&self) -> &'static str {
|
fn name(&self) -> &'static str {
|
||||||
@ -76,18 +77,11 @@ impl AppStateBuilder for DefaultAppStateBuilder {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Factory function for creating AppState with standard configuration
|
/// Factory function for creating AppState with standard configuration.
|
||||||
///
|
///
|
||||||
/// This is the primary API for new code. Replaces direct `AppState::new()`
|
/// `solo_mode = true` disables JWT auth and injects a local-admin context.
|
||||||
/// calls.
|
pub async fn create_app_state(config: Config, solo_mode: bool) -> Result<AppState> {
|
||||||
///
|
let builder = DefaultAppStateBuilder::new(config, solo_mode);
|
||||||
/// # Usage
|
|
||||||
///
|
|
||||||
/// ```rust,ignore
|
|
||||||
/// let app_state = create_app_state(config).await?;
|
|
||||||
/// ```
|
|
||||||
pub async fn create_app_state(config: Config) -> Result<AppState> {
|
|
||||||
let builder = DefaultAppStateBuilder::new(config);
|
|
||||||
builder.build().await
|
builder.build().await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -473,8 +473,7 @@ mod tests {
|
|||||||
// Generate fresh RSA keys for testing
|
// Generate fresh RSA keys for testing
|
||||||
use crate::services::jwt::generate_rsa_key_pair;
|
use crate::services::jwt::generate_rsa_key_pair;
|
||||||
|
|
||||||
let keys = generate_rsa_key_pair()
|
let keys = generate_rsa_key_pair().expect("Failed to generate test RSA keys");
|
||||||
.expect("Failed to generate test RSA keys");
|
|
||||||
|
|
||||||
(
|
(
|
||||||
keys.private_key_pem.into_bytes(),
|
keys.private_key_pem.into_bytes(),
|
||||||
|
|||||||
@ -157,7 +157,8 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_password_strength_fair() {
|
fn test_password_strength_fair() {
|
||||||
let service = PasswordService::new();
|
let service = PasswordService::new();
|
||||||
// Fair: 8-9 chars with 0-2 complexity types (lowercase, uppercase, digit, special)
|
// Fair: 8-9 chars with 0-2 complexity types (lowercase, uppercase, digit,
|
||||||
|
// special)
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
service.evaluate_strength("password1"), // 9 chars, 2 types: lowercase + digit
|
service.evaluate_strength("password1"), // 9 chars, 2 types: lowercase + digit
|
||||||
PasswordStrength::Fair
|
PasswordStrength::Fair
|
||||||
|
|||||||
@ -215,10 +215,10 @@ impl Default for ControlCenterConfig {
|
|||||||
impl ControlCenterConfig {
|
impl ControlCenterConfig {
|
||||||
/// Load configuration with hierarchical fallback logic:
|
/// Load configuration with hierarchical fallback logic:
|
||||||
/// 1. Environment variable CONTROL_CENTER_CONFIG (explicit config path)
|
/// 1. Environment variable CONTROL_CENTER_CONFIG (explicit config path)
|
||||||
/// 2. Mode-specific config:
|
/// 2. CLI config resolution via platform_config::ConfigResolver
|
||||||
/// provisioning/platform/config/control-center.{mode}.toml
|
/// 3. Built-in defaults
|
||||||
/// 3. System defaults: config.defaults.toml
|
|
||||||
///
|
///
|
||||||
|
/// Supports both .ncl (Nickel) and .toml formats.
|
||||||
/// Then environment variables (CONTROL_CENTER_*) override specific fields.
|
/// Then environment variables (CONTROL_CENTER_*) override specific fields.
|
||||||
pub fn load() -> Result<Self> {
|
pub fn load() -> Result<Self> {
|
||||||
let mut config = Self::load_from_hierarchy()?;
|
let mut config = Self::load_from_hierarchy()?;
|
||||||
@ -233,22 +233,20 @@ impl ControlCenterConfig {
|
|||||||
return Self::from_file(&config_path);
|
return Self::from_file(&config_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Priority 2: Mode-specific config (provisioning/platform/config/)
|
// Priority 2: Use platform_config resolver (supports NCL and TOML)
|
||||||
if let Ok(mode) = std::env::var("CONTROL_CENTER_MODE") {
|
let resolver = platform_config::ConfigResolver::new()
|
||||||
let mode_config_path =
|
.with_cli_config_dir(
|
||||||
format!("provisioning/platform/config/control-center.{}.toml", mode);
|
std::env::var("PROVISIONING_CONFIG_DIR")
|
||||||
if Path::new(&mode_config_path).exists() {
|
.ok()
|
||||||
return Self::from_file(&mode_config_path);
|
.map(PathBuf::from),
|
||||||
}
|
)
|
||||||
|
.with_cli_mode(std::env::var("CONTROL_CENTER_MODE").ok());
|
||||||
|
|
||||||
|
if let Some(path) = resolver.resolve("control-center") {
|
||||||
|
return Self::from_file(&path);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Priority 3: System defaults
|
// Priority 3: Built-in defaults if file doesn't exist
|
||||||
let defaults_path = Path::new("config/control-center.defaults.toml");
|
|
||||||
if defaults_path.exists() {
|
|
||||||
return Self::from_file(defaults_path);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Priority 4: Built-in defaults if file doesn't exist
|
|
||||||
Ok(Self::default())
|
Ok(Self::default())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -314,20 +312,22 @@ impl ControlCenterConfig {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Load configuration from file with environment variable interpolation
|
/// Load configuration from file (.ncl or .toml) with environment variable
|
||||||
|
/// interpolation
|
||||||
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
|
pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||||
let content = std::fs::read_to_string(path.as_ref()).map_err(|e| {
|
let path_ref = path.as_ref();
|
||||||
|
|
||||||
|
// Use platform_config to load NCL or TOML
|
||||||
|
let json_value = platform_config::format::load_config(path_ref).map_err(|e| {
|
||||||
ControlCenterError::Infrastructure(infrastructure::InfrastructureError::Configuration(
|
ControlCenterError::Infrastructure(infrastructure::InfrastructureError::Configuration(
|
||||||
format!("Failed to read config file {:?}: {}", path.as_ref(), e),
|
format!("Failed to load config from {:?}: {}", path_ref, e),
|
||||||
))
|
))
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
// Interpolate environment variables
|
// Deserialize from JSON value
|
||||||
let interpolated = Self::interpolate_env_vars(&content)?;
|
let config: Self = serde_json::from_value(json_value).map_err(|e| {
|
||||||
|
|
||||||
let config: Self = toml::from_str(&interpolated).map_err(|e| {
|
|
||||||
ControlCenterError::Infrastructure(infrastructure::InfrastructureError::Configuration(
|
ControlCenterError::Infrastructure(infrastructure::InfrastructureError::Configuration(
|
||||||
format!("Failed to parse config: {}", e),
|
format!("Failed to deserialize config: {}", e),
|
||||||
))
|
))
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
|
|||||||
@ -3,6 +3,8 @@ pub mod deployment_events;
|
|||||||
pub mod iac_deployment;
|
pub mod iac_deployment;
|
||||||
pub mod iac_detection;
|
pub mod iac_detection;
|
||||||
pub mod iac_rules;
|
pub mod iac_rules;
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub mod nats_bridge;
|
||||||
pub mod permission;
|
pub mod permission;
|
||||||
pub mod role;
|
pub mod role;
|
||||||
pub mod secrets;
|
pub mod secrets;
|
||||||
|
|||||||
77
crates/control-center/src/handlers/nats_bridge.rs
Normal file
77
crates/control-center/src/handlers/nats_bridge.rs
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
//! NATS→WebSocket bridge for real-time task status updates.
|
||||||
|
//!
|
||||||
|
//! Subscribes to `provisioning.tasks.*.status` via JetStream durable consumer
|
||||||
|
//! and re-broadcasts each event to all connected WebSocket clients, eliminating
|
||||||
|
//! polling between control-center and orchestrator.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use futures::StreamExt;
|
||||||
|
use platform_nats::NatsBridge;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tracing::{error, info, warn};
|
||||||
|
use uuid::Uuid;
|
||||||
|
|
||||||
|
use crate::handlers::websocket::{WebSocketEvent, WebSocketManager};
|
||||||
|
|
||||||
|
const STREAM_NAME: &str = "TASKS";
|
||||||
|
const CONSUMER_NAME: &str = "cc-task-status-bridge";
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
struct TaskStatusPayload {
|
||||||
|
pub task_id: String,
|
||||||
|
pub status: String,
|
||||||
|
pub progress: Option<u32>,
|
||||||
|
pub message: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawn the NATS→WebSocket bridge as a background tokio task.
|
||||||
|
pub fn spawn_nats_bridge(nats: Arc<NatsBridge>, ws_manager: Arc<WebSocketManager>) {
|
||||||
|
tokio::spawn(async move {
|
||||||
|
run_bridge(nats, ws_manager).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_bridge(nats: Arc<NatsBridge>, ws_manager: Arc<WebSocketManager>) {
|
||||||
|
let mut messages = match nats.subscribe_pull(STREAM_NAME, CONSUMER_NAME).await {
|
||||||
|
Ok(m) => m,
|
||||||
|
Err(e) => {
|
||||||
|
error!("NATS bridge: subscribe failed — {e}");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("NATS→WebSocket bridge running on stream {STREAM_NAME}");
|
||||||
|
|
||||||
|
while let Some(msg_result) = messages.next().await {
|
||||||
|
match msg_result {
|
||||||
|
Ok(msg) => {
|
||||||
|
match serde_json::from_slice::<TaskStatusPayload>(&msg.payload) {
|
||||||
|
Ok(payload) => {
|
||||||
|
let event = WebSocketEvent {
|
||||||
|
event_type: "task_status_update".to_string(),
|
||||||
|
data: serde_json::json!({
|
||||||
|
"task_id": payload.task_id,
|
||||||
|
"status": payload.status,
|
||||||
|
"progress": payload.progress,
|
||||||
|
"message": payload.message,
|
||||||
|
}),
|
||||||
|
timestamp: chrono::Utc::now(),
|
||||||
|
target_user: None,
|
||||||
|
};
|
||||||
|
ws_manager.broadcast_event(event).await;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("NATS bridge: deserialize failed — {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Err(e) = msg.ack().await {
|
||||||
|
warn!("NATS bridge: ack failed — {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("NATS bridge: message error — {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -117,11 +117,17 @@ pub struct AppState {
|
|||||||
pub monitoring_service: Arc<MonitoringService>,
|
pub monitoring_service: Arc<MonitoringService>,
|
||||||
pub orchestrator_client: Arc<OrchestratorClient>,
|
pub orchestrator_client: Arc<OrchestratorClient>,
|
||||||
pub config: Config,
|
pub config: Config,
|
||||||
|
/// When true, auth middleware is replaced by a no-op that injects LocalUser
|
||||||
|
/// context.
|
||||||
|
pub solo_mode: bool,
|
||||||
|
/// NATS bridge for task status subscription (optional feature)
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub nats: Option<Arc<platform_nats::NatsBridge>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AppState {
|
impl AppState {
|
||||||
/// Create a new application state instance
|
/// Create a new application state instance
|
||||||
pub async fn new(config: Config) -> Result<Self> {
|
pub async fn new(config: Config, solo_mode: bool) -> Result<Self> {
|
||||||
// Initialize database service
|
// Initialize database service
|
||||||
let database_service = Arc::new(DatabaseService::new(config.database.clone()).await?);
|
let database_service = Arc::new(DatabaseService::new(config.database.clone()).await?);
|
||||||
|
|
||||||
@ -248,6 +254,22 @@ impl AppState {
|
|||||||
monitoring_service,
|
monitoring_service,
|
||||||
orchestrator_client,
|
orchestrator_client,
|
||||||
config,
|
config,
|
||||||
|
solo_mode,
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
nats: {
|
||||||
|
use platform_nats::{NatsBridge, NatsConfig};
|
||||||
|
match NatsBridge::connect(&NatsConfig::default()).await {
|
||||||
|
Ok(bridge) => {
|
||||||
|
let bridge = std::sync::Arc::new(bridge);
|
||||||
|
tracing::info!("Connected to NATS");
|
||||||
|
Some(bridge)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!("NATS connection failed (bridge disabled): {}", e);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -31,7 +31,7 @@ use control_center::handlers::{
|
|||||||
websocket::websocket_handler,
|
websocket::websocket_handler,
|
||||||
};
|
};
|
||||||
use control_center::middleware::{
|
use control_center::middleware::{
|
||||||
auth::auth_middleware,
|
auth::{auth_middleware, solo_auth_middleware},
|
||||||
cors::create_cors_from_env,
|
cors::create_cors_from_env,
|
||||||
rate_limit::{RateLimitConfig, RateLimitLayer},
|
rate_limit::{RateLimitConfig, RateLimitLayer},
|
||||||
};
|
};
|
||||||
@ -40,25 +40,35 @@ use hyper::http::StatusCode;
|
|||||||
use tokio::signal;
|
use tokio::signal;
|
||||||
use tower::ServiceBuilder;
|
use tower::ServiceBuilder;
|
||||||
use tower_http::{compression::CompressionLayer, timeout::TimeoutLayer, trace::TraceLayer};
|
use tower_http::{compression::CompressionLayer, timeout::TimeoutLayer, trace::TraceLayer};
|
||||||
use tracing::{error, info};
|
use tracing::{error, info, warn};
|
||||||
use tracing_subscriber::EnvFilter;
|
|
||||||
|
|
||||||
#[derive(Parser)]
|
#[derive(Parser)]
|
||||||
#[command(name = "control-center")]
|
#[command(name = "control-center")]
|
||||||
#[command(about = "Control Center - JWT Authentication & User Management Service")]
|
#[command(about = "Control Center - JWT Authentication & User Management Service")]
|
||||||
#[command(version = env!("CARGO_PKG_VERSION"))]
|
#[command(version = env!("CARGO_PKG_VERSION"))]
|
||||||
#[command(after_help = "CONFIGURATION HIERARCHY (highest to lowest priority):\n 1. CLI: -c/--config <path> (explicit file)\n 2. CLI: --config-dir <dir> --mode <mode> (directory + mode)\n 3. CLI: --config-dir <dir> (searches for control-center.ncl|toml|json)\n 4. CLI: --mode <mode> (searches in provisioning/platform/config/)\n 5. ENV: CONTROL_CENTER_CONFIG (explicit file)\n 6. ENV: PROVISIONING_CONFIG_DIR (searches for control-center.ncl|toml|json)\n 7. ENV: CONTROL_CENTER_MODE (mode-based in default path)\n 8. Built-in defaults")]
|
#[command(
|
||||||
|
after_help = "CONFIGURATION HIERARCHY (highest to lowest priority):\n 1. CLI: -c/--config \
|
||||||
|
<path> (explicit file)\n 2. CLI: --config-dir <dir> --mode <mode> (directory + \
|
||||||
|
mode)\n 3. CLI: --config-dir <dir> (searches for \
|
||||||
|
control-center.ncl|toml|json)\n 4. CLI: --mode <mode> (searches in \
|
||||||
|
provisioning/platform/config/)\n 5. ENV: CONTROL_CENTER_CONFIG (explicit \
|
||||||
|
file)\n 6. ENV: PROVISIONING_CONFIG_DIR (searches for \
|
||||||
|
control-center.ncl|toml|json)\n 7. ENV: CONTROL_CENTER_MODE (mode-based in \
|
||||||
|
default path)\n 8. Built-in defaults"
|
||||||
|
)]
|
||||||
struct Cli {
|
struct Cli {
|
||||||
/// Configuration file path (highest priority)
|
/// Configuration file path (highest priority)
|
||||||
///
|
///
|
||||||
/// Accepts absolute or relative path. Supports .ncl, .toml, and .json formats.
|
/// Accepts absolute or relative path. Supports .ncl, .toml, and .json
|
||||||
|
/// formats.
|
||||||
#[arg(short = 'c', long, env = "CONTROL_CENTER_CONFIG")]
|
#[arg(short = 'c', long, env = "CONTROL_CENTER_CONFIG")]
|
||||||
config: Option<PathBuf>,
|
config: Option<PathBuf>,
|
||||||
|
|
||||||
/// Configuration directory (searches for control-center.ncl|toml|json)
|
/// Configuration directory (searches for control-center.ncl|toml|json)
|
||||||
///
|
///
|
||||||
/// Searches for configuration files in order of preference: .ncl > .toml > .json
|
/// Searches for configuration files in order of preference: .ncl > .toml >
|
||||||
/// Can also search for mode-specific files: control-center.{mode}.{ncl|toml|json}
|
/// .json Can also search for mode-specific files:
|
||||||
|
/// control-center.{mode}.{ncl|toml|json}
|
||||||
#[arg(long, env = "PROVISIONING_CONFIG_DIR")]
|
#[arg(long, env = "PROVISIONING_CONFIG_DIR")]
|
||||||
config_dir: Option<PathBuf>,
|
config_dir: Option<PathBuf>,
|
||||||
|
|
||||||
@ -98,14 +108,26 @@ async fn main() -> Result<()> {
|
|||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Initialize logging
|
// Initialize centralized observability (logging, metrics, health checks)
|
||||||
let log_level = if cli.debug { "debug" } else { "info" };
|
let _guard = observability::init_from_env("control-center", env!("CARGO_PKG_VERSION"))
|
||||||
let filter = EnvFilter::new(format!("control_center={},tower_http=info", log_level));
|
.map_err(|e| control_center::ControlCenterError::from(anyhow::anyhow!(e)))?;
|
||||||
|
|
||||||
tracing_subscriber::fmt()
|
// Check if control-center is enabled in deployment-mode.ncl
|
||||||
.with_env_filter(filter)
|
if let Ok(deployment) = platform_config::load_deployment_mode() {
|
||||||
.with_target(false)
|
if let Ok(enabled) = deployment.is_service_enabled("control-center") {
|
||||||
.init();
|
if !enabled {
|
||||||
|
warn!("⚠ Control Center is DISABLED in deployment-mode.ncl");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
info!("✓ Control Center is ENABLED in deployment-mode.ncl");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to load control-center.ncl
|
||||||
|
if let Ok(config) = platform_config::load_service_config_from_ncl("control-center") {
|
||||||
|
info!("✓ Loaded control-center configuration from NCL");
|
||||||
|
tracing::debug!("Config: {:?}", config);
|
||||||
|
}
|
||||||
|
|
||||||
// Resolve config file path using new resolver
|
// Resolve config file path using new resolver
|
||||||
let resolver = platform_config::ConfigResolver::new()
|
let resolver = platform_config::ConfigResolver::new()
|
||||||
@ -135,7 +157,11 @@ async fn main() -> Result<()> {
|
|||||||
);
|
);
|
||||||
|
|
||||||
// Initialize application state
|
// Initialize application state
|
||||||
let app_state = Arc::new(AppState::new(config.clone()).await?);
|
let solo_mode = cli.mode.as_deref() == Some("solo");
|
||||||
|
if solo_mode {
|
||||||
|
warn!("⚠ Solo mode: JWT authentication is DISABLED — local operator access only");
|
||||||
|
}
|
||||||
|
let app_state = Arc::new(AppState::new(config.clone(), solo_mode).await?);
|
||||||
|
|
||||||
// Health check
|
// Health check
|
||||||
if let Err(e) = app_state.health_check().await {
|
if let Err(e) = app_state.health_check().await {
|
||||||
@ -256,12 +282,18 @@ async fn create_router(app_state: Arc<AppState>) -> Result<Router> {
|
|||||||
.route("/secrets/monitoring/alerts", get(get_alert_summary))
|
.route("/secrets/monitoring/alerts", get(get_alert_summary))
|
||||||
.route("/secrets/monitoring/expiring", get(get_expiring_secrets))
|
.route("/secrets/monitoring/expiring", get(get_expiring_secrets))
|
||||||
// WebSocket route
|
// WebSocket route
|
||||||
.route("/ws", get(websocket_handler))
|
.route("/ws", get(websocket_handler));
|
||||||
// Apply authentication middleware to all protected routes
|
|
||||||
.route_layer(middleware::from_fn_with_state(
|
// In solo mode skip JWT validation and inject a fixed local-admin context
|
||||||
|
// instead.
|
||||||
|
let protected_routes = if app_state.solo_mode {
|
||||||
|
protected_routes.route_layer(middleware::from_fn(solo_auth_middleware))
|
||||||
|
} else {
|
||||||
|
protected_routes.route_layer(middleware::from_fn_with_state(
|
||||||
app_state.jwt_service.clone(),
|
app_state.jwt_service.clone(),
|
||||||
auth_middleware,
|
auth_middleware,
|
||||||
));
|
))
|
||||||
|
};
|
||||||
|
|
||||||
// Combine all routes
|
// Combine all routes
|
||||||
let app = Router::new()
|
let app = Router::new()
|
||||||
@ -307,6 +339,15 @@ async fn start_background_tasks(app_state: Arc<AppState>) {
|
|||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// NATS → WebSocket bridge: forward task status events to all connected WS
|
||||||
|
// clients
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
if let Some(nats) = &app_state.nats {
|
||||||
|
use control_center::handlers::nats_bridge::spawn_nats_bridge;
|
||||||
|
spawn_nats_bridge(Arc::clone(nats), Arc::clone(&app_state.websocket_manager));
|
||||||
|
info!("NATS→WebSocket bridge started");
|
||||||
|
}
|
||||||
|
|
||||||
info!("Background tasks started");
|
info!("Background tasks started");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -75,6 +75,28 @@ pub async fn auth_middleware(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Solo mode authentication bypass middleware.
|
||||||
|
///
|
||||||
|
/// Injects a fixed local-admin `UserContext` for every request without
|
||||||
|
/// validating any JWT. Applied only when the service starts with `--mode solo`.
|
||||||
|
/// All handlers receive the same context — roles = ["admin"], mfa_verified =
|
||||||
|
/// true.
|
||||||
|
pub async fn solo_auth_middleware(
|
||||||
|
mut request: Request,
|
||||||
|
next: Next,
|
||||||
|
) -> std::result::Result<Response, StatusCode> {
|
||||||
|
let user_context = UserContext {
|
||||||
|
user_id: Uuid::nil(),
|
||||||
|
session_id: Uuid::nil(),
|
||||||
|
roles: vec!["admin".to_string()],
|
||||||
|
mfa_verified: true,
|
||||||
|
ip_address: Some("127.0.0.1".to_string()),
|
||||||
|
approval_id: None,
|
||||||
|
};
|
||||||
|
request.extensions_mut().insert(user_context);
|
||||||
|
Ok(next.run(request).await)
|
||||||
|
}
|
||||||
|
|
||||||
/// Optional authentication middleware that allows unauthenticated requests
|
/// Optional authentication middleware that allows unauthenticated requests
|
||||||
pub async fn optional_auth_middleware(
|
pub async fn optional_auth_middleware(
|
||||||
State(jwt_service): State<Arc<JwtService>>,
|
State(jwt_service): State<Arc<JwtService>>,
|
||||||
@ -373,7 +395,7 @@ mod tests {
|
|||||||
async fn test_auth_header_parsing() {
|
async fn test_auth_header_parsing() {
|
||||||
let jwt_service = Arc::new(
|
let jwt_service = Arc::new(
|
||||||
JwtService::new(create_test_jwt_config())
|
JwtService::new(create_test_jwt_config())
|
||||||
.expect("Failed to create JWT service for test")
|
.expect("Failed to create JWT service for test"),
|
||||||
);
|
);
|
||||||
|
|
||||||
let user_id = Uuid::new_v4();
|
let user_id = Uuid::new_v4();
|
||||||
|
|||||||
@ -187,9 +187,9 @@ impl RefreshTokenClaims {
|
|||||||
|
|
||||||
/// Generate RSA key pair for JWT signing (RS256)
|
/// Generate RSA key pair for JWT signing (RS256)
|
||||||
pub fn generate_rsa_key_pair() -> Result<RsaKeys> {
|
pub fn generate_rsa_key_pair() -> Result<RsaKeys> {
|
||||||
|
use rsa::pkcs8::{EncodePrivateKey, EncodePublicKey, LineEnding};
|
||||||
use rsa::rand_core::OsRng;
|
use rsa::rand_core::OsRng;
|
||||||
use rsa::{RsaPrivateKey, RsaPublicKey};
|
use rsa::{RsaPrivateKey, RsaPublicKey};
|
||||||
use rsa::pkcs8::{EncodePrivateKey, EncodePublicKey, LineEnding};
|
|
||||||
|
|
||||||
// Generate 2048-bit RSA key pair with OS randomness for cryptographic security
|
// Generate 2048-bit RSA key pair with OS randomness for cryptographic security
|
||||||
let private_key =
|
let private_key =
|
||||||
@ -253,8 +253,7 @@ mod tests {
|
|||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn test_jwt_token_generation_and_verification() {
|
async fn test_jwt_token_generation_and_verification() {
|
||||||
let config = create_test_jwt_config();
|
let config = create_test_jwt_config();
|
||||||
let jwt_service = JwtService::new(config)
|
let jwt_service = JwtService::new(config).expect("Failed to create JWT service for test");
|
||||||
.expect("Failed to create JWT service for test");
|
|
||||||
|
|
||||||
let user_id = Uuid::new_v4();
|
let user_id = Uuid::new_v4();
|
||||||
let session_id = Uuid::new_v4();
|
let session_id = Uuid::new_v4();
|
||||||
|
|||||||
@ -378,7 +378,8 @@ fn test_invalid_signature_detection() {
|
|||||||
.expect("Failed to generate token pair");
|
.expect("Failed to generate token pair");
|
||||||
|
|
||||||
// Service 2 with different public key tries to validate
|
// Service 2 with different public key tries to validate
|
||||||
// This should fail because the token was signed with key1 but we're validating with key2
|
// This should fail because the token was signed with key1 but we're validating
|
||||||
|
// with key2
|
||||||
let jwt_service2 = JwtService::new(
|
let jwt_service2 = JwtService::new(
|
||||||
&private_key1,
|
&private_key1,
|
||||||
&public_key2, // Different public key!
|
&public_key2, // Different public key!
|
||||||
|
|||||||
@ -28,6 +28,9 @@ toml = { workspace = true }
|
|||||||
# Platform configuration
|
# Platform configuration
|
||||||
platform-config = { workspace = true }
|
platform-config = { workspace = true }
|
||||||
|
|
||||||
|
# Centralized observability (logging, metrics, health, tracing)
|
||||||
|
observability = { workspace = true, features = ["logging", "metrics-prometheus", "health"] }
|
||||||
|
|
||||||
# Error handling
|
# Error handling
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
|
|||||||
@ -1,150 +1,96 @@
|
|||||||
//! Provisioning Daemon configuration wrapper
|
//! Provisioning Daemon configuration wrapper
|
||||||
//!
|
//!
|
||||||
//! This module wraps the external daemon library's configuration system
|
//! Loads configuration from provisioning-daemon.ncl using platform-config crate
|
||||||
//! with support for hierarchical loading and environment variable overrides.
|
//! This module handles loading the service operational config (port, polling,
|
||||||
|
//! workers) and converting it to the format needed by daemon-cli.
|
||||||
|
|
||||||
use std::env;
|
use std::net::SocketAddr;
|
||||||
use std::path::{Path, PathBuf};
|
|
||||||
|
|
||||||
use platform_config::ConfigLoader;
|
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
/// Wrapper for external daemon configuration
|
/// Service operational configuration loaded from provisioning-daemon.ncl
|
||||||
///
|
|
||||||
/// Provides hierarchical configuration loading and environment variable
|
|
||||||
/// overrides for the provisioning-daemon service.
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct ProvisioningDaemonConfigWrapper {
|
pub struct ProvisioningDaemonConfig {
|
||||||
/// Configuration path used for loading
|
pub server: ServerConfig,
|
||||||
#[serde(skip)]
|
#[serde(default)]
|
||||||
pub config_path: PathBuf,
|
pub daemon: DaemonConfig,
|
||||||
|
#[serde(default)]
|
||||||
|
pub logging: LoggingConfig,
|
||||||
|
#[serde(default)]
|
||||||
|
pub actions: ActionsConfig,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ProvisioningDaemonConfigWrapper {
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
/// Load configuration from hierarchical sources with mode support
|
pub struct ServerConfig {
|
||||||
///
|
#[serde(default = "default_host")]
|
||||||
/// Priority order:
|
pub host: String,
|
||||||
/// 1. DAEMON_CONFIG environment variable (explicit path)
|
pub port: u16,
|
||||||
/// 2. DAEMON_MODE environment variable (mode-specific file)
|
}
|
||||||
/// 3. Default configuration path
|
|
||||||
///
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
/// This method always succeeds - it resolves a path regardless of whether
|
pub struct DaemonConfig {
|
||||||
/// the file exists. The external daemon library handles file validation.
|
#[serde(default)]
|
||||||
pub fn load_from_hierarchy() -> Self {
|
pub enabled: bool,
|
||||||
let config_path = Self::resolve_config_path();
|
#[serde(default = "default_poll_interval")]
|
||||||
Self { config_path }
|
pub poll_interval: u64,
|
||||||
|
#[serde(default = "default_max_workers")]
|
||||||
|
pub max_workers: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct LoggingConfig {
|
||||||
|
#[serde(default = "default_log_level")]
|
||||||
|
pub level: String,
|
||||||
|
pub file: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
|
pub struct ActionsConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub auto_cleanup: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub auto_update: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_host() -> String {
|
||||||
|
"0.0.0.0".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_poll_interval() -> u64 {
|
||||||
|
60
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_max_workers() -> usize {
|
||||||
|
2
|
||||||
|
}
|
||||||
|
|
||||||
|
fn default_log_level() -> String {
|
||||||
|
"info".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ProvisioningDaemonConfig {
|
||||||
|
/// Load configuration from provisioning-daemon.ncl via platform-config
|
||||||
|
pub fn load() -> Result<Self, Box<dyn std::error::Error>> {
|
||||||
|
let config_json = platform_config::load_service_config_from_ncl("provisioning-daemon")?;
|
||||||
|
|
||||||
|
// The Nickel file returns { provisioning_daemon: { ... } }
|
||||||
|
// Extract the inner config object
|
||||||
|
let config_value = if let Some(inner) = config_json.get("provisioning_daemon") {
|
||||||
|
inner.clone()
|
||||||
|
} else {
|
||||||
|
config_json
|
||||||
|
};
|
||||||
|
|
||||||
|
let config: ProvisioningDaemonConfig = serde_json::from_value(config_value)?;
|
||||||
|
Ok(config)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resolve the configuration path from environment variables or defaults
|
/// Get the bind address for the HTTP server
|
||||||
fn resolve_config_path() -> PathBuf {
|
|
||||||
if let Ok(path) = env::var("DAEMON_CONFIG") {
|
|
||||||
// Explicit config path provided
|
|
||||||
return PathBuf::from(path);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(mode) = env::var("DAEMON_MODE") {
|
|
||||||
// Mode-specific config file
|
|
||||||
return PathBuf::from(format!(
|
|
||||||
"provisioning/platform/config/provisioning-daemon.{}.toml",
|
|
||||||
mode
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Default fallback - use a basic default config
|
|
||||||
// In production, this would point to a system config location
|
|
||||||
PathBuf::from("provisioning/platform/config/provisioning-daemon.solo.toml")
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the resolved configuration path
|
|
||||||
pub fn path(&self) -> &PathBuf {
|
|
||||||
&self.config_path
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Load configuration using the external daemon library's loader
|
|
||||||
///
|
|
||||||
/// This delegates to the external library's configuration loading
|
|
||||||
/// mechanism. The external library handles the actual parsing and
|
|
||||||
/// validation.
|
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
pub fn load_with_external(&self) -> Result<(), Box<dyn std::error::Error>> {
|
pub fn bind_addr(&self) -> SocketAddr {
|
||||||
// This would integrate with the external daemon_cli library
|
format!("{}:{}", self.server.host, self.server.port)
|
||||||
// Example: daemon_cli::DaemonConfig::load(&self.config_path)?;
|
.parse()
|
||||||
// For now, this is a placeholder that demonstrates the pattern
|
.unwrap_or_else(|_| ([127, 0, 0, 1], 9090).into())
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Apply environment variable overrides
|
|
||||||
///
|
|
||||||
/// Environment variables follow the pattern: DAEMON_{SECTION}_{KEY}
|
|
||||||
///
|
|
||||||
/// Supported overrides:
|
|
||||||
/// - DAEMON_POLL_INTERVAL (seconds)
|
|
||||||
/// - DAEMON_MAX_WORKERS (count)
|
|
||||||
/// - DAEMON_LOGGING_LEVEL (debug/info/warn/error)
|
|
||||||
/// - DAEMON_AUTO_CLEANUP (true/false)
|
|
||||||
#[allow(dead_code)]
|
|
||||||
pub fn apply_env_overrides(&mut self) {
|
|
||||||
// Environment variable overrides are handled by the external library
|
|
||||||
// or by pre-setting them before calling the external loader.
|
|
||||||
//
|
|
||||||
// The pattern is:
|
|
||||||
// 1. Load the config file (via external library)
|
|
||||||
// 2. Parse environment variable overrides
|
|
||||||
// 3. Apply overrides on top of loaded config
|
|
||||||
//
|
|
||||||
// Since the external library may not support this pattern directly,
|
|
||||||
// this wrapper allows the calling code to:
|
|
||||||
// - Check env vars before calling the loader
|
|
||||||
// - Inject config path via environment
|
|
||||||
// - Handle overrides at the application level
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the configuration path as a string
|
|
||||||
pub fn path_str(&self) -> String {
|
|
||||||
self.config_path.to_string_lossy().to_string()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for ProvisioningDaemonConfigWrapper {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
config_path: PathBuf::from(
|
|
||||||
"provisioning/platform/config/provisioning-daemon.solo.toml",
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ConfigLoader for ProvisioningDaemonConfigWrapper {
|
|
||||||
fn service_name() -> &'static str {
|
|
||||||
"provisioning-daemon"
|
|
||||||
}
|
|
||||||
|
|
||||||
fn load_from_hierarchy() -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>>
|
|
||||||
{
|
|
||||||
let service = Self::service_name();
|
|
||||||
|
|
||||||
if let Some(path) = platform_config::resolve_config_path(service) {
|
|
||||||
return Self::from_path(&path);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback to defaults
|
|
||||||
Ok(Self::default())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn apply_env_overrides(
|
|
||||||
&mut self,
|
|
||||||
) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
|
||||||
// No-op for wrapper - env overrides handled by external daemon library
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn from_path<P: AsRef<Path>>(
|
|
||||||
path: P,
|
|
||||||
) -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>> {
|
|
||||||
Ok(Self {
|
|
||||||
config_path: path.as_ref().to_path_buf(),
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -153,22 +99,63 @@ mod tests {
|
|||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_default_config_path() {
|
fn test_config_defaults() {
|
||||||
let config = ProvisioningDaemonConfigWrapper::default();
|
let config = ProvisioningDaemonConfig {
|
||||||
assert!(config.path_str().contains("provisioning-daemon.solo.toml"));
|
server: ServerConfig {
|
||||||
|
host: "0.0.0.0".to_string(),
|
||||||
|
port: 9095,
|
||||||
|
},
|
||||||
|
daemon: DaemonConfig {
|
||||||
|
enabled: true,
|
||||||
|
poll_interval: 60,
|
||||||
|
max_workers: 2,
|
||||||
|
},
|
||||||
|
logging: LoggingConfig {
|
||||||
|
level: "info".to_string(),
|
||||||
|
file: None,
|
||||||
|
},
|
||||||
|
actions: ActionsConfig {
|
||||||
|
auto_cleanup: false,
|
||||||
|
auto_update: false,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(config.server.port, 9095);
|
||||||
|
assert!(config.daemon.enabled);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_config_path_method() {
|
fn test_bind_addr() {
|
||||||
let config = ProvisioningDaemonConfigWrapper::default();
|
let config = ProvisioningDaemonConfig {
|
||||||
let path = config.path();
|
server: ServerConfig {
|
||||||
assert!(path.to_string_lossy().contains("provisioning-daemon"));
|
host: "127.0.0.1".to_string(),
|
||||||
}
|
port: 9095,
|
||||||
|
},
|
||||||
|
daemon: Default::default(),
|
||||||
|
logging: Default::default(),
|
||||||
|
actions: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
#[test]
|
let addr = config.bind_addr();
|
||||||
fn test_resolve_config_path_default() {
|
assert_eq!(addr.port(), 9095);
|
||||||
// Without env vars, should return default
|
}
|
||||||
let wrapper = ProvisioningDaemonConfigWrapper::load_from_hierarchy();
|
}
|
||||||
assert!(wrapper.path_str().contains("provisioning-daemon"));
|
|
||||||
|
impl Default for DaemonConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
enabled: true,
|
||||||
|
poll_interval: default_poll_interval(),
|
||||||
|
max_workers: default_max_workers(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for LoggingConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
level: default_log_level(),
|
||||||
|
file: None,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -11,7 +11,6 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use axum::Router;
|
use axum::Router;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use config::ProvisioningDaemonConfigWrapper;
|
|
||||||
use daemon_cli::{
|
use daemon_cli::{
|
||||||
api::api_routes,
|
api::api_routes,
|
||||||
core::{DaemonConfig, HierarchicalCache, Result},
|
core::{DaemonConfig, HierarchicalCache, Result},
|
||||||
@ -22,7 +21,6 @@ use daemon_cli::{
|
|||||||
AppState,
|
AppState,
|
||||||
};
|
};
|
||||||
use tokio::net::TcpListener;
|
use tokio::net::TcpListener;
|
||||||
use tracing_subscriber::EnvFilter;
|
|
||||||
|
|
||||||
/// Provisioning daemon - Nushell execution and configuration rendering service
|
/// Provisioning daemon - Nushell execution and configuration rendering service
|
||||||
#[derive(Parser, Debug)]
|
#[derive(Parser, Debug)]
|
||||||
@ -59,38 +57,87 @@ struct Args {
|
|||||||
async fn main() -> Result<()> {
|
async fn main() -> Result<()> {
|
||||||
let args = Args::parse();
|
let args = Args::parse();
|
||||||
|
|
||||||
// Initialize logging
|
// Initialize centralized observability (logging, metrics, health checks)
|
||||||
let log_level = if args.verbose { "debug" } else { "info" };
|
let _guard = observability::init_from_env("daemon", env!("CARGO_PKG_VERSION"))
|
||||||
let filter = EnvFilter::new(format!(
|
.map_err(|e| daemon_cli::core::DaemonError::http_server_error(e.to_string()))?;
|
||||||
"provisioning_daemon={},tower_http=info,daemon_cli=info",
|
|
||||||
log_level
|
|
||||||
));
|
|
||||||
|
|
||||||
tracing_subscriber::fmt()
|
// Check if daemon is enabled in deployment-mode.ncl
|
||||||
.with_env_filter(filter)
|
if let Ok(deployment) = platform_config::load_deployment_mode() {
|
||||||
.with_target(false)
|
if let Ok(enabled) = deployment.is_service_enabled("provisioning_daemon") {
|
||||||
.init();
|
if !enabled {
|
||||||
|
tracing::warn!("⚠ Provisioning Daemon is DISABLED in deployment-mode.ncl");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
tracing::info!("✓ Provisioning Daemon is ENABLED in deployment-mode.ncl");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
tracing::info!(
|
tracing::info!(
|
||||||
"Starting Provisioning Daemon v{}",
|
"Starting Provisioning Daemon v{}",
|
||||||
env!("CARGO_PKG_VERSION")
|
env!("CARGO_PKG_VERSION")
|
||||||
);
|
);
|
||||||
|
|
||||||
// Load configuration with hierarchical support
|
// Load provisioning-daemon configuration from Nickel via platform-config
|
||||||
let config_wrapper = if let Some(explicit_path) = args.config {
|
tracing::info!("Loading provisioning-daemon configuration from provisioning-daemon.ncl");
|
||||||
// Use explicit path from CLI argument
|
let provisioning_config = match config::ProvisioningDaemonConfig::load() {
|
||||||
ProvisioningDaemonConfigWrapper {
|
Ok(cfg) => {
|
||||||
config_path: explicit_path,
|
let home = std::env::var("HOME").unwrap_or_else(|_| "~".to_string());
|
||||||
|
#[cfg(target_os = "macos")]
|
||||||
|
let config_path = format!(
|
||||||
|
"{}/Library/Application \
|
||||||
|
Support/provisioning/platform/config/provisioning-daemon.ncl",
|
||||||
|
home
|
||||||
|
);
|
||||||
|
#[cfg(not(target_os = "macos"))]
|
||||||
|
let config_path = format!(
|
||||||
|
"{}/.config/provisioning/platform/config/provisioning-daemon.ncl",
|
||||||
|
home
|
||||||
|
);
|
||||||
|
|
||||||
|
tracing::info!("✓ Loaded configuration from: {}", config_path);
|
||||||
|
tracing::info!(" Server: {}:{}", cfg.server.host, cfg.server.port);
|
||||||
|
tracing::info!(
|
||||||
|
" Daemon: enabled={}, poll_interval={}s, max_workers={}",
|
||||||
|
cfg.daemon.enabled,
|
||||||
|
cfg.daemon.poll_interval,
|
||||||
|
cfg.daemon.max_workers
|
||||||
|
);
|
||||||
|
cfg
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!("✗ Failed to load provisioning-daemon.ncl: {}", e);
|
||||||
|
std::process::exit(1);
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
// Use hierarchical loading (env vars or defaults)
|
|
||||||
ProvisioningDaemonConfigWrapper::load_from_hierarchy()
|
|
||||||
};
|
};
|
||||||
|
|
||||||
tracing::debug!("Loading configuration from: {}", config_wrapper.path_str());
|
// Load daemon-cli configuration from TOML (if provided) or use defaults
|
||||||
|
// The daemon-cli library needs its own infrastructure config separate from the
|
||||||
|
// service config
|
||||||
|
let mut config = if let Some(explicit_path) = args.config {
|
||||||
|
// Use explicit path from CLI argument
|
||||||
|
DaemonConfig::load(Some(explicit_path))?
|
||||||
|
} else {
|
||||||
|
// Load from config_dir if provided, otherwise use default
|
||||||
|
if let Some(config_dir) = args.config_dir {
|
||||||
|
let config_path = config_dir.join("provisioning-daemon-cli.toml");
|
||||||
|
if config_path.exists() {
|
||||||
|
DaemonConfig::load(Some(config_path))?
|
||||||
|
} else {
|
||||||
|
// Use defaults if no daemon-cli config found
|
||||||
|
DaemonConfig::default()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
DaemonConfig::default()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// Load configuration using external daemon library
|
// Override the daemon-cli bind port with the port from provisioning-daemon.ncl
|
||||||
let config = DaemonConfig::load(Some(config_wrapper.path().clone()))?;
|
// This ensures consistency: the service config (provisioning-daemon.ncl)
|
||||||
|
// controls the port
|
||||||
|
config.server.bind = format!(
|
||||||
|
"{}:{}",
|
||||||
|
provisioning_config.server.host, provisioning_config.server.port
|
||||||
|
);
|
||||||
|
|
||||||
// Handle special commands
|
// Handle special commands
|
||||||
if args.validate_config {
|
if args.validate_config {
|
||||||
@ -106,7 +153,12 @@ async fn main() -> Result<()> {
|
|||||||
|
|
||||||
// Validate configuration
|
// Validate configuration
|
||||||
config.validate()?;
|
config.validate()?;
|
||||||
tracing::info!("Configuration validated: bind={}", config.server.bind);
|
tracing::info!("✓ Daemon-cli configuration validated");
|
||||||
|
tracing::info!(
|
||||||
|
" Bind address: {} (from provisioning-daemon.ncl)",
|
||||||
|
config.server.bind
|
||||||
|
);
|
||||||
|
tracing::info!(" Executor: {}", config.server.executor_strategy);
|
||||||
|
|
||||||
// Create cache system
|
// Create cache system
|
||||||
let cache = HierarchicalCache::new()?;
|
let cache = HierarchicalCache::new()?;
|
||||||
@ -141,14 +193,14 @@ async fn main() -> Result<()> {
|
|||||||
// Create router
|
// Create router
|
||||||
let app = Router::new().nest("/api/v1", api_routes(state.clone()));
|
let app = Router::new().nest("/api/v1", api_routes(state.clone()));
|
||||||
|
|
||||||
// Start server
|
// Start server using the configured bind address
|
||||||
let addr = config.bind_addr()?;
|
let addr = config.bind_addr()?;
|
||||||
let listener = TcpListener::bind(addr).await?;
|
let listener = TcpListener::bind(addr).await?;
|
||||||
|
|
||||||
tracing::info!("✓ Provisioning daemon listening on http://{}", addr);
|
tracing::info!("✓ Provisioning daemon listening on http://{}", addr);
|
||||||
tracing::info!("API documentation: http://{}/api/v1/health", addr);
|
tracing::info!(" API documentation: http://{}/api/v1/health", addr);
|
||||||
tracing::info!("Config rendering: http://{}/api/v1/render", addr);
|
tracing::info!(" Config rendering: http://{}/api/v1/render", addr);
|
||||||
tracing::info!("i18n translation: http://{}/api/v1/translate", addr);
|
tracing::info!(" i18n translation: http://{}/api/v1/translate", addr);
|
||||||
|
|
||||||
// Run server
|
// Run server
|
||||||
axum::serve(listener, app).await?;
|
axum::serve(listener, app).await?;
|
||||||
|
|||||||
@ -27,6 +27,9 @@ serde_json = { workspace = true }
|
|||||||
# Platform configuration
|
# Platform configuration
|
||||||
platform-config = { workspace = true }
|
platform-config = { workspace = true }
|
||||||
|
|
||||||
|
# Centralized observability (logging, metrics, health, tracing)
|
||||||
|
observability = { workspace = true, features = ["logging", "metrics-prometheus", "health"] }
|
||||||
|
|
||||||
# Error handling
|
# Error handling
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
@ -61,9 +64,16 @@ lru = { workspace = true }
|
|||||||
# Parking lot for synchronization
|
# Parking lot for synchronization
|
||||||
parking_lot = { workspace = true }
|
parking_lot = { workspace = true }
|
||||||
|
|
||||||
|
# Platform NATS bridge (optional)
|
||||||
|
platform-nats = { workspace = true, optional = true }
|
||||||
|
|
||||||
# TOML parsing
|
# TOML parsing
|
||||||
toml = { workspace = true }
|
toml = { workspace = true }
|
||||||
|
|
||||||
|
[features]
|
||||||
|
nats = ["dep:platform-nats"]
|
||||||
|
default = []
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
http-body-util = { workspace = true }
|
http-body-util = { workspace = true }
|
||||||
hyper = { workspace = true }
|
hyper = { workspace = true }
|
||||||
|
|||||||
@ -11,6 +11,8 @@ use tracing::{debug, error, info};
|
|||||||
use crate::cache::ExtensionCache;
|
use crate::cache::ExtensionCache;
|
||||||
use crate::client::{ClientFactory, DistributionClient, SourceClient};
|
use crate::client::{ClientFactory, DistributionClient, SourceClient};
|
||||||
use crate::error::{RegistryError, Result};
|
use crate::error::{RegistryError, Result};
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
use crate::events::{spawn_cache_invalidator, EventPublisher};
|
||||||
use crate::models::*;
|
use crate::models::*;
|
||||||
|
|
||||||
/// Application state
|
/// Application state
|
||||||
@ -20,13 +22,19 @@ pub struct AppState {
|
|||||||
pub distribution_clients: Vec<Arc<dyn DistributionClient>>,
|
pub distribution_clients: Vec<Arc<dyn DistributionClient>>,
|
||||||
pub cache: Arc<ExtensionCache>,
|
pub cache: Arc<ExtensionCache>,
|
||||||
pub start_time: Instant,
|
pub start_time: Instant,
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub events: Option<Arc<EventPublisher>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl AppState {
|
impl AppState {
|
||||||
pub fn new(config: crate::config::Config) -> Result<Self> {
|
/// Create application state.
|
||||||
let (source_clients, distribution_clients) = ClientFactory::create_from_config(&config)?;
|
///
|
||||||
|
/// `vault_url` enables vault:// token resolution (e.g. `Some("http://127.0.0.1:9094")`).
|
||||||
|
/// Pass `None` to use filesystem-only token resolution.
|
||||||
|
pub async fn new(config: crate::config::Config, vault_url: Option<&str>) -> Result<Self> {
|
||||||
|
let (source_clients, distribution_clients) =
|
||||||
|
ClientFactory::create_from_config(&config, vault_url).await?;
|
||||||
|
|
||||||
// Initialize cache
|
|
||||||
let cache = Arc::new(ExtensionCache::new(
|
let cache = Arc::new(ExtensionCache::new(
|
||||||
config.cache.capacity,
|
config.cache.capacity,
|
||||||
config.cache.ttl_seconds,
|
config.cache.ttl_seconds,
|
||||||
@ -39,8 +47,19 @@ impl AppState {
|
|||||||
distribution_clients,
|
distribution_clients,
|
||||||
cache,
|
cache,
|
||||||
start_time: Instant::now(),
|
start_time: Instant::now(),
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
events: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Attach a connected NatsBridge and spawn the cache invalidator
|
||||||
|
/// subscriber.
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub fn with_nats(mut self, bridge: Arc<platform_nats::NatsBridge>) -> Self {
|
||||||
|
spawn_cache_invalidator(Arc::clone(&bridge), Arc::clone(&self.cache));
|
||||||
|
self.events = Some(Arc::new(EventPublisher::new(bridge)));
|
||||||
|
self
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// List all extensions
|
/// List all extensions
|
||||||
@ -254,6 +273,12 @@ pub async fn download_extension(
|
|||||||
.download_extension(extension_type, &name, &version)
|
.download_extension(extension_type, &name, &version)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
if let Some(events) = &state.events {
|
||||||
|
let events = Arc::clone(events);
|
||||||
|
let (ty, n, v) = (extension_type, name.clone(), version.clone());
|
||||||
|
tokio::spawn(async move { events.publish_installed(ty, &n, &v).await });
|
||||||
|
}
|
||||||
return Ok((
|
return Ok((
|
||||||
StatusCode::OK,
|
StatusCode::OK,
|
||||||
[(header::CONTENT_TYPE, "application/octet-stream")],
|
[(header::CONTENT_TYPE, "application/octet-stream")],
|
||||||
@ -269,6 +294,12 @@ pub async fn download_extension(
|
|||||||
.download_extension(extension_type, &name, &version)
|
.download_extension(extension_type, &name, &version)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
if let Some(events) = &state.events {
|
||||||
|
let events = Arc::clone(events);
|
||||||
|
let (ty, n, v) = (extension_type, name.clone(), version.clone());
|
||||||
|
tokio::spawn(async move { events.publish_installed(ty, &n, &v).await });
|
||||||
|
}
|
||||||
return Ok((
|
return Ok((
|
||||||
StatusCode::OK,
|
StatusCode::OK,
|
||||||
[(header::CONTENT_TYPE, "application/octet-stream")],
|
[(header::CONTENT_TYPE, "application/octet-stream")],
|
||||||
|
|||||||
@ -2,9 +2,10 @@
|
|||||||
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use tracing::info;
|
use tracing::{info, warn};
|
||||||
|
|
||||||
use super::traits::{DistributionClient, SourceClient};
|
use super::traits::{DistributionClient, SourceClient};
|
||||||
|
use super::vault_resolver::VaultResolver;
|
||||||
use super::{ForgejoClient, GitHubClient};
|
use super::{ForgejoClient, GitHubClient};
|
||||||
use crate::config::Config;
|
use crate::config::Config;
|
||||||
use crate::error::{RegistryError, Result};
|
use crate::error::{RegistryError, Result};
|
||||||
@ -15,46 +16,64 @@ use crate::oci::OciClient as OciClientImpl;
|
|||||||
pub struct ClientFactory;
|
pub struct ClientFactory;
|
||||||
|
|
||||||
impl ClientFactory {
|
impl ClientFactory {
|
||||||
/// Create all configured clients from configuration
|
/// Create all configured clients from configuration, resolving any vault://
|
||||||
pub fn create_from_config(
|
/// token references.
|
||||||
|
///
|
||||||
|
/// When `vault_url` is `Some`, any `token_path` starting with `vault://` is
|
||||||
|
/// resolved via the vault-service HTTP API with a 5-minute in-memory
|
||||||
|
/// cache. When `vault_url` is `None`, vault:// paths cause an error.
|
||||||
|
pub async fn create_from_config(
|
||||||
config: &Config,
|
config: &Config,
|
||||||
|
vault_url: Option<&str>,
|
||||||
) -> Result<(Vec<Arc<dyn SourceClient>>, Vec<Arc<dyn DistributionClient>>)> {
|
) -> Result<(Vec<Arc<dyn SourceClient>>, Vec<Arc<dyn DistributionClient>>)> {
|
||||||
|
let resolver = vault_url.map(|u| VaultResolver::new(u.to_string()));
|
||||||
|
|
||||||
let mut source_clients: Vec<Arc<dyn SourceClient>> = Vec::new();
|
let mut source_clients: Vec<Arc<dyn SourceClient>> = Vec::new();
|
||||||
let mut distribution_clients: Vec<Arc<dyn DistributionClient>> = Vec::new();
|
let mut distribution_clients: Vec<Arc<dyn DistributionClient>> = Vec::new();
|
||||||
|
|
||||||
// Create Gitea clients (source-based)
|
// Create Gitea clients (source-based)
|
||||||
if let Some(ref gitea_config) = config.gitea {
|
for gitea_config in &config.sources.gitea {
|
||||||
let client = GiteaClientImpl::new(gitea_config)?;
|
let token = resolve_token(&gitea_config.token_path, resolver.as_ref()).await?;
|
||||||
let wrapped = Arc::new(client) as Arc<dyn SourceClient>;
|
let client = GiteaClientImpl::new(gitea_config, token)?;
|
||||||
source_clients.push(wrapped);
|
source_clients.push(Arc::new(client) as Arc<dyn SourceClient>);
|
||||||
info!("Registered Gitea client: {}", gitea_config.url);
|
info!("Registered Gitea client: {}", gitea_config.url);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create Forgejo clients (source-based)
|
// Create Forgejo clients (source-based)
|
||||||
if let Some(ref forgejo_config) = config.gitea {
|
for forgejo_config in &config.sources.forgejo {
|
||||||
let client = ForgejoClient::new(forgejo_config)?;
|
let token = resolve_token(&forgejo_config.token_path, resolver.as_ref()).await?;
|
||||||
let wrapped = Arc::new(client) as Arc<dyn SourceClient>;
|
let client = ForgejoClient::new(forgejo_config, token)?;
|
||||||
source_clients.push(wrapped);
|
source_clients.push(Arc::new(client) as Arc<dyn SourceClient>);
|
||||||
info!("Registered Forgejo client: {}", forgejo_config.url);
|
info!("Registered Forgejo client: {}", forgejo_config.url);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create GitHub clients (source-based)
|
// Create GitHub clients (source-based)
|
||||||
if let Some(ref github_config) = config.gitea {
|
for github_config in &config.sources.github {
|
||||||
let client = GitHubClient::new(github_config)?;
|
// GitHub tokens are optional; a missing file is treated as unauthenticated
|
||||||
let wrapped = Arc::new(client) as Arc<dyn SourceClient>;
|
// access
|
||||||
source_clients.push(wrapped);
|
let token = resolve_token_optional(&github_config.token_path, resolver.as_ref()).await;
|
||||||
|
if token.is_none() {
|
||||||
|
warn!(
|
||||||
|
"GitHub client '{}' has no token — API rate limits apply",
|
||||||
|
github_config.organization
|
||||||
|
);
|
||||||
|
}
|
||||||
|
let client = GitHubClient::new(github_config, token)?;
|
||||||
|
source_clients.push(Arc::new(client) as Arc<dyn SourceClient>);
|
||||||
info!("Registered GitHub client: {}", github_config.organization);
|
info!("Registered GitHub client: {}", github_config.organization);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create OCI clients (distribution-based)
|
// Create OCI clients (distribution-based)
|
||||||
if let Some(ref oci_config) = config.oci {
|
for oci_config in &config.distributions.oci {
|
||||||
let client = OciClientImpl::new(oci_config)?;
|
let auth_token = match &oci_config.auth_token_path {
|
||||||
let wrapped = Arc::new(client) as Arc<dyn DistributionClient>;
|
Some(path) => Some(resolve_token(path, resolver.as_ref()).await?),
|
||||||
distribution_clients.push(wrapped);
|
None => None,
|
||||||
|
};
|
||||||
|
let client = OciClientImpl::new(oci_config, auth_token)?;
|
||||||
|
distribution_clients.push(Arc::new(client) as Arc<dyn DistributionClient>);
|
||||||
info!("Registered OCI client: {}", oci_config.registry);
|
info!("Registered OCI client: {}", oci_config.registry);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure at least one backend is configured
|
|
||||||
if source_clients.is_empty() && distribution_clients.is_empty() {
|
if source_clients.is_empty() && distribution_clients.is_empty() {
|
||||||
return Err(RegistryError::Config(
|
return Err(RegistryError::Config(
|
||||||
"No backends configured (gitea, forgejo, github, or oci required)".to_string(),
|
"No backends configured (gitea, forgejo, github, or oci required)".to_string(),
|
||||||
@ -70,3 +89,37 @@ impl ClientFactory {
|
|||||||
Ok((source_clients, distribution_clients))
|
Ok((source_clients, distribution_clients))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Resolve a token path to its plaintext value.
|
||||||
|
///
|
||||||
|
/// Paths prefixed with `vault://` are resolved via VaultResolver (requires
|
||||||
|
/// resolver to be Some). All other paths are read from the filesystem.
|
||||||
|
async fn resolve_token(path: &str, resolver: Option<&VaultResolver>) -> Result<String> {
|
||||||
|
if path.starts_with("vault://") {
|
||||||
|
let resolver = resolver.ok_or_else(|| {
|
||||||
|
RegistryError::Config(format!(
|
||||||
|
"token_path '{}' is a vault:// reference but no vault_url was provided",
|
||||||
|
path
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
return resolver
|
||||||
|
.try_resolve(path)
|
||||||
|
.await
|
||||||
|
.expect("try_resolve always returns Some for vault:// prefixed paths")
|
||||||
|
.map_err(|e| {
|
||||||
|
RegistryError::Config(format!("Vault resolution failed for '{}': {}", path, e))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
std::fs::read_to_string(path)
|
||||||
|
.map(|s| s.trim().to_string())
|
||||||
|
.map_err(|e| RegistryError::Config(format!("Failed to read token from '{}': {}", path, e)))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve a token path, returning `None` on any failure.
|
||||||
|
///
|
||||||
|
/// Used for optional tokens (e.g., GitHub) where unauthenticated access is
|
||||||
|
/// acceptable.
|
||||||
|
async fn resolve_token_optional(path: &str, resolver: Option<&VaultResolver>) -> Option<String> {
|
||||||
|
resolve_token(path, resolver).await.ok()
|
||||||
|
}
|
||||||
|
|||||||
@ -19,14 +19,17 @@ pub struct ForgejoClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl ForgejoClient {
|
impl ForgejoClient {
|
||||||
/// Create new Forgejo client from Gitea config
|
/// Create new Forgejo client with a pre-resolved token.
|
||||||
pub fn new(config: &crate::config::GiteaConfig) -> Result<Self> {
|
///
|
||||||
|
/// Token resolution (file read or vault:// fetch) is the caller's
|
||||||
|
/// responsibility.
|
||||||
|
pub fn new(config: &crate::config::GiteaConfig, token: String) -> Result<Self> {
|
||||||
let backend_id = config
|
let backend_id = config
|
||||||
.id
|
.id
|
||||||
.clone()
|
.clone()
|
||||||
.unwrap_or_else(|| format!("forgejo-{}", config.organization));
|
.unwrap_or_else(|| format!("forgejo-{}", config.organization));
|
||||||
|
|
||||||
let inner = crate::gitea::GiteaClient::new(config)?;
|
let inner = crate::gitea::GiteaClient::new(config, token)?;
|
||||||
|
|
||||||
Ok(Self { backend_id, inner })
|
Ok(Self { backend_id, inner })
|
||||||
}
|
}
|
||||||
|
|||||||
@ -1,501 +0,0 @@
|
|||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use bytes::Bytes;
|
|
||||||
use reqwest::Client;
|
|
||||||
use tracing::debug;
|
|
||||||
use url::Url;
|
|
||||||
|
|
||||||
use super::traits::{BackendType, ExtensionClient, SourceClient};
|
|
||||||
use crate::config::GiteaConfig;
|
|
||||||
use crate::error::{RegistryError, Result};
|
|
||||||
use crate::gitea::models::{GiteaRelease, GiteaRepository};
|
|
||||||
use crate::models::{Extension, ExtensionSource, ExtensionType, ExtensionVersion};
|
|
||||||
|
|
||||||
/// Gitea API client
|
|
||||||
pub struct GiteaClient {
|
|
||||||
id: String,
|
|
||||||
base_url: Url,
|
|
||||||
organization: String,
|
|
||||||
token: String,
|
|
||||||
client: Client,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl GiteaClient {
|
|
||||||
/// Create new Gitea client
|
|
||||||
pub fn new(config: &GiteaConfig) -> Result<Self> {
|
|
||||||
let base_url = Url::parse(&config.url)
|
|
||||||
.map_err(|e| RegistryError::Config(format!("Invalid Gitea URL: {}", e)))?;
|
|
||||||
|
|
||||||
let token = config.read_token()?;
|
|
||||||
|
|
||||||
let client = Client::builder()
|
|
||||||
.timeout(Duration::from_secs(config.timeout_seconds))
|
|
||||||
.danger_accept_invalid_certs(!config.verify_ssl)
|
|
||||||
.build()
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Failed to create HTTP client: {}", e)))?;
|
|
||||||
|
|
||||||
let id = config
|
|
||||||
.id
|
|
||||||
.clone()
|
|
||||||
.unwrap_or_else(|| "gitea-default".to_string());
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
id,
|
|
||||||
base_url,
|
|
||||||
organization: config.organization.clone(),
|
|
||||||
token,
|
|
||||||
client,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the backend ID
|
|
||||||
pub fn id(&self) -> &str {
|
|
||||||
&self.id
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List all extensions from organization repositories
|
|
||||||
pub async fn list_extensions_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: Option<ExtensionType>,
|
|
||||||
) -> Result<Vec<Extension>> {
|
|
||||||
debug!(
|
|
||||||
"Fetching repositories for organization: {}",
|
|
||||||
self.organization
|
|
||||||
);
|
|
||||||
|
|
||||||
let repos = self.list_repositories().await?;
|
|
||||||
let mut extensions = Vec::new();
|
|
||||||
|
|
||||||
for repo in repos {
|
|
||||||
// Skip archived repositories
|
|
||||||
if repo.archived {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse extension type from repository name prefix
|
|
||||||
let Some(ext_type) = self.parse_extension_type(&repo.name) else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
// Filter by type if specified
|
|
||||||
if let Some(filter_type) = extension_type {
|
|
||||||
if ext_type != filter_type {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get latest release for this repository
|
|
||||||
let Ok(releases) = self.list_releases(&repo.name).await else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(latest_release) = releases.first() {
|
|
||||||
if let Some(extension) = self.release_to_extension(&repo, latest_release, ext_type)
|
|
||||||
{
|
|
||||||
extensions.push(extension);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(extensions)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get specific extension metadata
|
|
||||||
pub async fn get_extension_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
) -> Result<Extension> {
|
|
||||||
let repo_name = self.format_repo_name(extension_type, name);
|
|
||||||
debug!("Fetching extension: {}", repo_name);
|
|
||||||
|
|
||||||
let repo = self.get_repository(&repo_name).await?;
|
|
||||||
let releases = self.list_releases(&repo_name).await?;
|
|
||||||
|
|
||||||
let latest_release = releases.first().ok_or_else(|| {
|
|
||||||
RegistryError::NotFound(format!("No releases found for {}", repo_name))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
self.release_to_extension(&repo, latest_release, extension_type)
|
|
||||||
.ok_or_else(|| {
|
|
||||||
RegistryError::NotFound(format!("Invalid extension metadata for {}", repo_name))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List all versions for an extension
|
|
||||||
pub async fn list_versions_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
) -> Result<Vec<ExtensionVersion>> {
|
|
||||||
let repo_name = self.format_repo_name(extension_type, name);
|
|
||||||
debug!("Fetching versions for: {}", repo_name);
|
|
||||||
|
|
||||||
let releases = self.list_releases(&repo_name).await?;
|
|
||||||
|
|
||||||
Ok(releases
|
|
||||||
.iter()
|
|
||||||
.map(|release| ExtensionVersion {
|
|
||||||
version: release.tag_name.clone(),
|
|
||||||
published_at: release.published_at.unwrap_or(release.created_at),
|
|
||||||
download_url: release
|
|
||||||
.assets
|
|
||||||
.first()
|
|
||||||
.map(|a| a.browser_download_url.clone()),
|
|
||||||
checksum: None,
|
|
||||||
size: release.assets.first().map(|a| a.size),
|
|
||||||
})
|
|
||||||
.collect())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Download extension asset
|
|
||||||
pub async fn download_extension_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
version: &str,
|
|
||||||
) -> Result<Bytes> {
|
|
||||||
let repo_name = self.format_repo_name(extension_type, name);
|
|
||||||
debug!("Downloading extension: {} version {}", repo_name, version);
|
|
||||||
|
|
||||||
let release = self.get_release(&repo_name, version).await?;
|
|
||||||
|
|
||||||
let asset = release.assets.first().ok_or_else(|| {
|
|
||||||
RegistryError::NotFound(format!("No assets found for release {}", version))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
self.download_asset(&asset.browser_download_url).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List repositories in organization
|
|
||||||
async fn list_repositories(&self) -> Result<Vec<GiteaRepository>> {
|
|
||||||
let url = self
|
|
||||||
.base_url
|
|
||||||
.join(&format!("api/v1/orgs/{}/repos", self.organization))
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Invalid URL: {}", e)))?;
|
|
||||||
|
|
||||||
let response = self
|
|
||||||
.client
|
|
||||||
.get(url)
|
|
||||||
.header("Authorization", format!("token {}", self.token))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Request failed: {}", e)))?;
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Gitea(format!(
|
|
||||||
"Failed to list repositories: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.json()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Failed to parse response: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get specific repository
|
|
||||||
async fn get_repository(&self, repo_name: &str) -> Result<GiteaRepository> {
|
|
||||||
let url = self
|
|
||||||
.base_url
|
|
||||||
.join(&format!("api/v1/repos/{}/{}", self.organization, repo_name))
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Invalid URL: {}", e)))?;
|
|
||||||
|
|
||||||
let response = self
|
|
||||||
.client
|
|
||||||
.get(url)
|
|
||||||
.header("Authorization", format!("token {}", self.token))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Request failed: {}", e)))?;
|
|
||||||
|
|
||||||
if response.status().as_u16() == 404 {
|
|
||||||
return Err(RegistryError::NotFound(format!(
|
|
||||||
"Repository not found: {}",
|
|
||||||
repo_name
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Gitea(format!(
|
|
||||||
"Failed to get repository: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.json()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Failed to parse response: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List releases for repository
|
|
||||||
async fn list_releases(&self, repo_name: &str) -> Result<Vec<GiteaRelease>> {
|
|
||||||
let url = self
|
|
||||||
.base_url
|
|
||||||
.join(&format!(
|
|
||||||
"api/v1/repos/{}/{}/releases",
|
|
||||||
self.organization, repo_name
|
|
||||||
))
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Invalid URL: {}", e)))?;
|
|
||||||
|
|
||||||
let response = self
|
|
||||||
.client
|
|
||||||
.get(url)
|
|
||||||
.header("Authorization", format!("token {}", self.token))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Request failed: {}", e)))?;
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Gitea(format!(
|
|
||||||
"Failed to list releases: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut releases: Vec<GiteaRelease> = response
|
|
||||||
.json()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Failed to parse response: {}", e)))?;
|
|
||||||
|
|
||||||
// Filter out drafts and prereleases, sort by date
|
|
||||||
releases.retain(|r| !r.draft && !r.prerelease);
|
|
||||||
releases.sort_by(|a, b| {
|
|
||||||
let a_date = a.published_at.unwrap_or(a.created_at);
|
|
||||||
let b_date = b.published_at.unwrap_or(b.created_at);
|
|
||||||
b_date.cmp(&a_date)
|
|
||||||
});
|
|
||||||
|
|
||||||
Ok(releases)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get specific release
|
|
||||||
async fn get_release(&self, repo_name: &str, tag: &str) -> Result<GiteaRelease> {
|
|
||||||
let url = self
|
|
||||||
.base_url
|
|
||||||
.join(&format!(
|
|
||||||
"api/v1/repos/{}/{}/releases/tags/{}",
|
|
||||||
self.organization, repo_name, tag
|
|
||||||
))
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Invalid URL: {}", e)))?;
|
|
||||||
|
|
||||||
let response = self
|
|
||||||
.client
|
|
||||||
.get(url)
|
|
||||||
.header("Authorization", format!("token {}", self.token))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Request failed: {}", e)))?;
|
|
||||||
|
|
||||||
if response.status().as_u16() == 404 {
|
|
||||||
return Err(RegistryError::NotFound(format!(
|
|
||||||
"Release not found: {}",
|
|
||||||
tag
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Gitea(format!(
|
|
||||||
"Failed to get release: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.json()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Failed to parse response: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Download asset from URL
|
|
||||||
async fn download_asset(&self, url: &str) -> Result<Bytes> {
|
|
||||||
let response = self
|
|
||||||
.client
|
|
||||||
.get(url)
|
|
||||||
.header("Authorization", format!("token {}", self.token))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Download failed: {}", e)))?;
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Gitea(format!(
|
|
||||||
"Download failed: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.bytes()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Failed to read response: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse extension type from repository name
|
|
||||||
fn parse_extension_type(&self, repo_name: &str) -> Option<ExtensionType> {
|
|
||||||
if repo_name.ends_with("_prov") {
|
|
||||||
Some(ExtensionType::Provider)
|
|
||||||
} else if repo_name.ends_with("_taskserv") {
|
|
||||||
Some(ExtensionType::Taskserv)
|
|
||||||
} else if repo_name.ends_with("_cluster") {
|
|
||||||
Some(ExtensionType::Cluster)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Format repository name from extension type and name
|
|
||||||
fn format_repo_name(&self, extension_type: ExtensionType, name: &str) -> String {
|
|
||||||
let suffix = match extension_type {
|
|
||||||
ExtensionType::Provider => "_prov",
|
|
||||||
ExtensionType::Taskserv => "_taskserv",
|
|
||||||
ExtensionType::Cluster => "_cluster",
|
|
||||||
};
|
|
||||||
format!("{}{}", name, suffix)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Convert Gitea release to Extension
|
|
||||||
fn release_to_extension(
|
|
||||||
&self,
|
|
||||||
repo: &GiteaRepository,
|
|
||||||
release: &GiteaRelease,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
) -> Option<Extension> {
|
|
||||||
let name = self.extract_extension_name(&repo.name, extension_type)?;
|
|
||||||
|
|
||||||
Some(Extension {
|
|
||||||
name,
|
|
||||||
extension_type,
|
|
||||||
version: release.tag_name.clone(),
|
|
||||||
description: repo.description.clone().unwrap_or_default(),
|
|
||||||
author: Some(repo.owner.login.clone()),
|
|
||||||
repository: Some(repo.html_url.clone()),
|
|
||||||
source: ExtensionSource::Gitea,
|
|
||||||
published_at: release.published_at.unwrap_or(release.created_at),
|
|
||||||
download_url: release
|
|
||||||
.assets
|
|
||||||
.first()
|
|
||||||
.map(|a| a.browser_download_url.clone()),
|
|
||||||
checksum: None,
|
|
||||||
size: release.assets.first().map(|a| a.size),
|
|
||||||
tags: None,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Extract extension name from repository name
|
|
||||||
fn extract_extension_name(
|
|
||||||
&self,
|
|
||||||
repo_name: &str,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
) -> Option<String> {
|
|
||||||
let suffix = match extension_type {
|
|
||||||
ExtensionType::Provider => "_prov",
|
|
||||||
ExtensionType::Taskserv => "_taskserv",
|
|
||||||
ExtensionType::Cluster => "_cluster",
|
|
||||||
};
|
|
||||||
|
|
||||||
repo_name.strip_suffix(suffix).map(|s| s.to_string())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Check health of Gitea connection
|
|
||||||
pub async fn health_check_impl(&self) -> Result<()> {
|
|
||||||
let url = self
|
|
||||||
.base_url
|
|
||||||
.join("api/v1/version")
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Invalid URL: {}", e)))?;
|
|
||||||
|
|
||||||
let response = self
|
|
||||||
.client
|
|
||||||
.get(url)
|
|
||||||
.timeout(Duration::from_secs(5))
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Health check failed: {}", e)))?;
|
|
||||||
|
|
||||||
if response.status().is_success() {
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(RegistryError::Gitea(format!(
|
|
||||||
"Health check returned: {}",
|
|
||||||
response.status()
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl ExtensionClient for GiteaClient {
|
|
||||||
async fn list_extensions(
|
|
||||||
&self,
|
|
||||||
extension_type: Option<ExtensionType>,
|
|
||||||
) -> Result<Vec<Extension>> {
|
|
||||||
self.list_extensions_impl(extension_type).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_extension(&self, extension_type: ExtensionType, name: &str) -> Result<Extension> {
|
|
||||||
self.get_extension_impl(extension_type, name).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn list_versions(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
) -> Result<Vec<ExtensionVersion>> {
|
|
||||||
self.list_versions_impl(extension_type, name).await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn download_extension(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
version: &str,
|
|
||||||
) -> Result<Bytes> {
|
|
||||||
self.download_extension_impl(extension_type, name, version)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn health_check(&self) -> Result<()> {
|
|
||||||
self.health_check_impl().await
|
|
||||||
}
|
|
||||||
|
|
||||||
fn backend_id(&self) -> String {
|
|
||||||
self.id.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn backend_type(&self) -> BackendType {
|
|
||||||
BackendType::Gitea
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
|
||||||
impl SourceClient for GiteaClient {
|
|
||||||
async fn get_repository_url(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
) -> Result<String> {
|
|
||||||
let repo_name = self.format_repo_name(extension_type, name);
|
|
||||||
let repo = self.get_repository(&repo_name).await?;
|
|
||||||
Ok(repo.html_url)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn list_releases(&self, repo_name: &str) -> Result<Vec<String>> {
|
|
||||||
let releases = self.list_releases(repo_name).await?;
|
|
||||||
Ok(releases.iter().map(|r| r.tag_name.clone()).collect())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn get_release_notes(
|
|
||||||
&self,
|
|
||||||
_extension_type: ExtensionType,
|
|
||||||
_name: &str,
|
|
||||||
version: &str,
|
|
||||||
) -> Result<Option<String>> {
|
|
||||||
// Gitea doesn't provide structured release notes via the API
|
|
||||||
// We'd need to scrape the HTML or use a custom field
|
|
||||||
// For now, just return None
|
|
||||||
let _version = version;
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@ -24,10 +24,12 @@ pub struct GitHubClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GitHubClient {
|
impl GitHubClient {
|
||||||
/// Create new GitHub client from Gitea config (reused for simplicity)
|
/// Create new GitHub client with a pre-resolved token.
|
||||||
pub fn new(config: &crate::config::GiteaConfig) -> Result<Self> {
|
///
|
||||||
let token = config.read_token().ok();
|
/// Token resolution (file read or vault:// fetch) is the caller's
|
||||||
|
/// responsibility. Pass `None` for unauthenticated access (lower rate
|
||||||
|
/// limits apply).
|
||||||
|
pub fn new(config: &crate::config::GiteaConfig, token: Option<String>) -> Result<Self> {
|
||||||
let client = Client::builder()
|
let client = Client::builder()
|
||||||
.timeout(Duration::from_secs(config.timeout_seconds))
|
.timeout(Duration::from_secs(config.timeout_seconds))
|
||||||
.danger_accept_invalid_certs(!config.verify_ssl)
|
.danger_accept_invalid_certs(!config.verify_ssl)
|
||||||
|
|||||||
@ -8,6 +8,7 @@ pub mod factory;
|
|||||||
pub mod forgejo;
|
pub mod forgejo;
|
||||||
pub mod github;
|
pub mod github;
|
||||||
pub mod traits;
|
pub mod traits;
|
||||||
|
pub mod vault_resolver;
|
||||||
|
|
||||||
pub use factory::ClientFactory;
|
pub use factory::ClientFactory;
|
||||||
pub use forgejo::ForgejoClient;
|
pub use forgejo::ForgejoClient;
|
||||||
|
|||||||
@ -1,481 +0,0 @@
|
|||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use async_trait::async_trait;
|
|
||||||
use bytes::Bytes;
|
|
||||||
use chrono::Utc;
|
|
||||||
use reqwest::Client;
|
|
||||||
use tracing::debug;
|
|
||||||
|
|
||||||
use super::traits::{BackendType, DistributionClient, ExtensionClient};
|
|
||||||
use crate::config::OciConfig;
|
|
||||||
use crate::error::{RegistryError, Result};
|
|
||||||
use crate::models::{Extension, ExtensionSource, ExtensionType, ExtensionVersion};
|
|
||||||
use crate::oci::models::{OciCatalog, OciManifest, OciTagsList};
|
|
||||||
|
|
||||||
/// OCI registry client
|
|
||||||
pub struct OciClient {
|
|
||||||
id: String,
|
|
||||||
registry: String,
|
|
||||||
namespace: String,
|
|
||||||
auth_token: Option<String>,
|
|
||||||
client: Client,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl OciClient {
|
|
||||||
/// Create new OCI client
|
|
||||||
pub fn new(config: &OciConfig) -> Result<Self> {
|
|
||||||
let auth_token = config.read_token()?;
|
|
||||||
|
|
||||||
let client = Client::builder()
|
|
||||||
.timeout(Duration::from_secs(config.timeout_seconds))
|
|
||||||
.danger_accept_invalid_certs(!config.verify_ssl)
|
|
||||||
.build()
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Failed to create HTTP client: {}", e)))?;
|
|
||||||
|
|
||||||
let id = config
|
|
||||||
.id
|
|
||||||
.clone()
|
|
||||||
.unwrap_or_else(|| "oci-default".to_string());
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
id,
|
|
||||||
registry: config.registry.clone(),
|
|
||||||
namespace: config.namespace.clone(),
|
|
||||||
auth_token,
|
|
||||||
client,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the backend ID
|
|
||||||
pub fn id(&self) -> &str {
|
|
||||||
&self.id
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List all extensions from OCI registry
|
|
||||||
pub async fn list_extensions_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: Option<ExtensionType>,
|
|
||||||
) -> Result<Vec<Extension>> {
|
|
||||||
debug!("Fetching artifacts from OCI registry: {}", self.registry);
|
|
||||||
|
|
||||||
let catalog = self.list_catalog().await?;
|
|
||||||
let mut extensions = Vec::new();
|
|
||||||
|
|
||||||
for repo_name in catalog.repositories {
|
|
||||||
// Skip if not in our namespace
|
|
||||||
if !repo_name.starts_with(&format!("{}/", self.namespace)) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse extension type and name from repository
|
|
||||||
let Some((ext_type, _name)) = self.parse_repository_name(&repo_name) else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
// Filter by type if specified
|
|
||||||
if let Some(filter_type) = extension_type {
|
|
||||||
if ext_type != filter_type {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get tags for this repository
|
|
||||||
let Ok(tags) = self.list_tags(&repo_name).await else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(latest_tag) = tags.tags.first() else {
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Ok(manifest) = self.get_manifest(&repo_name, latest_tag).await {
|
|
||||||
if let Some(extension) =
|
|
||||||
self.manifest_to_extension(&repo_name, latest_tag, &manifest, ext_type)
|
|
||||||
{
|
|
||||||
extensions.push(extension);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(extensions)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get specific extension metadata
|
|
||||||
pub async fn get_extension_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
) -> Result<Extension> {
|
|
||||||
let repo_name = self.format_repository_name(extension_type, name);
|
|
||||||
debug!("Fetching extension: {}", repo_name);
|
|
||||||
|
|
||||||
let tags = self.list_tags(&repo_name).await?;
|
|
||||||
let latest_tag = tags
|
|
||||||
.tags
|
|
||||||
.first()
|
|
||||||
.ok_or_else(|| RegistryError::NotFound(format!("No tags found for {}", repo_name)))?;
|
|
||||||
|
|
||||||
let manifest = self.get_manifest(&repo_name, latest_tag).await?;
|
|
||||||
|
|
||||||
self.manifest_to_extension(&repo_name, latest_tag, &manifest, extension_type)
|
|
||||||
.ok_or_else(|| {
|
|
||||||
RegistryError::NotFound(format!("Invalid extension metadata for {}", repo_name))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List all versions for an extension
|
|
||||||
pub async fn list_versions_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
) -> Result<Vec<ExtensionVersion>> {
|
|
||||||
let repo_name = self.format_repository_name(extension_type, name);
|
|
||||||
debug!("Fetching versions for: {}", repo_name);
|
|
||||||
|
|
||||||
let tags = self.list_tags(&repo_name).await?;
|
|
||||||
|
|
||||||
let mut versions = Vec::new();
|
|
||||||
for tag in tags.tags {
|
|
||||||
if let Ok(manifest) = self.get_manifest(&repo_name, &tag).await {
|
|
||||||
let size = manifest.layers.iter().map(|l| l.size).sum();
|
|
||||||
|
|
||||||
versions.push(ExtensionVersion {
|
|
||||||
version: tag.clone(),
|
|
||||||
published_at: Utc::now(),
|
|
||||||
download_url: None,
|
|
||||||
checksum: Some(manifest.config.digest.clone()),
|
|
||||||
size: Some(size),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(versions)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Pull extension artifact
|
|
||||||
pub async fn download_extension_impl(
|
|
||||||
&self,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
name: &str,
|
|
||||||
tag: &str,
|
|
||||||
) -> Result<Bytes> {
|
|
||||||
let repo_name = self.format_repository_name(extension_type, name);
|
|
||||||
debug!("Pulling extension: {} tag {}", repo_name, tag);
|
|
||||||
|
|
||||||
let manifest = self.get_manifest(&repo_name, tag).await?;
|
|
||||||
|
|
||||||
// Download first layer (assuming single-layer artifacts)
|
|
||||||
let layer = manifest
|
|
||||||
.layers
|
|
||||||
.first()
|
|
||||||
.ok_or_else(|| RegistryError::Oci("No layers found in manifest".to_string()))?;
|
|
||||||
|
|
||||||
self.download_blob(&repo_name, &layer.digest).await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List catalog (all repositories)
|
|
||||||
async fn list_catalog(&self) -> Result<OciCatalog> {
|
|
||||||
let url = format!("https://{}/v2/_catalog", self.registry);
|
|
||||||
|
|
||||||
let mut request = self.client.get(&url);
|
|
||||||
|
|
||||||
if let Some(ref token) = self.auth_token {
|
|
||||||
request = request.header("Authorization", format!("Bearer {}", token));
|
|
||||||
}
|
|
||||||
|
|
||||||
let response = request
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Request failed: {}", e)))?;
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Oci(format!(
|
|
||||||
"Failed to list catalog: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.json()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Failed to parse response: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// List tags for repository
|
|
||||||
async fn list_tags(&self, repository: &str) -> Result<OciTagsList> {
|
|
||||||
let url = format!("https://{}/v2/{}/tags/list", self.registry, repository);
|
|
||||||
|
|
||||||
let mut request = self.client.get(&url);
|
|
||||||
|
|
||||||
if let Some(ref token) = self.auth_token {
|
|
||||||
request = request.header("Authorization", format!("Bearer {}", token));
|
|
||||||
}
|
|
||||||
|
|
||||||
let response = request
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Request failed: {}", e)))?;
|
|
||||||
|
|
||||||
if response.status().as_u16() == 404 {
|
|
||||||
return Err(RegistryError::NotFound(format!(
|
|
||||||
"Repository not found: {}",
|
|
||||||
repository
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Oci(format!(
|
|
||||||
"Failed to list tags: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.json()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Failed to parse response: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get manifest for tag
|
|
||||||
async fn get_manifest(&self, repository: &str, tag: &str) -> Result<OciManifest> {
|
|
||||||
let url = format!(
|
|
||||||
"https://{}/v2/{}/manifests/{}",
|
|
||||||
self.registry, repository, tag
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut request = self
|
|
||||||
.client
|
|
||||||
.get(&url)
|
|
||||||
.header("Accept", "application/vnd.oci.image.manifest.v1+json");
|
|
||||||
|
|
||||||
if let Some(ref token) = self.auth_token {
|
|
||||||
request = request.header("Authorization", format!("Bearer {}", token));
|
|
||||||
}
|
|
||||||
|
|
||||||
let response = request
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Request failed: {}", e)))?;
|
|
||||||
|
|
||||||
if response.status().as_u16() == 404 {
|
|
||||||
return Err(RegistryError::NotFound(format!(
|
|
||||||
"Manifest not found: {}:{}",
|
|
||||||
repository, tag
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Oci(format!(
|
|
||||||
"Failed to get manifest: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.json()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Failed to parse manifest: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Download blob by digest
|
|
||||||
async fn download_blob(&self, repository: &str, digest: &str) -> Result<Bytes> {
|
|
||||||
let url = format!(
|
|
||||||
"https://{}/v2/{}/blobs/{}",
|
|
||||||
self.registry, repository, digest
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut request = self.client.get(&url);
|
|
||||||
|
|
||||||
if let Some(ref token) = self.auth_token {
|
|
||||||
request = request.header("Authorization", format!("Bearer {}", token));
|
|
||||||
}
|
|
||||||
|
|
||||||
let response = request
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Download failed: {}", e)))?;
|
|
||||||
|
|
||||||
if !response.status().is_success() {
|
|
||||||
return Err(RegistryError::Oci(format!(
|
|
||||||
"Download failed: {}",
|
|
||||||
response.status()
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
response
|
|
||||||
.bytes()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Failed to read response: {}", e)))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Parse repository name into extension type and name
|
|
||||||
fn parse_repository_name(&self, repo_name: &str) -> Option<(ExtensionType, String)> {
|
|
||||||
let name = repo_name.strip_prefix(&format!("{}/", self.namespace))?;
|
|
||||||
|
|
||||||
name.strip_suffix("-provider")
|
|
||||||
.map(|base_name| (ExtensionType::Provider, base_name.to_string()))
|
|
||||||
.or_else(|| {
|
|
||||||
name.strip_suffix("-taskserv")
|
|
||||||
.map(|base_name| (ExtensionType::Taskserv, base_name.to_string()))
|
|
||||||
})
|
|
||||||
.or_else(|| {
|
|
||||||
name.strip_suffix("-cluster")
|
|
||||||
.map(|base_name| (ExtensionType::Cluster, base_name.to_string()))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Format repository name from extension type and name
|
|
||||||
fn format_repository_name(&self, extension_type: ExtensionType, name: &str) -> String {
|
|
||||||
let suffix = match extension_type {
|
|
||||||
ExtensionType::Provider => "-provider",
|
|
||||||
ExtensionType::Taskserv => "-taskserv",
|
|
||||||
ExtensionType::Cluster => "-cluster",
|
|
||||||
};
|
|
||||||
format!("{}/{}{}", self.namespace, name, suffix)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Convert OCI manifest to Extension
|
|
||||||
fn manifest_to_extension(
|
|
||||||
&self,
|
|
||||||
repo_name: &str,
|
|
||||||
tag: &str,
|
|
||||||
manifest: &OciManifest,
|
|
||||||
extension_type: ExtensionType,
|
|
||||||
) -> Option<Extension> {
|
|
||||||
let (_, name) = self.parse_repository_name(repo_name)?;
|
|
||||||
|
|
||||||
let description = manifest
|
|
||||||
.annotations
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|a| a.get("org.opencontainers.image.description"))
|
|
||||||
.cloned()
|
|
||||||
.unwrap_or_default();
|
|
||||||
|
|
||||||
let author = manifest
|
|
||||||
.annotations
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|a| a.get("org.opencontainers.image.authors"))
|
|
||||||
.cloned();
|
|
||||||
|
|
||||||
let repository = manifest
|
|
||||||
.annotations
|
|
||||||
.as_ref()
|
|
||||||
.and_then(|a| a.get("org.opencontainers.image.url"))
|
|
||||||
.cloned();
|
|
||||||
|
|
||||||
let size = manifest.layers.iter().map(|l| l.size).sum();
|
|
||||||
|
|
||||||
Some(Extension {
|
|
||||||
name,
|
|
||||||
extension_type,
|
|
||||||
version: tag.to_string(),
|
|
||||||
description,
|
|
||||||
author,
|
|
||||||
repository,
|
|
||||||
source: ExtensionSource::Oci,
|
|
||||||
published_at: Utc::now(),
|
|
||||||
download_url: None,
|
|
||||||
checksum: Some(manifest.config.digest.clone()),
|
|
||||||
size: Some(size),
|
|
||||||
tags: None,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Check health of OCI connection
|
|
||||||
pub async fn health_check_impl(&self) -> Result<()> {
|
|
||||||
let url = format!("https://{}/v2/", self.registry);
|
|
||||||
|
|
||||||
let mut request = self.client.get(&url).timeout(Duration::from_secs(5));
|
|
||||||
|
|
||||||
if let Some(ref token) = self.auth_token {
|
|
||||||
request = request.header("Authorization", format!("Bearer {}", token));
|
|
||||||
}
|
|
||||||
|
|
||||||
let response = request
|
|
||||||
.send()
|
|
||||||
.await
|
|
||||||
.map_err(|e| RegistryError::Oci(format!("Health check failed: {}", e)))?;
|
|
||||||
|
|
||||||
if response.status().is_success() || response.status().as_u16() == 401 {
|
|
||||||
// 401 means registry is up but auth is required
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(RegistryError::Oci(format!(
|
|
||||||
"Health check returned: {}",
|
|
||||||
response.status()
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
impl ExtensionClient for OciClient {
    // Thin trait adapter: every async method delegates to the matching
    // `*_impl` inherent method on OciClient; no additional logic lives here.

    async fn list_extensions(
        &self,
        extension_type: Option<ExtensionType>,
    ) -> Result<Vec<Extension>> {
        self.list_extensions_impl(extension_type).await
    }

    async fn get_extension(&self, extension_type: ExtensionType, name: &str) -> Result<Extension> {
        self.get_extension_impl(extension_type, name).await
    }

    async fn list_versions(
        &self,
        extension_type: ExtensionType,
        name: &str,
    ) -> Result<Vec<ExtensionVersion>> {
        self.list_versions_impl(extension_type, name).await
    }

    async fn download_extension(
        &self,
        extension_type: ExtensionType,
        name: &str,
        version: &str,
    ) -> Result<Bytes> {
        self.download_extension_impl(extension_type, name, version)
            .await
    }

    async fn health_check(&self) -> Result<()> {
        self.health_check_impl().await
    }

    // Stable identifier for this backend instance (derived from config at
    // construction time).
    fn backend_id(&self) -> String {
        self.id.clone()
    }

    // This client always represents an OCI-registry backend.
    fn backend_type(&self) -> BackendType {
        BackendType::Oci
    }
}
|
|
||||||
|
|
||||||
#[async_trait]
impl DistributionClient for OciClient {
    // NOTE(review): the `self.get_manifest(...)` / `self.list_catalog()` calls
    // below rely on Rust resolving to OciClient's *inherent* methods of the
    // same name (inherent methods take precedence over trait methods) — confirm
    // those inherent methods exist, otherwise these calls would recurse.

    /// Fetch the manifest for `repository:tag` as generic JSON.
    async fn get_manifest(&self, repository: &str, tag: &str) -> Result<serde_json::Value> {
        let manifest = self.get_manifest(repository, tag).await?;
        Ok(serde_json::to_value(manifest)?)
    }

    /// List all repositories known to the registry.
    async fn list_catalog(&self) -> Result<Vec<String>> {
        let catalog = self.list_catalog().await?;
        Ok(catalog.repositories)
    }

    /// Return the config digest for `repository:tag` (used as the artifact checksum).
    async fn get_digest(&self, repository: &str, tag: &str) -> Result<String> {
        let manifest = self.get_manifest(repository, tag).await?;
        Ok(manifest.config.digest)
    }

    /// Compare the manifest's config digest against `expected_digest`.
    async fn verify_artifact(
        &self,
        repository: &str,
        tag: &str,
        expected_digest: &str,
    ) -> Result<bool> {
        let manifest = self.get_manifest(repository, tag).await?;
        Ok(manifest.config.digest == expected_digest)
    }
}
|
|
||||||
115
crates/extension-registry/src/client/vault_resolver.rs
Normal file
115
crates/extension-registry/src/client/vault_resolver.rs
Normal file
@ -0,0 +1,115 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use parking_lot::Mutex;
|
||||||
|
use reqwest::Client;
|
||||||
|
use serde::Deserialize;
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
use crate::error::{RegistryError, Result};
|
||||||
|
|
||||||
|
const TOKEN_TTL: Duration = Duration::from_secs(300); // 5-minute in-memory cache
|
||||||
|
|
||||||
|
// Response body shape of the vault-service `GET /v1/{path}` endpoint.
#[derive(Debug, Deserialize)]
struct SecretResponse {
    // Plaintext secret payload.
    value: String,
}
|
||||||
|
|
||||||
|
// A resolved secret held in the in-memory cache.
struct CachedToken {
    // Plaintext token value.
    value: String,
    // Absolute expiry instant (insertion time + TOKEN_TTL).
    expires_at: Instant,
}
|
||||||
|
|
||||||
|
/// Resolves `vault://path` references to their plaintext values
/// via the vault-service HTTP API.
///
/// Tokens are cached in-memory for `TOKEN_TTL` to avoid repeated requests
/// on every client creation.
pub struct VaultResolver {
    // Base URL of the vault-service (e.g. "https://vault:8200").
    vault_url: String,
    // Reusable HTTP client with a 5-second request timeout.
    http: Client,
    // Path → cached token; parking_lot::Mutex (non-poisoning, sync lock —
    // never held across an .await in this module).
    cache: Arc<Mutex<HashMap<String, CachedToken>>>,
}
|
||||||
|
|
||||||
|
impl VaultResolver {
|
||||||
|
pub fn new(vault_url: String) -> Self {
|
||||||
|
Self {
|
||||||
|
vault_url,
|
||||||
|
http: Client::builder()
|
||||||
|
.timeout(Duration::from_secs(5))
|
||||||
|
.build()
|
||||||
|
.expect("reqwest client"),
|
||||||
|
cache: Arc::new(Mutex::new(HashMap::new())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve `vault://path/to/secret` → plaintext value.
|
||||||
|
///
|
||||||
|
/// Returns `None` if the path is not a `vault://` reference.
|
||||||
|
pub async fn try_resolve(&self, token_path: &str) -> Option<Result<String>> {
|
||||||
|
let secret_path = token_path.strip_prefix("vault://")?;
|
||||||
|
|
||||||
|
// Check cache first
|
||||||
|
{
|
||||||
|
let cache = self.cache.lock();
|
||||||
|
if let Some(cached) = cache.get(secret_path) {
|
||||||
|
if cached.expires_at > Instant::now() {
|
||||||
|
return Some(Ok(cached.value.clone()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch from vault-service
|
||||||
|
let url = format!("{}/v1/{}", self.vault_url, secret_path);
|
||||||
|
let resp = match self.http.get(&url).send().await {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
return Some(Err(RegistryError::Config(format!(
|
||||||
|
"vault resolve HTTP error for '{}': {}",
|
||||||
|
secret_path, e
|
||||||
|
))))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if !resp.status().is_success() {
|
||||||
|
return Some(Err(RegistryError::Config(format!(
|
||||||
|
"vault resolve failed for '{}': HTTP {}",
|
||||||
|
secret_path,
|
||||||
|
resp.status()
|
||||||
|
))));
|
||||||
|
}
|
||||||
|
|
||||||
|
let body: SecretResponse = match resp.json().await {
|
||||||
|
Ok(b) => b,
|
||||||
|
Err(e) => {
|
||||||
|
return Some(Err(RegistryError::Config(format!(
|
||||||
|
"vault resolve parse error for '{}': {}",
|
||||||
|
secret_path, e
|
||||||
|
))))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
info!(path = %secret_path, "Token resolved from vault");
|
||||||
|
|
||||||
|
// Cache the result
|
||||||
|
{
|
||||||
|
let mut cache = self.cache.lock();
|
||||||
|
cache.insert(
|
||||||
|
secret_path.to_string(),
|
||||||
|
CachedToken {
|
||||||
|
value: body.value.clone(),
|
||||||
|
expires_at: Instant::now() + TOKEN_TTL,
|
||||||
|
},
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Ok(body.value))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Expire all cached tokens, forcing re-resolution on next access.
|
||||||
|
pub fn invalidate_cache(&self) {
|
||||||
|
self.cache.lock().clear();
|
||||||
|
warn!("VaultResolver: token cache invalidated");
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -27,6 +27,26 @@ pub struct Config {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Config {
|
impl Config {
|
||||||
|
/// Load configuration from Nickel (extension-registry.ncl) with fallback to
|
||||||
|
/// hierarchy
|
||||||
|
pub fn load() -> Result<Self> {
|
||||||
|
let mut config = Self::load_from_nickel()
|
||||||
|
.or_else(|_| Self::load_from_hierarchy())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
Self::migrate_to_multiinstance(&mut config);
|
||||||
|
Self::apply_env_overrides_internal(&mut config);
|
||||||
|
Ok(config)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Load configuration from Nickel extension-registry.ncl
|
||||||
|
fn load_from_nickel() -> Result<Self> {
|
||||||
|
let config_json = platform_config::load_service_config_from_ncl("extension-registry")
|
||||||
|
.map_err(|e| RegistryError::Config(format!("Failed to load from Nickel: {}", e)))?;
|
||||||
|
|
||||||
|
serde_json::from_value(config_json).map_err(RegistryError::Json)
|
||||||
|
}
|
||||||
|
|
||||||
/// Load configuration from hierarchical sources with mode support
|
/// Load configuration from hierarchical sources with mode support
|
||||||
///
|
///
|
||||||
/// Priority order:
|
/// Priority order:
|
||||||
@ -196,7 +216,10 @@ impl ConfigLoader for Config {
|
|||||||
let service = Self::service_name();
|
let service = Self::service_name();
|
||||||
|
|
||||||
if let Some(path) = platform_config::resolve_config_path(service) {
|
if let Some(path) = platform_config::resolve_config_path(service) {
|
||||||
return Self::from_path(&path);
|
return Self::from_path(&path).map_err(|e| {
|
||||||
|
Box::new(std::io::Error::other(e.to_string()))
|
||||||
|
as Box<dyn std::error::Error + Send + Sync>
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback to defaults
|
// Fallback to defaults
|
||||||
@ -317,7 +340,9 @@ impl GiteaConfig {
|
|||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if !Path::new(&self.token_path).exists() {
|
// vault:// references are resolved at runtime; file check only for filesystem
|
||||||
|
// paths
|
||||||
|
if !self.token_path.starts_with("vault://") && !Path::new(&self.token_path).exists() {
|
||||||
return Err(RegistryError::Config(format!(
|
return Err(RegistryError::Config(format!(
|
||||||
"Gitea token file not found: {}",
|
"Gitea token file not found: {}",
|
||||||
self.token_path
|
self.token_path
|
||||||
@ -327,8 +352,15 @@ impl GiteaConfig {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read token from file
|
/// Read token from file. Only call for non-vault:// paths.
|
||||||
pub fn read_token(&self) -> Result<String> {
|
pub fn read_token(&self) -> Result<String> {
|
||||||
|
if self.token_path.starts_with("vault://") {
|
||||||
|
return Err(RegistryError::Config(
|
||||||
|
"token_path is a vault:// reference — use ClientFactory::create_from_config_async \
|
||||||
|
to resolve"
|
||||||
|
.to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
std::fs::read_to_string(&self.token_path)
|
std::fs::read_to_string(&self.token_path)
|
||||||
.map(|s| s.trim().to_string())
|
.map(|s| s.trim().to_string())
|
||||||
.map_err(|e| RegistryError::Config(format!("Failed to read Gitea token: {}", e)))
|
.map_err(|e| RegistryError::Config(format!("Failed to read Gitea token: {}", e)))
|
||||||
@ -364,7 +396,9 @@ impl OciConfig {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if let Some(ref token_path) = self.auth_token_path {
|
if let Some(ref token_path) = self.auth_token_path {
|
||||||
if !Path::new(token_path).exists() {
|
// vault:// references are resolved at runtime; file check only for filesystem
|
||||||
|
// paths
|
||||||
|
if !token_path.starts_with("vault://") && !Path::new(token_path).exists() {
|
||||||
return Err(RegistryError::Config(format!(
|
return Err(RegistryError::Config(format!(
|
||||||
"OCI token file not found: {}",
|
"OCI token file not found: {}",
|
||||||
token_path
|
token_path
|
||||||
@ -375,9 +409,16 @@ impl OciConfig {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read auth token from file
|
/// Read auth token from file. Only call for non-vault:// paths.
|
||||||
pub fn read_token(&self) -> Result<Option<String>> {
|
pub fn read_token(&self) -> Result<Option<String>> {
|
||||||
if let Some(ref path) = self.auth_token_path {
|
if let Some(ref path) = self.auth_token_path {
|
||||||
|
if path.starts_with("vault://") {
|
||||||
|
return Err(RegistryError::Config(
|
||||||
|
"auth_token_path is a vault:// reference — use \
|
||||||
|
ClientFactory::create_from_config_async to resolve"
|
||||||
|
.to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
std::fs::read_to_string(path)
|
std::fs::read_to_string(path)
|
||||||
.map(|s| Some(s.trim().to_string()))
|
.map(|s| Some(s.trim().to_string()))
|
||||||
.map_err(|e| RegistryError::Config(format!("Failed to read OCI token: {}", e)))
|
.map_err(|e| RegistryError::Config(format!("Failed to read OCI token: {}", e)))
|
||||||
@ -443,8 +484,7 @@ fn default_ttl() -> u64 {
|
|||||||
|
|
||||||
/// Load configuration from file
|
/// Load configuration from file
|
||||||
pub fn load_config(path: &str) -> Result<Config> {
|
pub fn load_config(path: &str) -> Result<Config> {
|
||||||
let contents = std::fs::read_to_string(path)
|
let contents = std::fs::read_to_string(path).map_err(RegistryError::Io)?;
|
||||||
.map_err(|e| RegistryError::Config(format!("Failed to read config file: {}", e)))?;
|
|
||||||
|
|
||||||
let config: Config = toml::from_str(&contents)
|
let config: Config = toml::from_str(&contents)
|
||||||
.map_err(|e| RegistryError::Config(format!("Failed to parse config: {}", e)))?;
|
.map_err(|e| RegistryError::Config(format!("Failed to parse config: {}", e)))?;
|
||||||
|
|||||||
105
crates/extension-registry/src/events.rs
Normal file
105
crates/extension-registry/src/events.rs
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use futures::StreamExt;
|
||||||
|
use platform_nats::NatsBridge;
|
||||||
|
use serde::Serialize;
|
||||||
|
use tracing::{error, info, warn};
|
||||||
|
|
||||||
|
use crate::cache::ExtensionCache;
|
||||||
|
use crate::models::ExtensionType;
|
||||||
|
|
||||||
|
/// Publishes NATS events for extension lifecycle operations.
pub struct EventPublisher {
    // Shared JetStream bridge; Arc makes the publisher cheap to clone/share
    // across handlers.
    bridge: Arc<NatsBridge>,
}
|
||||||
|
|
||||||
|
// JSON payload for extension lifecycle events; fields are borrowed so
// publishing does not allocate owned copies.
#[derive(Debug, Serialize)]
struct ExtensionEvent<'a> {
    // Extension name.
    name: &'a str,
    // Version string of the installed extension.
    version: &'a str,
    // Serialized as "type" to match the event schema consumed downstream.
    #[serde(rename = "type")]
    extension_type: ExtensionType,
}
|
||||||
|
|
||||||
|
impl EventPublisher {
|
||||||
|
pub fn new(bridge: Arc<NatsBridge>) -> Self {
|
||||||
|
Self { bridge }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Publish `provisioning.extensions.{type}.installed`.
|
||||||
|
///
|
||||||
|
/// Fire-and-forget: logs error on failure but never propagates to handler.
|
||||||
|
pub async fn publish_installed(
|
||||||
|
&self,
|
||||||
|
extension_type: ExtensionType,
|
||||||
|
name: &str,
|
||||||
|
version: &str,
|
||||||
|
) {
|
||||||
|
let subject = format!("extensions.{}.installed", extension_type);
|
||||||
|
let payload = ExtensionEvent {
|
||||||
|
name,
|
||||||
|
version,
|
||||||
|
extension_type,
|
||||||
|
};
|
||||||
|
match self.bridge.publish_json(&subject, &payload).await {
|
||||||
|
Ok(_) => {
|
||||||
|
info!(
|
||||||
|
subject = %subject,
|
||||||
|
extension = %name,
|
||||||
|
version = %version,
|
||||||
|
"Extension installed event published"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!(
|
||||||
|
subject = %subject,
|
||||||
|
extension = %name,
|
||||||
|
"Failed to publish extension event: {}", e
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawn a background task that subscribes to workspace deploy-done events
|
||||||
|
/// and invalidates the extension cache on each notification.
|
||||||
|
///
|
||||||
|
/// Subject: `provisioning.workspace.*.deploy.done` (filter on WORKSPACE stream)
|
||||||
|
pub fn spawn_cache_invalidator(bridge: Arc<NatsBridge>, cache: Arc<ExtensionCache>) {
|
||||||
|
tokio::spawn(async move {
|
||||||
|
run_cache_invalidator(bridge, cache).await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_cache_invalidator(bridge: Arc<NatsBridge>, cache: Arc<ExtensionCache>) {
|
||||||
|
const STREAM: &str = "WORKSPACE";
|
||||||
|
const CONSUMER: &str = "ext-registry-cache-invalidator";
|
||||||
|
|
||||||
|
let mut messages = match bridge.subscribe_pull(STREAM, CONSUMER).await {
|
||||||
|
Ok(m) => m,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Extension registry cache invalidator: subscribe failed — {e}");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("Extension registry cache invalidator running on stream {STREAM}");
|
||||||
|
|
||||||
|
while let Some(msg_result) = messages.next().await {
|
||||||
|
match msg_result {
|
||||||
|
Ok(msg) => {
|
||||||
|
// Subject pattern: provisioning.workspace.{ws_id}.deploy.done
|
||||||
|
// Filter is applied at JetStream level; any message here triggers invalidation.
|
||||||
|
let subject = msg.subject.as_str();
|
||||||
|
info!(subject = %subject, "Workspace deploy detected — invalidating extension cache");
|
||||||
|
cache.invalidate_all();
|
||||||
|
if let Err(e) = msg.ack().await {
|
||||||
|
warn!("Cache invalidator: ack failed — {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
error!("Cache invalidator: message error — {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -24,20 +24,21 @@ pub struct GiteaClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl GiteaClient {
|
impl GiteaClient {
|
||||||
/// Create new Gitea client
|
/// Create new Gitea client with a pre-resolved token.
|
||||||
pub fn new(config: &GiteaConfig) -> Result<Self> {
|
///
|
||||||
|
/// Token resolution (file read or vault:// fetch) is the caller's
|
||||||
|
/// responsibility. Use `ClientFactory::create_from_config_async` for
|
||||||
|
/// automatic resolution.
|
||||||
|
pub fn new(config: &GiteaConfig, token: String) -> Result<Self> {
|
||||||
let base_url = Url::parse(&config.url)
|
let base_url = Url::parse(&config.url)
|
||||||
.map_err(|e| RegistryError::Config(format!("Invalid Gitea URL: {}", e)))?;
|
.map_err(|e| RegistryError::Config(format!("Invalid Gitea URL: {}", e)))?;
|
||||||
|
|
||||||
let token = config.read_token()?;
|
|
||||||
|
|
||||||
let client = Client::builder()
|
let client = Client::builder()
|
||||||
.timeout(Duration::from_secs(config.timeout_seconds))
|
.timeout(Duration::from_secs(config.timeout_seconds))
|
||||||
.danger_accept_invalid_certs(!config.verify_ssl)
|
.danger_accept_invalid_certs(!config.verify_ssl)
|
||||||
.build()
|
.build()
|
||||||
.map_err(|e| RegistryError::Gitea(format!("Failed to create HTTP client: {}", e)))?;
|
.map_err(|e| RegistryError::Gitea(format!("Failed to create HTTP client: {}", e)))?;
|
||||||
|
|
||||||
// Generate backend ID from config URL and organization, or use provided ID
|
|
||||||
let backend_id = config
|
let backend_id = config
|
||||||
.id
|
.id
|
||||||
.clone()
|
.clone()
|
||||||
|
|||||||
@ -10,6 +10,8 @@ pub mod cache;
|
|||||||
pub mod client;
|
pub mod client;
|
||||||
pub mod config;
|
pub mod config;
|
||||||
pub mod error;
|
pub mod error;
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub mod events;
|
||||||
pub mod gitea;
|
pub mod gitea;
|
||||||
pub mod models;
|
pub mod models;
|
||||||
pub mod oci;
|
pub mod oci;
|
||||||
|
|||||||
@ -3,7 +3,7 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use axum::Router;
|
use axum::Router;
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
use extension_registry::{ExtensionRegistry, API_VERSION, DEFAULT_PORT};
|
use extension_registry::{config::Config, ExtensionRegistry, API_VERSION, DEFAULT_PORT};
|
||||||
use tokio::net::TcpListener;
|
use tokio::net::TcpListener;
|
||||||
|
|
||||||
mod handlers;
|
mod handlers;
|
||||||
@ -36,19 +36,42 @@ struct Cli {
|
|||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() -> anyhow::Result<()> {
|
async fn main() -> anyhow::Result<()> {
|
||||||
// Initialize tracing
|
// Parse CLI arguments FIRST (so --help works before any other processing)
|
||||||
tracing_subscriber::fmt()
|
|
||||||
.with_env_filter(
|
|
||||||
tracing_subscriber::EnvFilter::from_default_env()
|
|
||||||
.add_directive("info".parse().unwrap()),
|
|
||||||
)
|
|
||||||
.init();
|
|
||||||
|
|
||||||
// Parse CLI arguments
|
|
||||||
let cli = Cli::parse();
|
let cli = Cli::parse();
|
||||||
|
|
||||||
|
// Initialize centralized observability (logging, metrics, health checks)
|
||||||
|
let _guard = observability::init_from_env("extension-registry", env!("CARGO_PKG_VERSION"))?;
|
||||||
|
|
||||||
|
// Check if extension-registry is enabled in deployment-mode.ncl
|
||||||
|
if let Ok(deployment) = platform_config::load_deployment_mode() {
|
||||||
|
if let Ok(enabled) = deployment.is_service_enabled("extension_registry") {
|
||||||
|
if !enabled {
|
||||||
|
tracing::warn!("⚠ Extension Registry is DISABLED in deployment-mode.ncl");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
tracing::info!("✓ Extension Registry is ENABLED in deployment-mode.ncl");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load configuration from Nickel or hierarchy
|
||||||
|
let mut config = Config::load()?;
|
||||||
|
|
||||||
|
// Apply CLI overrides if provided
|
||||||
|
if !cli.host.is_empty() && cli.host != "0.0.0.0" {
|
||||||
|
config.server.host = cli.host.clone();
|
||||||
|
}
|
||||||
|
if cli.port != 0 {
|
||||||
|
config.server.port = cli.port;
|
||||||
|
}
|
||||||
|
|
||||||
|
tracing::info!(
|
||||||
|
"🔧 Loaded extension-registry configuration (host: {}, port: {})",
|
||||||
|
config.server.host,
|
||||||
|
config.server.port
|
||||||
|
);
|
||||||
|
|
||||||
// Create registry service
|
// Create registry service
|
||||||
let addr: SocketAddr = format!("{}:{}", cli.host, cli.port).parse()?;
|
let addr: SocketAddr = format!("{}:{}", config.server.host, config.server.port).parse()?;
|
||||||
let registry = Arc::new(ExtensionRegistry::new(addr));
|
let registry = Arc::new(ExtensionRegistry::new(addr));
|
||||||
|
|
||||||
// Create application state
|
// Create application state
|
||||||
|
|||||||
@ -24,17 +24,17 @@ pub struct OciClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl OciClient {
|
impl OciClient {
|
||||||
/// Create new OCI client
|
/// Create new OCI client with a pre-resolved auth token.
|
||||||
pub fn new(config: &OciConfig) -> Result<Self> {
|
///
|
||||||
let auth_token = config.read_token()?;
|
/// Token resolution (file read or vault:// fetch) is the caller's
|
||||||
|
/// responsibility. Pass `None` for unauthenticated registry access.
|
||||||
|
pub fn new(config: &OciConfig, auth_token: Option<String>) -> Result<Self> {
|
||||||
let client = Client::builder()
|
let client = Client::builder()
|
||||||
.timeout(Duration::from_secs(config.timeout_seconds))
|
.timeout(Duration::from_secs(config.timeout_seconds))
|
||||||
.danger_accept_invalid_certs(!config.verify_ssl)
|
.danger_accept_invalid_certs(!config.verify_ssl)
|
||||||
.build()
|
.build()
|
||||||
.map_err(|e| RegistryError::Oci(format!("Failed to create HTTP client: {}", e)))?;
|
.map_err(|e| RegistryError::Oci(format!("Failed to create HTTP client: {}", e)))?;
|
||||||
|
|
||||||
// Generate backend ID from registry and namespace, or use provided ID
|
|
||||||
let backend_id = config
|
let backend_id = config
|
||||||
.id
|
.id
|
||||||
.clone()
|
.clone()
|
||||||
|
|||||||
@ -1,7 +1,7 @@
|
|||||||
use axum::body::Body;
|
use axum::body::Body;
|
||||||
use axum::http::{Request, StatusCode};
|
use axum::http::{Request, StatusCode};
|
||||||
use extension_registry::{build_routes, AppState, Config};
|
|
||||||
use extension_registry::config::OciConfig;
|
use extension_registry::config::OciConfig;
|
||||||
|
use extension_registry::{build_routes, AppState, Config};
|
||||||
use http_body_util::BodyExt;
|
use http_body_util::BodyExt;
|
||||||
use tower::ServiceExt;
|
use tower::ServiceExt;
|
||||||
|
|
||||||
@ -30,7 +30,9 @@ fn create_test_config() -> Config {
|
|||||||
#[ignore] // Requires OCI registry service to be running
|
#[ignore] // Requires OCI registry service to be running
|
||||||
async fn test_health_check() {
|
async fn test_health_check() {
|
||||||
let config = create_test_config();
|
let config = create_test_config();
|
||||||
let state = AppState::new(config).expect("Failed to create app state");
|
let state = AppState::new(config, None)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create app state");
|
||||||
let app = build_routes(state);
|
let app = build_routes(state);
|
||||||
|
|
||||||
let response = app
|
let response = app
|
||||||
@ -57,7 +59,9 @@ async fn test_health_check() {
|
|||||||
#[ignore] // Requires OCI registry or Gitea service to be running
|
#[ignore] // Requires OCI registry or Gitea service to be running
|
||||||
async fn test_list_extensions_empty() {
|
async fn test_list_extensions_empty() {
|
||||||
let config = create_test_config();
|
let config = create_test_config();
|
||||||
let state = AppState::new(config).expect("Failed to create app state");
|
let state = AppState::new(config, None)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create app state");
|
||||||
let app = build_routes(state);
|
let app = build_routes(state);
|
||||||
|
|
||||||
let response = app
|
let response = app
|
||||||
@ -84,7 +88,9 @@ async fn test_list_extensions_empty() {
|
|||||||
async fn test_get_nonexistent_extension() {
|
async fn test_get_nonexistent_extension() {
|
||||||
let config = create_test_config();
|
let config = create_test_config();
|
||||||
|
|
||||||
let state = AppState::new(config).expect("Failed to create app state");
|
let state = AppState::new(config, None)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create app state");
|
||||||
let app = build_routes(state);
|
let app = build_routes(state);
|
||||||
|
|
||||||
let response = app
|
let response = app
|
||||||
@ -105,7 +111,9 @@ async fn test_get_nonexistent_extension() {
|
|||||||
async fn test_metrics_endpoint() {
|
async fn test_metrics_endpoint() {
|
||||||
let config = create_test_config();
|
let config = create_test_config();
|
||||||
|
|
||||||
let state = AppState::new(config).expect("Failed to create app state");
|
let state = AppState::new(config, None)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create app state");
|
||||||
let app = build_routes(state);
|
let app = build_routes(state);
|
||||||
|
|
||||||
let response = app
|
let response = app
|
||||||
@ -131,7 +139,9 @@ async fn test_metrics_endpoint() {
|
|||||||
async fn test_cache_stats_endpoint() {
|
async fn test_cache_stats_endpoint() {
|
||||||
let config = create_test_config();
|
let config = create_test_config();
|
||||||
|
|
||||||
let state = AppState::new(config).expect("Failed to create app state");
|
let state = AppState::new(config, None)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create app state");
|
||||||
let app = build_routes(state);
|
let app = build_routes(state);
|
||||||
|
|
||||||
let response = app
|
let response = app
|
||||||
@ -159,7 +169,9 @@ async fn test_cache_stats_endpoint() {
|
|||||||
async fn test_invalid_extension_type() {
|
async fn test_invalid_extension_type() {
|
||||||
let config = create_test_config();
|
let config = create_test_config();
|
||||||
|
|
||||||
let state = AppState::new(config).expect("Failed to create app state");
|
let state = AppState::new(config, None)
|
||||||
|
.await
|
||||||
|
.expect("Failed to create app state");
|
||||||
let app = build_routes(state);
|
let app = build_routes(state);
|
||||||
|
|
||||||
let response = app
|
let response = app
|
||||||
|
|||||||
@ -0,0 +1,30 @@
|
|||||||
|
{
|
||||||
|
"_name_or_path": "BAAI/bge-small-en-v1.5",
|
||||||
|
"architectures": [
|
||||||
|
"BertModel"
|
||||||
|
],
|
||||||
|
"attention_probs_dropout_prob": 0.1,
|
||||||
|
"classifier_dropout": null,
|
||||||
|
"hidden_act": "gelu",
|
||||||
|
"hidden_dropout_prob": 0.1,
|
||||||
|
"hidden_size": 384,
|
||||||
|
"id2label": {
|
||||||
|
"0": "LABEL_0"
|
||||||
|
},
|
||||||
|
"initializer_range": 0.02,
|
||||||
|
"intermediate_size": 1536,
|
||||||
|
"label2id": {
|
||||||
|
"LABEL_0": 0
|
||||||
|
},
|
||||||
|
"layer_norm_eps": 1e-12,
|
||||||
|
"max_position_embeddings": 512,
|
||||||
|
"model_type": "bert",
|
||||||
|
"num_attention_heads": 12,
|
||||||
|
"num_hidden_layers": 12,
|
||||||
|
"pad_token_id": 0,
|
||||||
|
"position_embedding_type": "absolute",
|
||||||
|
"transformers_version": "4.34.0.dev0",
|
||||||
|
"type_vocab_size": 2,
|
||||||
|
"use_cache": true,
|
||||||
|
"vocab_size": 30522
|
||||||
|
}
|
||||||
@ -0,0 +1,15 @@
|
|||||||
|
{
|
||||||
|
"clean_up_tokenization_spaces": true,
|
||||||
|
"cls_token": "[CLS]",
|
||||||
|
"do_basic_tokenize": true,
|
||||||
|
"do_lower_case": true,
|
||||||
|
"mask_token": "[MASK]",
|
||||||
|
"model_max_length": 512,
|
||||||
|
"never_split": null,
|
||||||
|
"pad_token": "[PAD]",
|
||||||
|
"sep_token": "[SEP]",
|
||||||
|
"strip_accents": null,
|
||||||
|
"tokenize_chinese_chars": true,
|
||||||
|
"tokenizer_class": "BertTokenizer",
|
||||||
|
"unk_token": "[UNK]"
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"cls_token": "[CLS]",
|
||||||
|
"mask_token": "[MASK]",
|
||||||
|
"pad_token": "[PAD]",
|
||||||
|
"sep_token": "[SEP]",
|
||||||
|
"unk_token": "[UNK]"
|
||||||
|
}
|
||||||
@ -0,0 +1 @@
|
|||||||
|
ea104dacec62c0de699686887e3f920caeb4f3e3
|
||||||
@ -0,0 +1 @@
|
|||||||
|
../../blobs/0c4d86248983ce46dfc09a9091b6f56bb0224550
|
||||||
@ -0,0 +1 @@
|
|||||||
|
../../../blobs/828e1496d7fabb79cfa4dcd84fa38625c0d3d21da474a00f08db0f559940cf35
|
||||||
@ -0,0 +1 @@
|
|||||||
|
../../blobs/a8b3208c2884c4efb86e49300fdd3dc877220cdf
|
||||||
@ -0,0 +1 @@
|
|||||||
|
../../blobs/688882a79f44442ddc1f60d70334a7ff5df0fb47
|
||||||
@ -0,0 +1 @@
|
|||||||
|
../../blobs/37fca74771bc76a8e01178ce3a6055a0995f8093
|
||||||
@ -29,6 +29,9 @@ toml = { workspace = true }
|
|||||||
# Platform configuration
|
# Platform configuration
|
||||||
platform-config = { workspace = true }
|
platform-config = { workspace = true }
|
||||||
|
|
||||||
|
# Centralized observability (logging, metrics, health, tracing)
|
||||||
|
observability = { workspace = true, features = ["logging", "metrics-prometheus", "health"] }
|
||||||
|
|
||||||
# Error handling
|
# Error handling
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
|
|||||||
@ -1,106 +1,454 @@
|
|||||||
//! Configuration management for the Provisioning MCP Server
|
//! Configuration management for the Provisioning MCP Server
|
||||||
|
|
||||||
use std::env;
|
use std::collections::HashMap;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::Path;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
use platform_config::ConfigLoader;
|
use platform_config::ConfigLoader;
|
||||||
use serde::{Deserialize, Serialize};
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
/// MCP Server configuration
|
||||||
|
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||||
pub struct Config {
|
pub struct Config {
|
||||||
/// Path to the provisioning system
|
pub mcp_server: MCPServerSettings,
|
||||||
pub provisioning_path: PathBuf,
|
|
||||||
|
|
||||||
/// AI provider configuration
|
|
||||||
pub ai: AIConfig,
|
|
||||||
|
|
||||||
/// Server configuration
|
|
||||||
pub server: ServerConfig,
|
|
||||||
|
|
||||||
/// Debug mode
|
|
||||||
pub debug: bool,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct AIConfig {
|
pub struct MCPServerSettings {
|
||||||
/// Enable AI capabilities
|
pub workspace: WorkspaceConfig,
|
||||||
pub enabled: bool,
|
pub server: ServerConfig,
|
||||||
|
pub protocol: ProtocolConfig,
|
||||||
/// AI provider (openai, claude, generic)
|
#[serde(default)]
|
||||||
pub provider: String,
|
pub tools: Option<ToolsConfig>,
|
||||||
|
#[serde(default)]
|
||||||
/// API endpoint URL
|
pub prompts: Option<PromptsConfig>,
|
||||||
pub api_endpoint: Option<String>,
|
#[serde(default)]
|
||||||
|
pub resources: Option<ResourcesConfig>,
|
||||||
/// API key (loaded from environment)
|
#[serde(default)]
|
||||||
pub api_key: Option<String>,
|
pub sampling: Option<SamplingConfig>,
|
||||||
|
#[serde(default)]
|
||||||
/// Model name
|
pub capabilities: Option<CapabilitiesConfig>,
|
||||||
pub model: Option<String>,
|
#[serde(default)]
|
||||||
|
pub orchestrator_integration: Option<OrchestratorIntegrationConfig>,
|
||||||
/// Maximum tokens for responses
|
#[serde(default)]
|
||||||
pub max_tokens: u32,
|
pub control_center_integration: Option<ControlCenterIntegrationConfig>,
|
||||||
|
#[serde(default)]
|
||||||
/// Temperature for creativity (0.0-1.0)
|
pub security: Option<SecurityConfig>,
|
||||||
pub temperature: f32,
|
#[serde(default)]
|
||||||
|
pub monitoring: Option<MonitoringConfig>,
|
||||||
/// Request timeout in seconds
|
#[serde(default)]
|
||||||
pub timeout: u64,
|
pub logging: Option<LoggingConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub performance: Option<PerformanceConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub build: Option<DockerBuildConfig>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Workspace configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct WorkspaceConfig {
|
||||||
|
pub name: String,
|
||||||
|
pub path: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub metadata: HashMap<String, serde_json::Value>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Server configuration
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct ServerConfig {
|
pub struct ServerConfig {
|
||||||
/// Server name/identifier
|
pub host: String,
|
||||||
pub name: String,
|
pub port: u16,
|
||||||
|
#[serde(default)]
|
||||||
/// Server version
|
pub workers: usize,
|
||||||
pub version: String,
|
|
||||||
|
|
||||||
/// Enable resource capabilities
|
|
||||||
pub enable_resources: bool,
|
|
||||||
|
|
||||||
/// Enable tool change notifications
|
|
||||||
pub enable_tool_notifications: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for Config {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
provisioning_path: PathBuf::from("/usr/local/provisioning"),
|
|
||||||
ai: AIConfig::default(),
|
|
||||||
server: ServerConfig::default(),
|
|
||||||
debug: false,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for AIConfig {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
enabled: true,
|
|
||||||
provider: "openai".to_string(),
|
|
||||||
api_endpoint: None,
|
|
||||||
api_key: None,
|
|
||||||
model: Some("gpt-4".to_string()),
|
|
||||||
max_tokens: 2048,
|
|
||||||
temperature: 0.3,
|
|
||||||
timeout: 30,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for ServerConfig {
|
impl Default for ServerConfig {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
name: "provisioning-server-rust".to_string(),
|
host: "127.0.0.1".to_string(),
|
||||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
port: 3000,
|
||||||
enable_resources: true,
|
workers: 4,
|
||||||
enable_tool_notifications: true,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Protocol configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ProtocolConfig {
|
||||||
|
#[serde(default = "default_version")]
|
||||||
|
pub version: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub transport: Option<TransportConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct TransportConfig {
|
||||||
|
pub endpoint: Option<String>,
|
||||||
|
pub ws_path: Option<String>,
|
||||||
|
pub timeout: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tools configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ToolsConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default = "default_max_concurrent_tools")]
|
||||||
|
pub max_concurrent: usize,
|
||||||
|
#[serde(default = "default_tool_timeout")]
|
||||||
|
pub timeout: u64,
|
||||||
|
pub categories: Option<Vec<String>>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub validation: Option<ValidationConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub cache: Option<CacheConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ValidationConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub strict_mode: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct CacheConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub ttl: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prompts configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct PromptsConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default = "default_max_templates")]
|
||||||
|
pub max_templates: usize,
|
||||||
|
#[serde(default)]
|
||||||
|
pub cache: Option<CacheConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub versioning: Option<VersioningConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct VersioningConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub max_versions: Option<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resources configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ResourcesConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default = "default_max_resource_size")]
|
||||||
|
pub max_size: u64,
|
||||||
|
pub types: Option<Vec<String>>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub cache: Option<ResourceCacheConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub validation: Option<ResourceValidationConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ResourceCacheConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub max_size_mb: Option<u64>,
|
||||||
|
pub ttl: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ResourceValidationConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub max_depth: Option<usize>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sampling configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct SamplingConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub max_tokens: Option<u64>,
|
||||||
|
pub model: Option<String>,
|
||||||
|
pub temperature: Option<f32>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub cache: Option<CacheConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Capabilities configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct CapabilitiesConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub tools: Option<CapabilityConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub prompts: Option<CapabilityConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub resources: Option<ResourcesCapabilityConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub sampling: Option<SamplingCapabilityConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct CapabilityConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub list_changed_callback: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ResourcesCapabilityConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enabled: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub list_changed_callback: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub subscribe: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct SamplingCapabilityConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Orchestrator integration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct OrchestratorIntegrationConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub endpoint: Option<String>,
|
||||||
|
pub token: Option<String>,
|
||||||
|
pub workspace: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Control Center integration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct ControlCenterIntegrationConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub endpoint: Option<String>,
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub enforce_rbac: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Security configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct SecurityConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub tls: Option<TlsConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub rbac: Option<RbacConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct TlsConfig {
|
||||||
|
pub enabled: bool,
|
||||||
|
pub cert_path: Option<String>,
|
||||||
|
pub key_path: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct RbacConfig {
|
||||||
|
pub enabled: bool,
|
||||||
|
pub policy_file: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Monitoring configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct MonitoringConfig {
|
||||||
|
pub enabled: bool,
|
||||||
|
pub metrics_interval_seconds: u64,
|
||||||
|
pub health_check_interval_seconds: u64,
|
||||||
|
pub min_memory_mb: u64,
|
||||||
|
pub max_cpu_percent: f64,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for MonitoringConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
enabled: true,
|
||||||
|
metrics_interval_seconds: 60,
|
||||||
|
health_check_interval_seconds: 30,
|
||||||
|
min_memory_mb: 512,
|
||||||
|
max_cpu_percent: 80.0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Logging configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct LoggingConfig {
|
||||||
|
pub level: String,
|
||||||
|
pub format: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub outputs: Option<Vec<String>>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub file: Option<FileLoggingConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub syslog: Option<SyslogConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub fields: Option<FieldsConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub sampling: Option<LogSamplingConfig>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub modules: Option<serde_json::Value>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub performance: Option<PerformanceLoggingConfig>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct FileLoggingConfig {
|
||||||
|
pub path: Option<String>,
|
||||||
|
pub max_size: Option<u64>,
|
||||||
|
pub max_backups: Option<usize>,
|
||||||
|
pub max_age: Option<u64>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub compress: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct SyslogConfig {
|
||||||
|
pub address: Option<String>,
|
||||||
|
pub facility: Option<String>,
|
||||||
|
pub protocol: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct FieldsConfig {
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub service_name: bool,
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub hostname: bool,
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub pid: bool,
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub timestamp: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub caller: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub stack_trace: bool,
|
||||||
|
pub custom: Option<Vec<String>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct LogSamplingConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub initial: Option<u64>,
|
||||||
|
pub thereafter: Option<u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct PerformanceLoggingConfig {
|
||||||
|
#[serde(default)]
|
||||||
|
pub enabled: bool,
|
||||||
|
pub slow_threshold: Option<u64>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub memory_info: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for LoggingConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
level: "info".to_string(),
|
||||||
|
format: "text".to_string(),
|
||||||
|
outputs: None,
|
||||||
|
file: None,
|
||||||
|
syslog: None,
|
||||||
|
fields: None,
|
||||||
|
sampling: None,
|
||||||
|
modules: None,
|
||||||
|
performance: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Performance configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct PerformanceConfig {
|
||||||
|
pub pool_size: Option<usize>,
|
||||||
|
pub buffer_size: Option<usize>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub compression: bool,
|
||||||
|
pub compression_level: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Docker build configuration
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct DockerBuildConfig {
|
||||||
|
pub base_image: String,
|
||||||
|
#[serde(default)]
|
||||||
|
pub build_args: HashMap<String, String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serde defaults
|
||||||
|
fn default_version() -> String {
|
||||||
|
"1.0".to_string()
|
||||||
|
}
|
||||||
|
fn default_true() -> bool {
|
||||||
|
true
|
||||||
|
}
|
||||||
|
fn default_max_concurrent_tools() -> usize {
|
||||||
|
5
|
||||||
|
}
|
||||||
|
fn default_tool_timeout() -> u64 {
|
||||||
|
30000
|
||||||
|
}
|
||||||
|
fn default_max_templates() -> usize {
|
||||||
|
100
|
||||||
|
}
|
||||||
|
fn default_max_resource_size() -> u64 {
|
||||||
|
104857600
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for MCPServerSettings {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
workspace: WorkspaceConfig {
|
||||||
|
name: "mcp-server".to_string(),
|
||||||
|
path: "/tmp/mcp-server".to_string(),
|
||||||
|
metadata: HashMap::new(),
|
||||||
|
},
|
||||||
|
server: ServerConfig::default(),
|
||||||
|
protocol: ProtocolConfig {
|
||||||
|
version: "1.0".to_string(),
|
||||||
|
transport: None,
|
||||||
|
},
|
||||||
|
tools: None,
|
||||||
|
prompts: None,
|
||||||
|
resources: None,
|
||||||
|
sampling: None,
|
||||||
|
capabilities: None,
|
||||||
|
orchestrator_integration: None,
|
||||||
|
control_center_integration: None,
|
||||||
|
security: None,
|
||||||
|
monitoring: Some(MonitoringConfig::default()),
|
||||||
|
logging: Some(LoggingConfig::default()),
|
||||||
|
performance: None,
|
||||||
|
build: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Config {
|
||||||
|
/// Load configuration from Nickel mcp-server.ncl
|
||||||
|
pub fn load() -> Result<Self> {
|
||||||
|
let config_json = platform_config::load_service_config_from_ncl("mcp-server")
|
||||||
|
.context("Failed to load mcp-server configuration from Nickel")?;
|
||||||
|
|
||||||
|
let config: Config = serde_json::from_value(config_json)
|
||||||
|
.context("Failed to deserialize mcp-server configuration")?;
|
||||||
|
|
||||||
|
Ok(config)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl ConfigLoader for Config {
|
impl ConfigLoader for Config {
|
||||||
fn service_name() -> &'static str {
|
fn service_name() -> &'static str {
|
||||||
"mcp-server"
|
"mcp-server"
|
||||||
@ -108,25 +456,27 @@ impl ConfigLoader for Config {
|
|||||||
|
|
||||||
fn load_from_hierarchy() -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>>
|
fn load_from_hierarchy() -> std::result::Result<Self, Box<dyn std::error::Error + Send + Sync>>
|
||||||
{
|
{
|
||||||
// Use platform-config's hierarchy resolution
|
|
||||||
let service = Self::service_name();
|
let service = Self::service_name();
|
||||||
|
|
||||||
if let Some(path) = platform_config::resolve_config_path(service) {
|
if let Some(path) = platform_config::resolve_config_path(service) {
|
||||||
return Self::from_path(&path);
|
return Self::from_path(&path);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback to defaults
|
|
||||||
Ok(Self::default())
|
Ok(Self::default())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn apply_env_overrides(
|
fn apply_env_overrides(
|
||||||
&mut self,
|
&mut self,
|
||||||
) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
) -> std::result::Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||||
self.load_from_env().map_err(|e| {
|
if let Ok(host) = std::env::var("MCP_SERVER_HOST") {
|
||||||
// Convert anyhow::Error to Box<dyn Error>
|
self.mcp_server.server.host = host;
|
||||||
let err_msg = format!("{}", e);
|
}
|
||||||
Box::new(std::io::Error::other(err_msg)) as Box<dyn std::error::Error + Send + Sync>
|
if let Ok(port) = std::env::var("MCP_SERVER_PORT") {
|
||||||
})
|
if let Ok(p) = port.parse() {
|
||||||
|
self.mcp_server.server.port = p;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn from_path<P: AsRef<Path>>(
|
fn from_path<P: AsRef<Path>>(
|
||||||
@ -139,7 +489,10 @@ impl ConfigLoader for Config {
|
|||||||
})?;
|
})?;
|
||||||
|
|
||||||
serde_json::from_value(json_value).map_err(|e| {
|
serde_json::from_value(json_value).map_err(|e| {
|
||||||
let err_msg = format!("Failed to deserialize config from {:?}: {}", path, e);
|
let err_msg = format!(
|
||||||
|
"Failed to deserialize mcp-server config from {:?}: {}",
|
||||||
|
path, e
|
||||||
|
);
|
||||||
Box::new(std::io::Error::new(
|
Box::new(std::io::Error::new(
|
||||||
std::io::ErrorKind::InvalidData,
|
std::io::ErrorKind::InvalidData,
|
||||||
err_msg,
|
err_msg,
|
||||||
@ -148,142 +501,14 @@ impl ConfigLoader for Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Config {
|
#[cfg(test)]
|
||||||
/// Load configuration with hierarchical fallback logic:
|
mod tests {
|
||||||
/// 1. Explicit config path (parameter or MCP_SERVER_CONFIG env var)
|
use super::*;
|
||||||
/// 2. Mode-specific config:
|
|
||||||
/// provisioning/platform/config/mcp-server.{mode}.ncl or .toml
|
|
||||||
/// 3. Built-in defaults
|
|
||||||
///
|
|
||||||
/// Then environment variables override specific fields.
|
|
||||||
pub fn load(
|
|
||||||
config_path: Option<PathBuf>,
|
|
||||||
provisioning_path: Option<PathBuf>,
|
|
||||||
debug: bool,
|
|
||||||
) -> Result<Self> {
|
|
||||||
let mut config = if let Some(path) = config_path {
|
|
||||||
Self::from_path(&path)
|
|
||||||
.map_err(|e| anyhow::anyhow!("Failed to load from path: {}", e))?
|
|
||||||
} else {
|
|
||||||
<Self as ConfigLoader>::load()
|
|
||||||
.map_err(|e| anyhow::anyhow!("Failed to load config: {}", e))?
|
|
||||||
};
|
|
||||||
|
|
||||||
// Override with command line arguments
|
#[test]
|
||||||
if let Some(path) = provisioning_path {
|
fn test_default_config() {
|
||||||
config.provisioning_path = path;
|
let config = Config::default();
|
||||||
}
|
assert_eq!(config.mcp_server.server.port, 3000);
|
||||||
config.debug = debug;
|
assert_eq!(config.mcp_server.protocol.version, "1.0");
|
||||||
|
|
||||||
// Validate configuration
|
|
||||||
config.validate()?;
|
|
||||||
|
|
||||||
Ok(config)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Load configuration from file (legacy wrapper for compatibility)
|
|
||||||
fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
|
|
||||||
Self::from_path(&path).map_err(|e| anyhow::anyhow!("Failed to load from file: {}", e))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Load configuration from environment variables
|
|
||||||
fn load_from_env(&mut self) -> Result<()> {
|
|
||||||
// Provisioning path
|
|
||||||
if let Ok(path) = env::var("PROVISIONING_PATH") {
|
|
||||||
self.provisioning_path = PathBuf::from(path);
|
|
||||||
}
|
|
||||||
|
|
||||||
// AI configuration
|
|
||||||
if let Ok(enabled) = env::var("PROVISIONING_AI_ENABLED") {
|
|
||||||
self.ai.enabled = enabled.parse().unwrap_or(true);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(provider) = env::var("PROVISIONING_AI_PROVIDER") {
|
|
||||||
self.ai.provider = provider;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(endpoint) = env::var("PROVISIONING_AI_ENDPOINT") {
|
|
||||||
self.ai.api_endpoint = Some(endpoint);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Load API keys from environment
|
|
||||||
self.ai.api_key = match self.ai.provider.as_str() {
|
|
||||||
"openai" => env::var("OPENAI_API_KEY").ok(),
|
|
||||||
"claude" => env::var("ANTHROPIC_API_KEY").ok(),
|
|
||||||
"generic" => env::var("LLM_API_KEY").ok(),
|
|
||||||
_ => None,
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Ok(model) = env::var("PROVISIONING_AI_MODEL") {
|
|
||||||
self.ai.model = Some(model);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(max_tokens) = env::var("PROVISIONING_AI_MAX_TOKENS") {
|
|
||||||
self.ai.max_tokens = max_tokens.parse().unwrap_or(2048);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(temperature) = env::var("PROVISIONING_AI_TEMPERATURE") {
|
|
||||||
self.ai.temperature = temperature.parse().unwrap_or(0.3);
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Ok(timeout) = env::var("PROVISIONING_AI_TIMEOUT") {
|
|
||||||
self.ai.timeout = timeout.parse().unwrap_or(30);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug mode
|
|
||||||
if let Ok(debug) = env::var("PROVISIONING_DEBUG") {
|
|
||||||
self.debug = debug.parse().unwrap_or(false);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Validate the configuration
|
|
||||||
fn validate(&self) -> Result<()> {
|
|
||||||
// Validate provisioning path exists
|
|
||||||
if !self.provisioning_path.exists() {
|
|
||||||
return Err(anyhow::anyhow!(
|
|
||||||
"Provisioning path does not exist: {}",
|
|
||||||
self.provisioning_path.display()
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the main provisioning script exists
|
|
||||||
let provisioning_script = self.provisioning_path.join("core/nulib/provisioning");
|
|
||||||
if !provisioning_script.exists() {
|
|
||||||
return Err(anyhow::anyhow!(
|
|
||||||
"Provisioning script not found: {}",
|
|
||||||
provisioning_script.display()
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate AI configuration if enabled
|
|
||||||
if self.ai.enabled {
|
|
||||||
if self.ai.api_key.is_none() {
|
|
||||||
tracing::warn!(
|
|
||||||
"AI is enabled but no API key found for provider: {}",
|
|
||||||
self.ai.provider
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.ai.temperature < 0.0 || self.ai.temperature > 1.0 {
|
|
||||||
return Err(anyhow::anyhow!(
|
|
||||||
"AI temperature must be between 0.0 and 1.0, got: {}",
|
|
||||||
self.ai.temperature
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the provisioning command path
|
|
||||||
pub fn provisioning_command(&self) -> PathBuf {
|
|
||||||
self.provisioning_path.join("core/nulib/provisioning")
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Check if AI is available
|
|
||||||
pub fn is_ai_available(&self) -> bool {
|
|
||||||
self.ai.enabled && self.ai.api_key.is_some()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -233,14 +233,14 @@ impl ProvisioningEngine {
|
|||||||
fn execute_provisioning_command(&self, args: &[String]) -> Result<String> {
|
fn execute_provisioning_command(&self, args: &[String]) -> Result<String> {
|
||||||
debug!("Executing command: {:?}", args);
|
debug!("Executing command: {:?}", args);
|
||||||
|
|
||||||
let cmd_path = self.config.provisioning_command();
|
let cmd_path = "provisioning"; // Use default command name
|
||||||
|
|
||||||
let output = std::process::Command::new(&cmd_path)
|
let output = std::process::Command::new(cmd_path)
|
||||||
.args(args)
|
.args(args)
|
||||||
.stdout(Stdio::piped())
|
.stdout(Stdio::piped())
|
||||||
.stderr(Stdio::piped())
|
.stderr(Stdio::piped())
|
||||||
.output()
|
.output()
|
||||||
.with_context(|| format!("Failed to execute command: {}", cmd_path.display()))?;
|
.with_context(|| format!("Failed to execute command: {}", cmd_path))?;
|
||||||
|
|
||||||
if !output.status.success() {
|
if !output.status.success() {
|
||||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -10,13 +10,27 @@ use serde_json::{json, Value};
|
|||||||
pub struct ProvisioningTools {
|
pub struct ProvisioningTools {
|
||||||
client: Client,
|
client: Client,
|
||||||
api_base_url: String,
|
api_base_url: String,
|
||||||
|
api_port: u16,
|
||||||
|
dashboard_port: u16,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ProvisioningTools {
|
impl ProvisioningTools {
|
||||||
pub fn new(api_base_url: Option<String>) -> Self {
|
pub fn new(api_base_url: Option<String>) -> Self {
|
||||||
|
Self::with_ports(api_base_url, None, None)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_ports(
|
||||||
|
api_base_url: Option<String>,
|
||||||
|
api_port: Option<u16>,
|
||||||
|
dashboard_port: Option<u16>,
|
||||||
|
) -> Self {
|
||||||
|
let api_port = api_port.unwrap_or(3000);
|
||||||
|
let dashboard_port = dashboard_port.unwrap_or(8080);
|
||||||
Self {
|
Self {
|
||||||
client: Client::new(),
|
client: Client::new(),
|
||||||
api_base_url: api_base_url.unwrap_or_else(|| "http://localhost:3000".to_string()),
|
api_base_url: api_base_url.unwrap_or_else(|| format!("http://localhost:{}", api_port)),
|
||||||
|
api_port,
|
||||||
|
dashboard_port,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -163,7 +177,7 @@ impl ProvisioningTools {
|
|||||||
|
|
||||||
/// Start API server
|
/// Start API server
|
||||||
pub async fn start_api_server(&self, port: Option<u16>) -> Result<Value> {
|
pub async fn start_api_server(&self, port: Option<u16>) -> Result<Value> {
|
||||||
let port = port.unwrap_or(3000);
|
let port = port.unwrap_or(self.api_port);
|
||||||
let port_str = port.to_string();
|
let port_str = port.to_string();
|
||||||
|
|
||||||
self.execute_provisioning_command(&["api", "start", "--port", &port_str, "--background"])
|
self.execute_provisioning_command(&["api", "start", "--port", &port_str, "--background"])
|
||||||
@ -210,7 +224,7 @@ impl ProvisioningTools {
|
|||||||
|
|
||||||
/// Start existing dashboard
|
/// Start existing dashboard
|
||||||
pub async fn start_dashboard(&self, name: &str, port: Option<u16>) -> Result<Value> {
|
pub async fn start_dashboard(&self, name: &str, port: Option<u16>) -> Result<Value> {
|
||||||
let port = port.unwrap_or(8080);
|
let port = port.unwrap_or(self.dashboard_port);
|
||||||
let port_str = port.to_string();
|
let port_str = port.to_string();
|
||||||
|
|
||||||
self.execute_provisioning_command(&["dashboard", "start", name, &port_str])
|
self.execute_provisioning_command(&["dashboard", "start", name, &port_str])
|
||||||
|
|||||||
@ -49,6 +49,11 @@ bollard = { workspace = true }
|
|||||||
|
|
||||||
# HTTP client for DNS/OCI/services
|
# HTTP client for DNS/OCI/services
|
||||||
reqwest = { workspace = true }
|
reqwest = { workspace = true }
|
||||||
|
zeroize = { workspace = true }
|
||||||
|
hmac = { workspace = true }
|
||||||
|
hex = { workspace = true }
|
||||||
|
git2 = { workspace = true, optional = true }
|
||||||
|
parking_lot = { workspace = true }
|
||||||
|
|
||||||
# HTTP service clients (machines, init, AI) - enables remote service calls
|
# HTTP service clients (machines, init, AI) - enables remote service calls
|
||||||
service-clients = { workspace = true }
|
service-clients = { workspace = true }
|
||||||
@ -56,6 +61,9 @@ service-clients = { workspace = true }
|
|||||||
# Platform configuration management
|
# Platform configuration management
|
||||||
platform-config = { workspace = true }
|
platform-config = { workspace = true }
|
||||||
|
|
||||||
|
# Centralized observability (logging, metrics, health, tracing)
|
||||||
|
observability = { workspace = true, features = ["logging", "metrics-prometheus", "health"] }
|
||||||
|
|
||||||
# LRU cache for OCI manifests
|
# LRU cache for OCI manifests
|
||||||
lru = { workspace = true }
|
lru = { workspace = true }
|
||||||
|
|
||||||
@ -94,6 +102,10 @@ shellexpand = { workspace = true }
|
|||||||
# SurrealDB storage backend (optional)
|
# SurrealDB storage backend (optional)
|
||||||
surrealdb = { workspace = true, optional = true }
|
surrealdb = { workspace = true, optional = true }
|
||||||
|
|
||||||
|
# Platform shared crates
|
||||||
|
platform-nats = { workspace = true, optional = true }
|
||||||
|
platform-db = { workspace = true, optional = true }
|
||||||
|
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
# FEATURES - Module Organization for Coupling Reduction
|
# FEATURES - Module Organization for Coupling Reduction
|
||||||
# ============================================================================
|
# ============================================================================
|
||||||
@ -141,6 +153,12 @@ http-api = ["core"]
|
|||||||
# SurrealDB: Optional storage backend
|
# SurrealDB: Optional storage backend
|
||||||
surrealdb = ["dep:surrealdb"]
|
surrealdb = ["dep:surrealdb"]
|
||||||
|
|
||||||
|
# NATS event bus integration
|
||||||
|
nats = ["dep:platform-nats", "dep:platform-db"]
|
||||||
|
|
||||||
|
# GitOps webhook handler (requires git2)
|
||||||
|
gitops = ["dep:git2"]
|
||||||
|
|
||||||
# Default: All features enabled
|
# Default: All features enabled
|
||||||
default = [
|
default = [
|
||||||
"core",
|
"core",
|
||||||
|
|||||||
@ -97,6 +97,9 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_default_builder_name() {
|
fn test_default_builder_name() {
|
||||||
let args = Args {
|
let args = Args {
|
||||||
|
config: None,
|
||||||
|
config_dir: None,
|
||||||
|
mode: None,
|
||||||
port: 9090,
|
port: 9090,
|
||||||
data_dir: "./data".to_string(),
|
data_dir: "./data".to_string(),
|
||||||
storage_type: "filesystem".to_string(),
|
storage_type: "filesystem".to_string(),
|
||||||
|
|||||||
108
crates/orchestrator/src/audit/collector.rs
Normal file
108
crates/orchestrator/src/audit/collector.rs
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
//! Global audit event collector.
|
||||||
|
//!
|
||||||
|
//! Subscribes to `provisioning.audit.>` via JetStream durable consumer,
|
||||||
|
//! buffers events, and batch-inserts into SurrealDB `audit:events`.
|
||||||
|
//! Flush triggers: 100 events accumulated OR 1-second timer, whichever first.
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use futures::StreamExt;
|
||||||
|
use platform_db::SurrealPool;
|
||||||
|
use platform_nats::NatsBridge;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use tokio::time;
|
||||||
|
use tracing::{error, info, warn};
|
||||||
|
|
||||||
|
const BATCH_SIZE: usize = 100;
|
||||||
|
const FLUSH_INTERVAL: Duration = Duration::from_secs(1);
|
||||||
|
const STREAM_NAME: &str = "AUDIT";
|
||||||
|
const CONSUMER_NAME: &str = "audit-collector";
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct AuditRecord {
|
||||||
|
pub source_service: String,
|
||||||
|
pub event_type: String,
|
||||||
|
pub actor: Option<String>,
|
||||||
|
pub target: Option<String>,
|
||||||
|
pub session_id: Option<String>,
|
||||||
|
pub workspace_id: Option<String>,
|
||||||
|
pub timestamp: chrono::DateTime<chrono::Utc>,
|
||||||
|
pub payload: serde_json::Value,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Runs the audit collector loop until the process shuts down.
|
||||||
|
///
|
||||||
|
/// Intended to be spawned as a background `tokio::task`.
|
||||||
|
pub async fn run_audit_collector(nats: Arc<NatsBridge>, db: Arc<SurrealPool>) {
|
||||||
|
let mut ticker = time::interval(FLUSH_INTERVAL);
|
||||||
|
let mut buffer: Vec<AuditRecord> = Vec::with_capacity(BATCH_SIZE);
|
||||||
|
|
||||||
|
let mut messages = match nats.subscribe_pull(STREAM_NAME, CONSUMER_NAME).await {
|
||||||
|
Ok(m) => m,
|
||||||
|
Err(e) => {
|
||||||
|
error!("audit collector: failed to subscribe — {e}");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("audit collector running (batch={BATCH_SIZE}, flush=1s)");
|
||||||
|
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
msg = messages.next() => {
|
||||||
|
match msg {
|
||||||
|
Some(Ok(m)) => {
|
||||||
|
match serde_json::from_slice::<AuditRecord>(&m.payload) {
|
||||||
|
Ok(event) => {
|
||||||
|
buffer.push(event);
|
||||||
|
if buffer.len() >= BATCH_SIZE {
|
||||||
|
let batch = std::mem::take(&mut buffer);
|
||||||
|
flush_batch(batch, Arc::clone(&db)).await;
|
||||||
|
}
|
||||||
|
if let Err(e) = m.ack().await {
|
||||||
|
warn!("audit collector: ack failed — {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("audit collector: deserialize failed — {e}");
|
||||||
|
let _ = m.ack().await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Some(Err(e)) => {
|
||||||
|
error!("audit collector: message error — {e}");
|
||||||
|
}
|
||||||
|
None => break,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ = ticker.tick() => {
|
||||||
|
if !buffer.is_empty() {
|
||||||
|
let batch = std::mem::take(&mut buffer);
|
||||||
|
flush_batch(batch, Arc::clone(&db)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn flush_batch(events: Vec<AuditRecord>, db: Arc<SurrealPool>) {
|
||||||
|
let surreal = db.db();
|
||||||
|
if let Err(e) = surreal.query("USE NS audit DB provisioning").await {
|
||||||
|
error!("audit flush: USE NS failed — {e}");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let count = events.len();
|
||||||
|
for event in events {
|
||||||
|
if let Err(e) = surreal
|
||||||
|
.create::<Option<AuditRecord>>("events")
|
||||||
|
.content(event)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
error!("audit flush: INSERT failed — {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
info!(count = count, "audit events flushed to SurrealDB");
|
||||||
|
}
|
||||||
@ -35,6 +35,8 @@
|
|||||||
//! # }
|
//! # }
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub mod collector;
|
||||||
pub mod logger;
|
pub mod logger;
|
||||||
pub mod storage;
|
pub mod storage;
|
||||||
pub mod types;
|
pub mod types;
|
||||||
|
|||||||
@ -279,6 +279,14 @@ pub enum ActionType {
|
|||||||
SystemBackup,
|
SystemBackup,
|
||||||
SystemRestore,
|
SystemRestore,
|
||||||
|
|
||||||
|
// Workspace operations
|
||||||
|
WorkspaceCreate,
|
||||||
|
WorkspaceDelete,
|
||||||
|
WorkspaceUpdate,
|
||||||
|
WorkspaceSwitch,
|
||||||
|
WorkspaceList,
|
||||||
|
WorkspaceSync,
|
||||||
|
|
||||||
// Unknown/Custom
|
// Unknown/Custom
|
||||||
Unknown,
|
Unknown,
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@ -47,6 +47,12 @@ pub struct CreateServerWorkflow {
|
|||||||
pub servers: Vec<String>,
|
pub servers: Vec<String>,
|
||||||
pub check_mode: bool,
|
pub check_mode: bool,
|
||||||
pub wait: bool,
|
pub wait: bool,
|
||||||
|
// Rendered and compressed script prepared by CLI
|
||||||
|
// If present, orchestrator executes this script directly without constructing commands
|
||||||
|
#[serde(default)]
|
||||||
|
pub script_compressed: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub script_encoding: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
@ -85,20 +91,43 @@ pub fn validate_storage_type(s: &str) -> Result<String, String> {
|
|||||||
|
|
||||||
// CLI arguments structure
|
// CLI arguments structure
|
||||||
#[derive(clap::Parser, Clone)]
|
#[derive(clap::Parser, Clone)]
|
||||||
#[command(author, version, about = "Multi-service task orchestration and batch workflow engine")]
|
#[command(
|
||||||
#[command(long_about = "Orchestrator - Manages distributed task execution, batch workflows, and cluster provisioning with state management and rollback recovery")]
|
author,
|
||||||
#[command(after_help = "CONFIGURATION HIERARCHY (highest to lowest priority):\n 1. CLI: -c/--config <path> (explicit file)\n 2. CLI: --config-dir <dir> --mode <mode> (directory + mode)\n 3. CLI: --config-dir <dir> (searches for orchestrator.ncl|toml|json)\n 4. CLI: --mode <mode> (searches in provisioning/platform/config/)\n 5. ENV: ORCHESTRATOR_CONFIG (explicit file)\n 6. ENV: PROVISIONING_CONFIG_DIR (searches for orchestrator.ncl|toml|json)\n 7. ENV: ORCHESTRATOR_MODE (mode-based in default path)\n 8. Built-in defaults\n\nEXAMPLES:\n # Explicit config file\n orchestrator -c ~/my-config.toml\n\n # Config directory with mode\n orchestrator --config-dir ~/configs --mode enterprise\n\n # Config directory (auto-discover file)\n orchestrator --config-dir ~/.config/provisioning\n\n # Via environment variables\n export ORCHESTRATOR_CONFIG=~/.config/orchestrator.toml\n orchestrator\n\n # Mode-based configuration\n orchestrator --mode solo")]
|
version,
|
||||||
|
about = "Multi-service task orchestration and batch workflow engine"
|
||||||
|
)]
|
||||||
|
#[command(
|
||||||
|
long_about = "Orchestrator - Manages distributed task execution, batch workflows, and cluster \
|
||||||
|
provisioning with state management and rollback recovery"
|
||||||
|
)]
|
||||||
|
#[command(
|
||||||
|
after_help = "CONFIGURATION HIERARCHY (highest to lowest priority):\n 1. CLI: -c/--config \
|
||||||
|
<path> (explicit file)\n 2. CLI: --config-dir <dir> --mode <mode> (directory + \
|
||||||
|
mode)\n 3. CLI: --config-dir <dir> (searches for orchestrator.ncl|toml|json)\n \
|
||||||
|
4. CLI: --mode <mode> (searches in provisioning/platform/config/)\n 5. ENV: \
|
||||||
|
ORCHESTRATOR_CONFIG (explicit file)\n 6. ENV: PROVISIONING_CONFIG_DIR \
|
||||||
|
(searches for orchestrator.ncl|toml|json)\n 7. ENV: ORCHESTRATOR_MODE \
|
||||||
|
(mode-based in default path)\n 8. Built-in defaults\n\nEXAMPLES:\n # Explicit \
|
||||||
|
config file\n orchestrator -c ~/my-config.toml\n\n # Config directory with \
|
||||||
|
mode\n orchestrator --config-dir ~/configs --mode enterprise\n\n # Config \
|
||||||
|
directory (auto-discover file)\n orchestrator --config-dir \
|
||||||
|
~/.config/provisioning\n\n # Via environment variables\n export \
|
||||||
|
ORCHESTRATOR_CONFIG=~/.config/orchestrator.toml\n orchestrator\n\n # \
|
||||||
|
Mode-based configuration\n orchestrator --mode solo"
|
||||||
|
)]
|
||||||
pub struct Args {
|
pub struct Args {
|
||||||
/// Configuration file path (highest priority)
|
/// Configuration file path (highest priority)
|
||||||
///
|
///
|
||||||
/// Accepts absolute or relative path. Supports .ncl, .toml, and .json formats.
|
/// Accepts absolute or relative path. Supports .ncl, .toml, and .json
|
||||||
|
/// formats.
|
||||||
#[arg(short = 'c', long, env = "ORCHESTRATOR_CONFIG")]
|
#[arg(short = 'c', long, env = "ORCHESTRATOR_CONFIG")]
|
||||||
pub config: Option<std::path::PathBuf>,
|
pub config: Option<std::path::PathBuf>,
|
||||||
|
|
||||||
/// Configuration directory (searches for orchestrator.ncl|toml|json)
|
/// Configuration directory (searches for orchestrator.ncl|toml|json)
|
||||||
///
|
///
|
||||||
/// Searches for configuration files in order of preference: .ncl > .toml > .json
|
/// Searches for configuration files in order of preference: .ncl > .toml >
|
||||||
/// Can also search for mode-specific files: orchestrator.{mode}.{ncl|toml|json}
|
/// .json Can also search for mode-specific files:
|
||||||
|
/// orchestrator.{mode}.{ncl|toml|json}
|
||||||
#[arg(long, env = "PROVISIONING_CONFIG_DIR")]
|
#[arg(long, env = "PROVISIONING_CONFIG_DIR")]
|
||||||
pub config_dir: Option<std::path::PathBuf>,
|
pub config_dir: Option<std::path::PathBuf>,
|
||||||
|
|
||||||
@ -109,9 +138,9 @@ pub struct Args {
|
|||||||
#[arg(short = 'm', long, env = "ORCHESTRATOR_MODE")]
|
#[arg(short = 'm', long, env = "ORCHESTRATOR_MODE")]
|
||||||
pub mode: Option<String>,
|
pub mode: Option<String>,
|
||||||
|
|
||||||
/// Port to listen on
|
/// Port to listen on (overrides config if specified)
|
||||||
#[arg(short = 'p', long, default_value = "9090")]
|
#[arg(short = 'p', long)]
|
||||||
pub port: u16,
|
pub port: Option<u16>,
|
||||||
|
|
||||||
/// Data directory for storage
|
/// Data directory for storage
|
||||||
#[arg(short = 'd', long, default_value = "./data")]
|
#[arg(short = 'd', long, default_value = "./data")]
|
||||||
@ -203,6 +232,9 @@ pub mod break_glass;
|
|||||||
#[cfg(feature = "compliance")]
|
#[cfg(feature = "compliance")]
|
||||||
pub mod compliance;
|
pub mod compliance;
|
||||||
|
|
||||||
|
// GitOps: Webhook handler and git pull executor
|
||||||
|
pub mod webhooks;
|
||||||
|
|
||||||
// Platform: Infrastructure integration
|
// Platform: Infrastructure integration
|
||||||
#[cfg(feature = "platform")]
|
#[cfg(feature = "platform")]
|
||||||
pub mod dns;
|
pub mod dns;
|
||||||
|
|||||||
@ -1,3 +1,4 @@
|
|||||||
|
use std::env;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
@ -9,11 +10,13 @@ use axum::{
|
|||||||
Router,
|
Router,
|
||||||
};
|
};
|
||||||
use clap::Parser;
|
use clap::Parser;
|
||||||
|
use platform_config::{load_deployment_mode, PlatformStartup};
|
||||||
// Use types from the library
|
// Use types from the library
|
||||||
use provisioning_orchestrator::{
|
use provisioning_orchestrator::{
|
||||||
audit::{AuditEvent, AuditFilter, AuditQuery, RetentionPolicy, SiemFormat},
|
audit::{AuditEvent, AuditFilter, AuditQuery, RetentionPolicy, SiemFormat},
|
||||||
batch::{BatchOperationRequest, BatchOperationResult},
|
batch::{BatchOperationRequest, BatchOperationResult},
|
||||||
compliance_routes,
|
compliance_routes,
|
||||||
|
config::OrchestratorConfig,
|
||||||
monitor::{MonitoringEvent, MonitoringEventType, SystemHealthStatus},
|
monitor::{MonitoringEvent, MonitoringEventType, SystemHealthStatus},
|
||||||
rollback::{Checkpoint, RollbackResult, RollbackStatistics},
|
rollback::{Checkpoint, RollbackResult, RollbackStatistics},
|
||||||
state::{ProgressInfo, StateManagerStatistics, StateSnapshot, SystemMetrics},
|
state::{ProgressInfo, StateManagerStatistics, StateSnapshot, SystemMetrics},
|
||||||
@ -21,6 +24,7 @@ use provisioning_orchestrator::{
|
|||||||
CreateTestEnvironmentRequest, RunTestRequest, TestEnvironment, TestEnvironmentResponse,
|
CreateTestEnvironmentRequest, RunTestRequest, TestEnvironment, TestEnvironmentResponse,
|
||||||
TestResult,
|
TestResult,
|
||||||
},
|
},
|
||||||
|
webhooks::{handle_webhook, WebhookState, WorkspaceRegistry},
|
||||||
workflow::WorkflowExecutionState,
|
workflow::WorkflowExecutionState,
|
||||||
AppState, Args, ClusterWorkflow, CreateServerWorkflow, SharedState, TaskStatus,
|
AppState, Args, ClusterWorkflow, CreateServerWorkflow, SharedState, TaskStatus,
|
||||||
TaskservWorkflow, WorkflowTask,
|
TaskservWorkflow, WorkflowTask,
|
||||||
@ -63,29 +67,37 @@ async fn create_server_workflow(
|
|||||||
) -> Result<Json<ApiResponse<String>>, StatusCode> {
|
) -> Result<Json<ApiResponse<String>>, StatusCode> {
|
||||||
let task_id = Uuid::new_v4().to_string();
|
let task_id = Uuid::new_v4().to_string();
|
||||||
|
|
||||||
let task = WorkflowTask {
|
// PROPER ARCHITECTURE: CLI renders script, orchestrator executes it
|
||||||
|
// If script_compressed is provided: execute it (that's ALL the orchestrator
|
||||||
|
// does) If NOT provided: error (legacy mode should not happen)
|
||||||
|
let task = if let Some(ref script_compressed) = workflow.script_compressed {
|
||||||
|
// CLI has provided the COMPLETE SCRIPT ready to execute
|
||||||
|
// No command construction, no decision logic
|
||||||
|
// Just: decompress -> execute
|
||||||
|
|
||||||
|
// Store script in temp file for execution
|
||||||
|
let script_file = format!("/tmp/orchestrator_script_{}.tar.gz.b64", task_id);
|
||||||
|
std::fs::write(&script_file, script_compressed).ok();
|
||||||
|
|
||||||
|
WorkflowTask {
|
||||||
id: task_id.clone(),
|
id: task_id.clone(),
|
||||||
name: "create_servers".to_string(),
|
name: if workflow.servers.is_empty() {
|
||||||
command: format!("{} servers create", state.args.provisioning_path),
|
"execute_servers_script_all".to_string()
|
||||||
|
} else {
|
||||||
|
format!("execute_servers_script_{}", workflow.servers.join("_"))
|
||||||
|
},
|
||||||
|
// Execute the decompressed script directly
|
||||||
|
command: "bash".to_string(),
|
||||||
args: vec![
|
args: vec![
|
||||||
"--infra".to_string(),
|
"-c".to_string(),
|
||||||
workflow.infra.clone(),
|
// Decompress: base64 decode -> gunzip -> extract script.sh -> execute
|
||||||
"--settings".to_string(),
|
// CRITICAL: Use '+x' to DISABLE debug mode and prevent credential exposure
|
||||||
workflow.settings.clone(),
|
// Even if script contains 'set -x', it won't execute with +x flag
|
||||||
if workflow.check_mode {
|
format!(
|
||||||
"--check".to_string()
|
"base64 -d < {} | gunzip | tar -xOf - script.sh | bash +x",
|
||||||
} else {
|
script_file
|
||||||
"".to_string()
|
),
|
||||||
},
|
],
|
||||||
if workflow.wait {
|
|
||||||
"--wait".to_string()
|
|
||||||
} else {
|
|
||||||
"".to_string()
|
|
||||||
},
|
|
||||||
]
|
|
||||||
.into_iter()
|
|
||||||
.filter(|s| !s.is_empty())
|
|
||||||
.collect(),
|
|
||||||
dependencies: vec![],
|
dependencies: vec![],
|
||||||
status: TaskStatus::Pending,
|
status: TaskStatus::Pending,
|
||||||
created_at: chrono::Utc::now(),
|
created_at: chrono::Utc::now(),
|
||||||
@ -93,15 +105,65 @@ async fn create_server_workflow(
|
|||||||
completed_at: None,
|
completed_at: None,
|
||||||
output: None,
|
output: None,
|
||||||
error: None,
|
error: None,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// LEGACY: Construct command from parameters (deprecated)
|
||||||
|
let mut args = vec![
|
||||||
|
"--infra".to_string(),
|
||||||
|
workflow.infra.clone(),
|
||||||
|
"--settings".to_string(),
|
||||||
|
workflow.settings.clone(),
|
||||||
|
];
|
||||||
|
|
||||||
|
for server in &workflow.servers {
|
||||||
|
args.push(server.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
if workflow.check_mode {
|
||||||
|
args.push("--check".to_string());
|
||||||
|
}
|
||||||
|
if workflow.wait {
|
||||||
|
args.push("--wait".to_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
WorkflowTask {
|
||||||
|
id: task_id.clone(),
|
||||||
|
name: if workflow.servers.is_empty() {
|
||||||
|
"create_servers_all".to_string()
|
||||||
|
} else {
|
||||||
|
format!("create_servers_{}", workflow.servers.join("_"))
|
||||||
|
},
|
||||||
|
command: format!("{} servers create", state.args.provisioning_path),
|
||||||
|
args,
|
||||||
|
dependencies: vec![],
|
||||||
|
status: TaskStatus::Pending,
|
||||||
|
created_at: chrono::Utc::now(),
|
||||||
|
started_at: None,
|
||||||
|
completed_at: None,
|
||||||
|
output: None,
|
||||||
|
error: None,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let server_summary = if workflow.servers.is_empty() {
|
||||||
|
"all servers".to_string()
|
||||||
|
} else {
|
||||||
|
format!("{} server(s)", workflow.servers.len())
|
||||||
};
|
};
|
||||||
|
|
||||||
match state.task_storage.enqueue(task, 5).await {
|
match state.task_storage.enqueue(task, 5).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
info!("Enqueued server creation workflow: {}", task_id);
|
info!(
|
||||||
|
"Enqueued server creation workflow ({}): {} | infra: {}",
|
||||||
|
server_summary, task_id, workflow.infra
|
||||||
|
);
|
||||||
Ok(Json(ApiResponse::success(task_id)))
|
Ok(Json(ApiResponse::success(task_id)))
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
error!("Failed to enqueue task: {}", e);
|
error!(
|
||||||
|
"Failed to enqueue server creation task ({}): {}",
|
||||||
|
server_summary, e
|
||||||
|
);
|
||||||
Err(StatusCode::INTERNAL_SERVER_ERROR)
|
Err(StatusCode::INTERNAL_SERVER_ERROR)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -885,6 +947,11 @@ async fn process_tasks(state: SharedState) {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
state
|
||||||
|
.publish_task_status(&task.id, "running", Some(0), None)
|
||||||
|
.await;
|
||||||
|
|
||||||
info!("Processing task: {} ({})", task.id, task.name);
|
info!("Processing task: {} ({})", task.id, task.name);
|
||||||
|
|
||||||
let task_start = std::time::Instant::now();
|
let task_start = std::time::Instant::now();
|
||||||
@ -901,6 +968,11 @@ async fn process_tasks(state: SharedState) {
|
|||||||
task.status = TaskStatus::Completed;
|
task.status = TaskStatus::Completed;
|
||||||
task.completed_at = Some(chrono::Utc::now());
|
task.completed_at = Some(chrono::Utc::now());
|
||||||
|
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
state
|
||||||
|
.publish_task_status(&task.id, "completed", Some(100), None)
|
||||||
|
.await;
|
||||||
|
|
||||||
// Record metrics
|
// Record metrics
|
||||||
metrics_collector.record_task_completion(task_duration.as_millis() as u64);
|
metrics_collector.record_task_completion(task_duration.as_millis() as u64);
|
||||||
|
|
||||||
@ -939,6 +1011,11 @@ async fn process_tasks(state: SharedState) {
|
|||||||
task.status = TaskStatus::Failed;
|
task.status = TaskStatus::Failed;
|
||||||
task.completed_at = Some(chrono::Utc::now());
|
task.completed_at = Some(chrono::Utc::now());
|
||||||
|
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
state
|
||||||
|
.publish_task_status(&task.id, "failed", None, Some(&e.to_string()))
|
||||||
|
.await;
|
||||||
|
|
||||||
// Record metrics
|
// Record metrics
|
||||||
metrics_collector.record_task_failure();
|
metrics_collector.record_task_failure();
|
||||||
|
|
||||||
@ -988,17 +1065,134 @@ async fn process_tasks(state: SharedState) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Solo mode helpers: spawn nats-server child process and wait for readiness
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
mod solo_nats {
|
||||||
|
use anyhow::{Context, Result};
|
||||||
|
use tokio::net::TcpStream;
|
||||||
|
use tokio::process::Command;
|
||||||
|
use tokio::time::{timeout, Duration, Instant};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
/// Spawn `nats-server` as a child process with JetStream enabled.
|
||||||
|
///
|
||||||
|
/// The returned `Child` holds the process alive; drop it to kill the
|
||||||
|
/// server. Uses `kill_on_drop(true)` so the process is cleaned up when
|
||||||
|
/// `Child` is dropped.
|
||||||
|
pub async fn spawn_nats_server(data_dir: &str) -> Result<tokio::process::Child> {
|
||||||
|
let nats_store_dir = format!("{}/nats", data_dir);
|
||||||
|
std::fs::create_dir_all(&nats_store_dir)
|
||||||
|
.context("Failed to create NATS storage directory")?;
|
||||||
|
|
||||||
|
let child = Command::new("nats-server")
|
||||||
|
.args(["-js", "-sd", &nats_store_dir, "-p", "4222"])
|
||||||
|
.kill_on_drop(true)
|
||||||
|
.spawn()
|
||||||
|
.context("Failed to spawn nats-server — ensure nats-server is in PATH")?;
|
||||||
|
|
||||||
|
wait_for_nats(4222).await?;
|
||||||
|
info!("✓ NATS server (solo mode) ready on port 4222");
|
||||||
|
Ok(child)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempt TCP connect to 127.0.0.1:{port} in a loop until ready or
|
||||||
|
/// timeout.
|
||||||
|
async fn wait_for_nats(port: u16) -> Result<()> {
|
||||||
|
let addr = format!("127.0.0.1:{}", port);
|
||||||
|
let deadline = Instant::now() + Duration::from_secs(10);
|
||||||
|
loop {
|
||||||
|
if Instant::now() > deadline {
|
||||||
|
return Err(anyhow::anyhow!(
|
||||||
|
"NATS server did not become ready within 10 seconds on port {}",
|
||||||
|
port
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if timeout(Duration::from_millis(200), TcpStream::connect(&addr))
|
||||||
|
.await
|
||||||
|
.is_ok()
|
||||||
|
{
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
async fn main() -> Result<()> {
|
async fn main() -> Result<()> {
|
||||||
tracing_subscriber::fmt::init();
|
// Parse CLI arguments FIRST (so --help works before any other processing)
|
||||||
|
|
||||||
let args = Args::parse();
|
let args = Args::parse();
|
||||||
let port = args.port;
|
|
||||||
|
|
||||||
info!("Starting provisioning orchestrator on port {}", port);
|
// Initialize centralized observability (logging, metrics, health checks)
|
||||||
|
let _guard = observability::init_from_env("orchestrator", env!("CARGO_PKG_VERSION"))
|
||||||
|
.context("Failed to initialize observability")?;
|
||||||
|
|
||||||
|
// Initialize platform startup manager
|
||||||
|
let deployment = load_deployment_mode().context("Failed to load deployment-mode.ncl")?;
|
||||||
|
|
||||||
|
// Check if orchestrator is enabled
|
||||||
|
if !deployment.is_service_enabled("orchestrator")? {
|
||||||
|
warn!("⚠ Orchestrator is DISABLED in deployment-mode.ncl");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
info!("✓ Orchestrator is ENABLED in deployment-mode.ncl");
|
||||||
|
|
||||||
|
// Validate dependencies
|
||||||
|
let startup = PlatformStartup::new(&deployment.config)
|
||||||
|
.context("Failed to initialize platform startup")?;
|
||||||
|
startup
|
||||||
|
.validate_dependencies("orchestrator")
|
||||||
|
.context("Failed to validate orchestrator dependencies")?;
|
||||||
|
|
||||||
|
// Setup Git repositories
|
||||||
|
let (_schemas_path, _configs_path) = startup
|
||||||
|
.setup_git_repos()
|
||||||
|
.context("Failed to setup Git repositories")?;
|
||||||
|
|
||||||
|
// Load orchestrator configuration from Nickel
|
||||||
|
let config = OrchestratorConfig::load().context("Failed to load orchestrator configuration")?;
|
||||||
|
|
||||||
|
// Apply CLI overrides if provided
|
||||||
|
let mut config = config;
|
||||||
|
config.apply_cli_overrides(&args);
|
||||||
|
|
||||||
|
let port = config.orchestrator.server.port;
|
||||||
|
|
||||||
|
info!(
|
||||||
|
"🔧 Loaded orchestrator configuration from NCL, binding to port {}",
|
||||||
|
port
|
||||||
|
);
|
||||||
|
|
||||||
|
// Solo mode: spawn embedded NATS server before connecting as client
|
||||||
|
// The child is kept alive for the entire lifetime of main()
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
let _solo_nats_child = if args.mode.as_deref() == Some("solo") {
|
||||||
|
info!("Solo mode: starting embedded NATS server");
|
||||||
|
Some(solo_nats::spawn_nats_server(&args.data_dir).await?)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
let state = Arc::new(AppState::new(args).await?);
|
let state = Arc::new(AppState::new(args).await?);
|
||||||
|
|
||||||
|
// Build webhook state with empty registry (workspaces registered via API or
|
||||||
|
// config)
|
||||||
|
let webhook_state = Arc::new(WebhookState {
|
||||||
|
registry: Arc::new(parking_lot::RwLock::new(WorkspaceRegistry::new())),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Start audit collector (NATS → SurrealDB)
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
{
|
||||||
|
use provisioning_orchestrator::audit::collector::run_audit_collector;
|
||||||
|
let nats = Arc::clone(&state.nats);
|
||||||
|
let db = Arc::clone(&state.db);
|
||||||
|
tokio::spawn(async move {
|
||||||
|
run_audit_collector(nats, db).await;
|
||||||
|
});
|
||||||
|
info!("✓ NATS audit collector started");
|
||||||
|
}
|
||||||
|
|
||||||
// Start task processor
|
// Start task processor
|
||||||
let processor_state = state.clone();
|
let processor_state = state.clone();
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
@ -1071,6 +1265,11 @@ async fn main() -> Result<()> {
|
|||||||
)
|
)
|
||||||
// Merge monitoring routes (includes /metrics, /ws, /events)
|
// Merge monitoring routes (includes /metrics, /ws, /events)
|
||||||
.merge(state.monitoring_system.create_routes())
|
.merge(state.monitoring_system.create_routes())
|
||||||
|
// Webhook handler (separate state — workspace registry)
|
||||||
|
.route(
|
||||||
|
"/api/v1/webhooks/:workspace_id",
|
||||||
|
post(handle_webhook).with_state(webhook_state),
|
||||||
|
)
|
||||||
.layer(CorsLayer::permissive())
|
.layer(CorsLayer::permissive())
|
||||||
.with_state(state);
|
.with_state(state);
|
||||||
|
|
||||||
|
|||||||
@ -107,7 +107,11 @@ impl AuditMiddleware {
|
|||||||
let action_type = determine_action_type(method.as_ref(), &path);
|
let action_type = determine_action_type(method.as_ref(), &path);
|
||||||
|
|
||||||
// Determine resource and workspace from path
|
// Determine resource and workspace from path
|
||||||
let (resource, workspace) = extract_resource_info(&path);
|
let workspace_from_header = headers
|
||||||
|
.get("X-Workspace-ID")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.map(|s| s.to_string());
|
||||||
|
let (resource, workspace) = extract_resource_info(&path, workspace_from_header);
|
||||||
|
|
||||||
// Create user info
|
// Create user info
|
||||||
let user = UserInfo {
|
let user = UserInfo {
|
||||||
@ -263,6 +267,19 @@ fn determine_action_type(method: &str, path: &str) -> ActionType {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Workspace operations
|
||||||
|
if path.contains("/workspaces") || path.contains("/workspace") {
|
||||||
|
return match method {
|
||||||
|
"POST" if path.contains("/create") => ActionType::WorkspaceCreate,
|
||||||
|
"POST" if path.contains("/switch") => ActionType::WorkspaceSwitch,
|
||||||
|
"POST" if path.contains("/sync") => ActionType::WorkspaceSync,
|
||||||
|
"PUT" | "PATCH" => ActionType::WorkspaceUpdate,
|
||||||
|
"DELETE" => ActionType::WorkspaceDelete,
|
||||||
|
"GET" => ActionType::WorkspaceList,
|
||||||
|
_ => ActionType::Unknown,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
// System operations
|
// System operations
|
||||||
if path.contains("/health") {
|
if path.contains("/health") {
|
||||||
return ActionType::SystemHealthCheck;
|
return ActionType::SystemHealthCheck;
|
||||||
@ -276,7 +293,7 @@ fn determine_action_type(method: &str, path: &str) -> ActionType {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Extract resource and workspace information from path
|
/// Extract resource and workspace information from path
|
||||||
fn extract_resource_info(path: &str) -> (String, String) {
|
fn extract_resource_info(path: &str, workspace_header: Option<String>) -> (String, String) {
|
||||||
// Try to extract resource ID from path
|
// Try to extract resource ID from path
|
||||||
let parts: Vec<&str> = path.trim_matches('/').split('/').collect();
|
let parts: Vec<&str> = path.trim_matches('/').split('/').collect();
|
||||||
|
|
||||||
@ -295,9 +312,24 @@ fn extract_resource_info(path: &str) -> (String, String) {
|
|||||||
"*".to_string()
|
"*".to_string()
|
||||||
};
|
};
|
||||||
|
|
||||||
// Workspace would typically come from a header or path prefix
|
// Try to extract workspace from header first, then from path
|
||||||
// For now, default to "default"
|
let workspace = if let Some(ws) = workspace_header {
|
||||||
let workspace = "default".to_string();
|
ws
|
||||||
|
} else if path.contains("/workspaces/") {
|
||||||
|
// Extract workspace name from path like /workspaces/{name}/...
|
||||||
|
if let Some(idx) = path.find("/workspaces/") {
|
||||||
|
let after_prefix = &path[idx + "/workspaces/".len()..];
|
||||||
|
if let Some(end_idx) = after_prefix.find('/') {
|
||||||
|
after_prefix[..end_idx].to_string()
|
||||||
|
} else {
|
||||||
|
after_prefix.to_string()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
"default".to_string()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
"default".to_string()
|
||||||
|
};
|
||||||
|
|
||||||
(resource, workspace)
|
(resource, workspace)
|
||||||
}
|
}
|
||||||
@ -336,14 +368,45 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_extract_resource_info() {
|
fn test_extract_resource_info() {
|
||||||
let (resource, workspace) = extract_resource_info("/workflows/servers/server-123");
|
let (resource, workspace) = extract_resource_info("/workflows/servers/server-123", None);
|
||||||
assert_eq!(resource, "server-123");
|
assert_eq!(resource, "server-123");
|
||||||
assert_eq!(workspace, "default");
|
assert_eq!(workspace, "default");
|
||||||
|
|
||||||
// When resource is not provided, extract_resource_info returns the resource
|
let (resource, workspace) = extract_resource_info("/workflows/servers", None);
|
||||||
// type
|
assert_eq!(resource, "servers");
|
||||||
let (resource, workspace) = extract_resource_info("/workflows/servers");
|
|
||||||
assert_eq!(resource, "servers"); // Returns the resource type, not wildcard
|
|
||||||
assert_eq!(workspace, "default");
|
assert_eq!(workspace, "default");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_workspace_action_types() {
|
||||||
|
assert_eq!(
|
||||||
|
determine_action_type("POST", "/workspaces/prod/create"),
|
||||||
|
ActionType::WorkspaceCreate
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
determine_action_type("POST", "/workspaces/prod/switch"),
|
||||||
|
ActionType::WorkspaceSwitch
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
determine_action_type("DELETE", "/workspaces/prod"),
|
||||||
|
ActionType::WorkspaceDelete
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
determine_action_type("GET", "/workspaces"),
|
||||||
|
ActionType::WorkspaceList
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_workspace_info_extraction() {
|
||||||
|
let (resource, workspace) = extract_resource_info("/workspaces/prod/config", None);
|
||||||
|
assert_eq!(workspace, "prod");
|
||||||
|
|
||||||
|
let (resource, workspace) =
|
||||||
|
extract_resource_info("/workspaces/staging/create", Some("prod".to_string()));
|
||||||
|
assert_eq!(workspace, "prod"); // Header takes precedence
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -6,6 +6,10 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use anyhow::{Context, Result};
|
use anyhow::{Context, Result};
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
use platform_db::SurrealPool;
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
use platform_nats::NatsBridge;
|
||||||
use tracing::info;
|
use tracing::info;
|
||||||
|
|
||||||
#[cfg(feature = "testing")]
|
#[cfg(feature = "testing")]
|
||||||
@ -46,9 +50,60 @@ pub struct AppState {
|
|||||||
pub service_orchestrator: Arc<ServiceOrchestrator>,
|
pub service_orchestrator: Arc<ServiceOrchestrator>,
|
||||||
pub audit_logger: Arc<AuditLogger>,
|
pub audit_logger: Arc<AuditLogger>,
|
||||||
pub compliance_service: Arc<ComplianceService>,
|
pub compliance_service: Arc<ComplianceService>,
|
||||||
|
/// NATS JetStream bridge — present when compiled with the `nats` feature.
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub nats: Arc<NatsBridge>,
|
||||||
|
/// Shared SurrealDB connection — present when compiled with the `nats`
|
||||||
|
/// feature.
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
pub db: Arc<SurrealPool>,
|
||||||
pub args: Args,
|
pub args: Args,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Sanitize sensitive data from command output before logging.
///
/// Removes: API tokens, passwords, SSH keys, etc. Lines carrying a known
/// credential variable (`HCLOUD_TOKEN=`, `PASSWORD=`, …) have everything
/// after the first `=` replaced with `***REDACTED***`; `Bearer` tokens are
/// redacted in place. Every emitted line is followed by `'\n'`, so the
/// result ends with a newline whenever the input is non-empty.
fn sanitize_credentials(output: &str) -> String {
    const SENSITIVE_KEYS: [&str; 8] = [
        "HCLOUD_TOKEN=",
        "AWS_SECRET_ACCESS_KEY=",
        "VAULT_TOKEN=",
        "API_KEY=",
        "SECRET_KEY=",
        "PASSWORD=",
        "PASSWD=",
        "PWD=",
    ];

    let mut result = String::with_capacity(output.len());
    // Iterate lazily — no need to collect the lines into a Vec first.
    for line in output.lines() {
        let sanitized_line = if SENSITIVE_KEYS.iter().any(|key| line.contains(key)) {
            // Extract the key part and redact the value
            if let Some(eq_idx) = line.find('=') {
                format!("{}=***REDACTED***", &line[..eq_idx])
            } else {
                line.to_string()
            }
        } else if line.contains("Bearer ") && !line.contains("***REDACTED***") {
            let token = line
                .split("Bearer ")
                .nth(1)
                .unwrap_or("")
                .split_whitespace()
                .next()
                .unwrap_or("");
            // Guard against an empty token: `str::replace("")` matches at
            // every position and would splice ***REDACTED*** between each
            // character of the line.
            if token.is_empty() {
                line.to_string()
            } else {
                line.replace(token, "***REDACTED***")
            }
        } else {
            line.to_string()
        };

        result.push_str(&sanitized_line);
        result.push('\n');
    }

    result
}
|
||||||
|
|
||||||
impl AppState {
|
impl AppState {
|
||||||
pub async fn new(args: Args) -> Result<Self> {
|
pub async fn new(args: Args) -> Result<Self> {
|
||||||
// Create storage using the factory pattern
|
// Create storage using the factory pattern
|
||||||
@ -251,6 +306,27 @@ impl AppState {
|
|||||||
|
|
||||||
info!("Successfully initialized compliance service");
|
info!("Successfully initialized compliance service");
|
||||||
|
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
let (nats, db) = {
|
||||||
|
use platform_db::{DbConfig, SurrealPool};
|
||||||
|
use platform_nats::{NatsBridge, NatsConfig};
|
||||||
|
|
||||||
|
let nats_config = NatsConfig::default();
|
||||||
|
let bridge = NatsBridge::connect(&nats_config)
|
||||||
|
.await
|
||||||
|
.context("Failed to connect to NATS")?;
|
||||||
|
let bridge = Arc::new(bridge);
|
||||||
|
|
||||||
|
let db_config = DbConfig::Embedded {
|
||||||
|
path: format!("{}/surrealdb", args.data_dir),
|
||||||
|
};
|
||||||
|
let pool = SurrealPool::connect(&db_config)
|
||||||
|
.await
|
||||||
|
.context("Failed to connect to SurrealDB")?;
|
||||||
|
|
||||||
|
(bridge, Arc::new(pool))
|
||||||
|
};
|
||||||
|
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
task_storage,
|
task_storage,
|
||||||
batch_coordinator,
|
batch_coordinator,
|
||||||
@ -266,31 +342,89 @@ impl AppState {
|
|||||||
service_orchestrator,
|
service_orchestrator,
|
||||||
audit_logger,
|
audit_logger,
|
||||||
compliance_service,
|
compliance_service,
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
nats,
|
||||||
|
#[cfg(feature = "nats")]
|
||||||
|
db,
|
||||||
args,
|
args,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Publish a task status update to NATS JetStream.
///
/// Subject: `provisioning.tasks.{task_id}.status`
/// Compiled out entirely when the `nats` feature is disabled.
/// Publish failures are logged, never propagated — status events are
/// best-effort.
#[cfg(feature = "nats")]
pub async fn publish_task_status(
    &self,
    task_id: &str,
    status: &str,
    progress: Option<u32>,
    message: Option<&str>,
) {
    use tracing::warn;

    let payload = serde_json::json!({
        "task_id": task_id,
        "status": status,
        "progress": progress,
        "message": message,
    });
    // The bridge prefixes subjects with `provisioning.` — only the
    // suffix is built here.
    let subject = format!("tasks.{}.status", task_id);

    if let Err(err) = self.nats.publish_json(&subject, &payload).await {
        warn!(task_id = %task_id, "Failed to publish task status to NATS: {}", err);
    }
}
|
||||||
|
|
||||||
pub async fn execute_nushell_command(&self, command: &str, args: &[String]) -> Result<String> {
|
pub async fn execute_nushell_command(&self, command: &str, args: &[String]) -> Result<String> {
|
||||||
use std::process::Stdio;
|
use std::process::Stdio;
|
||||||
|
|
||||||
use tokio::process::Command;
|
use tokio::process::Command;
|
||||||
|
|
||||||
let mut cmd = Command::new(&self.args.nu_path);
|
// Determine how to execute based on command type
|
||||||
cmd.arg("-c")
|
let mut cmd = if command == "bash" {
|
||||||
.arg(format!("{} {}", command, args.join(" ")))
|
// For bash commands: args[0] is "-c", args[1] is the full command to execute
|
||||||
.stdout(Stdio::piped())
|
let mut c = Command::new("bash");
|
||||||
.stderr(Stdio::piped());
|
if args.len() >= 2 && args[0] == "-c" {
|
||||||
|
c.arg("-c").arg(&args[1]);
|
||||||
|
} else {
|
||||||
|
// Fallback: execute bash with all args
|
||||||
|
c.args(args);
|
||||||
|
}
|
||||||
|
c
|
||||||
|
} else {
|
||||||
|
// For Nushell commands: execute via nu -c "command args"
|
||||||
|
let mut c = Command::new(&self.args.nu_path);
|
||||||
|
c.arg("-c").arg(format!("{} {}", command, args.join(" ")));
|
||||||
|
c
|
||||||
|
};
|
||||||
|
|
||||||
let output = cmd
|
cmd.stdout(Stdio::piped()).stderr(Stdio::piped());
|
||||||
.output()
|
|
||||||
.await
|
let output = cmd.output().await.context("Failed to execute command")?;
|
||||||
.context("Failed to execute Nushell command")?;
|
|
||||||
|
|
||||||
if output.status.success() {
|
if output.status.success() {
|
||||||
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
Ok(String::from_utf8_lossy(&output.stdout).to_string())
|
||||||
} else {
|
} else {
|
||||||
let error = String::from_utf8_lossy(&output.stderr);
|
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||||
Err(anyhow::anyhow!("Nushell command failed: {}", error))
|
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||||
|
|
||||||
|
// Include both stdout and stderr for better error diagnostics
|
||||||
|
let raw_output = if !stderr.is_empty() {
|
||||||
|
stderr.to_string()
|
||||||
|
} else if !stdout.is_empty() {
|
||||||
|
stdout.to_string()
|
||||||
|
} else {
|
||||||
|
format!("Command failed with exit code: {:?}", output.status.code())
|
||||||
|
};
|
||||||
|
|
||||||
|
// Sanitize sensitive data from logs (tokens, keys, passwords)
|
||||||
|
let sanitized_output = sanitize_credentials(&raw_output);
|
||||||
|
|
||||||
|
Err(anyhow::anyhow!(
|
||||||
|
"Command execution failed: {}",
|
||||||
|
sanitized_output
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
105
crates/orchestrator/src/webhooks/git_executor.rs
Normal file
105
crates/orchestrator/src/webhooks/git_executor.rs
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
use anyhow::{Context, Result};
|
||||||
|
#[cfg(feature = "gitops")]
|
||||||
|
use git2::Repository;
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
/// Pull latest changes from remote and trigger deployment logic.
|
||||||
|
///
|
||||||
|
/// Uses `git2` for fetch+merge when the `gitops` feature is enabled.
|
||||||
|
/// Falls back to advisory logging when compiled without git2.
|
||||||
|
pub async fn pull_and_deploy(workspace_id: &str, local_path: &str) -> Result<()> {
|
||||||
|
info!(
|
||||||
|
workspace_id = %workspace_id,
|
||||||
|
local_path = %local_path,
|
||||||
|
"Starting git pull + deploy"
|
||||||
|
);
|
||||||
|
|
||||||
|
#[cfg(feature = "gitops")]
|
||||||
|
{
|
||||||
|
tokio::task::spawn_blocking({
|
||||||
|
let local_path = local_path.to_string();
|
||||||
|
let workspace_id = workspace_id.to_string();
|
||||||
|
move || git_fetch_and_merge(&workspace_id, &local_path)
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.context("git executor task panicked")??;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(not(feature = "gitops"))]
|
||||||
|
{
|
||||||
|
warn!(
|
||||||
|
workspace_id = %workspace_id,
|
||||||
|
"gitops feature not compiled — git pull skipped (advisory only)"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
info!(workspace_id = %workspace_id, "Deployment triggered");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch `origin` and fast-forward the current branch to `FETCH_HEAD`.
///
/// Non-fast-forward situations are logged and skipped — they require
/// manual resolution. This function blocks; run it via `spawn_blocking`.
#[cfg(feature = "gitops")]
fn git_fetch_and_merge(workspace_id: &str, local_path: &str) -> Result<()> {
    let repo =
        Repository::open(local_path).with_context(|| format!("open repo at {}", local_path))?;

    // Fetch origin. Fix: the previous version built a `FetchOptions`
    // binding it never used (unused_variables under `-D warnings`) while
    // constructing a second one inline — build and use a single one.
    let mut remote = repo.find_remote("origin").context("find remote 'origin'")?;
    let mut fetch_opts = git2::FetchOptions::default();
    remote
        .fetch(&[] as &[&str], Some(&mut fetch_opts), None)
        .context("fetch from origin")?;

    // Find FETCH_HEAD
    let fetch_head = repo
        .find_reference("FETCH_HEAD")
        .context("find FETCH_HEAD")?;
    let fetch_commit = repo
        .reference_to_annotated_commit(&fetch_head)
        .context("resolve FETCH_HEAD commit")?;

    // Perform merge analysis
    let (analysis, _) = repo
        .merge_analysis(&[&fetch_commit])
        .context("merge analysis")?;

    if analysis.is_up_to_date() {
        info!(workspace_id = %workspace_id, "Already up to date");
        return Ok(());
    }

    if analysis.is_fast_forward() {
        // Resolve the current branch name; fall back to "main" when HEAD
        // is detached or unnamed.
        let refname = format!(
            "refs/heads/{}",
            repo.head()
                .ok()
                .and_then(|h| h.shorthand().map(|s| s.to_string()))
                .unwrap_or_else(|| "main".to_string())
        );
        let mut reference = repo
            .find_reference(&refname)
            .context("find HEAD reference")?;
        reference
            .set_target(fetch_commit.id(), "fast-forward")
            .context("fast-forward HEAD")?;
        repo.set_head(&refname).context("set HEAD")?;
        // Force checkout so the working tree matches the new HEAD.
        repo.checkout_head(Some(git2::build::CheckoutBuilder::default().force()))
            .context("checkout HEAD")?;
        info!(
            workspace_id = %workspace_id,
            commit = %fetch_commit.id(),
            "Fast-forwarded to latest commit"
        );
    } else {
        warn!(
            workspace_id = %workspace_id,
            "Non-fast-forward merge required — deployment skipped (requires manual resolution)"
        );
    }

    Ok(())
}
|
||||||
170
crates/orchestrator/src/webhooks/handler.rs
Normal file
170
crates/orchestrator/src/webhooks/handler.rs
Normal file
@ -0,0 +1,170 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use anyhow::{anyhow, Context, Result};
|
||||||
|
use axum::{
|
||||||
|
body::Bytes,
|
||||||
|
extract::{Path, State},
|
||||||
|
http::{HeaderMap, StatusCode},
|
||||||
|
response::Json,
|
||||||
|
};
|
||||||
|
use hmac::{Hmac, Mac};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use sha2::Sha256;
|
||||||
|
use tracing::{error, info};
|
||||||
|
|
||||||
|
type HmacSha256 = Hmac<Sha256>;
|
||||||
|
|
||||||
|
/// In-memory workspace registration (in production: backed by SurrealDB).
#[derive(Debug, Clone)]
pub struct WorkspaceRegistration {
    /// Identifier of the workspace this registration belongs to.
    pub workspace_id: String,
    /// Git remote the workspace tracks (used for documentation/lookup;
    /// the webhook handler pulls from the local checkout's `origin`).
    pub git_remote: String,
    /// Branch whose pushes trigger deployment (compared against the
    /// push event's `refs/heads/{branch}`).
    pub branch: String,
    /// HMAC-SHA256 key for webhook signature validation.
    pub webhook_secret: String,
    /// Local checkout path.
    pub local_path: String,
}
|
||||||
|
|
||||||
|
/// Registry of workspace registrations.
#[derive(Default)]
pub struct WorkspaceRegistry {
    // Keyed by workspace_id (see `register`).
    registrations: HashMap<String, WorkspaceRegistration>,
}
|
||||||
|
|
||||||
|
impl WorkspaceRegistry {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
registrations: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn register(&mut self, reg: WorkspaceRegistration) {
|
||||||
|
self.registrations.insert(reg.workspace_id.clone(), reg);
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn get(&self, workspace_id: &str) -> Option<&WorkspaceRegistration> {
|
||||||
|
self.registrations.get(workspace_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Shared handler state.
pub struct WebhookState {
    // RwLock: webhook handling only reads the registry; writes happen
    // on (de)registration.
    pub registry: Arc<parking_lot::RwLock<WorkspaceRegistry>>,
}
|
||||||
|
|
||||||
|
/// Incoming push event payload (Forgejo/Gitea/GitHub compatible subset).
#[derive(Debug, Deserialize)]
pub struct PushEvent {
    /// Full git ref that was pushed, e.g. `refs/heads/main`.
    #[serde(rename = "ref")]
    pub git_ref: String,
    /// Newest commit of the push, when the forge includes one.
    pub head_commit: Option<HeadCommit>,
}
|
||||||
|
|
||||||
|
/// Head commit of a push event.
#[derive(Debug, Deserialize)]
pub struct HeadCommit {
    /// Commit SHA.
    pub id: String,
    /// Commit message.
    pub message: String,
}
|
||||||
|
|
||||||
|
/// Response body returned by the webhook endpoint.
#[derive(Debug, Serialize)]
pub struct WebhookResponse {
    /// "accepted" when a deployment was triggered, "ignored" for pushes
    /// to a non-tracked branch.
    pub status: String,
    /// Workspace the webhook targeted.
    pub workspace_id: String,
    /// SHA of the pushed head commit, if the payload carried one.
    pub commit_sha: Option<String>,
}
|
||||||
|
|
||||||
|
/// POST /api/v1/webhooks/{workspace_id}
|
||||||
|
///
|
||||||
|
/// Validates HMAC-SHA256 signature, then triggers git pull + deployment.
|
||||||
|
pub async fn handle_webhook(
|
||||||
|
State(state): State<Arc<WebhookState>>,
|
||||||
|
Path(workspace_id): Path<String>,
|
||||||
|
headers: HeaderMap,
|
||||||
|
body: Bytes,
|
||||||
|
) -> Result<Json<WebhookResponse>, StatusCode> {
|
||||||
|
// Look up registration
|
||||||
|
let reg = {
|
||||||
|
let registry = state.registry.read();
|
||||||
|
match registry.get(&workspace_id) {
|
||||||
|
Some(r) => r.clone(),
|
||||||
|
None => {
|
||||||
|
error!("Webhook for unknown workspace: {}", workspace_id);
|
||||||
|
return Err(StatusCode::NOT_FOUND);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Validate HMAC-SHA256 signature (X-Hub-Signature-256 header)
|
||||||
|
if let Err(e) = validate_signature(&headers, &body, ®.webhook_secret) {
|
||||||
|
error!(workspace_id = %workspace_id, "Webhook signature validation failed: {}", e);
|
||||||
|
return Err(StatusCode::UNAUTHORIZED);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse push event
|
||||||
|
let push_event: PushEvent = serde_json::from_slice(&body).map_err(|e| {
|
||||||
|
error!("Failed to parse push event: {}", e);
|
||||||
|
StatusCode::BAD_REQUEST
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let commit_sha = push_event.head_commit.as_ref().map(|c| c.id.clone());
|
||||||
|
|
||||||
|
// Check branch matches expected
|
||||||
|
let expected_ref = format!("refs/heads/{}", reg.branch);
|
||||||
|
if push_event.git_ref != expected_ref {
|
||||||
|
info!(
|
||||||
|
workspace_id = %workspace_id,
|
||||||
|
git_ref = %push_event.git_ref,
|
||||||
|
expected = %expected_ref,
|
||||||
|
"Webhook for non-tracked branch — ignoring"
|
||||||
|
);
|
||||||
|
return Ok(Json(WebhookResponse {
|
||||||
|
status: "ignored".to_string(),
|
||||||
|
workspace_id,
|
||||||
|
commit_sha,
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
info!(
|
||||||
|
workspace_id = %workspace_id,
|
||||||
|
commit_sha = ?commit_sha,
|
||||||
|
"Webhook validated — triggering deployment"
|
||||||
|
);
|
||||||
|
|
||||||
|
// Trigger git pull in a separate task (non-blocking response)
|
||||||
|
let local_path = reg.local_path.clone();
|
||||||
|
let ws_id = workspace_id.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
if let Err(e) = super::git_executor::pull_and_deploy(&ws_id, &local_path).await {
|
||||||
|
error!(workspace_id = %ws_id, "Git pull/deploy failed: {}", e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(Json(WebhookResponse {
|
||||||
|
status: "accepted".to_string(),
|
||||||
|
workspace_id,
|
||||||
|
commit_sha,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn validate_signature(headers: &HeaderMap, body: &[u8], secret: &str) -> Result<()> {
|
||||||
|
let sig_header = headers
|
||||||
|
.get("X-Hub-Signature-256")
|
||||||
|
.or_else(|| headers.get("x-hub-signature-256"))
|
||||||
|
.ok_or_else(|| anyhow!("Missing X-Hub-Signature-256 header"))?
|
||||||
|
.to_str()
|
||||||
|
.context("X-Hub-Signature-256 is not valid UTF-8")?;
|
||||||
|
|
||||||
|
let sig_hex = sig_header
|
||||||
|
.strip_prefix("sha256=")
|
||||||
|
.ok_or_else(|| anyhow!("X-Hub-Signature-256 must start with 'sha256='"))?;
|
||||||
|
|
||||||
|
let expected = hex::decode(sig_hex).context("Invalid hex in signature")?;
|
||||||
|
|
||||||
|
let mut mac = HmacSha256::new_from_slice(secret.as_bytes()).context("HMAC key error")?;
|
||||||
|
mac.update(body);
|
||||||
|
|
||||||
|
mac.verify_slice(&expected)
|
||||||
|
.map_err(|_| anyhow!("Signature mismatch"))
|
||||||
|
}
|
||||||
4
crates/orchestrator/src/webhooks/mod.rs
Normal file
4
crates/orchestrator/src/webhooks/mod.rs
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
pub mod git_executor;
|
||||||
|
pub mod handler;
|
||||||
|
|
||||||
|
pub use handler::{handle_webhook, WebhookState, WorkspaceRegistration, WorkspaceRegistry};
|
||||||
@ -8,12 +8,14 @@ version.workspace = true
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
anyhow = { workspace = true }
|
anyhow = { workspace = true }
|
||||||
|
reqwest = { workspace = true, features = ["json"] }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true, features = ["full"] }
|
||||||
toml = { workspace = true }
|
toml = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
|
urlencoding = "2.1"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tempfile = { workspace = true }
|
tempfile = { workspace = true }
|
||||||
|
|||||||
214
crates/platform-config/src/deployment.rs
Normal file
214
crates/platform-config/src/deployment.rs
Normal file
@ -0,0 +1,214 @@
|
|||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
use serde_json::json;
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use crate::error::{ConfigError, Result};
|
||||||
|
use crate::format;
|
||||||
|
|
||||||
|
/// Deployment mode configuration loaded from deployment-mode.ncl
#[derive(Debug, Clone)]
pub struct DeploymentConfig {
    /// The full deployment configuration as JSON
    /// (the evaluated Nickel record, converted by `format::load_config`).
    pub config: Value,
    /// Path to the deployment-mode.ncl file that was loaded
    /// (kept for diagnostics — see `display_info`).
    pub source_path: PathBuf,
}
|
||||||
|
|
||||||
|
impl DeploymentConfig {
|
||||||
|
/// Returns the service-specific configuration section
|
||||||
|
pub fn get_service_config(&self, service_name: &str) -> Result<Value> {
|
||||||
|
self.config.get(service_name).cloned().ok_or_else(|| {
|
||||||
|
ConfigError::validation_failed(format!(
|
||||||
|
"Service '{}' not found in deployment config",
|
||||||
|
service_name
|
||||||
|
))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the enabled_services list by reading services.X.enabled
|
||||||
|
pub fn enabled_services(&self) -> Result<Vec<String>> {
|
||||||
|
self.config
|
||||||
|
.get("services")
|
||||||
|
.and_then(|v| v.as_object())
|
||||||
|
.map(|services| {
|
||||||
|
services
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(name, config)| {
|
||||||
|
if config
|
||||||
|
.get("enabled")
|
||||||
|
.and_then(|v| v.as_bool())
|
||||||
|
.unwrap_or(false)
|
||||||
|
{
|
||||||
|
Some(name.clone())
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
})
|
||||||
|
.ok_or_else(|| {
|
||||||
|
ConfigError::validation_failed("services not found or not a record".to_string())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks if a service is enabled
|
||||||
|
pub fn is_service_enabled(&self, service_name: &str) -> Result<bool> {
|
||||||
|
Ok(self.enabled_services()?.iter().any(|s| s == service_name))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Pretty-prints deployment configuration details
|
||||||
|
pub fn display_info(&self) -> String {
|
||||||
|
let mut info = String::new();
|
||||||
|
info.push_str("📋 Deployment Configuration\n");
|
||||||
|
info.push_str("─────────────────────────────────\n");
|
||||||
|
|
||||||
|
if let Ok(services) = self.enabled_services() {
|
||||||
|
info.push_str("Services:\n");
|
||||||
|
for service in services {
|
||||||
|
info.push_str(&format!(" • {}\n", service));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
info.push_str(&format!("Source: {}\n", self.source_path.display()));
|
||||||
|
info
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Loads deployment-mode configuration from standard locations
|
||||||
|
///
|
||||||
|
/// Searches in this order:
|
||||||
|
/// 1. DEPLOYMENT_MODE_CONFIG env var (explicit path)
|
||||||
|
/// 2. Platform-specific user location:
|
||||||
|
/// - macOS: ~/Library/Application
|
||||||
|
/// Support/provisioning/platform/deployment-mode.ncl
|
||||||
|
/// - Linux: ~/.config/provisioning/platform/deployment-mode.ncl
|
||||||
|
/// 3. /etc/provisioning/platform/deployment-mode.ncl (system location)
|
||||||
|
/// 4. provisioning/platform/deployment-mode.ncl (relative to cwd)
|
||||||
|
pub fn load_deployment_mode() -> Result<DeploymentConfig> {
|
||||||
|
let path = find_deployment_mode_path()?;
|
||||||
|
tracing::info!("Loading deployment config from: {:?}", path);
|
||||||
|
|
||||||
|
let config = format::load_config(&path)?;
|
||||||
|
|
||||||
|
Ok(DeploymentConfig {
|
||||||
|
config,
|
||||||
|
source_path: path,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Finds the deployment-mode.ncl file location
|
||||||
|
fn find_deployment_mode_path() -> Result<PathBuf> {
|
||||||
|
// Priority 1: Explicit env var
|
||||||
|
if let Ok(path) = std::env::var("DEPLOYMENT_MODE_CONFIG") {
|
||||||
|
let config_path = PathBuf::from(&path);
|
||||||
|
if config_path.exists() {
|
||||||
|
tracing::debug!("Using deployment config from env var: {:?}", config_path);
|
||||||
|
return Ok(config_path);
|
||||||
|
}
|
||||||
|
return Err(ConfigError::not_found(&path));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 2: Platform-specific user config directory
|
||||||
|
if let Ok(home) = std::env::var("HOME") {
|
||||||
|
let user_path = {
|
||||||
|
#[cfg(target_os = "macos")]
|
||||||
|
{
|
||||||
|
PathBuf::from(home)
|
||||||
|
.join("Library/Application Support/provisioning/platform/deployment-mode.ncl")
|
||||||
|
}
|
||||||
|
#[cfg(not(target_os = "macos"))]
|
||||||
|
{
|
||||||
|
PathBuf::from(home).join(".config/provisioning/platform/deployment-mode.ncl")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if user_path.exists() {
|
||||||
|
tracing::debug!("Using deployment config from user home: {:?}", user_path);
|
||||||
|
return Ok(user_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 3: System config directory
|
||||||
|
let system_path = PathBuf::from("/etc/provisioning/platform/deployment-mode.ncl");
|
||||||
|
if system_path.exists() {
|
||||||
|
tracing::debug!("Using deployment config from system: {:?}", system_path);
|
||||||
|
return Ok(system_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 4: Relative to cwd
|
||||||
|
let cwd_path = PathBuf::from("provisioning/platform/deployment-mode.ncl");
|
||||||
|
if cwd_path.exists() {
|
||||||
|
tracing::debug!("Using deployment config from cwd: {:?}", cwd_path);
|
||||||
|
return Ok(cwd_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(ConfigError::not_found(
|
||||||
|
"deployment-mode.ncl not found in standard locations",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Fixtures use the `services.<name>.enabled` record schema that
    // `enabled_services()` reads. The previous fixtures used a flat
    // `enabled_services` array the implementation never consulted, so
    // these tests failed (enabled_services() returned Err).
    #[test]
    fn test_deployment_config_display() {
        let config = DeploymentConfig {
            config: json!({
                "services": {
                    "orchestrator": { "enabled": true },
                    "vault_service": { "enabled": true }
                }
            }),
            source_path: PathBuf::from("/test/deployment-mode.ncl"),
        };

        let display = config.display_info();
        assert!(display.contains("Deployment Configuration"));
        assert!(display.contains("orchestrator"));
        assert!(display.contains("vault_service"));
    }

    #[test]
    fn test_get_service_config() {
        let config = DeploymentConfig {
            config: json!({
                "orchestrator": { "port": 9090 },
                "vault_service": { "port": 8082 }
            }),
            source_path: PathBuf::from("/test/deployment-mode.ncl"),
        };

        let orch = config.get_service_config("orchestrator");
        assert!(orch.is_ok());
        assert_eq!(orch.unwrap()["port"], 9090);
    }

    #[test]
    fn test_enabled_services() {
        let config = DeploymentConfig {
            config: json!({
                "services": {
                    "orchestrator": { "enabled": true },
                    "vault_service": { "enabled": true }
                }
            }),
            source_path: PathBuf::from("/test/deployment-mode.ncl"),
        };

        let services = config.enabled_services().unwrap();
        assert_eq!(services.len(), 2);
        assert!(services.contains(&"orchestrator".to_string()));
    }

    #[test]
    fn test_is_service_enabled() {
        let config = DeploymentConfig {
            config: json!({
                "services": {
                    "orchestrator": { "enabled": true },
                    "vault_service": { "enabled": false }
                }
            }),
            source_path: PathBuf::from("/test/deployment-mode.ncl"),
        };

        assert!(config.is_service_enabled("orchestrator").unwrap());
        assert!(!config.is_service_enabled("vault_service").unwrap());
    }
}
|
||||||
@ -33,6 +33,9 @@ pub enum ConfigError {
|
|||||||
|
|
||||||
#[error("Config validation failed: {reason}")]
|
#[error("Config validation failed: {reason}")]
|
||||||
ValidationFailed { reason: String },
|
ValidationFailed { reason: String },
|
||||||
|
|
||||||
|
#[error("Git operation failed: {reason}")]
|
||||||
|
GitFailed { reason: String },
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ConfigError {
|
impl ConfigError {
|
||||||
@ -83,6 +86,12 @@ impl ConfigError {
|
|||||||
reason: reason.into(),
|
reason: reason.into(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn git_failed(reason: impl Into<String>) -> Self {
|
||||||
|
Self::GitFailed {
|
||||||
|
reason: reason.into(),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub type Result<T> = std::result::Result<T, ConfigError>;
|
pub type Result<T> = std::result::Result<T, ConfigError>;
|
||||||
|
|||||||
246
crates/platform-config/src/extensions.rs
Normal file
246
crates/platform-config/src/extensions.rs
Normal file
@ -0,0 +1,246 @@
|
|||||||
|
use std::fs;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use crate::error::{ConfigError, Result};
|
||||||
|
|
||||||
|
/// Manages cached extension schemas from OCI registry
#[derive(Debug, Clone)]
pub struct ExtensionSchemaCache {
    // Root directory holding one `{name}@{version}/schemas/` tree per
    // cached extension.
    cache_dir: PathBuf,
}
|
||||||
|
|
||||||
|
/// A single extension discovered in the on-disk cache.
#[derive(Debug, Clone)]
pub struct CachedExtension {
    /// Extension name (directory-name prefix before `@`).
    pub name: String,
    /// Extension version (directory-name suffix after `@`).
    pub version: String,
    /// Path to the extension's `schemas/` directory.
    pub schema_path: PathBuf,
}
|
||||||
|
|
||||||
|
impl ExtensionSchemaCache {
|
||||||
|
/// Creates a new extension schema cache manager
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
/// - `base_cache_dir`: Base cache directory (typically
|
||||||
|
/// ~/.cache/provisioning/extensions)
|
||||||
|
pub fn new(base_cache_dir: PathBuf) -> Result<Self> {
|
||||||
|
// Ensure cache directory exists
|
||||||
|
fs::create_dir_all(&base_cache_dir).map_err(|e| {
|
||||||
|
ConfigError::io_error(format!(
|
||||||
|
"Failed to create extension cache directory {:?}: {}",
|
||||||
|
base_cache_dir, e
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
cache_dir: base_cache_dir,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a cache at the default location
|
||||||
|
/// (~/.cache/provisioning/extensions)
|
||||||
|
pub fn new_default() -> Result<Self> {
|
||||||
|
let home = std::env::var("HOME")
|
||||||
|
.map_err(|_| ConfigError::io_error("HOME environment variable not set"))?;
|
||||||
|
|
||||||
|
let cache_dir = PathBuf::from(format!("{}/.cache/provisioning/extensions", home));
|
||||||
|
Self::new(cache_dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Lists all cached extensions
|
||||||
|
///
|
||||||
|
/// Returns a vector of cached extensions with their paths
|
||||||
|
/// Expected structure: {cache_dir}/{name}@{version}/schemas/
|
||||||
|
pub fn list_cached_extensions(&self) -> Result<Vec<CachedExtension>> {
|
||||||
|
if !self.cache_dir.exists() {
|
||||||
|
tracing::warn!(
|
||||||
|
"Extension cache directory does not exist: {:?}",
|
||||||
|
self.cache_dir
|
||||||
|
);
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut extensions = Vec::new();
|
||||||
|
|
||||||
|
match fs::read_dir(&self.cache_dir) {
|
||||||
|
Ok(entries) => {
|
||||||
|
for entry in entries.flatten() {
|
||||||
|
let path = entry.path();
|
||||||
|
|
||||||
|
if !path.is_dir() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract name and version from directory name (format: {name}@{version})
|
||||||
|
let Some(dir_str) = path.file_name().and_then(|n| n.to_str()) else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let Some((name, version)) = dir_str.split_once('@') else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
let schema_path = path.join("schemas");
|
||||||
|
if !schema_path.exists() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
tracing::debug!(
|
||||||
|
"Found cached extension: {}@{} at {:?}",
|
||||||
|
name,
|
||||||
|
version,
|
||||||
|
schema_path
|
||||||
|
);
|
||||||
|
extensions.push(CachedExtension {
|
||||||
|
name: name.to_string(),
|
||||||
|
version: version.to_string(),
|
||||||
|
schema_path,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(
|
||||||
|
"Failed to read extension cache directory {:?}: {}",
|
||||||
|
self.cache_dir,
|
||||||
|
e
|
||||||
|
);
|
||||||
|
return Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to read extension cache directory: {}",
|
||||||
|
e
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(extensions)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gets the schema path for a specific extension
|
||||||
|
pub fn get_extension_path(&self, name: &str, version: Option<&str>) -> Option<PathBuf> {
|
||||||
|
match self.list_cached_extensions() {
|
||||||
|
Ok(extensions) => {
|
||||||
|
if let Some(version) = version {
|
||||||
|
// Find exact version match
|
||||||
|
extensions
|
||||||
|
.iter()
|
||||||
|
.find(|e| e.name == name && e.version == version)
|
||||||
|
.map(|e| e.schema_path.clone())
|
||||||
|
} else {
|
||||||
|
// Find latest version (first one found, assuming sorted by version)
|
||||||
|
extensions
|
||||||
|
.iter()
|
||||||
|
.find(|e| e.name == name)
|
||||||
|
.map(|e| e.schema_path.clone())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(
|
||||||
|
"Failed to get extension path for {}:{:?}: {}",
|
||||||
|
name,
|
||||||
|
version,
|
||||||
|
e
|
||||||
|
);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prepares paths for NICKEL_IMPORT_PATH by getting all extension schema
|
||||||
|
/// directories
|
||||||
|
pub fn build_import_paths(&self) -> Result<Vec<PathBuf>> {
|
||||||
|
self.list_cached_extensions()
|
||||||
|
.map(|extensions| extensions.into_iter().map(|e| e.schema_path).collect())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::fs;
|
||||||
|
|
||||||
|
use tempfile::TempDir;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
fn test_extension_schema_cache_new() {
    let temp_dir = TempDir::new().unwrap();
    let dir = temp_dir.path().to_path_buf();

    let cache = ExtensionSchemaCache::new(dir.clone()).unwrap();

    // The constructor must ensure the directory exists and remember it.
    assert!(dir.exists());
    assert_eq!(cache.cache_dir, temp_dir.path());
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_list_cached_extensions_empty() {
|
||||||
|
let temp_dir = TempDir::new().unwrap();
|
||||||
|
let cache = ExtensionSchemaCache::new(temp_dir.path().to_path_buf()).unwrap();
|
||||||
|
|
||||||
|
let extensions = cache.list_cached_extensions().unwrap();
|
||||||
|
assert!(extensions.is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_list_cached_extensions_with_content() {
|
||||||
|
let temp_dir = TempDir::new().unwrap();
|
||||||
|
let cache_path = temp_dir.path();
|
||||||
|
|
||||||
|
// Create mock extension structure
|
||||||
|
let ext_dir = cache_path.join("hetzner@1.0.0");
|
||||||
|
let schemas_dir = ext_dir.join("schemas");
|
||||||
|
fs::create_dir_all(&schemas_dir).unwrap();
|
||||||
|
fs::write(schemas_dir.join("main.ncl"), "{}").unwrap();
|
||||||
|
|
||||||
|
let cache = ExtensionSchemaCache::new(cache_path.to_path_buf()).unwrap();
|
||||||
|
let extensions = cache.list_cached_extensions().unwrap();
|
||||||
|
|
||||||
|
assert_eq!(extensions.len(), 1);
|
||||||
|
assert_eq!(extensions[0].name, "hetzner");
|
||||||
|
assert_eq!(extensions[0].version, "1.0.0");
|
||||||
|
assert!(extensions[0].schema_path.exists());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_get_extension_path() {
|
||||||
|
let temp_dir = TempDir::new().unwrap();
|
||||||
|
let cache_path = temp_dir.path();
|
||||||
|
|
||||||
|
// Create multiple extension versions
|
||||||
|
for version in &["1.0.0", "1.1.0", "2.0.0"] {
|
||||||
|
let ext_dir = cache_path.join(format!("hetzner@{}", version));
|
||||||
|
let schemas_dir = ext_dir.join("schemas");
|
||||||
|
fs::create_dir_all(&schemas_dir).unwrap();
|
||||||
|
fs::write(schemas_dir.join("main.ncl"), "{}").unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let cache = ExtensionSchemaCache::new(cache_path.to_path_buf()).unwrap();
|
||||||
|
|
||||||
|
// Test exact version match
|
||||||
|
let path = cache.get_extension_path("hetzner", Some("1.1.0"));
|
||||||
|
assert!(path.is_some());
|
||||||
|
assert!(path.unwrap().join("main.ncl").exists());
|
||||||
|
|
||||||
|
// Test no version specified (should return first found)
|
||||||
|
let path = cache.get_extension_path("hetzner", None);
|
||||||
|
assert!(path.is_some());
|
||||||
|
|
||||||
|
// Test non-existent extension
|
||||||
|
let path = cache.get_extension_path("nonexistent", None);
|
||||||
|
assert!(path.is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_build_import_paths() {
|
||||||
|
let temp_dir = TempDir::new().unwrap();
|
||||||
|
let cache_path = temp_dir.path();
|
||||||
|
|
||||||
|
// Create multiple extensions
|
||||||
|
for (name, version) in &[("hetzner", "1.0.0"), ("aws", "2.1.0")] {
|
||||||
|
let ext_dir = cache_path.join(format!("{}@{}", name, version));
|
||||||
|
let schemas_dir = ext_dir.join("schemas");
|
||||||
|
fs::create_dir_all(&schemas_dir).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let cache = ExtensionSchemaCache::new(cache_path.to_path_buf()).unwrap();
|
||||||
|
let paths = cache.build_import_paths().unwrap();
|
||||||
|
|
||||||
|
assert_eq!(paths.len(), 2);
|
||||||
|
for path in paths {
|
||||||
|
assert!(path.exists());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
152
crates/platform-config/src/git.rs
Normal file
152
crates/platform-config/src/git.rs
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
|
use crate::error::{ConfigError, Result};
|
||||||
|
|
||||||
|
/// Git repository cache configuration
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct GitRepoCache {
|
||||||
|
pub url: String,
|
||||||
|
pub branch: String,
|
||||||
|
pub cache_dir: PathBuf,
|
||||||
|
pub update_check: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GitRepoCache {
|
||||||
|
/// Create a new Git repository cache configuration
|
||||||
|
pub fn new(url: impl Into<String>, branch: impl Into<String>, cache_dir: PathBuf) -> Self {
|
||||||
|
Self {
|
||||||
|
url: url.into(),
|
||||||
|
branch: branch.into(),
|
||||||
|
cache_dir,
|
||||||
|
update_check: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Ensure repository is cloned or updated in cache directory
|
||||||
|
///
|
||||||
|
/// Returns the path to the cached repository
|
||||||
|
pub fn ensure_cached(&self) -> Result<PathBuf> {
|
||||||
|
let expanded_cache = expand_path(&self.cache_dir)?;
|
||||||
|
let repo_path = expanded_cache.clone();
|
||||||
|
|
||||||
|
// Create cache directory if it doesn't exist
|
||||||
|
if !repo_path.exists() {
|
||||||
|
tracing::info!("Cloning {} into cache: {:?}", self.url, repo_path);
|
||||||
|
self.clone_repo(&repo_path)?;
|
||||||
|
} else if self.update_check {
|
||||||
|
tracing::debug!("Repository cached at {:?}, checking for updates", repo_path);
|
||||||
|
self.update_repo(&repo_path)?;
|
||||||
|
} else {
|
||||||
|
tracing::debug!(
|
||||||
|
"Using cached repository at {:?} (update_check disabled)",
|
||||||
|
repo_path
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(repo_path)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clone repository into specified path
|
||||||
|
fn clone_repo(&self, target_path: &Path) -> Result<()> {
|
||||||
|
let parent = target_path.parent().ok_or_else(|| {
|
||||||
|
ConfigError::git_failed(format!("Invalid cache path: {:?}", target_path))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
// Create parent directory if it doesn't exist
|
||||||
|
std::fs::create_dir_all(parent).map_err(|e| {
|
||||||
|
ConfigError::io_error(format!("Failed to create cache directory: {}", e))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let output = Command::new("git")
|
||||||
|
.args(["clone", "--branch", &self.branch, &self.url])
|
||||||
|
.arg(target_path)
|
||||||
|
.output()
|
||||||
|
.map_err(|e| ConfigError::git_failed(format!("Failed to clone repository: {}", e)))?;
|
||||||
|
|
||||||
|
if !output.status.success() {
|
||||||
|
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||||
|
return Err(ConfigError::git_failed(format!(
|
||||||
|
"Git clone failed: {}",
|
||||||
|
stderr
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
tracing::info!("✓ Successfully cloned {} to {:?}", self.url, target_path);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Update existing repository
|
||||||
|
fn update_repo(&self, repo_path: &Path) -> Result<()> {
|
||||||
|
let output = Command::new("git")
|
||||||
|
.args([
|
||||||
|
"-C",
|
||||||
|
repo_path.to_str().unwrap_or("."),
|
||||||
|
"pull",
|
||||||
|
"origin",
|
||||||
|
&self.branch,
|
||||||
|
])
|
||||||
|
.output()
|
||||||
|
.map_err(|e| ConfigError::git_failed(format!("Failed to pull repository: {}", e)))?;
|
||||||
|
|
||||||
|
if !output.status.success() {
|
||||||
|
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||||
|
tracing::warn!("Failed to pull repository: {}", stderr);
|
||||||
|
// Don't fail - use cached version
|
||||||
|
} else {
|
||||||
|
tracing::debug!("✓ Updated repository from remote");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Expand path with ~ and environment variables
|
||||||
|
pub fn expand_path(path: &Path) -> Result<PathBuf> {
|
||||||
|
let path_str = path.to_string_lossy();
|
||||||
|
|
||||||
|
if path_str.starts_with('~') {
|
||||||
|
let home = std::env::var("HOME")
|
||||||
|
.map_err(|_| ConfigError::not_found("HOME environment variable not set"))?;
|
||||||
|
let expanded = path_str.replacen('~', &home, 1);
|
||||||
|
Ok(PathBuf::from(expanded))
|
||||||
|
} else {
|
||||||
|
Ok(path.to_path_buf())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_expand_path_with_tilde() {
|
||||||
|
let path = PathBuf::from("~/.cache/test");
|
||||||
|
let expanded = expand_path(&path).expect("Should expand path");
|
||||||
|
assert!(expanded.to_string_lossy().contains("/.cache/test"));
|
||||||
|
assert!(!expanded.to_string_lossy().contains('~'));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_expand_path_without_tilde() {
|
||||||
|
let path = PathBuf::from("/absolute/path");
|
||||||
|
let expanded = expand_path(&path).expect("Should return path unchanged");
|
||||||
|
assert_eq!(expanded, PathBuf::from("/absolute/path"));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_git_repo_cache_new() {
|
||||||
|
let cache = GitRepoCache::new(
|
||||||
|
"http://localhost:3000/provisioning/provisioning-schemas.git",
|
||||||
|
"main",
|
||||||
|
PathBuf::from("~/.cache/provisioning/schemas"),
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
cache.url,
|
||||||
|
"http://localhost:3000/provisioning/provisioning-schemas.git"
|
||||||
|
);
|
||||||
|
assert_eq!(cache.branch, "main");
|
||||||
|
assert!(cache.update_check);
|
||||||
|
}
|
||||||
|
}
|
||||||
@ -8,50 +8,105 @@ const CONFIG_BASE_PATH: &str = "provisioning/platform/config";
|
|||||||
/// 2. Variable de entorno {SERVICE}_MODE + búsqueda de archivo
|
/// 2. Variable de entorno {SERVICE}_MODE + búsqueda de archivo
|
||||||
/// 3. Fallback a defaults
|
/// 3. Fallback a defaults
|
||||||
pub fn resolve_config_path(service_name: &str) -> Option<PathBuf> {
|
pub fn resolve_config_path(service_name: &str) -> Option<PathBuf> {
|
||||||
|
tracing::info!("🔍 Resolving config path for service: {}", service_name);
|
||||||
|
|
||||||
// Priority 1: Check {SERVICE}_CONFIG env var (explicit path)
|
// Priority 1: Check {SERVICE}_CONFIG env var (explicit path)
|
||||||
let env_var = format!("{}_CONFIG", service_name.to_uppercase().replace('-', "_"));
|
let env_var = format!("{}_CONFIG", service_name.to_uppercase().replace('-', "_"));
|
||||||
|
tracing::debug!(
|
||||||
|
" Priority 1: Checking env var {} for explicit config path",
|
||||||
|
env_var
|
||||||
|
);
|
||||||
if let Ok(path) = env::var(&env_var) {
|
if let Ok(path) = env::var(&env_var) {
|
||||||
let config_path = PathBuf::from(path);
|
let config_path = PathBuf::from(path);
|
||||||
if config_path.exists() {
|
if config_path.exists() {
|
||||||
tracing::debug!(
|
tracing::info!(
|
||||||
"Using explicit config path from {}: {:?}",
|
"✓ Found explicit config from {}: {}",
|
||||||
env_var,
|
env_var,
|
||||||
config_path
|
config_path.display()
|
||||||
);
|
);
|
||||||
return Some(config_path);
|
return Some(config_path);
|
||||||
}
|
} else {
|
||||||
}
|
tracing::warn!(
|
||||||
|
"⚠ {} points to non-existent file: {}",
|
||||||
// Priority 2: Check PROVISIONING_CONFIG_DIR env var
|
env_var,
|
||||||
if let Ok(dir) = env::var("PROVISIONING_CONFIG_DIR") {
|
config_path.display()
|
||||||
if let Some(config) = super::resolver::find_config_in_dir(std::path::Path::new(&dir), service_name) {
|
|
||||||
tracing::debug!(
|
|
||||||
"Using config from PROVISIONING_CONFIG_DIR: {:?}",
|
|
||||||
config
|
|
||||||
);
|
);
|
||||||
return Some(config);
|
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
tracing::debug!(" {} not set (skipped)", env_var);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Priority 3: Check {SERVICE}_MODE env var + find config file
|
// Priority 2: Check PROVISIONING_CONFIG_DIR env var with smart fallback
|
||||||
|
tracing::debug!(" Priority 2: Checking PROVISIONING_CONFIG_DIR env var");
|
||||||
|
if let Ok(dir) = env::var("PROVISIONING_CONFIG_DIR") {
|
||||||
|
tracing::info!(" PROVISIONING_CONFIG_DIR = {}", dir);
|
||||||
let mode_var = format!("{}_MODE", service_name.to_uppercase().replace('-', "_"));
|
let mode_var = format!("{}_MODE", service_name.to_uppercase().replace('-', "_"));
|
||||||
let mode = env::var(&mode_var).unwrap_or_else(|_| "solo".to_string());
|
let mode = env::var(&mode_var).unwrap_or_else(|_| "solo".to_string());
|
||||||
|
|
||||||
|
if let Some(config) =
|
||||||
|
super::resolver::find_config_smart(std::path::Path::new(&dir), service_name, &mode)
|
||||||
|
{
|
||||||
|
tracing::info!(
|
||||||
|
"✓ Found config in PROVISIONING_CONFIG_DIR: {}",
|
||||||
|
config.display()
|
||||||
|
);
|
||||||
|
return Some(config);
|
||||||
|
} else {
|
||||||
|
// If PROVISIONING_CONFIG_DIR is explicitly set but file not found,
|
||||||
|
// report it clearly and don't continue to other search methods
|
||||||
|
tracing::warn!(
|
||||||
|
"⚠ PROVISIONING_CONFIG_DIR is set but no config found for '{}'",
|
||||||
|
service_name
|
||||||
|
);
|
||||||
|
tracing::info!(
|
||||||
|
"💡 Searched in:\n • {}/config/{{}}.ncl\n • {}/config/{{}}.{}.ncl\n • \
|
||||||
|
{}/{{}}.ncl\n • {}/{{}}.{}.ncl",
|
||||||
|
dir,
|
||||||
|
dir,
|
||||||
|
mode,
|
||||||
|
dir,
|
||||||
|
dir,
|
||||||
|
mode
|
||||||
|
);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
tracing::debug!(" PROVISIONING_CONFIG_DIR not set (skipped)");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 3: Check {SERVICE}_MODE env var + find config file (only if
|
||||||
|
// PROVISIONING_CONFIG_DIR not set)
|
||||||
|
tracing::debug!(" Priority 3: Looking for relative config file");
|
||||||
|
let mode_var = format!("{}_MODE", service_name.to_uppercase().replace('-', "_"));
|
||||||
|
let mode = env::var(&mode_var).unwrap_or_else(|_| "solo".to_string());
|
||||||
|
tracing::info!(" Service mode: {} (from {} or default)", mode, mode_var);
|
||||||
|
|
||||||
if let Some(path) = find_config_file(service_name, &mode) {
|
if let Some(path) = find_config_file(service_name, &mode) {
|
||||||
tracing::debug!(
|
tracing::info!(
|
||||||
"Using config file for {}.{}: {:?}",
|
"✓ Found relative config file for {}.{}: {}",
|
||||||
service_name,
|
service_name,
|
||||||
mode,
|
mode,
|
||||||
path
|
path.display()
|
||||||
);
|
);
|
||||||
return Some(path);
|
return Some(path);
|
||||||
|
} else {
|
||||||
|
tracing::warn!(
|
||||||
|
"⚠ No config file found for {}.{} at relative path {}",
|
||||||
|
service_name,
|
||||||
|
mode,
|
||||||
|
format!("{}/{}.{}.ncl", CONFIG_BASE_PATH, service_name, mode)
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fallback - no config file found
|
// Fallback - no config file found
|
||||||
tracing::debug!(
|
tracing::warn!(
|
||||||
"No config file found for {}.{} - using defaults",
|
"⚠ No config found for service '{}' - will use hardcoded defaults",
|
||||||
service_name,
|
service_name
|
||||||
mode
|
);
|
||||||
|
tracing::info!(
|
||||||
|
"💡 To fix this, set one of:\n • {env_var}=/path/to/config.ncl\n • \
|
||||||
|
PROVISIONING_CONFIG_DIR=/path/to/platform",
|
||||||
|
env_var = env_var
|
||||||
);
|
);
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
@ -64,16 +119,22 @@ pub fn find_config_file(service_name: &str, mode: &str) -> Option<PathBuf> {
|
|||||||
|
|
||||||
// Prioridad 1: .ncl
|
// Prioridad 1: .ncl
|
||||||
let ncl_path = base.join(format!("{}.{}.ncl", service_name, mode));
|
let ncl_path = base.join(format!("{}.{}.ncl", service_name, mode));
|
||||||
|
tracing::debug!(" Checking: {}", ncl_path.display());
|
||||||
if ncl_path.exists() {
|
if ncl_path.exists() {
|
||||||
tracing::trace!("Found NCL config: {:?}", ncl_path);
|
tracing::debug!(" ✓ Found NCL config");
|
||||||
return Some(ncl_path);
|
return Some(ncl_path);
|
||||||
|
} else {
|
||||||
|
tracing::debug!(" ✗ Not found (not a file or doesn't exist)");
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prioridad 2: .toml
|
// Prioridad 2: .toml
|
||||||
let toml_path = base.join(format!("{}.{}.toml", service_name, mode));
|
let toml_path = base.join(format!("{}.{}.toml", service_name, mode));
|
||||||
|
tracing::debug!(" Checking: {}", toml_path.display());
|
||||||
if toml_path.exists() {
|
if toml_path.exists() {
|
||||||
tracing::trace!("Found TOML config: {:?}", toml_path);
|
tracing::debug!(" ✓ Found TOML config");
|
||||||
return Some(toml_path);
|
return Some(toml_path);
|
||||||
|
} else {
|
||||||
|
tracing::debug!(" ✗ Not found (not a file or doesn't exist)");
|
||||||
}
|
}
|
||||||
|
|
||||||
None
|
None
|
||||||
|
|||||||
@ -48,17 +48,76 @@
|
|||||||
//! let config = MyServiceConfig::load()?;
|
//! let config = MyServiceConfig::load()?;
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
|
pub mod deployment;
|
||||||
pub mod error;
|
pub mod error;
|
||||||
|
pub mod extensions;
|
||||||
pub mod format;
|
pub mod format;
|
||||||
|
pub mod git;
|
||||||
pub mod hierarchy;
|
pub mod hierarchy;
|
||||||
pub mod loader;
|
pub mod loader;
|
||||||
pub mod nickel;
|
pub mod nickel;
|
||||||
|
pub mod oci;
|
||||||
pub mod resolver;
|
pub mod resolver;
|
||||||
|
pub mod sops;
|
||||||
|
pub mod startup;
|
||||||
|
pub mod team_configs;
|
||||||
|
|
||||||
// Re-export main types
|
// Re-export main types
|
||||||
|
use std::env;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
pub use deployment::{load_deployment_mode, DeploymentConfig};
|
||||||
pub use error::{ConfigError, Result};
|
pub use error::{ConfigError, Result};
|
||||||
|
pub use extensions::{CachedExtension, ExtensionSchemaCache};
|
||||||
pub use format::ConfigLoader;
|
pub use format::ConfigLoader;
|
||||||
|
pub use git::{expand_path, GitRepoCache};
|
||||||
pub use hierarchy::{config_base_path, find_config_file, resolve_config_path};
|
pub use hierarchy::{config_base_path, find_config_file, resolve_config_path};
|
||||||
pub use loader::{ConfigLoaderExt, ConfigValidator};
|
pub use loader::{ConfigLoaderExt, ConfigValidator};
|
||||||
pub use nickel::is_nickel_available;
|
pub use nickel::is_nickel_available;
|
||||||
pub use resolver::{ConfigResolver, find_config_in_dir, find_config_in_dir_with_mode};
|
pub use resolver::{find_config_in_dir, find_config_in_dir_with_mode, ConfigResolver};
|
||||||
|
pub use startup::PlatformStartup;
|
||||||
|
|
||||||
|
/// Load service configuration from {service-name}.ncl in the platform config
|
||||||
|
/// directory
|
||||||
|
///
|
||||||
|
/// Searches in this order:
|
||||||
|
/// 1. $HOME/Library/Application
|
||||||
|
/// Support/provisioning/platform/config/{service-name}.ncl (macOS)
|
||||||
|
/// 2. $HOME/.config/provisioning/platform/config/{service-name}.ncl (Linux)
|
||||||
|
///
|
||||||
|
/// Returns the parsed JSON configuration or error if not found
|
||||||
|
pub fn load_service_config_from_ncl(service_name: &str) -> Result<serde_json::Value> {
|
||||||
|
let config_dir = if let Ok(home) = env::var("HOME") {
|
||||||
|
#[cfg(target_os = "macos")]
|
||||||
|
{
|
||||||
|
format!(
|
||||||
|
"{}/Library/Application Support/provisioning/platform/config",
|
||||||
|
home
|
||||||
|
)
|
||||||
|
}
|
||||||
|
#[cfg(not(target_os = "macos"))]
|
||||||
|
{
|
||||||
|
format!("{}/.config/provisioning/platform/config", home)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return Err(ConfigError::not_found("Could not determine HOME directory"));
|
||||||
|
};
|
||||||
|
|
||||||
|
let ncl_path = format!("{}/{}.ncl", config_dir, service_name);
|
||||||
|
let ncl_file = PathBuf::from(&ncl_path);
|
||||||
|
|
||||||
|
if !ncl_file.exists() {
|
||||||
|
return Err(ConfigError::not_found(format!(
|
||||||
|
"{}.ncl not found in platform config directory",
|
||||||
|
service_name
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let json_str = nickel::export_to_json(&ncl_path)?;
|
||||||
|
serde_json::from_str(&json_str).map_err(|e| {
|
||||||
|
ConfigError::deserialization_failed(format!(
|
||||||
|
"Failed to parse {} config as JSON: {}",
|
||||||
|
service_name, e
|
||||||
|
))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@ -1,4 +1,5 @@
|
|||||||
use std::path::Path;
|
use std::env;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
use std::process::Command;
|
use std::process::Command;
|
||||||
|
|
||||||
use crate::error::{ConfigError, Result};
|
use crate::error::{ConfigError, Result};
|
||||||
@ -12,6 +13,114 @@ pub fn is_nickel_available() -> bool {
|
|||||||
.unwrap_or(false)
|
.unwrap_or(false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Scans ~/.cache/provisioning/extensions/ for cached extension schemas
|
||||||
|
fn scan_extension_schemas(home: &str) -> Vec<PathBuf> {
|
||||||
|
let extensions_cache = PathBuf::from(format!("{}/.cache/provisioning/extensions", home));
|
||||||
|
|
||||||
|
if !extensions_cache.exists() {
|
||||||
|
return Vec::new();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut extension_paths = Vec::new();
|
||||||
|
|
||||||
|
match std::fs::read_dir(&extensions_cache) {
|
||||||
|
Ok(entries) => {
|
||||||
|
for entry in entries.flatten() {
|
||||||
|
let path = entry.path();
|
||||||
|
if path.is_dir() {
|
||||||
|
// Look for schemas subdirectory in each extension dir
|
||||||
|
// Expect structure: extensions/{name}@{version}/schemas
|
||||||
|
let schemas_path = path.join("schemas");
|
||||||
|
if schemas_path.exists() {
|
||||||
|
tracing::debug!("Found extension schemas cache at: {:?}", schemas_path);
|
||||||
|
extension_paths.push(schemas_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!("Failed to scan extension schemas cache: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
extension_paths
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Construye el NICKEL_IMPORT_PATH correcto usando:
|
||||||
|
/// 1. Variable de entorno NICKEL_IMPORT_PATH si existe (additive, prepended)
|
||||||
|
/// 2. Extension schemas (scan ~/.cache/provisioning/extensions/*/schemas)
|
||||||
|
/// 3. Git cache directories (schemas, configs)
|
||||||
|
/// 4. Paths desde el script principal (PROVISIONING_USER_PLATFORM)
|
||||||
|
/// 5. Path relativo al directorio actual (provisioning/schemas)
|
||||||
|
fn build_nickel_import_path() -> Result<String> {
|
||||||
|
let mut paths = Vec::new();
|
||||||
|
|
||||||
|
// Priority 1: Explicit NICKEL_IMPORT_PATH env var (additive - prepend to final
|
||||||
|
// path)
|
||||||
|
let explicit_path = env::var("NICKEL_IMPORT_PATH").ok();
|
||||||
|
|
||||||
|
// Priority 2: Extension schemas (scan
|
||||||
|
// ~/.cache/provisioning/extensions/*/schemas)
|
||||||
|
let home = env::var("HOME").ok();
|
||||||
|
|
||||||
|
if let Some(ref home) = home {
|
||||||
|
let mut ext_paths = scan_extension_schemas(home);
|
||||||
|
paths.append(&mut ext_paths);
|
||||||
|
|
||||||
|
// Priority 3: Git cache directories (if they exist)
|
||||||
|
let schemas_cache = PathBuf::from(format!("{}/.cache/provisioning/schemas", home));
|
||||||
|
if schemas_cache.exists() {
|
||||||
|
tracing::debug!("Found Git schemas cache at: {:?}", schemas_cache);
|
||||||
|
paths.push(schemas_cache);
|
||||||
|
}
|
||||||
|
|
||||||
|
let configs_cache = PathBuf::from(format!("{}/.cache/provisioning/configs", home));
|
||||||
|
if configs_cache.exists() {
|
||||||
|
tracing::debug!("Found Git configs cache at: {:?}", configs_cache);
|
||||||
|
paths.push(configs_cache);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 4: Find provisioning/schemas relative to current directory or
|
||||||
|
// project root
|
||||||
|
if let Ok(cwd) = env::current_dir() {
|
||||||
|
let schemas_path = cwd.join("provisioning").join("schemas");
|
||||||
|
if schemas_path.exists() {
|
||||||
|
tracing::debug!("Found provisioning/schemas at: {:?}", schemas_path);
|
||||||
|
paths.push(schemas_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 5: Use PROVISIONING_USER_PLATFORM from main provisioning script
|
||||||
|
if let Ok(user_platform) = env::var("PROVISIONING_USER_PLATFORM") {
|
||||||
|
let user_platform_path = PathBuf::from(&user_platform);
|
||||||
|
if user_platform_path.exists() {
|
||||||
|
tracing::debug!("Found PROVISIONING_USER_PLATFORM at: {}", user_platform);
|
||||||
|
paths.push(user_platform_path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build final path, optionally prepend explicit env var
|
||||||
|
if paths.is_empty() {
|
||||||
|
tracing::warn!("No NICKEL_IMPORT_PATH sources found, using default 'provisioning/schemas'");
|
||||||
|
paths.push(PathBuf::from("provisioning/schemas"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut import_path = paths
|
||||||
|
.iter()
|
||||||
|
.map(|p| p.to_string_lossy().to_string())
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join(":");
|
||||||
|
|
||||||
|
// If explicit NICKEL_IMPORT_PATH was set, prepend it to allow overrides
|
||||||
|
if let Some(explicit) = explicit_path {
|
||||||
|
tracing::debug!("Prepending explicit NICKEL_IMPORT_PATH from environment");
|
||||||
|
import_path = format!("{}:{}", explicit, import_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(import_path)
|
||||||
|
}
|
||||||
|
|
||||||
/// Exporta un archivo NCL a JSON usando el CLI de Nickel
|
/// Exporta un archivo NCL a JSON usando el CLI de Nickel
|
||||||
///
|
///
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
@ -33,12 +142,16 @@ pub fn export_to_json<P: AsRef<Path>>(ncl_path: P) -> Result<String> {
|
|||||||
|
|
||||||
tracing::debug!("Exporting NCL config to JSON: {:?}", ncl_path);
|
tracing::debug!("Exporting NCL config to JSON: {:?}", ncl_path);
|
||||||
|
|
||||||
|
// Build NICKEL_IMPORT_PATH with proper schema directories
|
||||||
|
let import_path = build_nickel_import_path()?;
|
||||||
|
tracing::debug!("Using NICKEL_IMPORT_PATH: {}", import_path);
|
||||||
|
|
||||||
let output = Command::new("nickel")
|
let output = Command::new("nickel")
|
||||||
.arg("export")
|
.arg("export")
|
||||||
.arg("--format")
|
.arg("--format")
|
||||||
.arg("json")
|
.arg("json")
|
||||||
.arg(ncl_path)
|
.arg(ncl_path)
|
||||||
.env("NICKEL_IMPORT_PATH", "provisioning/schemas")
|
.env("NICKEL_IMPORT_PATH", &import_path)
|
||||||
.output()
|
.output()
|
||||||
.map_err(|e| ConfigError::io_error(format!("Failed to execute nickel CLI: {}", e)))?;
|
.map_err(|e| ConfigError::io_error(format!("Failed to execute nickel CLI: {}", e)))?;
|
||||||
|
|
||||||
|
|||||||
267
crates/platform-config/src/oci.rs
Normal file
267
crates/platform-config/src/oci.rs
Normal file
@ -0,0 +1,267 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use crate::error::{ConfigError, Result};
|
||||||
|
|
||||||
|
/// Client for interacting with OCI registries (Zot, Docker Registry, etc.)
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct OciClient {
|
||||||
|
registry_url: String,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
username: Option<String>,
|
||||||
|
#[allow(dead_code)]
|
||||||
|
password: Option<String>,
|
||||||
|
tls_verify: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// OCI Image metadata
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct ImageMetadata {
|
||||||
|
pub name: String,
|
||||||
|
pub tag: String,
|
||||||
|
pub digest: String,
|
||||||
|
pub size_bytes: u64,
|
||||||
|
pub created: String,
|
||||||
|
pub config: Option<HashMap<String, String>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// OCI Repository information
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct RepositoryInfo {
|
||||||
|
pub name: String,
|
||||||
|
pub tag_count: usize,
|
||||||
|
pub manifest_count: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl OciClient {
|
||||||
|
/// Create a new OCI client
|
||||||
|
pub fn new(registry_url: String, tls_verify: bool) -> Self {
|
||||||
|
Self {
|
||||||
|
registry_url,
|
||||||
|
username: None,
|
||||||
|
password: None,
|
||||||
|
tls_verify,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a new OCI client with authentication
|
||||||
|
pub fn with_auth(
|
||||||
|
registry_url: String,
|
||||||
|
username: String,
|
||||||
|
password: String,
|
||||||
|
tls_verify: bool,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
registry_url,
|
||||||
|
username: Some(username),
|
||||||
|
password: Some(password),
|
||||||
|
tls_verify,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the registry URL
|
||||||
|
pub fn registry_url(&self) -> &str {
|
||||||
|
&self.registry_url
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if registry is accessible
|
||||||
|
pub async fn health_check(&self) -> Result<bool> {
|
||||||
|
let url = format!("{}/v2/_catalog", self.registry_url);
|
||||||
|
|
||||||
|
match reqwest::Client::builder()
|
||||||
|
.danger_accept_invalid_certs(!self.tls_verify)
|
||||||
|
.build()
|
||||||
|
{
|
||||||
|
Ok(client) => match client.get(&url).send().await {
|
||||||
|
Ok(response) => Ok(response.status().is_success()),
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!("OCI registry health check failed: {}", e);
|
||||||
|
Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to reach OCI registry at {}: {}",
|
||||||
|
self.registry_url, e
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to create HTTP client: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// List all repositories in the registry
|
||||||
|
pub async fn list_repositories(&self) -> Result<Vec<String>> {
|
||||||
|
let url = format!("{}/v2/_catalog", self.registry_url);
|
||||||
|
|
||||||
|
match reqwest::Client::builder()
|
||||||
|
.danger_accept_invalid_certs(!self.tls_verify)
|
||||||
|
.build()
|
||||||
|
{
|
||||||
|
Ok(client) => match client.get(&url).send().await {
|
||||||
|
Ok(response) => match response.json::<serde_json::Value>().await {
|
||||||
|
Ok(data) => {
|
||||||
|
if let Some(repos) = data.get("repositories").and_then(|v| v.as_array()) {
|
||||||
|
let repository_names: Vec<String> = repos
|
||||||
|
.iter()
|
||||||
|
.filter_map(|v| v.as_str().map(|s| s.to_string()))
|
||||||
|
.collect();
|
||||||
|
Ok(repository_names)
|
||||||
|
} else {
|
||||||
|
Ok(Vec::new())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!("Failed to parse registry response: {}", e);
|
||||||
|
Err(ConfigError::deserialization_failed(format!(
|
||||||
|
"Invalid registry response: {}",
|
||||||
|
e
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to list repositories: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
},
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to create HTTP client: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// List all tags for a repository
|
||||||
|
pub async fn list_tags(&self, repository: &str) -> Result<Vec<String>> {
|
||||||
|
let url = format!(
|
||||||
|
"{}/v2/{}/tags/list",
|
||||||
|
self.registry_url,
|
||||||
|
urlencoding::encode(repository)
|
||||||
|
);
|
||||||
|
|
||||||
|
match reqwest::Client::builder()
|
||||||
|
.danger_accept_invalid_certs(!self.tls_verify)
|
||||||
|
.build()
|
||||||
|
{
|
||||||
|
Ok(client) => match client.get(&url).send().await {
|
||||||
|
Ok(response) => match response.json::<serde_json::Value>().await {
|
||||||
|
Ok(data) => {
|
||||||
|
if let Some(tags) = data.get("tags").and_then(|v| v.as_array()) {
|
||||||
|
let tag_names: Vec<String> = tags
|
||||||
|
.iter()
|
||||||
|
.filter_map(|v| v.as_str().map(|s| s.to_string()))
|
||||||
|
.collect();
|
||||||
|
Ok(tag_names)
|
||||||
|
} else {
|
||||||
|
Ok(Vec::new())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!("Failed to parse tags response: {}", e);
|
||||||
|
Err(ConfigError::deserialization_failed(format!(
|
||||||
|
"Invalid tags response: {}",
|
||||||
|
e
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to list tags for {}: {}",
|
||||||
|
repository, e
|
||||||
|
))),
|
||||||
|
},
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to create HTTP client: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get manifest for an image
|
||||||
|
pub async fn get_manifest(&self, repository: &str, tag: &str) -> Result<serde_json::Value> {
|
||||||
|
let url = format!(
|
||||||
|
"{}/v2/{}/manifests/{}",
|
||||||
|
self.registry_url,
|
||||||
|
urlencoding::encode(repository),
|
||||||
|
urlencoding::encode(tag)
|
||||||
|
);
|
||||||
|
|
||||||
|
match reqwest::Client::builder()
|
||||||
|
.danger_accept_invalid_certs(!self.tls_verify)
|
||||||
|
.build()
|
||||||
|
{
|
||||||
|
Ok(client) => match client.get(&url).send().await {
|
||||||
|
Ok(response) => match response.json::<serde_json::Value>().await {
|
||||||
|
Ok(manifest) => Ok(manifest),
|
||||||
|
Err(e) => Err(ConfigError::deserialization_failed(format!(
|
||||||
|
"Failed to parse manifest: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
},
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to get manifest for {}:{}: {}",
|
||||||
|
repository, tag, e
|
||||||
|
))),
|
||||||
|
},
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to create HTTP client: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Constructor without credentials: stores URL and TLS flag verbatim,
    // leaves username/password unset.
    #[test]
    fn test_oci_client_new() {
        let client = OciClient::new("http://localhost:5001".to_string(), true);
        assert_eq!(client.registry_url(), "http://localhost:5001");
        assert!(client.tls_verify);
        assert!(client.username.is_none());
    }

    // Constructor with basic-auth credentials: both username and password
    // are populated, and tls_verify=false is preserved.
    #[test]
    fn test_oci_client_with_auth() {
        let client = OciClient::with_auth(
            "http://localhost:5001".to_string(),
            "user".to_string(),
            "pass".to_string(),
            false,
        );
        assert_eq!(client.registry_url(), "http://localhost:5001");
        assert!(!client.tls_verify);
        assert!(client.username.is_some());
        assert!(client.password.is_some());
    }

    // Best-effort integration test: only exercises the happy path when a
    // Zot registry is actually running; unreachable registry is tolerated
    // (expected in CI), so this test never fails on connectivity.
    #[tokio::test]
    async fn test_oci_health_check_localhost() {
        // This test requires Zot running at localhost:5001
        let client = OciClient::new("http://localhost:5001".to_string(), false);
        match client.health_check().await {
            Ok(healthy) => {
                println!(
                    "✓ Zot registry health: {}",
                    if healthy { "OK" } else { "UNHEALTHY" }
                );
            }
            Err(e) => {
                eprintln!("ℹ Zot registry unavailable (expected in CI): {}", e);
            }
        }
    }

    // Same best-effort pattern for the catalog endpoint: asserts nothing
    // about the result, only that the call path does not panic.
    #[tokio::test]
    async fn test_oci_list_repositories_empty() {
        // This test requires Zot running at localhost:5001
        let client = OciClient::new("http://localhost:5001".to_string(), false);
        match client.list_repositories().await {
            Ok(repos) => {
                println!("✓ Found {} repositories in Zot", repos.len());
            }
            Err(e) => {
                eprintln!("ℹ Could not list repositories (expected in CI): {}", e);
            }
        }
    }
}
|
||||||
@ -1,5 +1,7 @@
|
|||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
|
use tracing::debug;
|
||||||
|
|
||||||
/// Resolves configuration file paths with CLI flags priority
|
/// Resolves configuration file paths with CLI flags priority
|
||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct ConfigResolver {
|
pub struct ConfigResolver {
|
||||||
@ -87,7 +89,8 @@ impl ConfigResolver {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Search for config file in directory with specific mode
|
/// Search for config file in directory with specific mode
|
||||||
/// Searches in order: {service}.{mode}.ncl, {service}.{mode}.toml, {service}.{mode}.json
|
/// Searches in order: {service}.{mode}.ncl, {service}.{mode}.toml,
|
||||||
|
/// {service}.{mode}.json
|
||||||
pub fn find_config_in_dir_with_mode(dir: &Path, service_name: &str, mode: &str) -> Option<PathBuf> {
|
pub fn find_config_in_dir_with_mode(dir: &Path, service_name: &str, mode: &str) -> Option<PathBuf> {
|
||||||
for ext in &["ncl", "toml", "json"] {
|
for ext in &["ncl", "toml", "json"] {
|
||||||
let path = dir.join(format!("{}.{}.{}", service_name, mode, ext));
|
let path = dir.join(format!("{}.{}.{}", service_name, mode, ext));
|
||||||
@ -99,29 +102,86 @@ pub fn find_config_in_dir_with_mode(dir: &Path, service_name: &str, mode: &str)
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Search for config file with intelligent fallback strategy
|
||||||
|
/// 1. {dir}/config/{service}.ncl (no mode suffix first)
|
||||||
|
/// 2. {dir}/config/{service}.{mode}.ncl (with mode suffix)
|
||||||
|
/// 3. {dir}/{service}.ncl (no mode suffix)
|
||||||
|
/// 4. {dir}/{service}.{mode}.ncl (with mode suffix)
|
||||||
|
pub fn find_config_smart(dir: &Path, service_name: &str, mode: &str) -> Option<PathBuf> {
|
||||||
|
debug!(" 🔍 Smart config search in: {}", dir.display());
|
||||||
|
|
||||||
|
// Strategy 1: Check config/ subdir without mode first
|
||||||
|
let config_subdir = dir.join("config");
|
||||||
|
if config_subdir.exists() {
|
||||||
|
debug!(
|
||||||
|
" • Checking {}/config/ without mode suffix",
|
||||||
|
dir.display()
|
||||||
|
);
|
||||||
|
if let Some(path) = find_config_in_dir(&config_subdir, service_name) {
|
||||||
|
return Some(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strategy 2: Check config/ subdir with mode suffix
|
||||||
|
debug!(
|
||||||
|
" • Checking {}/config/ with mode suffix '.{}'",
|
||||||
|
dir.display(),
|
||||||
|
mode
|
||||||
|
);
|
||||||
|
if let Some(path) = find_config_in_dir_with_mode(&config_subdir, service_name, mode) {
|
||||||
|
return Some(path);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
debug!(" • {}/config/ does not exist (skipped)", dir.display());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strategy 3: Check parent dir without mode suffix
|
||||||
|
debug!(" • Checking {} without mode suffix", dir.display());
|
||||||
|
if let Some(path) = find_config_in_dir(dir, service_name) {
|
||||||
|
return Some(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strategy 4: Check parent dir with mode suffix
|
||||||
|
debug!(
|
||||||
|
" • Checking {} with mode suffix '.{}'",
|
||||||
|
dir.display(),
|
||||||
|
mode
|
||||||
|
);
|
||||||
|
if let Some(path) = find_config_in_dir_with_mode(dir, service_name, mode) {
|
||||||
|
return Some(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
debug!(" ✗ No config found in any location");
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
/// Search for config file in directory with default naming
|
/// Search for config file in directory with default naming
|
||||||
/// Searches in order: {service}.ncl, {service}.toml, {service}.json
|
/// Searches in order: {service}.ncl, {service}.toml, {service}.json
|
||||||
pub fn find_config_in_dir(dir: &Path, service_name: &str) -> Option<PathBuf> {
|
pub fn find_config_in_dir(dir: &Path, service_name: &str) -> Option<PathBuf> {
|
||||||
|
debug!(" Searching in: {}", dir.display());
|
||||||
for ext in &["ncl", "toml", "json"] {
|
for ext in &["ncl", "toml", "json"] {
|
||||||
let path = dir.join(format!("{}.{}", service_name, ext));
|
let path = dir.join(format!("{}.{}", service_name, ext));
|
||||||
|
debug!(" Trying: {}", path.display());
|
||||||
if path.exists() {
|
if path.exists() {
|
||||||
tracing::trace!("Found config in dir: {:?}", path);
|
tracing::info!(" ✓ Found: {}", path.display());
|
||||||
return Some(path);
|
return Some(path);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
debug!(" (no files found)");
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
|
||||||
use std::fs;
|
use std::fs;
|
||||||
|
|
||||||
use tempfile::TempDir;
|
use tempfile::TempDir;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_cli_config_highest_priority() {
|
fn test_cli_config_highest_priority() {
|
||||||
let resolver = ConfigResolver::new()
|
let resolver =
|
||||||
.with_cli_config(Some(PathBuf::from("/explicit/path.toml")));
|
ConfigResolver::new().with_cli_config(Some(PathBuf::from("/explicit/path.toml")));
|
||||||
|
|
||||||
let resolved = resolver.resolve("orchestrator");
|
let resolved = resolver.resolve("orchestrator");
|
||||||
assert_eq!(resolved, Some(PathBuf::from("/explicit/path.toml")));
|
assert_eq!(resolved, Some(PathBuf::from("/explicit/path.toml")));
|
||||||
@ -137,8 +197,8 @@ mod tests {
|
|||||||
fs::write(&ncl_path, "{}").unwrap();
|
fs::write(&ncl_path, "{}").unwrap();
|
||||||
fs::write(&toml_path, "[orchestrator]").unwrap();
|
fs::write(&toml_path, "[orchestrator]").unwrap();
|
||||||
|
|
||||||
let resolver = ConfigResolver::new()
|
let resolver =
|
||||||
.with_cli_config_dir(Some(temp_dir.path().to_path_buf()));
|
ConfigResolver::new().with_cli_config_dir(Some(temp_dir.path().to_path_buf()));
|
||||||
|
|
||||||
let resolved = resolver.resolve("orchestrator").unwrap();
|
let resolved = resolver.resolve("orchestrator").unwrap();
|
||||||
// Should prefer .ncl over .toml
|
// Should prefer .ncl over .toml
|
||||||
@ -188,8 +248,8 @@ mod tests {
|
|||||||
fn test_no_config_found_returns_none() {
|
fn test_no_config_found_returns_none() {
|
||||||
let temp_dir = TempDir::new().unwrap();
|
let temp_dir = TempDir::new().unwrap();
|
||||||
|
|
||||||
let resolver = ConfigResolver::new()
|
let resolver =
|
||||||
.with_cli_config_dir(Some(temp_dir.path().to_path_buf()));
|
ConfigResolver::new().with_cli_config_dir(Some(temp_dir.path().to_path_buf()));
|
||||||
|
|
||||||
let resolved = resolver.resolve("nonexistent-service");
|
let resolved = resolver.resolve("nonexistent-service");
|
||||||
assert!(resolved.is_none());
|
assert!(resolved.is_none());
|
||||||
@ -203,8 +263,8 @@ mod tests {
|
|||||||
|
|
||||||
// This test just verifies the resolver logic works correctly
|
// This test just verifies the resolver logic works correctly
|
||||||
// (actual env var override would need temp_env crate)
|
// (actual env var override would need temp_env crate)
|
||||||
let resolver = ConfigResolver::new()
|
let resolver =
|
||||||
.with_cli_config_dir(Some(temp_dir.path().to_path_buf()));
|
ConfigResolver::new().with_cli_config_dir(Some(temp_dir.path().to_path_buf()));
|
||||||
|
|
||||||
let resolved = resolver.resolve("orchestrator").unwrap();
|
let resolved = resolver.resolve("orchestrator").unwrap();
|
||||||
assert_eq!(resolved, config_path);
|
assert_eq!(resolved, config_path);
|
||||||
|
|||||||
214
crates/platform-config/src/sops.rs
Normal file
214
crates/platform-config/src/sops.rs
Normal file
@ -0,0 +1,214 @@
|
|||||||
|
use std::path::Path;
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
|
use crate::error::{ConfigError, Result};
|
||||||
|
|
||||||
|
/// SOPS (Secrets Operations) integration for decrypting encrypted configs
///
/// Thin wrapper around the external `sops` executable; all crypto work is
/// delegated to that binary via `std::process::Command`.
#[derive(Debug, Clone)]
pub struct SopsDecryptor {
    // Name (or path) of the sops binary to invoke; always "sops" today.
    sops_executable: String,
}
|
||||||
|
|
||||||
|
impl SopsDecryptor {
|
||||||
|
/// Create a new SOPS decryptor
|
||||||
|
pub fn new() -> Result<Self> {
|
||||||
|
// Check if sops is installed
|
||||||
|
match Command::new("sops").arg("--version").output() {
|
||||||
|
Ok(output) if output.status.success() => {
|
||||||
|
let version = String::from_utf8_lossy(&output.stdout);
|
||||||
|
tracing::debug!("SOPS available: {}", version.trim());
|
||||||
|
Ok(Self {
|
||||||
|
sops_executable: "sops".to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
_ => Err(ConfigError::io_error(
|
||||||
|
"SOPS executable not found. Install SOPS: https://github.com/mozilla/sops",
|
||||||
|
)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if SOPS is available
|
||||||
|
pub fn is_available() -> bool {
|
||||||
|
Command::new("sops")
|
||||||
|
.arg("--version")
|
||||||
|
.output()
|
||||||
|
.map(|output| output.status.success())
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Decrypt a SOPS-encrypted file and return the plaintext content
|
||||||
|
pub fn decrypt_file<P: AsRef<Path>>(&self, path: P) -> Result<String> {
|
||||||
|
let path = path.as_ref();
|
||||||
|
|
||||||
|
if !path.exists() {
|
||||||
|
return Err(ConfigError::not_found(format!(
|
||||||
|
"SOPS file not found: {:?}",
|
||||||
|
path
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
tracing::debug!("Decrypting SOPS file: {:?}", path);
|
||||||
|
|
||||||
|
match Command::new(&self.sops_executable)
|
||||||
|
.arg("--decrypt")
|
||||||
|
.arg(path)
|
||||||
|
.output()
|
||||||
|
{
|
||||||
|
Ok(output) => {
|
||||||
|
if !output.status.success() {
|
||||||
|
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||||
|
return Err(ConfigError::validation_failed(format!(
|
||||||
|
"SOPS decryption failed: {}",
|
||||||
|
stderr
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let content = String::from_utf8(output.stdout).map_err(|e| {
|
||||||
|
ConfigError::deserialization_failed(format!(
|
||||||
|
"Invalid UTF-8 in SOPS output: {}",
|
||||||
|
e
|
||||||
|
))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
tracing::debug!("Successfully decrypted SOPS file");
|
||||||
|
Ok(content)
|
||||||
|
}
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to execute SOPS: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Try to find and decrypt a secrets file for a service
|
||||||
|
/// Looks for patterns: {service}.secrets.ncl.sops,
|
||||||
|
/// {service}.secrets.toml.sops
|
||||||
|
pub fn find_and_decrypt_secrets(
|
||||||
|
&self,
|
||||||
|
service_name: &str,
|
||||||
|
search_dir: &Path,
|
||||||
|
) -> Option<String> {
|
||||||
|
let patterns = vec![
|
||||||
|
format!("{}.secrets.ncl.sops", service_name),
|
||||||
|
format!("{}.secrets.toml.sops", service_name),
|
||||||
|
format!("secrets.{}.ncl.sops", service_name),
|
||||||
|
format!("secrets.{}.toml.sops", service_name),
|
||||||
|
];
|
||||||
|
|
||||||
|
for pattern in patterns {
|
||||||
|
let secret_file = search_dir.join(&pattern);
|
||||||
|
if secret_file.exists() {
|
||||||
|
match self.decrypt_file(&secret_file) {
|
||||||
|
Ok(content) => {
|
||||||
|
tracing::info!("Loaded secrets for {}: {}", service_name, pattern);
|
||||||
|
return Some(content);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!("Failed to decrypt {}: {}", pattern, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if a file appears to be SOPS-encrypted
|
||||||
|
pub fn is_sops_encrypted(path: &Path) -> bool {
|
||||||
|
path.extension()
|
||||||
|
.and_then(|ext| ext.to_str())
|
||||||
|
.map(|ext| ext == "sops")
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the original file format from a SOPS filename
|
||||||
|
/// Example: "config.ncl.sops" → "ncl"
|
||||||
|
pub fn get_original_format(sops_path: &Path) -> Option<String> {
|
||||||
|
sops_path
|
||||||
|
.file_stem()
|
||||||
|
.and_then(|stem| stem.to_str())
|
||||||
|
.and_then(|stem_str| stem_str.split('.').next_back().map(|ext| ext.to_string()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for SopsDecryptor {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new().unwrap_or_else(|_| Self {
|
||||||
|
sops_executable: "sops".to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use tempfile::TempDir;

    use super::*;

    // Probe test: never asserts, since CI machines typically lack SOPS.
    #[test]
    fn test_sops_availability() {
        // This test checks if SOPS is installed
        let available = SopsDecryptor::is_available();
        if available {
            println!("✓ SOPS is available");
        } else {
            println!("ℹ SOPS not installed (expected in CI)");
        }
    }

    // When SOPS is installed, the constructor records the binary name;
    // when missing, the error path is exercised without failing the test.
    #[test]
    fn test_sops_decryptor_new() {
        match SopsDecryptor::new() {
            Ok(decryptor) => {
                assert_eq!(decryptor.sops_executable, "sops");
                println!("✓ SOPS decryptor created");
            }
            Err(e) => {
                println!("ℹ SOPS not available: {}", e);
            }
        }
    }

    // Extension-based detection: only a literal ".sops" suffix counts.
    #[test]
    fn test_is_sops_encrypted() {
        assert!(SopsDecryptor::is_sops_encrypted(Path::new(
            "config.ncl.sops"
        )));
        assert!(SopsDecryptor::is_sops_encrypted(Path::new(
            "secrets.toml.sops"
        )));
        assert!(!SopsDecryptor::is_sops_encrypted(Path::new("config.ncl")));
        assert!(!SopsDecryptor::is_sops_encrypted(Path::new("secrets.toml")));
    }

    // The inner extension (the one before ".sops") is the original format.
    #[test]
    fn test_get_original_format() {
        assert_eq!(
            SopsDecryptor::get_original_format(Path::new("config.ncl.sops")),
            Some("ncl".to_string())
        );
        assert_eq!(
            SopsDecryptor::get_original_format(Path::new("secrets.toml.sops")),
            Some("toml".to_string())
        );
        assert_eq!(
            SopsDecryptor::get_original_format(Path::new("config.yaml.sops")),
            Some("yaml".to_string())
        );
    }

    // Searching an empty directory must return None; only runs when SOPS
    // is installed (constructor fails otherwise).
    #[test]
    fn test_find_and_decrypt_secrets_missing() {
        let temp_dir = TempDir::new().unwrap();
        match SopsDecryptor::new() {
            Ok(decryptor) => {
                let result = decryptor.find_and_decrypt_secrets("vault-service", temp_dir.path());
                assert!(result.is_none(), "Should not find any secrets");
                println!("✓ Missing secrets handled correctly");
            }
            Err(_) => {
                println!("ℹ SOPS not available, skipping decrypt test");
            }
        }
    }
}
|
||||||
363
crates/platform-config/src/startup.rs
Normal file
363
crates/platform-config/src/startup.rs
Normal file
@ -0,0 +1,363 @@
|
|||||||
|
use std::collections::{HashMap, HashSet};
|
||||||
|
|
||||||
|
use serde_json::Value;
|
||||||
|
|
||||||
|
use crate::error::{ConfigError, Result};
|
||||||
|
use crate::git::GitRepoCache;
|
||||||
|
|
||||||
|
/// Platform startup manager
///
/// Handles platform initialization, service enablement checks,
/// and dependency validation during startup.
pub struct PlatformStartup {
    // Cached clone of the schemas Git repository.
    git_schemas: GitRepoCache,
    // Cached clone of the configs Git repository.
    git_configs: GitRepoCache,
    // Names of services marked enabled in the deployment-mode config.
    enabled_services: HashSet<String>,
    // Maps a service name to the names of services it depends on.
    service_dependencies: HashMap<String, Vec<String>>,
}
|
||||||
|
|
||||||
|
impl PlatformStartup {
|
||||||
|
/// Initialize platform startup manager from deployment-mode config
|
||||||
|
pub fn new(deployment_config: &Value) -> Result<Self> {
|
||||||
|
// Extract enabled services from deployment-mode.ncl
|
||||||
|
let enabled_services = extract_enabled_services(deployment_config)?;
|
||||||
|
|
||||||
|
// Extract Git repository configuration
|
||||||
|
let (git_schemas, git_configs) = extract_git_repos(deployment_config)?;
|
||||||
|
|
||||||
|
// Extract service dependencies
|
||||||
|
let service_dependencies = extract_service_dependencies(deployment_config)?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
git_schemas,
|
||||||
|
git_configs,
|
||||||
|
enabled_services,
|
||||||
|
service_dependencies,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if a service is enabled in deployment-mode
|
||||||
|
pub fn is_service_enabled(&self, service_name: &str) -> bool {
|
||||||
|
self.enabled_services.contains(service_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get list of enabled services
|
||||||
|
pub fn enabled_services(&self) -> Vec<String> {
|
||||||
|
self.enabled_services.iter().cloned().collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Validate that all dependencies for a service are enabled
|
||||||
|
pub fn validate_dependencies(&self, service_name: &str) -> Result<()> {
|
||||||
|
let dependencies = self
|
||||||
|
.service_dependencies
|
||||||
|
.get(service_name)
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
for dep in dependencies {
|
||||||
|
if !self.is_service_enabled(&dep) {
|
||||||
|
return Err(ConfigError::validation_failed(format!(
|
||||||
|
"Service '{}' requires '{}' to be enabled",
|
||||||
|
service_name, dep
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Ensure Git repositories are cloned/updated and return cache paths
|
||||||
|
/// If cloning fails, attempt to use existing cache
|
||||||
|
pub fn setup_git_repos(&self) -> Result<(std::path::PathBuf, std::path::PathBuf)> {
|
||||||
|
let schemas_path = match self.git_schemas.ensure_cached() {
|
||||||
|
Ok(path) => {
|
||||||
|
tracing::info!("✓ Schemas repository ready");
|
||||||
|
path
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(
|
||||||
|
"Failed to clone schemas repository: {}. Attempting to use existing cache.",
|
||||||
|
e
|
||||||
|
);
|
||||||
|
// Try to use existing cache directory
|
||||||
|
let cache_dir = self.git_schemas.cache_dir.clone();
|
||||||
|
if cache_dir.exists() {
|
||||||
|
tracing::info!("Using existing schemas cache at {:?}", cache_dir);
|
||||||
|
cache_dir
|
||||||
|
} else {
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let configs_path = match self.git_configs.ensure_cached() {
|
||||||
|
Ok(path) => {
|
||||||
|
tracing::info!("✓ Configs repository ready");
|
||||||
|
path
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(
|
||||||
|
"Failed to clone configs repository: {}. Attempting to use existing cache.",
|
||||||
|
e
|
||||||
|
);
|
||||||
|
// Try to use existing cache directory
|
||||||
|
let cache_dir = self.git_configs.cache_dir.clone();
|
||||||
|
if cache_dir.exists() {
|
||||||
|
tracing::info!("Using existing configs cache at {:?}", cache_dir);
|
||||||
|
cache_dir
|
||||||
|
} else {
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing::info!("✓ Git repositories ready (using cache)");
|
||||||
|
tracing::debug!(" Schemas cache: {:?}", schemas_path);
|
||||||
|
tracing::debug!(" Configs cache: {:?}", configs_path);
|
||||||
|
|
||||||
|
Ok((schemas_path, configs_path))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get service startup order based on dependencies
|
||||||
|
pub fn get_startup_order(&self) -> Result<Vec<String>> {
|
||||||
|
let mut order = Vec::new();
|
||||||
|
let mut visited = HashSet::new();
|
||||||
|
|
||||||
|
for service in &self.enabled_services {
|
||||||
|
self.visit_service(service, &mut visited, &mut order)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(order)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn visit_service(
|
||||||
|
&self,
|
||||||
|
service: &str,
|
||||||
|
visited: &mut HashSet<String>,
|
||||||
|
order: &mut Vec<String>,
|
||||||
|
) -> Result<()> {
|
||||||
|
if visited.contains(service) {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Visit dependencies first
|
||||||
|
if let Some(deps) = self.service_dependencies.get(service) {
|
||||||
|
for dep in deps {
|
||||||
|
self.visit_service(dep, visited, order)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
visited.insert(service.to_string());
|
||||||
|
order.push(service.to_string());
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extract enabled services from deployment-mode config
|
||||||
|
///
|
||||||
|
/// Supports two formats:
|
||||||
|
/// 1. Legacy: "enabled_services": ["service1", "service2"]
|
||||||
|
/// 2. Current: "services": { "service1": { "enabled": true }, "service2": {
|
||||||
|
/// "enabled": false } }
|
||||||
|
fn extract_enabled_services(config: &Value) -> Result<HashSet<String>> {
|
||||||
|
// Try current format first: services object with enabled field
|
||||||
|
if let Some(services_obj) = config.get("services").and_then(|v| v.as_object()) {
|
||||||
|
let enabled: HashSet<String> = services_obj
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(name, cfg)| {
|
||||||
|
cfg.get("enabled")
|
||||||
|
.and_then(|v| v.as_bool())
|
||||||
|
.filter(|&enabled| enabled)
|
||||||
|
.map(|_| name.clone())
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if !enabled.is_empty() {
|
||||||
|
return Ok(enabled);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to legacy format: enabled_services array
|
||||||
|
if let Some(services_array) = config.get("enabled_services").and_then(|v| v.as_array()) {
|
||||||
|
let enabled: HashSet<String> = services_array
|
||||||
|
.iter()
|
||||||
|
.filter_map(|v| v.as_str().map(|s| s.to_string()))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if !enabled.is_empty() {
|
||||||
|
return Ok(enabled);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(ConfigError::validation_failed(
|
||||||
|
"No enabled services found in deployment-mode.ncl - check services object or \
|
||||||
|
enabled_services array",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extract Git repository configurations from deployment-mode config
|
||||||
|
fn extract_git_repos(config: &Value) -> Result<(GitRepoCache, GitRepoCache)> {
|
||||||
|
let git_config = config
|
||||||
|
.get("git")
|
||||||
|
.ok_or_else(|| ConfigError::validation_failed("git config not found in deployment-mode"))?;
|
||||||
|
|
||||||
|
let schemas_config = git_config
|
||||||
|
.get("schemas_repo")
|
||||||
|
.ok_or_else(|| ConfigError::validation_failed("schemas_repo config not found"))?;
|
||||||
|
|
||||||
|
let configs_config = git_config
|
||||||
|
.get("configs_repo")
|
||||||
|
.ok_or_else(|| ConfigError::validation_failed("configs_repo config not found"))?;
|
||||||
|
|
||||||
|
let schemas_repo = GitRepoCache {
|
||||||
|
url: extract_string(schemas_config, "url")?,
|
||||||
|
branch: extract_string(schemas_config, "branch").unwrap_or_else(|_| "main".to_string()),
|
||||||
|
cache_dir: extract_string(schemas_config, "cache_dir")?.into(),
|
||||||
|
update_check: schemas_config
|
||||||
|
.get("update_check")
|
||||||
|
.and_then(|v| v.as_bool())
|
||||||
|
.unwrap_or(true),
|
||||||
|
};
|
||||||
|
|
||||||
|
let configs_repo = GitRepoCache {
|
||||||
|
url: extract_string(configs_config, "url")?,
|
||||||
|
branch: extract_string(configs_config, "branch").unwrap_or_else(|_| "main".to_string()),
|
||||||
|
cache_dir: extract_string(configs_config, "cache_dir")?.into(),
|
||||||
|
update_check: configs_config
|
||||||
|
.get("update_check")
|
||||||
|
.and_then(|v| v.as_bool())
|
||||||
|
.unwrap_or(true),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok((schemas_repo, configs_repo))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extract service dependencies from deployment-mode config
|
||||||
|
fn extract_service_dependencies(config: &Value) -> Result<HashMap<String, Vec<String>>> {
|
||||||
|
let mut dependencies = HashMap::new();
|
||||||
|
|
||||||
|
if let Some(services) = config.get("services").and_then(|v| v.as_object()) {
|
||||||
|
for (service_name, service_config) in services {
|
||||||
|
if let Some(deps_array) = service_config
|
||||||
|
.get("dependencies")
|
||||||
|
.and_then(|v| v.as_array())
|
||||||
|
{
|
||||||
|
let deps: Vec<String> = deps_array
|
||||||
|
.iter()
|
||||||
|
.filter_map(|v| v.as_str().map(|s| s.to_string()))
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
dependencies.insert(service_name.clone(), deps);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(dependencies)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper to extract string value from JSON
|
||||||
|
fn extract_string(value: &Value, key: &str) -> Result<String> {
|
||||||
|
value
|
||||||
|
.get(key)
|
||||||
|
.and_then(|v| v.as_str())
|
||||||
|
.map(|s| s.to_string())
|
||||||
|
.ok_or_else(|| {
|
||||||
|
ConfigError::validation_failed(format!("'{}' not found or not a string", key))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Fixture config exercising both service formats plus the git section:
    // orchestrator depends on vault_service; both are enabled.
    fn create_test_config() -> Value {
        serde_json::json!({
            "enabled_services": ["orchestrator", "vault_service"],
            "services": {
                "orchestrator": {
                    "dependencies": ["vault_service"]
                },
                "vault_service": {
                    "dependencies": []
                }
            },
            "git": {
                "schemas_repo": {
                    "url": "http://localhost:3000/provisioning/provisioning-schemas.git",
                    "branch": "main",
                    "cache_dir": "~/.cache/provisioning/schemas",
                    "update_check": true
                },
                "configs_repo": {
                    "url": "http://localhost:3000/provisioning/provisioning-configs.git",
                    "branch": "main",
                    "cache_dir": "~/.cache/provisioning/configs",
                    "update_check": true
                }
            }
        })
    }

    #[test]
    fn test_extract_enabled_services() {
        let config = create_test_config();
        let services = extract_enabled_services(&config).expect("Should extract services");

        assert!(services.contains("orchestrator"));
        assert!(services.contains("vault_service"));
        assert_eq!(services.len(), 2);
    }

    #[test]
    fn test_platform_startup_is_service_enabled() {
        let config = create_test_config();
        let startup = PlatformStartup::new(&config).expect("Should create startup");

        assert!(startup.is_service_enabled("orchestrator"));
        assert!(startup.is_service_enabled("vault_service"));
        assert!(!startup.is_service_enabled("mcp-server"));
    }

    // All of orchestrator's dependencies are enabled in the fixture, so
    // validation must succeed.
    #[test]
    fn test_validate_dependencies_success() {
        let config = create_test_config();
        let startup = PlatformStartup::new(&config).expect("Should create startup");

        let result = startup.validate_dependencies("orchestrator");
        assert!(result.is_ok(), "Should validate dependencies");
    }

    // Disabling vault_service makes orchestrator's dependency unmet, so
    // validation must fail. Note: mutating "enabled_services" works because
    // the fixture's "services" entries carry no "enabled" flags, which
    // makes extraction fall back to the legacy array.
    #[test]
    fn test_validate_dependencies_failure() {
        let mut config = create_test_config();
        // Remove vault_service from enabled services
        if let Some(services) = config
            .get_mut("enabled_services")
            .and_then(|v| v.as_array_mut())
        {
            services.retain(|v| v.as_str() != Some("vault_service"));
        }

        let startup = PlatformStartup::new(&config).expect("Should create startup");
        let result = startup.validate_dependencies("orchestrator");
        assert!(result.is_err(), "Should fail validation");
    }

    // Topological ordering: a dependency must precede its dependent.
    #[test]
    fn test_get_startup_order() {
        let config = create_test_config();
        let startup = PlatformStartup::new(&config).expect("Should create startup");

        let order = startup
            .get_startup_order()
            .expect("Should get startup order");
        // vault_service should come before orchestrator (has fewer dependencies)
        let vault_idx = order.iter().position(|s| s == "vault_service");
        let orch_idx = order.iter().position(|s| s == "orchestrator");

        assert!(vault_idx.is_some());
        assert!(orch_idx.is_some());
        assert!(vault_idx.unwrap() < orch_idx.unwrap());
    }
}
|
||||||
240
crates/platform-config/src/team_configs.rs
Normal file
240
crates/platform-config/src/team_configs.rs
Normal file
@ -0,0 +1,240 @@
|
|||||||
|
use std::env;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use crate::error::{ConfigError, Result};
|
||||||
|
|
||||||
|
/// Team-specific configuration resolver
///
/// Configuration hierarchy (high to low priority):
/// 1. Explicit PROVISIONING_TEAM env var
/// 2. Current user's team (from HOME/.provisioning-team)
/// 3. Default team (if exists)
#[derive(Debug, Clone)]
pub struct TeamConfigResolver {
    // Team detected once at construction; None when no team is configured.
    current_team: Option<String>,
    // Base directory under which per-team configuration lives.
    team_config_dir: PathBuf,
}
|
||||||
|
|
||||||
|
impl TeamConfigResolver {
|
||||||
|
/// Create a new team config resolver
|
||||||
|
pub fn new(team_config_base_dir: PathBuf) -> Self {
|
||||||
|
let current_team = Self::detect_current_team();
|
||||||
|
|
||||||
|
Self {
|
||||||
|
current_team,
|
||||||
|
team_config_dir: team_config_base_dir,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Detect current team from environment or user config
|
||||||
|
fn detect_current_team() -> Option<String> {
|
||||||
|
// Priority 1: Explicit env var
|
||||||
|
if let Ok(team) = env::var("PROVISIONING_TEAM") {
|
||||||
|
tracing::debug!("Using team from PROVISIONING_TEAM: {}", team);
|
||||||
|
return Some(team);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Priority 2: User's team file
|
||||||
|
let Ok(home) = env::var("HOME") else {
|
||||||
|
return None;
|
||||||
|
};
|
||||||
|
let team_file = PathBuf::from(home).join(".provisioning-team");
|
||||||
|
if !team_file.exists() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
let content = match std::fs::read_to_string(&team_file) {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!("Failed to read ~/.provisioning-team: {}", e);
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
let team = content.trim().to_string();
|
||||||
|
if team.is_empty() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
tracing::debug!("Using team from ~/.provisioning-team: {}", team);
|
||||||
|
Some(team)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the current team name
|
||||||
|
pub fn current_team(&self) -> Option<&str> {
|
||||||
|
self.current_team.as_deref()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the team config directory for a specific team
|
||||||
|
pub fn team_dir(&self, team_name: &str) -> PathBuf {
|
||||||
|
self.team_config_dir.join("teams").join(team_name)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the team config directory for current team
|
||||||
|
pub fn current_team_dir(&self) -> Option<PathBuf> {
|
||||||
|
self.current_team.as_ref().map(|team| self.team_dir(team))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if a team exists
|
||||||
|
pub fn team_exists(&self, team_name: &str) -> bool {
|
||||||
|
self.team_dir(team_name).exists()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// List all available teams
|
||||||
|
pub fn list_teams(&self) -> Result<Vec<String>> {
|
||||||
|
let teams_dir = self.team_config_dir.join("teams");
|
||||||
|
|
||||||
|
if !teams_dir.exists() {
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut teams = Vec::new();
|
||||||
|
|
||||||
|
match std::fs::read_dir(&teams_dir) {
|
||||||
|
Ok(entries) => {
|
||||||
|
teams.extend(
|
||||||
|
entries
|
||||||
|
.flatten()
|
||||||
|
.filter(|e| e.path().is_dir())
|
||||||
|
.filter_map(|e| e.file_name().to_str().map(|s| s.to_string())),
|
||||||
|
);
|
||||||
|
teams.sort();
|
||||||
|
Ok(teams)
|
||||||
|
}
|
||||||
|
Err(e) => Err(ConfigError::io_error(format!(
|
||||||
|
"Failed to list teams: {}",
|
||||||
|
e
|
||||||
|
))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get team-specific config file for a service
|
||||||
|
/// Returns the path if it exists
|
||||||
|
pub fn get_service_config(&self, service_name: &str) -> Option<PathBuf> {
|
||||||
|
let current_team = self.current_team.as_ref()?;
|
||||||
|
let team_dir = self.team_dir(current_team);
|
||||||
|
|
||||||
|
// Try {service-name}.ncl first
|
||||||
|
let ncl_path = team_dir.join(format!("{}.ncl", service_name));
|
||||||
|
if ncl_path.exists() {
|
||||||
|
tracing::debug!("Found team config for {}: {:?}", service_name, ncl_path);
|
||||||
|
return Some(ncl_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try {service-name}.toml
|
||||||
|
let toml_path = team_dir.join(format!("{}.toml", service_name));
|
||||||
|
if toml_path.exists() {
|
||||||
|
tracing::debug!("Found team config for {}: {:?}", service_name, toml_path);
|
||||||
|
return Some(toml_path);
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Display team information
|
||||||
|
pub fn display_info(&self) -> String {
|
||||||
|
let mut info = String::new();
|
||||||
|
info.push_str("👥 Team Configuration\n");
|
||||||
|
info.push_str("─────────────────────\n");
|
||||||
|
|
||||||
|
if let Some(team) = &self.current_team {
|
||||||
|
info.push_str(&format!("Current Team: {}\n", team));
|
||||||
|
info.push_str(&format!("Team Dir: {:?}\n", self.current_team_dir()));
|
||||||
|
} else {
|
||||||
|
info.push_str("Current Team: (none)\n");
|
||||||
|
info.push_str("Using global defaults\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
match self.list_teams() {
|
||||||
|
Ok(teams) if !teams.is_empty() => {
|
||||||
|
info.push_str(&format!("Available Teams: {}\n", teams.join(", ")));
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
info.push_str("Available Teams: (none)\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
info
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn test_team_resolver_new() {
        let temp_dir = TempDir::new().unwrap();
        let resolver = TeamConfigResolver::new(temp_dir.path().to_path_buf());
        assert_eq!(resolver.team_config_dir, temp_dir.path());
    }

    #[test]
    fn test_list_teams_empty() {
        let temp_dir = TempDir::new().unwrap();
        std::fs::create_dir(temp_dir.path().join("teams")).unwrap();

        let resolver = TeamConfigResolver::new(temp_dir.path().to_path_buf());
        let teams = resolver.list_teams().unwrap();
        assert!(teams.is_empty());
    }

    #[test]
    fn test_list_teams_with_content() {
        let temp_dir = TempDir::new().unwrap();
        let teams_dir = temp_dir.path().join("teams");
        std::fs::create_dir(&teams_dir).unwrap();

        // Create test teams
        std::fs::create_dir(teams_dir.join("team-a")).unwrap();
        std::fs::create_dir(teams_dir.join("team-b")).unwrap();

        let resolver = TeamConfigResolver::new(temp_dir.path().to_path_buf());
        let teams = resolver.list_teams().unwrap();

        assert_eq!(teams.len(), 2);
        assert!(teams.contains(&"team-a".to_string()));
        assert!(teams.contains(&"team-b".to_string()));
        println!("✓ Teams list: {:?}", teams);
    }

    #[test]
    fn test_team_exists() {
        let temp_dir = TempDir::new().unwrap();
        let teams_dir = temp_dir.path().join("teams");
        std::fs::create_dir(&teams_dir).unwrap();
        std::fs::create_dir(teams_dir.join("team-a")).unwrap();

        let resolver = TeamConfigResolver::new(temp_dir.path().to_path_buf());
        assert!(resolver.team_exists("team-a"));
        assert!(!resolver.team_exists("team-b"));
    }

    #[test]
    fn test_get_service_config_with_team() {
        let temp_dir = TempDir::new().unwrap();
        let teams_dir = temp_dir.path().join("teams").join("team-a");
        std::fs::create_dir_all(&teams_dir).unwrap();

        // Create service config
        std::fs::write(teams_dir.join("vault-service.ncl"), "{}").unwrap();

        let mut resolver = TeamConfigResolver::new(temp_dir.path().to_path_buf());
        resolver.current_team = Some("team-a".to_string());

        let config = resolver.get_service_config("vault-service");
        assert!(config.is_some());
        // Compare the file name component rather than a substring of the
        // lossy string, so the check is independent of the path separator.
        assert_eq!(
            config.unwrap().file_name().and_then(|n| n.to_str()),
            Some("vault-service.ncl")
        );
    }

    #[test]
    fn test_team_dir() {
        let temp_dir = TempDir::new().unwrap();
        let resolver = TeamConfigResolver::new(temp_dir.path().to_path_buf());

        let team_dir = resolver.team_dir("team-a");
        // Path::ends_with compares whole path components, so this assertion
        // holds on Windows as well as Unix (unlike a "teams/team-a"
        // substring check on the lossy string).
        assert!(team_dir.ends_with("teams/team-a"));
    }
}
|
||||||
288
crates/platform-config/tests/integration_test.rs
Normal file
288
crates/platform-config/tests/integration_test.rs
Normal file
@ -0,0 +1,288 @@
|
|||||||
|
// Integration tests for platform-config multi-source schema resolution
|
||||||
|
|
||||||
|
use std::fs;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use platform_config::{deployment::load_deployment_mode, ExtensionSchemaCache, PlatformStartup};
|
||||||
|
use tempfile::TempDir;
|
||||||
|
|
||||||
|
#[test]
fn test_deployment_mode_load() {
    // Requires deployment-mode.ncl on disk; absence is tolerated so the
    // suite still passes in CI environments without a deployment file.
    let config = match load_deployment_mode() {
        Ok(c) => c,
        Err(e) => {
            eprintln!("ℹ Deployment mode not found (expected in CI): {}", e);
            return;
        }
    };

    assert!(
        config.config.is_object(),
        "Deployment config should be a JSON object"
    );
    println!("✓ Deployment mode loaded successfully");
}

#[test]
fn test_platform_startup_initialization() {
    let config = match load_deployment_mode() {
        Ok(c) => c,
        Err(e) => {
            eprintln!("ℹ Deployment mode not found (expected in CI): {}", e);
            return;
        }
    };

    let startup = PlatformStartup::new(&config.config)
        .expect("PlatformStartup initialization should succeed");

    let enabled = startup.enabled_services();
    println!("✓ Enabled services: {:?}", enabled);

    // Should have at least vault_service and orchestrator
    assert!(
        !enabled.is_empty(),
        "At least one service should be enabled"
    );
}

#[test]
fn test_service_dependencies_validation() {
    let config = match load_deployment_mode() {
        Ok(c) => c,
        Err(e) => {
            eprintln!("ℹ Deployment mode not found (expected in CI): {}", e);
            return;
        }
    };

    let startup =
        PlatformStartup::new(&config.config).expect("PlatformStartup should initialize");

    // Validate orchestrator dependencies (should depend on vault_service).
    // Either outcome is acceptable — it depends on the deployment config.
    match startup.validate_dependencies("orchestrator") {
        Ok(()) => println!("✓ Orchestrator dependencies validated"),
        Err(e) => println!("ℹ Orchestrator dependency check: {}", e),
    }
}

#[test]
fn test_startup_order_calculation() {
    let config = match load_deployment_mode() {
        Ok(c) => c,
        Err(e) => {
            eprintln!("ℹ Deployment mode not found (expected in CI): {}", e);
            return;
        }
    };

    let startup =
        PlatformStartup::new(&config.config).expect("PlatformStartup should initialize");

    let order = startup
        .get_startup_order()
        .expect("Startup order calculation should succeed");
    println!("✓ Startup order: {:?}", order);

    // vault_service should come before orchestrator (if both enabled)
    let vault_idx = order.iter().position(|s| s == "vault_service");
    let orch_idx = order.iter().position(|s| s == "orchestrator");
    if let (Some(v), Some(o)) = (vault_idx, orch_idx) {
        assert!(v < o, "vault_service should start before orchestrator");
    }
}
|
||||||
|
|
||||||
|
/// Lay down a minimal on-disk extension: `<root>/<name>@<version>/schemas/main.ncl`.
fn write_mock_extension(root: &std::path::Path, name: &str, version: &str) {
    let schemas_dir = root.join(format!("{}@{}", name, version)).join("schemas");
    fs::create_dir_all(&schemas_dir).unwrap();
    fs::write(schemas_dir.join("main.ncl"), "{}").unwrap();
}

#[test]
fn test_extension_schema_cache_creation() {
    let temp_dir = TempDir::new().unwrap();
    assert!(
        ExtensionSchemaCache::new(temp_dir.path().to_path_buf()).is_ok(),
        "ExtensionSchemaCache should create successfully"
    );
    println!("✓ Extension schema cache created");
}

#[test]
fn test_extension_discovery_and_import_paths() {
    let temp_dir = TempDir::new().unwrap();
    let cache = ExtensionSchemaCache::new(temp_dir.path().to_path_buf()).unwrap();

    write_mock_extension(temp_dir.path(), "hetzner", "1.0.0");

    let extensions = cache.list_cached_extensions().unwrap();
    assert_eq!(extensions.len(), 1, "Should find hetzner extension");
    assert_eq!(extensions[0].name, "hetzner");
    assert_eq!(extensions[0].version, "1.0.0");

    let import_paths = cache.build_import_paths().unwrap();
    assert_eq!(import_paths.len(), 1, "Should have one import path");
    assert!(import_paths[0].exists(), "Import path should exist");

    println!("✓ Extension discovery successful");
}

#[test]
fn test_multiple_extension_versions() {
    let temp_dir = TempDir::new().unwrap();
    let cache = ExtensionSchemaCache::new(temp_dir.path().to_path_buf()).unwrap();

    for version in ["1.0.0", "1.1.0", "2.0.0"] {
        write_mock_extension(temp_dir.path(), "aws", version);
    }

    let extensions = cache.list_cached_extensions().unwrap();
    assert_eq!(extensions.len(), 3, "Should find all AWS versions");

    // Exact version matching.
    let path = cache.get_extension_path("aws", Some("1.1.0"));
    assert!(path.is_some(), "Should find exact version");
    assert!(path.unwrap().ends_with("aws@1.1.0/schemas"));

    // Unpinned lookup resolves to whichever version is found first.
    assert!(
        cache.get_extension_path("aws", None).is_some(),
        "Should find at least one version"
    );

    println!("✓ Multiple extension versions handled correctly");
}

#[test]
fn test_multi_extension_import_paths() {
    let temp_dir = TempDir::new().unwrap();
    let cache = ExtensionSchemaCache::new(temp_dir.path().to_path_buf()).unwrap();

    for (name, version) in [("hetzner", "1.0.0"), ("aws", "2.1.0"), ("upcloud", "1.5.0")] {
        write_mock_extension(temp_dir.path(), name, version);
    }

    let extensions = cache.list_cached_extensions().unwrap();
    assert_eq!(extensions.len(), 3, "Should find all extensions");

    let import_paths = cache.build_import_paths().unwrap();
    assert_eq!(import_paths.len(), 3, "Should have three import paths");

    // Every advertised import path must actually exist on disk.
    for path in &import_paths {
        assert!(path.exists(), "Import path should exist: {:?}", path);
    }

    println!("✓ Multi-extension import paths: {:?}", import_paths);
}
|
||||||
|
|
||||||
|
#[test]
fn test_git_repo_structure_in_startup() {
    let config = match load_deployment_mode() {
        Ok(c) => c,
        Err(e) => {
            eprintln!("ℹ Deployment mode not found (expected in CI): {}", e);
            return;
        }
    };

    // Just verify that the git config is present in deployment-mode
    let git_config = config.config.get("git");
    assert!(
        git_config.is_some(),
        "Git configuration should be in deployment-mode.ncl"
    );
    let git_config = git_config.unwrap();

    let schemas = git_config.get("schemas_repo");
    let configs = git_config.get("configs_repo");
    assert!(schemas.is_some(), "schemas_repo should be defined");
    assert!(configs.is_some(), "configs_repo should be defined");

    // Verify essential fields
    let schemas = schemas.unwrap();
    assert!(schemas.get("url").is_some(), "URL should be defined");
    assert!(
        schemas.get("cache_dir").is_some(),
        "cache_dir should be defined"
    );

    println!("✓ Git repo structure validated");
}

#[test]
fn test_nickel_import_path_priority() {
    // This test verifies that the NICKEL_IMPORT_PATH priority system works
    // (actual priority order is tested via functional tests, not unit tests)
    let home = std::env::var("HOME").unwrap_or_else(|_| "/tmp".to_string());

    // Priorities 1–3: cache directories, highest first.
    let cache_locations = [
        ("Extension cache dir", format!("{}/.cache/provisioning/extensions", home)),
        ("Schemas cache dir", format!("{}/.cache/provisioning/schemas", home)),
        ("Configs cache dir", format!("{}/.cache/provisioning/configs", home)),
    ];
    for (label, dir) in &cache_locations {
        let path = PathBuf::from(dir);
        if path.exists() {
            println!("✓ {} exists: {:?}", label, path);
        }
    }

    // Priority 4: Local provisioning/schemas
    if PathBuf::from("provisioning/schemas").exists() {
        println!("✓ Local schemas dir exists");
    }

    println!("✓ NICKEL_IMPORT_PATH cache structure verified");
}

#[test]
fn test_service_enabled_checking() {
    let config = match load_deployment_mode() {
        Ok(c) => c,
        Err(e) => {
            eprintln!("ℹ Deployment mode not found (expected in CI): {}", e);
            return;
        }
    };

    let startup =
        PlatformStartup::new(&config.config).expect("PlatformStartup should initialize");

    // Core services should be checkable
    for service in ["orchestrator", "vault_service"] {
        let state = if startup.is_service_enabled(service) {
            "enabled"
        } else {
            "disabled"
        };
        println!(" • {}: {}", service, state);
    }

    println!("✓ Service enablement checking works");
}
|
||||||
31
crates/platform-db/Cargo.toml
Normal file
31
crates/platform-db/Cargo.toml
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
[package]
authors.workspace = true
description = "Shared SurrealDB connection pool and schema migrations for provisioning platform"
edition.workspace = true
name = "platform-db"
version.workspace = true

[dependencies]
surrealdb = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
chrono = { workspace = true }

[features]
# Embedded RocksDB backend (for solo mode)
embedded = ["surrealdb/kv-rocksdb"]
# In-process memory backend (for tests)
memory = ["surrealdb/kv-mem"]

# Memory is the default so plain `cargo test` needs no on-disk state.
default = ["memory"]

[dev-dependencies]
tokio-test = { workspace = true }
tokio = { workspace = true, features = ["full"] }

[lib]
name = "platform_db"
path = "src/lib.rs"
|
||||||
24
crates/platform-db/src/config.rs
Normal file
24
crates/platform-db/src/config.rs
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
/// Database backend selection for platform services.
///
/// Serialized with an internal `mode` tag in snake_case,
/// e.g. `{ "mode": "embedded", "path": "…" }`.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(tag = "mode", rename_all = "snake_case")]
pub enum DbConfig {
    /// In-process memory backend (the default).
    #[default]
    Memory,
    /// Embedded backend rooted at a local filesystem path.
    Embedded {
        path: String,
    },
    /// Remote SurrealDB server reachable at `url`.
    Server {
        url: String,
    },
}
|
||||||
|
|
||||||
|
impl DbConfig {
|
||||||
|
pub fn server(url: impl Into<String>) -> Self {
|
||||||
|
Self::Server { url: url.into() }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn embedded(path: impl Into<String>) -> Self {
|
||||||
|
Self::Embedded { path: path.into() }
|
||||||
|
}
|
||||||
|
}
|
||||||
72
crates/platform-db/src/lib.rs
Normal file
72
crates/platform-db/src/lib.rs
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
pub mod config;
|
||||||
|
pub mod migrate;
|
||||||
|
pub mod pool;
|
||||||
|
|
||||||
|
pub use config::DbConfig;
|
||||||
|
pub use pool::SurrealPool;
|
||||||
|
|
||||||
|
/// Unified error type for all platform-db operations.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// Initial connection to the backend failed.
    #[error("SurrealDB connect failed: {0}")]
    Connect(String),

    /// Schema-migration DDL failed to apply.
    #[error("SurrealDB migration failed: {0}")]
    Migration(String),

    /// A query against the database failed.
    #[error("SurrealDB query failed: {0}")]
    Query(String),

    /// The connectivity health probe failed.
    #[error("SurrealDB health check failed: {0}")]
    Health(String),

    /// (De)serialization of a record failed.
    #[error("Serialization error: {0}")]
    Serialization(String),
}

/// Crate-wide result alias over [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke test: the in-memory backend must connect, run migrations
    // (SurrealPool::connect calls migrate() internally) and pass a
    // health probe.
    #[tokio::test]
    async fn connect_memory_and_migrate() {
        let pool = SurrealPool::connect(&DbConfig::Memory)
            .await
            .expect("in-memory connect should succeed");
        pool.health().await.expect("health check should pass");
    }

    // End-to-end insert/select round trip against the in-memory backend.
    #[tokio::test]
    async fn roundtrip_insert_select() {
        use serde::{Deserialize, Serialize};
        use surrealdb::RecordId;

        // Minimal record shape; `id` is left None on insert and assigned
        // by the database.
        #[derive(Serialize, Deserialize)]
        struct Task {
            id: Option<RecordId>,
            name: String,
        }

        let pool = SurrealPool::connect(&DbConfig::Memory)
            .await
            .expect("connect");

        let db = pool.db();
        // Select the namespace/database before touching tables.
        db.query("USE NS orchestrator DB provisioning")
            .await
            .expect("use ns");

        let _: Vec<Task> = db
            .create("tasks")
            .content(Task {
                id: None,
                name: "test-task".into(),
            })
            .await
            .expect("insert");

        // The inserted record must come back from a full-table select.
        let results: Vec<Task> = db.select("tasks").await.expect("select");
        assert!(results.iter().any(|t| t.name == "test-task"));
    }
}
|
||||||
93
crates/platform-db/src/migrate.rs
Normal file
93
crates/platform-db/src/migrate.rs
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
use surrealdb::engine::any::Any;
|
||||||
|
use surrealdb::Surreal;
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use crate::Error;
|
||||||
|
|
||||||
|
/// Create all namespaced tables if they don't exist.
|
||||||
|
/// Idempotent — safe to run on every startup.
|
||||||
|
pub async fn migrate(db: &Surreal<Any>) -> Result<(), Error> {
|
||||||
|
// orchestrator namespace
|
||||||
|
db.query("USE NS orchestrator DB provisioning")
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
db.query(
|
||||||
|
r#"
|
||||||
|
DEFINE TABLE IF NOT EXISTS tasks SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS task_events SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS execution_logs SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS config_hashes SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS provider_cache SCHEMALESS;
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
info!(namespace = "orchestrator", "tables ready");
|
||||||
|
|
||||||
|
// vault namespace
|
||||||
|
db.query("USE NS vault DB provisioning")
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
db.query(
|
||||||
|
r#"
|
||||||
|
DEFINE TABLE IF NOT EXISTS secrets SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS keys SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS leases SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS secret_versions SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS audit_trail SCHEMALESS;
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
info!(namespace = "vault", "tables ready");
|
||||||
|
|
||||||
|
// control_center namespace
|
||||||
|
db.query("USE NS control_center DB provisioning")
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
db.query(
|
||||||
|
r#"
|
||||||
|
DEFINE TABLE IF NOT EXISTS users SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS sessions SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS cedar_policies SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS policy_evaluations SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS roles SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS permissions SCHEMALESS;
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
info!(namespace = "control_center", "tables ready");
|
||||||
|
|
||||||
|
// audit namespace
|
||||||
|
db.query("USE NS audit DB provisioning")
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
db.query(
|
||||||
|
r#"
|
||||||
|
DEFINE TABLE IF NOT EXISTS events SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS metrics SCHEMALESS;
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
info!(namespace = "audit", "tables ready");
|
||||||
|
|
||||||
|
// workspace namespace
|
||||||
|
db.query("USE NS workspace DB provisioning")
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
db.query(
|
||||||
|
r#"
|
||||||
|
DEFINE TABLE IF NOT EXISTS registrations SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS deployments SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS git_sync_state SCHEMALESS;
|
||||||
|
DEFINE TABLE IF NOT EXISTS extensions SCHEMALESS;
|
||||||
|
"#,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Migration(e.to_string()))?;
|
||||||
|
info!(namespace = "workspace", "tables ready");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
74
crates/platform-db/src/pool.rs
Normal file
74
crates/platform-db/src/pool.rs
Normal file
@ -0,0 +1,74 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use surrealdb::{engine::any::Any, Surreal};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use crate::{config::DbConfig, migrate::migrate, Error};
|
||||||
|
|
||||||
|
/// Shared SurrealDB connection pool.
///
/// Internally `Arc`-backed — cheap to clone, safe to share across tasks.
#[derive(Clone, Debug)]
pub struct SurrealPool {
    // Single shared client handle; cloning the pool clones only the Arc.
    inner: Arc<Surreal<Any>>,
}
|
||||||
|
|
||||||
|
impl SurrealPool {
    /// Connect to SurrealDB according to `config` and run schema migrations.
    ///
    /// # Errors
    /// Returns [`Error::Connect`] when the backend cannot be reached, when
    /// sign-in fails, or when an `Embedded` config is used without the
    /// `embedded` feature; migration failures propagate from `migrate`.
    pub async fn connect(config: &DbConfig) -> Result<Self, Error> {
        let db: Surreal<Any> = match config {
            DbConfig::Memory => {
                info!("SurrealDB: in-process memory backend");
                surrealdb::engine::any::connect("mem://")
                    .await
                    .map_err(|e| Error::Connect(e.to_string()))?
            }
            // Arm only compiled in when the RocksDB engine is available.
            #[cfg(feature = "embedded")]
            DbConfig::Embedded { path } => {
                info!(path, "SurrealDB: embedded RocksDB backend");
                surrealdb::engine::any::connect(format!("rocksdb://{path}"))
                    .await
                    .map_err(|e| Error::Connect(e.to_string()))?
            }
            DbConfig::Server { url } => {
                info!(url, "SurrealDB: WebSocket server backend");
                surrealdb::engine::any::connect(url.as_str())
                    .await
                    .map_err(|e| Error::Connect(e.to_string()))?
            }
            // Without the feature, an Embedded config is a hard error rather
            // than a silent fallback to another backend.
            #[cfg(not(feature = "embedded"))]
            DbConfig::Embedded { .. } => {
                return Err(Error::Connect(
                    "embedded RocksDB requires the 'embedded' feature".into(),
                ));
            }
        };

        // Sign in with root credentials for schema ops
        // NOTE(review): credentials are hard-coded to root/root — acceptable
        // for in-process backends, but for Server mode they should come from
        // configuration/secrets. TODO confirm and parameterize.
        db.signin(surrealdb::opt::auth::Root {
            username: "root",
            password: "root",
        })
        .await
        .map_err(|e| Error::Connect(e.to_string()))?;

        // Idempotent DDL — safe on every startup (see migrate.rs).
        migrate(&db).await?;

        Ok(Self {
            inner: Arc::new(db),
        })
    }

    /// Obtain a reference to the underlying `Surreal<Any>` client.
    pub fn db(&self) -> &Surreal<Any> {
        &self.inner
    }

    /// Verify connectivity.
    pub async fn health(&self) -> Result<(), Error> {
        self.inner
            .health()
            .await
            .map_err(|e| Error::Health(e.to_string()))
    }
}
|
||||||
23
crates/platform-nats/Cargo.toml
Normal file
23
crates/platform-nats/Cargo.toml
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
[package]
authors.workspace = true
description = "Shared NATS JetStream bridge for provisioning platform services"
edition.workspace = true
name = "platform-nats"
version.workspace = true

[dependencies]
async-nats = { workspace = true }
bytes = { workspace = true }
tokio = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }

[dev-dependencies]
tokio-test = { workspace = true }
tokio = { workspace = true, features = ["full"] }

[lib]
name = "platform_nats"
path = "src/lib.rs"
|
||||||
146
crates/platform-nats/src/bridge.rs
Normal file
146
crates/platform-nats/src/bridge.rs
Normal file
@ -0,0 +1,146 @@
|
|||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use async_nats::{
|
||||||
|
jetstream::{self, consumer::pull, publish::PublishAck, Context},
|
||||||
|
ConnectOptions,
|
||||||
|
};
|
||||||
|
use bytes::Bytes;
|
||||||
|
use tokio::time::sleep;
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
use crate::{config::NatsConfig, streams::ensure_provisioning_streams, Error};
|
||||||
|
|
||||||
|
/// Shared NATS JetStream bridge.
|
||||||
|
///
|
||||||
|
/// Clone-cheap — both fields are internally `Arc`-backed.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct NatsBridge {
|
||||||
|
pub(crate) client: async_nats::Client,
|
||||||
|
pub(crate) jetstream: Context,
|
||||||
|
prefix: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Debug for NatsBridge {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
f.debug_struct("NatsBridge")
|
||||||
|
.field("prefix", &self.prefix)
|
||||||
|
.finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NatsBridge {
|
||||||
|
/// Connect to NATS with exponential backoff retry.
|
||||||
|
/// Ensures all provisioning JetStream streams exist before returning.
|
||||||
|
pub async fn connect(config: &NatsConfig) -> Result<Self, Error> {
|
||||||
|
let url = config.url.clone();
|
||||||
|
let auth_token = config.auth_token.clone();
|
||||||
|
let max_reconnects = config.max_reconnects;
|
||||||
|
let reconnect_wait_ms = config.reconnect_wait_ms;
|
||||||
|
|
||||||
|
let mut last_err = None;
|
||||||
|
for attempt in 0..=max_reconnects {
|
||||||
|
let mut opts = ConnectOptions::new();
|
||||||
|
if let Some(ref token) = auth_token {
|
||||||
|
opts = opts.token(token.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
match opts.connect(url.as_str()).await {
|
||||||
|
Ok(client) => {
|
||||||
|
info!(url = %url, "NATS connected");
|
||||||
|
let js = jetstream::new(client.clone());
|
||||||
|
ensure_provisioning_streams(&js).await?;
|
||||||
|
return Ok(Self {
|
||||||
|
client,
|
||||||
|
jetstream: js,
|
||||||
|
prefix: config.subject_prefix.clone(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
last_err = Some(e.to_string());
|
||||||
|
if attempt < max_reconnects {
|
||||||
|
let wait_ms = reconnect_wait_ms * (2u64.pow(attempt.min(6)));
|
||||||
|
warn!(
|
||||||
|
attempt = attempt + 1,
|
||||||
|
max = max_reconnects,
|
||||||
|
wait_ms,
|
||||||
|
"NATS connect failed, retrying"
|
||||||
|
);
|
||||||
|
sleep(Duration::from_millis(wait_ms)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(Error::Connect(last_err.unwrap_or_else(|| "unknown".into())))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Publish a message to a subject relative to the configured prefix.
|
||||||
|
///
|
||||||
|
/// `subject = "tasks.submitted"` → publishes to
|
||||||
|
/// `"provisioning.tasks.submitted"`. Returns the `PublishAck` after
|
||||||
|
/// JetStream confirms persistence.
|
||||||
|
pub async fn publish(&self, subject: &str, payload: Bytes) -> Result<PublishAck, Error> {
|
||||||
|
let full = format!("{}.{}", self.prefix, subject);
|
||||||
|
self.jetstream
|
||||||
|
.publish(full, payload)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Publish(e.to_string()))?
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Publish(e.to_string()))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Publish JSON-serialisable value to a subject.
|
||||||
|
pub async fn publish_json<T: serde::Serialize>(
|
||||||
|
&self,
|
||||||
|
subject: &str,
|
||||||
|
value: &T,
|
||||||
|
) -> Result<PublishAck, Error> {
|
||||||
|
let payload = serde_json::to_vec(value).map_err(|e| Error::Serialization(e.to_string()))?;
|
||||||
|
self.publish(subject, Bytes::from(payload)).await
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Obtain a pull consumer message stream for durable consumption.
|
||||||
|
///
|
||||||
|
/// Creates the consumer if it does not yet exist (idempotent).
|
||||||
|
pub async fn subscribe_pull(
|
||||||
|
&self,
|
||||||
|
stream: &str,
|
||||||
|
consumer_name: &str,
|
||||||
|
) -> Result<pull::Stream, Error> {
|
||||||
|
let js_stream = self
|
||||||
|
.jetstream
|
||||||
|
.get_stream(stream)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Stream(format!("get stream {stream}: {e}")))?;
|
||||||
|
|
||||||
|
let consumer = js_stream
|
||||||
|
.get_or_create_consumer(
|
||||||
|
consumer_name,
|
||||||
|
pull::Config {
|
||||||
|
durable_name: Some(consumer_name.to_string()),
|
||||||
|
ack_policy: async_nats::jetstream::consumer::AckPolicy::Explicit,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Consumer(format!("{consumer_name}: {e}")))?;
|
||||||
|
|
||||||
|
consumer
|
||||||
|
.messages()
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Consumer(format!("messages for {consumer_name}: {e}")))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check connectivity (NATS connection state).
|
||||||
|
pub fn health(&self) -> Result<(), Error> {
|
||||||
|
match self.client.connection_state() {
|
||||||
|
async_nats::connection::State::Connected => Ok(()),
|
||||||
|
state => Err(Error::Health(format!("NATS state: {state:?}"))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Access the raw JetStream context for advanced operations.
|
||||||
|
pub fn jetstream(&self) -> &Context {
|
||||||
|
&self.jetstream
|
||||||
|
}
|
||||||
|
}
|
||||||
26
crates/platform-nats/src/config.rs
Normal file
26
crates/platform-nats/src/config.rs
Normal file
@ -0,0 +1,26 @@
|
|||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct NatsConfig {
|
||||||
|
pub url: String,
|
||||||
|
pub subject_prefix: String,
|
||||||
|
pub max_reconnects: u32,
|
||||||
|
pub reconnect_wait_ms: u64,
|
||||||
|
pub auth_token: Option<String>,
|
||||||
|
pub tls_cert_path: Option<String>,
|
||||||
|
pub tls_key_path: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for NatsConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
url: "nats://127.0.0.1:4222".to_string(),
|
||||||
|
subject_prefix: "provisioning".to_string(),
|
||||||
|
max_reconnects: 10,
|
||||||
|
reconnect_wait_ms: 2000,
|
||||||
|
auth_token: None,
|
||||||
|
tls_cert_path: None,
|
||||||
|
tls_key_path: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
43
crates/platform-nats/src/lib.rs
Normal file
43
crates/platform-nats/src/lib.rs
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
pub mod bridge;
|
||||||
|
pub mod config;
|
||||||
|
pub mod streams;
|
||||||
|
|
||||||
|
pub use bridge::NatsBridge;
|
||||||
|
pub use config::NatsConfig;
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("NATS connect failed: {0}")]
|
||||||
|
Connect(String),
|
||||||
|
|
||||||
|
#[error("NATS publish failed: {0}")]
|
||||||
|
Publish(String),
|
||||||
|
|
||||||
|
#[error("JetStream stream error: {0}")]
|
||||||
|
Stream(String),
|
||||||
|
|
||||||
|
#[error("JetStream consumer error: {0}")]
|
||||||
|
Consumer(String),
|
||||||
|
|
||||||
|
#[error("Serialization error: {0}")]
|
||||||
|
Serialization(String),
|
||||||
|
|
||||||
|
#[error("NATS health check failed: {0}")]
|
||||||
|
Health(String),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type Result<T> = std::result::Result<T, Error>;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn nats_config_defaults() {
|
||||||
|
let cfg = NatsConfig::default();
|
||||||
|
assert_eq!(cfg.url, "nats://127.0.0.1:4222");
|
||||||
|
assert_eq!(cfg.subject_prefix, "provisioning");
|
||||||
|
assert_eq!(cfg.max_reconnects, 10);
|
||||||
|
assert!(cfg.auth_token.is_none());
|
||||||
|
}
|
||||||
|
}
|
||||||
54
crates/platform-nats/src/streams.rs
Normal file
54
crates/platform-nats/src/streams.rs
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
use async_nats::jetstream::{
|
||||||
|
stream::{Config as StreamConfig, RetentionPolicy, StorageType},
|
||||||
|
Context,
|
||||||
|
};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use crate::Error;
|
||||||
|
|
||||||
|
/// Provision all six JetStream streams required by the provisioning platform.
|
||||||
|
/// Idempotent — uses `get_or_create_stream` so safe to call on every startup.
|
||||||
|
pub async fn ensure_provisioning_streams(js: &Context) -> Result<(), Error> {
|
||||||
|
let streams: &[(&str, &[&str], RetentionPolicy)] = &[
|
||||||
|
(
|
||||||
|
"TASKS",
|
||||||
|
&["provisioning.tasks.>"],
|
||||||
|
RetentionPolicy::WorkQueue,
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"VAULT",
|
||||||
|
&["provisioning.vault.>"],
|
||||||
|
RetentionPolicy::Interest,
|
||||||
|
),
|
||||||
|
("AUTH", &["provisioning.auth.>"], RetentionPolicy::Interest),
|
||||||
|
(
|
||||||
|
"WORKSPACE",
|
||||||
|
&["provisioning.workspace.>"],
|
||||||
|
RetentionPolicy::Limits,
|
||||||
|
),
|
||||||
|
("AUDIT", &["provisioning.audit.>"], RetentionPolicy::Limits),
|
||||||
|
(
|
||||||
|
"HEALTH",
|
||||||
|
&["provisioning.health.>"],
|
||||||
|
RetentionPolicy::Interest,
|
||||||
|
),
|
||||||
|
];
|
||||||
|
|
||||||
|
for (name, subjects, retention) in streams {
|
||||||
|
let cfg = StreamConfig {
|
||||||
|
name: name.to_string(),
|
||||||
|
subjects: subjects.iter().map(|s| s.to_string()).collect(),
|
||||||
|
retention: *retention,
|
||||||
|
storage: StorageType::File,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
js.get_or_create_stream(cfg)
|
||||||
|
.await
|
||||||
|
.map_err(|e| Error::Stream(format!("failed to ensure stream {name}: {e}")))?;
|
||||||
|
|
||||||
|
info!(stream = name, "JetStream stream ready");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
@ -67,6 +67,9 @@ config = { workspace = true }
|
|||||||
# Platform configuration management
|
# Platform configuration management
|
||||||
platform-config = { workspace = true }
|
platform-config = { workspace = true }
|
||||||
|
|
||||||
|
# Centralized observability (logging, metrics, health, tracing)
|
||||||
|
observability = { workspace = true, features = ["logging", "metrics-prometheus", "health"] }
|
||||||
|
|
||||||
# Stratum ecosystem - embeddings and LLM abstraction
|
# Stratum ecosystem - embeddings and LLM abstraction
|
||||||
stratum-embeddings = { workspace = true, features = ["openai-provider", "ollama-provider", "fastembed-provider"] }
|
stratum-embeddings = { workspace = true, features = ["openai-provider", "ollama-provider", "fastembed-provider"] }
|
||||||
stratum-llm = { workspace = true, features = ["anthropic", "openai", "ollama"] }
|
stratum-llm = { workspace = true, features = ["anthropic", "openai", "ollama"] }
|
||||||
|
|||||||
@ -58,7 +58,8 @@ impl EmbeddingEngine {
|
|||||||
let api_key_str = if let Some(key) = api_key {
|
let api_key_str = if let Some(key) = api_key {
|
||||||
key
|
key
|
||||||
} else if config.fallback_local {
|
} else if config.fallback_local {
|
||||||
"dummy".to_string() // Will fail, but fallback will take over
|
"dummy".to_string() // Will fail, but fallback will take
|
||||||
|
// over
|
||||||
} else {
|
} else {
|
||||||
return Err(crate::error::RagError::config(
|
return Err(crate::error::RagError::config(
|
||||||
"OpenAI API key required. Set OPENAI_API_KEY or enable fallback_local",
|
"OpenAI API key required. Set OPENAI_API_KEY or enable fallback_local",
|
||||||
|
|||||||
@ -21,8 +21,7 @@ impl LlmClient {
|
|||||||
tracing::warn!("ANTHROPIC_API_KEY not set - LLM calls will fail");
|
tracing::warn!("ANTHROPIC_API_KEY not set - LLM calls will fail");
|
||||||
}
|
}
|
||||||
|
|
||||||
let provider =
|
let provider = AnthropicProvider::new(api_key.unwrap_or_default(), model.clone());
|
||||||
AnthropicProvider::new(api_key.unwrap_or_default(), model.clone());
|
|
||||||
|
|
||||||
let configured = ConfiguredProvider {
|
let configured = ConfiguredProvider {
|
||||||
provider: Box::new(provider),
|
provider: Box::new(provider),
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user