feat: integrate stratum orchestration, kogral bridge, and NATS
Some checks failed
Build - Verify Code & Build Binaries / Check Code Format (push) Has been cancelled
Build - Verify Code & Build Binaries / Lint with Clippy (push) Has been cancelled
Build - Verify Code & Build Binaries / Test Suite (push) Has been cancelled
Build - Verify Code & Build Binaries / Cargo Check (push) Has been cancelled
Build - Verify Code & Build Binaries / Security Audit (push) Has been cancelled
Build - Verify Code & Build Binaries / Build (Debug) - macos-latest (push) Has been cancelled
Build - Verify Code & Build Binaries / Build (Debug) - ubuntu-latest (push) Has been cancelled
Build - Verify Code & Build Binaries / Build (Debug) - windows-latest (push) Has been cancelled
Build - Verify Code & Build Binaries / Build (Release) - macos-latest (push) Has been cancelled
Build - Verify Code & Build Binaries / Build (Release) - ubuntu-latest (push) Has been cancelled
Build - Verify Code & Build Binaries / Build (Release) - windows-latest (push) Has been cancelled
CI/CD with Staging Preset / Validate Installation with Staging Preset (macos-latest) (push) Has been cancelled
CI/CD with Staging Preset / Validate Installation with Staging Preset (ubuntu-latest) (push) Has been cancelled
CI/CD with Staging Preset / Validate Documentation (push) Has been cancelled
Build - Verify Code & Build Binaries / All Checks Passed (push) Has been cancelled
CI/CD with Staging Preset / Build and Test with Staging Preset (push) Has been cancelled
CI/CD with Staging Preset / Integration Test with Docker Compose (push) Has been cancelled
CI/CD with Staging Preset / Test Summary (push) Has been cancelled

platform

  - Add orchestration.rs and kogral_bridge.rs to syntaxis-vapora
  - Replace async-nats with platform-nats (NKey auth support)
  - Wire stratum-orchestrator, stratum-graph, stratum-state deps
  - Upgrade surrealdb 2.3 → 3 with kv-surrealkv and rustls features
  - Consolidate core/Cargo.toml into root workspace (remove virtual manifest)
  - Add shared/rust/nickel.rs for Nickel config integration
  - Rename CLI binary from syntaxis-cli to syntaxis
This commit is contained in:
Jesús Pérez 2026-02-22 22:01:02 +00:00
parent 48d7503b48
commit 3faf7a5fc9
27 changed files with 1567 additions and 1019 deletions

View File

@ -74,7 +74,13 @@ palette = { version = "0.7", features = ["serializing"] }
# Database # Database
sqlx = { version = "0.8", features = ["runtime-tokio-native-tls", "sqlite", "macros"] } sqlx = { version = "0.8", features = ["runtime-tokio-native-tls", "sqlite", "macros"] }
sqlx-sqlite = "0.8" sqlx-sqlite = "0.8"
surrealdb = { version = "2.3", features = ["kv-mem", "kv-rocksdb"] } surrealdb = { version = "3", features = ["kv-mem", "kv-surrealkv", "kv-rocksdb", "protocol-ws", "rustls"] }
platform-nats = { path = "../stratumiops/crates/platform-nats" }
stratum-orchestrator = { path = "../stratumiops/crates/stratum-orchestrator" }
stratum-graph = { path = "../stratumiops/crates/stratum-graph" }
stratum-state = { path = "../stratumiops/crates/stratum-state" }
kogral-core = { path = "../kogral/crates/kogral-core" }
bytes = "1.9"
serde_bytes = "0.11" serde_bytes = "0.11"
# Logging/Tracing # Logging/Tracing

View File

@ -47,7 +47,7 @@ default = "sqlite"
type = "sqlite" type = "sqlite"
name = "SQLite" name = "SQLite"
description = "File-based, no server" description = "File-based, no server"
platforms = [linux, macos, windows] platforms = ["linux", "macos", "windows"]
# Installation steps/checklist # Installation steps/checklist
[checklist] [checklist]

View File

@ -1,77 +1,3 @@
# This is a virtual manifest grouped under the parent workspace in /Users/Akasha/Development/syntaxis/Cargo.toml # Sub-directory grouping file — not a workspace root.
# All workspace configuration, dependencies, and profiles are defined in the root workspace # Workspace definitions live in /Users/Akasha/Development/syntaxis/Cargo.toml.
# Packages in core/crates/ resolve { workspace = true } deps from the root workspace.
[workspace.package]
version = "0.1.0"
edition = "2021"
rust-version = "1.75"
authors = ["syntaxis contributors"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/syntaxis/core"
[workspace.dependencies]
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
toml = "0.9"
uuid = { version = "1.18", features = ["v4", "serde"] }
# Error handling
thiserror = "2.0"
anyhow = "1.0"
# Async runtime
tokio = { version = "1.48", features = ["full"] }
async-trait = "0.1"
futures = "0.3"
# Web framework
axum = { version = "0.8", features = ["ws"] }
tower = "0.5"
tower-http = { version = "0.6", features = ["trace", "cors", "fs"] }
tokio-rustls = "0.26"
rustls = "0.23"
rustls-pemfile = "2.2"
# HTTP client
reqwest = { version = "0.12", features = ["json"] }
# Logging/Tracing
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# Date/time
chrono = { version = "0.4", features = ["serde"] }
# File operations
camino = "1.2"
walkdir = "2.5"
# Templating
handlebars = "6.3"
# Database
sqlx = { version = "0.8", features = ["runtime-tokio-native-tls", "sqlite", "macros"] }
sqlx-sqlite = "0.8"
surrealdb = { version = "2.3", features = ["kv-mem", "kv-rocksdb"] }
serde_bytes = "0.11"
# Other utilities
indexmap = "2.12"
regex = "1.12"
moka = { version = "0.12", features = ["future"] }
tokio-tungstenite = "0.28"
jsonwebtoken = { version = "10.2", features = ["aws_lc_rs"] }
once_cell = "1.21"
prometheus = { version = "0.14", features = ["process"] }
async-nats = "0.45"
rand_core = "0.6"
rand = "0.8"
# Dev dependencies
tokio-test = "0.4"
tempfile = "3.23"
assert_cmd = "2.1"
predicates = "3.1"
criterion = { version = "0.7", features = ["html_reports"] }
mockito = "1.6"

View File

@ -207,8 +207,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
); );
// Initialize JWT provider if enabled // Initialize JWT provider if enabled
let jwt_prov = if auth_config.jwt_enabled && auth_config.jwt_secret.is_some() { let jwt_prov = if let Some(secret) = auth_config.jwt_secret.as_ref().filter(|_| auth_config.jwt_enabled) {
let secret = auth_config.jwt_secret.as_ref().unwrap();
let expiration = auth_config.jwt_expiration as i64; let expiration = auth_config.jwt_expiration as i64;
match shared_api_lib::auth::jwt::JwtProvider::new(secret, expiration) { match shared_api_lib::auth::jwt::JwtProvider::new(secret, expiration) {

View File

@ -3,7 +3,7 @@ mode = "Standalone"
primary_language = "rust" primary_language = "rust"
languages = ["rust"] languages = ["rust"]
phase = "Creation" phase = "Creation"
database_path = ".project-tools/data/lifecycle.db" database_path = "data.db"
[project] [project]
name = "syntaxis-cli" name = "syntaxis-cli"
@ -13,8 +13,8 @@ authors = ["Your Name"]
license = "MIT" license = "MIT"
keywords = [] keywords = []
categories = [] categories = []
created = "2025-11-17T00:36:54.732694Z" created = "2026-02-22T11:10:00.776028Z"
last_updated = "2025-11-17T00:36:54.732694Z" last_updated = "2026-02-22T11:10:00.776028Z"
[phases] [phases]
allowed_transitions = [] allowed_transitions = []
@ -31,7 +31,7 @@ include_dev = false
[export] [export]
format = "json" format = "json"
output_path = ".project-tools/exports/project-{timestamp}.json" output_path = "exports/project-{timestamp}.json"
include_metadata = true include_metadata = true
include_tags = true include_tags = true
pretty = true pretty = true

View File

@ -9,7 +9,7 @@ repository.workspace = true
description = "CLI tool for syntaxis management" description = "CLI tool for syntaxis management"
[[bin]] [[bin]]
name = "syntaxis-cli" name = "syntaxis"
path = "src/main.rs" path = "src/main.rs"
[package.metadata.syntaxis] [package.metadata.syntaxis]

View File

@ -63,7 +63,7 @@ async fn select_project_interactive() -> Result<String> {
let app_name = ui_config::get_app_name(); let app_name = ui_config::get_app_name();
return Err(anyhow!( return Err(anyhow!(
"No projects found in database. Run '{} init' first.", "No projects found in database. Run '{} init' first.",
format!("{} ", app_name) app_name
)); ));
} }

View File

@ -484,7 +484,7 @@ async fn main() -> Result<()> {
// Find config file // Find config file
let config_path = find_config_path_warn_conflicts( let config_path = find_config_path_warn_conflicts(
"config.toml", "config.toml",
cli.config.as_ref().map(|s| std::path::Path::new(s)), cli.config.as_deref().map(std::path::Path::new),
) )
.ok_or_else(|| { .ok_or_else(|| {
anyhow::anyhow!( anyhow::anyhow!(

View File

@ -9,8 +9,8 @@ use tempfile::TempDir;
/// Create a temporary test directory with a valid lifecycle.toml config /// Create a temporary test directory with a valid lifecycle.toml config
fn create_test_fixture() -> TempDir { fn create_test_fixture() -> TempDir {
let temp_dir = TempDir::new().expect("Failed to create temp directory"); let temp_dir = TempDir::new().expect("Failed to create temp directory");
let config_dir = temp_dir.path().join(".project"); let config_dir = temp_dir.path().join(".syntaxis");
fs::create_dir_all(&config_dir).expect("Failed to create .project directory"); fs::create_dir_all(&config_dir).expect("Failed to create .syntaxis directory");
let config_content = r#"project_type = "MultiLang" let config_content = r#"project_type = "MultiLang"
mode = "Standalone" mode = "Standalone"

View File

@ -1,6 +1,3 @@
[workspace]
# Independent package - not part of any workspace
[package] [package]
name = "syntaxis-bridge" name = "syntaxis-bridge"
version = "0.1.0" version = "0.1.0"

View File

@ -42,7 +42,7 @@ impl IntegrationRegistry {
&self, &self,
name: &str, name: &str,
) -> Result<Arc<dyn EcosystemIntegration>, EcosystemIntegrationError> { ) -> Result<Arc<dyn EcosystemIntegration>, EcosystemIntegrationError> {
self.integrations.get(name).map(|i| Arc::clone(i)).ok_or( self.integrations.get(name).map(Arc::clone).ok_or(
EcosystemIntegrationError::NotFound { EcosystemIntegrationError::NotFound {
name: name.to_string(), name: name.to_string(),
}, },

View File

@ -62,6 +62,5 @@ tokio-test = { workspace = true }
tempfile = { workspace = true } tempfile = { workspace = true }
[features] [features]
# Enable SurrealDB support (includes embedded database) surrealdb-backend = ["dep:surrealdb"]
surrealdb = ["dep:surrealdb"] default = ["surrealdb-backend"]
default = ["surrealdb"]

View File

@ -49,8 +49,8 @@ pub use config::{
SbomFormat, SbomFormat,
}; };
pub use error::{LifecycleError, Result}; pub use error::{LifecycleError, Result};
#[cfg(feature = "surrealdb")] #[cfg(feature = "surrealdb-backend")]
pub use persistence::SurrealDatabase; pub use persistence::{SurrealDatabase, SurrealDbBackendConfig, SurrealEngineConfig};
pub use persistence::{ pub use persistence::{
Database, DatabaseConfig, DbChecklistItem, DbPhaseHistory, DbPhaseTransition, DbProject, Database, DatabaseConfig, DbChecklistItem, DbPhaseHistory, DbPhaseTransition, DbProject,
DbSecurityAssessment, DbSecurityAssessmentDetail, DbTeamMember, DbToolConfiguration, DbSecurityAssessment, DbSecurityAssessmentDetail, DbTeamMember, DbToolConfiguration,

View File

@ -3,13 +3,6 @@
//! Provides configuration structures for different database backends. //! Provides configuration structures for different database backends.
//! Configuration is typically loaded from TOML files and can be overridden //! Configuration is typically loaded from TOML files and can be overridden
//! by environment variables. //! by environment variables.
//!
//! # Examples
//!
//! ```ignore
//! let config = DatabaseConfig::load("configs/database.toml")?;
//! let db = config.create_database().await?;
//! ```
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::path::Path; use std::path::Path;
@ -23,10 +16,11 @@ pub struct DatabaseConfig {
/// SQLite configuration (required if engine is "sqlite") /// SQLite configuration (required if engine is "sqlite")
pub sqlite: Option<SqliteConfig>, pub sqlite: Option<SqliteConfig>,
/// SurrealDB configuration (required if engine is "surrealdb") /// SurrealDB dual-engine configuration (required if engine is "surrealdb")
pub surrealdb: Option<SurrealDbConfig>, #[cfg(feature = "surrealdb-backend")]
pub surrealdb: Option<SurrealDbBackendConfig>,
/// Optional PostgreSQL configuration for future support /// PostgreSQL configuration (future support)
pub postgresql: Option<PostgresConfig>, pub postgresql: Option<PostgresConfig>,
} }
@ -36,90 +30,125 @@ pub struct SqliteConfig {
/// Path to SQLite database file /// Path to SQLite database file
pub path: String, pub path: String,
/// Maximum number of connections in pool (default: 5) /// Maximum number of connections in pool
#[serde(default = "default_sqlite_pool_size")] #[serde(default = "default_sqlite_pool_size")]
pub max_connections: u32, pub max_connections: u32,
/// Connection timeout in seconds (default: 30) /// Connection timeout in seconds
#[serde(default = "default_timeout_secs")] #[serde(default = "default_timeout_secs")]
pub timeout_secs: u64, pub timeout_secs: u64,
/// Enable write-ahead logging (default: true) /// Enable write-ahead logging
#[serde(default = "default_true")] #[serde(default = "default_true")]
pub wal_mode: bool, pub wal_mode: bool,
/// PRAGMA synchronous setting: OFF, NORMAL, FULL (default: NORMAL) /// PRAGMA synchronous setting: OFF, NORMAL, FULL
#[serde(default = "default_pragma_synchronous")] #[serde(default = "default_pragma_synchronous")]
pub pragma_synchronous: String, pub pragma_synchronous: String,
/// PRAGMA cache_size in pages (default: 2000) /// PRAGMA cache_size in pages
#[serde(default = "default_cache_size")] #[serde(default = "default_cache_size")]
pub pragma_cache_size: i32, pub pragma_cache_size: i32,
} }
/// SurrealDB-specific configuration /// SurrealDB engine selector — dispatches via `engine::any::connect(url)`.
///
/// Serialized with an internal `engine` tag so TOML records map directly:
/// `{ engine = "surreal_kv", path = ".data/syntaxis/core" }` → `SurrealKv`.
#[cfg(feature = "surrealdb-backend")]
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SurrealDbConfig { #[serde(tag = "engine", rename_all = "snake_case")]
/// SurrealDB server URL (e.g., "ws://localhost:8000") pub enum SurrealEngineConfig {
pub url: String, /// In-memory — tests and ephemeral use only
Mem,
/// Namespace for isolation (default: "syntaxis") /// SurrealKV embedded (B-tree) — default for relational data (projects, tasks, phases)
#[serde(default = "default_namespace")] SurrealKv {
pub namespace: String, /// Filesystem path for the SurrealKV database directory
path: String,
/// Database name (default: "projects") },
#[serde(default = "default_database")] /// RocksDB embedded (LSM) — default for append-heavy data (audit logs)
pub database: String, RocksDb {
/// Filesystem path for the RocksDB database directory
/// Username for authentication path: String,
pub username: String, },
/// Remote WebSocket — team and cloud deployments
/// Password for authentication Ws {
pub password: String, /// Full WebSocket URL, e.g. `ws://surrealdb.internal:8000`
url: String,
/// Maximum connections (default: 10) },
#[serde(default = "default_surrealdb_pool_size")]
pub max_connections: u32,
/// Connection timeout in seconds (default: 30)
#[serde(default = "default_timeout_secs")]
pub timeout_secs: u64,
/// Enable TLS (default: false)
#[serde(default)]
pub tls_enabled: bool,
/// TLS certificate path (optional)
pub tls_cert_path: Option<String>,
} }
/// PostgreSQL configuration (for future support) #[cfg(feature = "surrealdb-backend")]
impl SurrealEngineConfig {
/// Produce the URL string consumed by `surrealdb::engine::any::connect`.
#[must_use]
pub fn to_url(&self) -> String {
match self {
Self::Mem => "mem://".to_string(),
Self::SurrealKv { path } => format!("surrealkv://{path}"),
Self::RocksDb { path } => format!("rocksdb://{path}"),
Self::Ws { url } => url.clone(),
}
}
}
/// Dual-engine SurrealDB backend configuration.
///
/// `core` (default: SurrealKV) stores relational/graph data with a B-tree engine
/// suited for random-access patterns. `hot` (default: RocksDB) stores audit logs
/// and append-heavy data with an LSM engine suited for sequential writes.
#[cfg(feature = "surrealdb-backend")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SurrealDbBackendConfig {
/// Engine for relational data (projects, tasks, phases, checklists)
#[serde(default = "default_core_engine")]
pub core: SurrealEngineConfig,
/// Engine for hot/append data (phase history, future audit embeddings)
#[serde(default = "default_hot_engine")]
pub hot: SurrealEngineConfig,
/// SurrealDB namespace shared by both engines
#[serde(default = "default_surreal_namespace")]
pub namespace: String,
/// Auth username (used for Ws engine only)
pub username: Option<String>,
/// Auth password (used for Ws engine only)
pub password: Option<String>,
}
#[cfg(feature = "surrealdb-backend")]
impl Default for SurrealDbBackendConfig {
fn default() -> Self {
Self {
core: default_core_engine(),
hot: default_hot_engine(),
namespace: default_surreal_namespace(),
username: None,
password: None,
}
}
}
/// PostgreSQL configuration (future support)
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PostgresConfig { pub struct PostgresConfig {
/// Connection string /// Connection string
pub url: String, pub url: String,
/// Maximum connections (default: 10) /// Maximum connections
#[serde(default = "default_postgres_pool_size")] #[serde(default = "default_postgres_pool_size")]
pub max_connections: u32, pub max_connections: u32,
/// Connection timeout in seconds (default: 30) /// Connection timeout in seconds
#[serde(default = "default_timeout_secs")] #[serde(default = "default_timeout_secs")]
pub timeout_secs: u64, pub timeout_secs: u64,
} }
// ============================================================================ // ── default constructors ─────────────────────────────────────────────────────
// DEFAULT IMPLEMENTATIONS
// ============================================================================
fn default_sqlite_pool_size() -> u32 { fn default_sqlite_pool_size() -> u32 {
5 5
} }
fn default_surrealdb_pool_size() -> u32 {
10
}
fn default_postgres_pool_size() -> u32 { fn default_postgres_pool_size() -> u32 {
10 10
} }
@ -140,20 +169,29 @@ fn default_cache_size() -> i32 {
2000 2000
} }
fn default_namespace() -> String { #[cfg(feature = "surrealdb-backend")]
fn default_core_engine() -> SurrealEngineConfig {
SurrealEngineConfig::SurrealKv {
path: ".data/syntaxis/core".to_string(),
}
}
#[cfg(feature = "surrealdb-backend")]
fn default_hot_engine() -> SurrealEngineConfig {
SurrealEngineConfig::RocksDb {
path: ".data/syntaxis/hot".to_string(),
}
}
#[cfg(feature = "surrealdb-backend")]
fn default_surreal_namespace() -> String {
"syntaxis".to_string() "syntaxis".to_string()
} }
fn default_database() -> String { // ── DatabaseConfig impl ──────────────────────────────────────────────────────
"projects".to_string()
}
// ============================================================================
// IMPLEMENTATIONS
// ============================================================================
impl DatabaseConfig { impl DatabaseConfig {
/// Create a default SQLite configuration /// Create a default SQLite configuration.
pub fn sqlite_default(path: &str) -> Self { pub fn sqlite_default(path: &str) -> Self {
Self { Self {
engine: "sqlite".to_string(), engine: "sqlite".to_string(),
@ -165,45 +203,54 @@ impl DatabaseConfig {
pragma_synchronous: "NORMAL".to_string(), pragma_synchronous: "NORMAL".to_string(),
pragma_cache_size: 2000, pragma_cache_size: 2000,
}), }),
#[cfg(feature = "surrealdb-backend")]
surrealdb: None, surrealdb: None,
postgresql: None, postgresql: None,
} }
} }
/// Create a default SurrealDB configuration /// Create a default SurrealDB configuration using embedded engines.
pub fn surrealdb_default(url: &str, username: &str, password: &str) -> Self { #[cfg(feature = "surrealdb-backend")]
pub fn surrealdb_default() -> Self {
Self { Self {
engine: "surrealdb".to_string(), engine: "surrealdb".to_string(),
sqlite: None, sqlite: None,
surrealdb: Some(SurrealDbConfig { surrealdb: Some(SurrealDbBackendConfig::default()),
url: url.to_string(), postgresql: None,
namespace: default_namespace(), }
database: default_database(), }
username: username.to_string(),
password: password.to_string(), /// Create a WebSocket-connected SurrealDB configuration.
max_connections: default_surrealdb_pool_size(), #[cfg(feature = "surrealdb-backend")]
timeout_secs: default_timeout_secs(), pub fn surrealdb_ws(url: &str, username: &str, password: &str) -> Self {
tls_enabled: false, Self {
tls_cert_path: None, engine: "surrealdb".to_string(),
sqlite: None,
surrealdb: Some(SurrealDbBackendConfig {
core: SurrealEngineConfig::Ws { url: url.to_string() },
hot: SurrealEngineConfig::Ws { url: url.to_string() },
namespace: default_surreal_namespace(),
username: Some(username.to_string()),
password: Some(password.to_string()),
}), }),
postgresql: None, postgresql: None,
} }
} }
/// Load configuration from TOML file /// Load configuration from a TOML file.
pub fn load_from_file<P: AsRef<Path>>(path: P) -> crate::error::Result<Self> { pub fn load_from_file<P: AsRef<Path>>(path: P) -> crate::error::Result<Self> {
let contents = std::fs::read_to_string(path)?; let contents = std::fs::read_to_string(path)?;
Self::load_from_str(&contents) Self::load_from_str(&contents)
} }
/// Load configuration from TOML string /// Load configuration from a TOML string.
pub fn load_from_str(toml: &str) -> crate::error::Result<Self> { pub fn load_from_str(toml: &str) -> crate::error::Result<Self> {
toml::from_str(toml).map_err(|e| { toml::from_str(toml).map_err(|e| {
crate::error::LifecycleError::Config(format!("Failed to parse config: {}", e)) crate::error::LifecycleError::Config(format!("Failed to parse config: {e}"))
}) })
} }
/// Validate configuration /// Validate that the selected engine has a matching configuration block.
pub fn validate(&self) -> crate::error::Result<()> { pub fn validate(&self) -> crate::error::Result<()> {
match self.engine.as_str() { match self.engine.as_str() {
"sqlite" => { "sqlite" => {
@ -215,13 +262,22 @@ impl DatabaseConfig {
Ok(()) Ok(())
} }
"surrealdb" => { "surrealdb" => {
#[cfg(feature = "surrealdb-backend")]
{
self.surrealdb.as_ref().ok_or_else(|| { self.surrealdb.as_ref().ok_or_else(|| {
crate::error::LifecycleError::Config( crate::error::LifecycleError::Config(
"SurrealDB engine selected but no surrealdb config provided".to_string(), "SurrealDB engine selected but no surrealdb config provided"
.to_string(),
) )
})?; })?;
Ok(()) Ok(())
} }
#[cfg(not(feature = "surrealdb-backend"))]
Err(crate::error::LifecycleError::Config(
"SurrealDB engine is not compiled in; enable feature 'surrealdb-backend'"
.to_string(),
))
}
"postgresql" => { "postgresql" => {
self.postgresql.as_ref().ok_or_else(|| { self.postgresql.as_ref().ok_or_else(|| {
crate::error::LifecycleError::Config( crate::error::LifecycleError::Config(
@ -231,28 +287,24 @@ impl DatabaseConfig {
Ok(()) Ok(())
} }
other => Err(crate::error::LifecycleError::Config(format!( other => Err(crate::error::LifecycleError::Config(format!(
"Unknown database engine: {}", "Unknown database engine: {other}"
other
))), ))),
} }
} }
/// Get the engine type /// Returns true if SQLite backend is configured.
pub fn get_engine(&self) -> &str {
&self.engine
}
/// Check if SQLite backend is configured
pub fn is_sqlite(&self) -> bool { pub fn is_sqlite(&self) -> bool {
self.engine == "sqlite" self.engine == "sqlite"
} }
/// Check if SurrealDB backend is configured /// Returns true if SurrealDB backend is configured.
pub fn is_surrealdb(&self) -> bool { pub fn is_surrealdb(&self) -> bool {
self.engine == "surrealdb" self.engine == "surrealdb"
} }
} }
// ── tests ────────────────────────────────────────────────────────────────────
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -265,18 +317,6 @@ mod tests {
assert_eq!(config.sqlite.unwrap().path, "test.db"); assert_eq!(config.sqlite.unwrap().path, "test.db");
} }
#[test]
fn test_surrealdb_default_config() {
let config = DatabaseConfig::surrealdb_default("ws://localhost:8000", "user", "pass");
assert!(config.is_surrealdb());
assert!(config.surrealdb.is_some());
let sdb = config.surrealdb.unwrap();
assert_eq!(sdb.url, "ws://localhost:8000");
assert_eq!(sdb.username, "user");
assert_eq!(sdb.password, "pass");
}
#[test] #[test]
fn test_validate_sqlite() { fn test_validate_sqlite() {
let config = DatabaseConfig::sqlite_default("test.db"); let config = DatabaseConfig::sqlite_default("test.db");
@ -288,6 +328,7 @@ mod tests {
let config = DatabaseConfig { let config = DatabaseConfig {
engine: "mongodb".to_string(), engine: "mongodb".to_string(),
sqlite: None, sqlite: None,
#[cfg(feature = "surrealdb-backend")]
surrealdb: None, surrealdb: None,
postgresql: None, postgresql: None,
}; };
@ -324,22 +365,80 @@ max_connections = 10
assert!(config.wal_mode); assert!(config.wal_mode);
} }
#[test] #[cfg(feature = "surrealdb-backend")]
fn test_surrealdb_config_defaults() { mod surreal_tests {
let config = SurrealDbConfig { use super::*;
url: "ws://localhost:8000".to_string(),
namespace: default_namespace(),
database: default_database(),
username: "root".to_string(),
password: "root".to_string(),
max_connections: default_surrealdb_pool_size(),
timeout_secs: default_timeout_secs(),
tls_enabled: false,
tls_cert_path: None,
};
assert_eq!(config.namespace, "syntaxis"); #[test]
assert_eq!(config.database, "projects"); fn test_surrealdb_default_config() {
assert_eq!(config.max_connections, 10); let config = DatabaseConfig::surrealdb_default();
assert!(config.is_surrealdb());
assert!(config.surrealdb.is_some());
}
#[test]
fn test_surrealdb_ws_config() {
let config =
DatabaseConfig::surrealdb_ws("ws://localhost:8000", "root", "root");
assert!(config.is_surrealdb());
let cfg = config.surrealdb.unwrap();
assert_eq!(cfg.username, Some("root".to_string()));
assert!(matches!(cfg.core, SurrealEngineConfig::Ws { .. }));
}
#[test]
fn test_engine_to_url_mem() {
assert_eq!(SurrealEngineConfig::Mem.to_url(), "mem://");
}
#[test]
fn test_engine_to_url_surrealkv() {
let eng = SurrealEngineConfig::SurrealKv {
path: "/data/core".to_string(),
};
assert_eq!(eng.to_url(), "surrealkv:///data/core");
}
#[test]
fn test_engine_to_url_rocksdb() {
let eng = SurrealEngineConfig::RocksDb {
path: "/data/hot".to_string(),
};
assert_eq!(eng.to_url(), "rocksdb:///data/hot");
}
#[test]
fn test_engine_to_url_ws() {
let eng = SurrealEngineConfig::Ws {
url: "ws://host:8000".to_string(),
};
assert_eq!(eng.to_url(), "ws://host:8000");
}
#[test]
fn test_backend_config_default() {
let cfg = SurrealDbBackendConfig::default();
assert_eq!(cfg.namespace, "syntaxis");
assert!(cfg.username.is_none());
assert!(matches!(cfg.core, SurrealEngineConfig::SurrealKv { .. }));
assert!(matches!(cfg.hot, SurrealEngineConfig::RocksDb { .. }));
}
#[test]
fn test_validate_surrealdb() {
let config = DatabaseConfig::surrealdb_default();
assert!(config.validate().is_ok());
}
#[test]
fn test_validate_surrealdb_missing_config() {
let config = DatabaseConfig {
engine: "surrealdb".to_string(),
sqlite: None,
surrealdb: None,
postgresql: None,
};
assert!(config.validate().is_err());
}
} }
} }

View File

@ -26,14 +26,16 @@ pub mod config;
pub mod error; pub mod error;
pub mod migration; pub mod migration;
pub mod sqlite_impl; pub mod sqlite_impl;
#[cfg(feature = "surrealdb")] #[cfg(feature = "surrealdb-backend")]
pub mod surrealdb_impl; pub mod surrealdb_impl;
// Re-export public types // Re-export public types
pub use config::DatabaseConfig; pub use config::DatabaseConfig;
#[cfg(feature = "surrealdb-backend")]
pub use config::{SurrealDbBackendConfig, SurrealEngineConfig};
pub use error::PersistenceError; pub use error::PersistenceError;
pub use sqlite_impl::SqliteDatabase; pub use sqlite_impl::SqliteDatabase;
#[cfg(feature = "surrealdb")] #[cfg(feature = "surrealdb-backend")]
pub use surrealdb_impl::SurrealDatabase; pub use surrealdb_impl::SurrealDatabase;
use crate::error::Result; use crate::error::Result;

File diff suppressed because it is too large Load Diff

View File

@ -1,32 +1,36 @@
// Integration tests for SurrealDB 2.3 with server mode // Integration tests for SurrealDB 3 with server mode
// //
// To run these tests, start SurrealDB server in another terminal: // To run these tests, start SurrealDB server in another terminal:
// surreal start --bind 127.0.0.1:8000 memory // surreal start --bind 127.0.0.1:8000 memory --user root --pass root
// //
// Then run: cargo test --test integration_surrealdb -- --test-threads=1 // Then run: cargo test --test integration_surrealdb --features surrealdb-backend -- --test-threads=1
#[cfg(all(test, feature = "surrealdb"))] #[cfg(all(test, feature = "surrealdb-backend"))]
mod surrealdb_integration_tests { mod surrealdb_integration_tests {
use chrono::Utc; use chrono::Utc;
use syntaxis_core::persistence::{ use syntaxis_core::persistence::{
Database, DbChecklistItem, DbPhaseTransition, DbProject, DbSecurityAssessment, Database, DbChecklistItem, DbPhaseTransition, DbProject, DbSecurityAssessment,
DbTeamMember, SurrealDatabase, DbTeamMember, SurrealDatabase, SurrealDbBackendConfig, SurrealEngineConfig,
}; };
use uuid::Uuid; use uuid::Uuid;
/// Helper to connect to SurrealDB server /// Helper to connect to SurrealDB server.
/// Requires: surreal start --bind 127.0.0.1:8000 memory ///
/// Requires: `surreal start --bind 127.0.0.1:8000 memory --user root --pass root`
async fn get_server_db() -> Result<SurrealDatabase, Box<dyn std::error::Error>> { async fn get_server_db() -> Result<SurrealDatabase, Box<dyn std::error::Error>> {
// Try to connect to server; if it fails, we'll skip the test let cfg = SurrealDbBackendConfig {
let db = SurrealDatabase::new_server( core: SurrealEngineConfig::Ws {
"ws://localhost:8000", url: "ws://localhost:8000".to_string(),
"test_workspace", },
"test_projects", hot: SurrealEngineConfig::Ws {
None, url: "ws://localhost:8000".to_string(),
None, },
) namespace: "test_workspace".to_string(),
.await?; username: Some("root".to_string()),
password: Some("root".to_string()),
};
let db = SurrealDatabase::from_config(&cfg).await?;
Ok(db) Ok(db)
} }

View File

@ -13,6 +13,7 @@ syntaxis-core = { path = "../syntaxis" }
# Async # Async
tokio = { workspace = true } tokio = { workspace = true }
futures = { workspace = true }
# Logging and tracing # Logging and tracing
tracing = { workspace = true } tracing = { workspace = true }
@ -21,19 +22,31 @@ tracing-subscriber = { workspace = true }
# Serialization # Serialization
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }
bytes = { workspace = true }
# UUID and dates # UUID and dates
uuid = { workspace = true } uuid = { workspace = true }
chrono = { workspace = true } chrono = { workspace = true }
# HTTP client (with cookies feature) # HTTP client
reqwest = { workspace = true } reqwest = { workspace = true }
# NATS JetStream # NATS JetStream with NKey auth
async-nats = { workspace = true } platform-nats = { workspace = true }
# WebSocket (older version 0.20 for this crate) # Stratum orchestration
stratum-orchestrator = { workspace = true }
stratum-graph = { workspace = true }
stratum-state = { workspace = true }
# WebSocket
tokio-tungstenite = { workspace = true } tokio-tungstenite = { workspace = true }
# Other utilities # Error handling
anyhow = { workspace = true } anyhow = { workspace = true }
thiserror = { workspace = true }
[features]
default = []
orchestration = []
kogral = []

View File

@ -0,0 +1,184 @@
//! kogral integration bridge — forwards syntaxis lifecycle events to kogral.
//!
//! Enabled with the `kogral` feature. Internally uses `platform-nats::EventStream`
//! so the kogral-core dependency chain is not required.
use bytes::Bytes;
use platform_nats::{EventStream, NatsConfig};
use serde::{Deserialize, Serialize};
use crate::VaporaResult;
/// Connection config for the kogral NATS cluster.
///
/// Converted into a `platform_nats::NatsConfig` by `KogralBridge::connect`,
/// which fixes the subject filter to `kogral.syntaxis.>`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KogralBridgeConfig {
    /// NATS server URL of the kogral cluster, e.g. `nats://localhost:4222`
    pub url: String,
    /// JetStream stream that kogral consumes (default: `KOGRAL`)
    pub stream_name: String,
    /// Consumer name (default: `syntaxis-kogral`)
    pub consumer_name: String,
    /// NKey seed for signing outbound messages (optional)
    pub nkey_seed: Option<String>,
    /// Trusted public NKeys (empty → accept all)
    pub trusted_nkeys: Vec<String>,
}
impl Default for KogralBridgeConfig {
fn default() -> Self {
Self {
url: "nats://localhost:4222".to_string(),
stream_name: "KOGRAL".to_string(),
consumer_name: "syntaxis-kogral".to_string(),
nkey_seed: None,
trusted_nkeys: vec![],
}
}
}
/// Lifecycle event forwarded to kogral for knowledge graph indexing.
///
/// Serialized to JSON by `KogralBridge::emit`. When `phase` is `None` the
/// field is omitted from the JSON entirely (`skip_serializing_if`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct KogralEvent {
    /// Dot-separated event type, e.g. `syntaxis.project.created`
    pub event_type: String,
    /// Source project identifier
    pub project_id: String,
    /// Optional phase name relevant to the event
    #[serde(skip_serializing_if = "Option::is_none")]
    pub phase: Option<String>,
    /// RFC 3339 timestamp
    pub timestamp: String,
    /// Arbitrary structured payload
    pub payload: serde_json::Value,
}
/// Bridge that publishes syntaxis lifecycle events to kogral via NATS.
///
/// Wraps a `platform_nats::EventStream`; the manual `Debug` impl below avoids
/// requiring `Debug` on the underlying stream type.
pub struct KogralBridge {
    // JetStream handle established by `KogralBridge::connect`.
    stream: EventStream,
}

impl std::fmt::Debug for KogralBridge {
    // The stream itself is not printed; `finish_non_exhaustive` renders
    // `KogralBridge { .. }`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("KogralBridge").finish_non_exhaustive()
    }
}
impl KogralBridge {
    /// Connect to the kogral NATS cluster.
    ///
    /// Builds a `platform_nats::NatsConfig` from `cfg` with the subject filter
    /// fixed to `kogral.syntaxis.>` — matching every subject `emit` publishes
    /// to — and materialises the stream/consumer via `EventStream::connect`.
    ///
    /// # Errors
    ///
    /// Propagates any connection or stream-provisioning error from `platform-nats`.
    pub async fn connect(cfg: &KogralBridgeConfig) -> VaporaResult<Self> {
        let nats_cfg = NatsConfig {
            url: cfg.url.clone(),
            stream_name: cfg.stream_name.clone(),
            consumer_name: cfg.consumer_name.clone(),
            // Fixed filter; was `vec![format!("kogral.syntaxis.>")]` — `format!`
            // with no interpolation is a pointless allocation (clippy::useless_format).
            subjects: vec!["kogral.syntaxis.>".to_string()],
            nkey_seed: cfg.nkey_seed.clone(),
            trusted_nkeys: cfg.trusted_nkeys.clone(),
            require_signed_messages: false,
        };
        let stream = EventStream::connect(&nats_cfg).await?;
        tracing::info!(url = %cfg.url, stream = %cfg.stream_name, "kogral bridge connected");
        Ok(Self { stream })
    }

    /// Emit a generic lifecycle event to `kogral.syntaxis.<event_type>`.
    ///
    /// Dots in `event_type` are replaced with `-` so the whole event type
    /// occupies a single NATS subject token (e.g. `project.created` →
    /// `kogral.syntaxis.project-created`), keeping it under the `>` wildcard.
    pub async fn emit(&self, event: &KogralEvent) -> VaporaResult<()> {
        let subject = format!("kogral.syntaxis.{}", event.event_type.replace('.', "-"));
        let payload = Bytes::from(serde_json::to_vec(event)?);
        self.stream.publish(&subject, payload).await?;
        tracing::debug!(event_type = %event.event_type, project_id = %event.project_id, "kogral event emitted");
        Ok(())
    }

    /// Emit a `project.created` event carrying the project id as payload.
    pub async fn on_project_created(&self, project_id: &str) -> VaporaResult<()> {
        self.emit(&KogralEvent {
            event_type: "project.created".to_string(),
            project_id: project_id.to_string(),
            phase: None,
            timestamp: chrono::Utc::now().to_rfc3339(),
            payload: serde_json::json!({ "project_id": project_id }),
        })
        .await
    }

    /// Emit a `phase.transition` event; `phase` is set to the destination phase.
    pub async fn on_phase_transition(
        &self,
        project_id: &str,
        from_phase: &str,
        to_phase: &str,
    ) -> VaporaResult<()> {
        self.emit(&KogralEvent {
            event_type: "phase.transition".to_string(),
            project_id: project_id.to_string(),
            phase: Some(to_phase.to_string()),
            timestamp: chrono::Utc::now().to_rfc3339(),
            payload: serde_json::json!({
                "from": from_phase,
                "to": to_phase,
            }),
        })
        .await
    }

    /// Emit a `task.completed` event for `task_id` within `phase`.
    pub async fn on_task_completed(
        &self,
        project_id: &str,
        task_id: &str,
        phase: &str,
    ) -> VaporaResult<()> {
        self.emit(&KogralEvent {
            event_type: "task.completed".to_string(),
            project_id: project_id.to_string(),
            phase: Some(phase.to_string()),
            timestamp: chrono::Utc::now().to_rfc3339(),
            payload: serde_json::json!({ "task_id": task_id }),
        })
        .await
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_kogral_bridge_config_default() {
        // Defaults target the KOGRAL stream with signing disabled.
        let defaults = KogralBridgeConfig::default();
        assert!(defaults.nkey_seed.is_none());
        assert_eq!(defaults.stream_name, "KOGRAL");
    }

    #[test]
    fn test_kogral_event_serde() {
        let original = KogralEvent {
            event_type: "project.created".to_string(),
            project_id: "p1".to_string(),
            phase: None,
            timestamp: "2026-01-01T00:00:00Z".to_string(),
            payload: serde_json::json!({ "project_id": "p1" }),
        };
        let encoded = serde_json::to_string(&original).unwrap();
        // `phase` is None → must be absent from the serialized JSON (skip_serializing_if)
        assert!(!encoded.contains("\"phase\""));
        let roundtrip: KogralEvent = serde_json::from_str(&encoded).unwrap();
        assert_eq!(roundtrip.event_type, "project.created");
    }

    #[test]
    fn test_kogral_event_with_phase_serde() {
        let original = KogralEvent {
            event_type: "phase.transition".to_string(),
            project_id: "p2".to_string(),
            phase: Some("devel".to_string()),
            timestamp: "2026-01-01T00:00:00Z".to_string(),
            payload: serde_json::json!({ "from": "create", "to": "devel" }),
        };
        let encoded = serde_json::to_string(&original).unwrap();
        assert!(encoded.contains("\"phase\""));
        let roundtrip: KogralEvent = serde_json::from_str(&encoded).unwrap();
        assert_eq!(roundtrip.phase.as_deref(), Some("devel"));
    }
}

View File

@ -27,9 +27,13 @@ pub mod llm_provider;
pub mod multi_ia_router; pub mod multi_ia_router;
pub mod nats_bridge; pub mod nats_bridge;
pub mod observability; pub mod observability;
pub mod orchestration;
pub mod plugin; pub mod plugin;
pub mod security; pub mod security;
#[cfg(feature = "kogral")]
pub mod kogral_bridge;
/// VAPORA lifecycle integration result type /// VAPORA lifecycle integration result type
pub type VaporaResult<T> = anyhow::Result<T>; pub type VaporaResult<T> = anyhow::Result<T>;

View File

@ -1,251 +1,248 @@
//! NATS JetStream bridge for agent communication //! NATS JetStream bridge using `platform-nats` with NKey auth and signed messages.
//!
//! Real integration with NATS for multi-agent orchestration.
use anyhow::anyhow; use bytes::Bytes;
use platform_nats::{EventStream, NatsConfig};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::time::Duration; use std::time::Duration;
/// NATS broker configuration use crate::VaporaResult;
#[derive(Debug, Clone)]
/// Broker-level configuration for `NatsBridge`.
///
/// Call `NatsBrokerConfig::to_platform_config()` to convert to the
/// `platform_nats::NatsConfig` expected by `EventStream::connect`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NatsBrokerConfig { pub struct NatsBrokerConfig {
/// NATS server URL /// NATS server URL, e.g. `nats://localhost:4222`
pub url: String, pub url: String,
/// Max retries for connection /// JetStream stream name (created if absent)
pub max_retries: u32, pub stream_name: String,
/// Connection timeout /// Durable pull-consumer name (created if absent)
pub timeout: Duration, pub consumer_name: String,
/// Subject filter list, e.g. `["syntaxis.>"]`
pub subjects: Vec<String>,
/// ed25519 NKey seed for signing outbound messages (optional)
pub nkey_seed: Option<String>,
/// Public NKeys whose signatures are accepted (empty → accept all)
pub trusted_nkeys: Vec<String>,
/// Reject inbound messages that lack a valid NKey signature
pub require_signed_messages: bool,
} }
impl Default for NatsBrokerConfig { impl Default for NatsBrokerConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
url: "nats://localhost:4222".to_string(), url: "nats://localhost:4222".to_string(),
max_retries: 3, stream_name: "SYNTAXIS".to_string(),
timeout: Duration::from_secs(10), consumer_name: "syntaxis-consumer".to_string(),
subjects: vec!["syntaxis.>".to_string()],
nkey_seed: None,
trusted_nkeys: vec![],
require_signed_messages: false,
} }
} }
} }
/// Task message for NATS impl NatsBrokerConfig {
fn to_platform_config(&self) -> NatsConfig {
NatsConfig {
url: self.url.clone(),
stream_name: self.stream_name.clone(),
consumer_name: self.consumer_name.clone(),
subjects: self.subjects.clone(),
nkey_seed: self.nkey_seed.clone(),
trusted_nkeys: self.trusted_nkeys.clone(),
require_signed_messages: self.require_signed_messages,
}
}
}
/// Event configuration matching the kogral `NatsEventConfig` schema.
///
/// Allows downstream systems to express NATS connectivity in a unified format
/// and convert to `NatsBrokerConfig` via `From`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NatsEventConfig {
/// NATS server URL
pub url: String,
/// JetStream stream name
pub stream: String,
/// Durable pull-consumer name
pub consumer: String,
/// Subject filter list
pub subjects: Vec<String>,
/// NKey seed for signing outbound messages
#[serde(default)]
pub nkey_seed: Option<String>,
/// Trusted public NKeys
#[serde(default)]
pub trusted_nkeys: Vec<String>,
/// Reject inbound messages lacking a valid NKey signature
#[serde(default)]
pub require_signed_messages: bool,
}
impl From<NatsEventConfig> for NatsBrokerConfig {
fn from(cfg: NatsEventConfig) -> Self {
Self {
url: cfg.url,
stream_name: cfg.stream,
consumer_name: cfg.consumer,
subjects: cfg.subjects,
nkey_seed: cfg.nkey_seed,
trusted_nkeys: cfg.trusted_nkeys,
require_signed_messages: cfg.require_signed_messages,
}
}
}
/// Task message published to `syntaxis.tasks.<agent_role>.submit`
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskMessage { pub struct TaskMessage {
/// Task ID /// Unique task identifier
pub task_id: String, pub task_id: String,
/// Agent role /// Agent role this task is targeted at
pub agent_role: String, pub agent_role: String,
/// Task title /// Short task title
pub title: String, pub title: String,
/// Task description /// Full task description
pub description: String, pub description: String,
/// Priority (1-10) /// Priority in `1..=10` (higher = more urgent)
pub priority: u32, pub priority: u32,
/// Task data /// Arbitrary structured task data
pub data: serde_json::Value, pub data: serde_json::Value,
} }
/// Task result from agent /// Task result consumed from `syntaxis.tasks.<task_id>.result`
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TaskResult { pub struct TaskResult {
/// Task ID /// Task identifier correlating to the originating `TaskMessage`
pub task_id: String, pub task_id: String,
/// Agent who completed it /// Agent that completed (or failed) the task
pub agent_role: String, pub agent_role: String,
/// Status (completed, failed, timeout) /// `"completed"`, `"failed"`, or `"timeout"`
pub status: String, pub status: String,
/// Result data /// Result payload produced by the agent
pub result: serde_json::Value, pub result: serde_json::Value,
/// Execution time in milliseconds /// Wall-clock execution time in milliseconds
pub execution_time_ms: u64, pub execution_time_ms: u64,
} }
/// Agent status message /// Agent heartbeat published to `syntaxis.agents.<role>.status`
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgentStatus { pub struct AgentStatus {
/// Agent role /// Agent role identifier
pub role: String, pub role: String,
/// Is healthy /// Whether the agent is currently healthy
pub healthy: bool, pub healthy: bool,
/// Current task count /// Number of tasks currently in flight
pub task_count: usize, pub task_count: usize,
/// Max capacity /// Maximum concurrent task capacity
pub capacity: usize, pub capacity: usize,
/// Last heartbeat /// RFC 3339 timestamp of the last heartbeat
pub last_heartbeat: String, pub last_heartbeat: String,
} }
/// NATS JetStream agent bridge (production-ready) /// Production NATS JetStream bridge backed by `platform-nats::EventStream`.
#[derive(Debug, Clone)]
pub struct NatsBridge { pub struct NatsBridge {
/// NATS broker configuration stream: EventStream,
#[allow(dead_code)]
config: NatsBrokerConfig, config: NatsBrokerConfig,
// In production: async_nats::jetstream::Context }
/// Connection state
_connected: bool, impl std::fmt::Debug for NatsBridge {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("NatsBridge")
.field("config", &self.config)
.finish_non_exhaustive()
}
} }
impl NatsBridge { impl NatsBridge {
/// Create new NATS bridge with configuration /// Connect to NATS and materialise the configured stream + consumer.
pub async fn new(config: NatsBrokerConfig) -> anyhow::Result<Self> { pub async fn connect(config: NatsBrokerConfig) -> VaporaResult<Self> {
// Production: Connect to NATS let stream = EventStream::connect(&config.to_platform_config()).await?;
// let client = async_nats::connect(&config.url).await tracing::info!(url = %config.url, stream = %config.stream_name, "NATS bridge connected");
// .map_err(|e| anyhow!("Failed to connect to NATS: {}", e))?; Ok(Self { stream, config })
// let jetstream = async_nats::jetstream::new(client);
tracing::info!("NATS Bridge initialized with URL: {}", config.url);
Ok(Self {
config,
_connected: true,
})
} }
/// Submit task to agent pool /// Publish a task to `syntaxis.tasks.<agent_role>.submit`.
pub async fn submit_task(&self, task: &TaskMessage) -> anyhow::Result<String> { ///
let _subject = format!("tasks.{}.submit", task.agent_role); /// Returns the `task_id` on success for correlation tracking.
let _payload = serde_json::to_vec(task)?; pub async fn submit_task(&self, task: &TaskMessage) -> VaporaResult<String> {
let subject = format!("syntaxis.tasks.{}.submit", task.agent_role);
tracing::info!("Publishing task {} to subject: {}", task.task_id, _subject); let payload = Bytes::from(serde_json::to_vec(task)?);
self.stream.publish(&subject, payload).await?;
// Production: tracing::debug!(task_id = %task.task_id, subject = %subject, "task submitted");
// let context = async_nats::jetstream::new(client);
// context.publish(_subject, _payload.into()).await?;
Ok(task.task_id.clone()) Ok(task.task_id.clone())
} }
/// Request task from agent (synchronous with timeout) /// Pull up to `max_msgs` task results from the consumer.
pub async fn request_task_result( ///
&self, /// Each message is ACKed after successful deserialization.
task_id: &str, /// Messages that fail to deserialize are ACKed (removed) and excluded.
_timeout: Duration, pub async fn pull_task_results(&self, max_msgs: usize) -> VaporaResult<Vec<TaskResult>> {
) -> anyhow::Result<TaskResult> { let batch = self.stream.pull_batch(max_msgs).await?;
let _subject = format!("tasks.{}.result", task_id); let mut results = Vec::with_capacity(batch.len());
tracing::info!("Requesting task result: {}", task_id); for (subject, payload, msg) in batch {
match serde_json::from_slice::<TaskResult>(&payload) {
// Production: Ok(result) => {
// let msg = jetstream msg.ack()
// .request(_subject, "".into(), _timeout) .await
// .await?; .map_err(|e| anyhow::anyhow!("ack failed on '{subject}': {e}"))?;
// Ok(serde_json::from_slice(&msg.payload)?) results.push(result);
}
Err(anyhow!("Task {} not yet completed", task_id)) Err(e) => {
tracing::warn!(subject = %subject, "dropping unparseable task result: {e}");
let _ = msg.ack().await;
}
}
} }
/// Subscribe to task results Ok(results)
pub async fn subscribe_task_results(&self, agent_role: &str) -> anyhow::Result<()> { }
let subject = format!("tasks.{}.results", agent_role);
tracing::info!("Subscribing to task results: {}", subject);
// Production:
// let subscriber = jetstream.subscribe(subject).await?;
// loop {
// if let Ok(Some(msg)) = subscriber.next_timeout(Duration::from_secs(5)).await {
// let result: TaskResult = serde_json::from_slice(&msg.payload)?;
// self.process_result(&result).await?;
// }
// }
/// Publish an agent status heartbeat.
pub async fn publish_agent_status(&self, status: &AgentStatus) -> VaporaResult<()> {
let subject = format!("syntaxis.agents.{}.status", status.role);
let payload = Bytes::from(serde_json::to_vec(status)?);
self.stream.publish(&subject, payload).await?;
Ok(()) Ok(())
} }
/// Publish agent status /// Ping NATS by publishing a zero-byte keepalive to `syntaxis.ping`.
pub async fn publish_agent_status(&self, status: &AgentStatus) -> anyhow::Result<()> { pub async fn health_check(&self) -> VaporaResult<()> {
let _subject = format!("agents.{}.status", status.role); self.stream
let _payload = serde_json::to_vec(status)?; .publish("syntaxis.ping", Bytes::new())
.await
tracing::info!("Publishing agent status: {} -> {}", status.role, _subject); .map_err(|e| anyhow::anyhow!("NATS health check failed: {e}"))
// Production:
// jetstream.publish(_subject, _payload.into()).await?;
Ok(())
} }
/// Subscribe to agent status updates /// Wait up to `max_wait` for a specific `task_id` result via polling pull.
pub async fn subscribe_agent_status(&self) -> anyhow::Result<()> {
let _subject = "agents.*.status";
tracing::info!("Subscribing to agent status updates");
// Production:
// let subscriber = jetstream.subscribe(subject).await?;
// Handle incoming status updates
Ok(())
}
/// List active agents
pub async fn list_agents(&self) -> anyhow::Result<Vec<AgentStatus>> {
// Production: Request agent list from NATS directory
tracing::info!("Listing active agents");
Ok(vec![
AgentStatus {
role: "developer".to_string(),
healthy: true,
task_count: 2,
capacity: 10,
last_heartbeat: chrono::Local::now().to_rfc3339(),
},
AgentStatus {
role: "tester".to_string(),
healthy: true,
task_count: 1,
capacity: 5,
last_heartbeat: chrono::Local::now().to_rfc3339(),
},
])
}
/// Wait for task completion
pub async fn wait_for_completion( pub async fn wait_for_completion(
&self, &self,
task_id: &str, task_id: &str,
max_wait: Duration, max_wait: Duration,
) -> anyhow::Result<TaskResult> { ) -> VaporaResult<TaskResult> {
let start = std::time::Instant::now(); let start = std::time::Instant::now();
loop { loop {
match self let batch = self.pull_task_results(32).await?;
.request_task_result(task_id, Duration::from_secs(1)) if let Some(r) = batch.into_iter().find(|r| r.task_id == task_id) {
.await return Ok(r);
{ }
Ok(result) => return Ok(result),
Err(_) => {
if start.elapsed() > max_wait { if start.elapsed() > max_wait {
return Err(anyhow!("Task {} timed out", task_id)); return Err(anyhow::anyhow!("task '{}' timed out", task_id));
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
} }
tokio::time::sleep(Duration::from_millis(200)).await;
} }
} }
/// Get health status of NATS connection /// Return the broker configuration this bridge was created with.
pub async fn health_check(&self) -> anyhow::Result<()> { pub fn config(&self) -> &NatsBrokerConfig {
// Production: Send ping to NATS server &self.config
tracing::info!("NATS health check");
Ok(())
}
/// Create JetStream stream for tasks if not exists
pub async fn ensure_stream(&self) -> anyhow::Result<()> {
// Production:
// let jetstream = async_nats::jetstream::new(client);
// jetstream
// .get_or_create_stream(async_nats::jetstream::stream::Config {
// name: "TASKS".to_string(),
// subjects: vec!["tasks.>".to_string()],
// ..Default::default()
// })
// .await?;
tracing::info!("Task stream ensured");
Ok(())
} }
} }
@ -254,80 +251,78 @@ mod tests {
use super::*; use super::*;
#[test] #[test]
fn test_nats_config_default() { fn test_broker_config_default() {
let config = NatsBrokerConfig::default(); let cfg = NatsBrokerConfig::default();
assert_eq!(config.url, "nats://localhost:4222"); assert_eq!(cfg.url, "nats://localhost:4222");
assert_eq!(config.max_retries, 3); assert_eq!(cfg.stream_name, "SYNTAXIS");
assert!(!cfg.require_signed_messages);
} }
#[test] #[test]
fn test_task_message() { fn test_nats_event_config_into_broker() {
let task = TaskMessage { let event_cfg = NatsEventConfig {
task_id: "task1".to_string(), url: "nats://prod:4222".to_string(),
agent_role: "developer".to_string(), stream: "PROD_STREAM".to_string(),
title: "Test Task".to_string(), consumer: "prod-consumer".to_string(),
description: "A test".to_string(), subjects: vec!["syntaxis.>".to_string()],
priority: 5, nkey_seed: Some("SXXX".to_string()),
data: serde_json::json!({}), trusted_nkeys: vec!["PUBKEY".to_string()],
require_signed_messages: true,
}; };
assert_eq!(task.agent_role, "developer"); let broker: NatsBrokerConfig = event_cfg.into();
assert_eq!(broker.url, "nats://prod:4222");
assert_eq!(broker.stream_name, "PROD_STREAM");
assert_eq!(broker.nkey_seed.unwrap(), "SXXX");
assert!(broker.require_signed_messages);
} }
#[test] #[test]
fn test_task_result() { fn test_to_platform_config_round_trip() {
let result = TaskResult { let cfg = NatsBrokerConfig {
task_id: "task1".to_string(), url: "nats://host:4222".to_string(),
stream_name: "S".to_string(),
consumer_name: "C".to_string(),
subjects: vec!["s.>".to_string()],
nkey_seed: None,
trusted_nkeys: vec![],
require_signed_messages: false,
};
let platform = cfg.to_platform_config();
assert_eq!(platform.url, cfg.url);
assert_eq!(platform.stream_name, cfg.stream_name);
assert_eq!(platform.consumer_name, cfg.consumer_name);
}
#[test]
fn test_task_message_serde() {
let task = TaskMessage {
task_id: "t1".to_string(),
agent_role: "developer".to_string(),
title: "Impl X".to_string(),
description: "Details".to_string(),
priority: 5,
data: serde_json::json!({ "repo": "syntaxis" }),
};
let json = serde_json::to_string(&task).unwrap();
let decoded: TaskMessage = serde_json::from_str(&json).unwrap();
assert_eq!(decoded.task_id, "t1");
assert_eq!(decoded.priority, 5);
}
#[test]
fn test_task_result_serde() {
let r = TaskResult {
task_id: "t1".to_string(),
agent_role: "developer".to_string(), agent_role: "developer".to_string(),
status: "completed".to_string(), status: "completed".to_string(),
result: serde_json::json!({ "success": true }), result: serde_json::json!({ "success": true }),
execution_time_ms: 1500, execution_time_ms: 420,
}; };
let json = serde_json::to_string(&r).unwrap();
assert_eq!(result.status, "completed"); let decoded: TaskResult = serde_json::from_str(&json).unwrap();
assert_eq!(result.execution_time_ms, 1500); assert_eq!(decoded.status, "completed");
} assert_eq!(decoded.execution_time_ms, 420);
#[tokio::test]
async fn test_bridge_creation() {
let config = NatsBrokerConfig::default();
let bridge = NatsBridge::new(config).await;
assert!(bridge.is_ok());
}
#[tokio::test]
async fn test_submit_task() {
let config = NatsBrokerConfig::default();
let bridge = NatsBridge::new(config).await.unwrap();
let task = TaskMessage {
task_id: "task1".to_string(),
agent_role: "developer".to_string(),
title: "Test".to_string(),
description: "Test task".to_string(),
priority: 5,
data: serde_json::json!({}),
};
let result = bridge.submit_task(&task).await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_list_agents() {
let config = NatsBrokerConfig::default();
let bridge = NatsBridge::new(config).await.unwrap();
let agents = bridge.list_agents().await.unwrap();
assert!(!agents.is_empty());
}
#[tokio::test]
async fn test_health_check() {
let config = NatsBrokerConfig::default();
let bridge = NatsBridge::new(config).await.unwrap();
let health = bridge.health_check().await;
assert!(health.is_ok());
} }
} }

View File

@ -0,0 +1,216 @@
//! Phase orchestration wiring: `StageRunner` + `ActionGraph` + Cedar + Vault.
//!
//! `PhaseOrchestrator` listens for syntaxis phase transitions and runs the
//! corresponding `ActionGraph` pipeline via `stratum-orchestrator`.
use std::{path::PathBuf, sync::Arc};
use anyhow::Result;
use serde::{Deserialize, Serialize};
use stratum_graph::ActionGraph;
use stratum_orchestrator::{
auth::{CedarAuthorizer, VaultClient, VaultConfig},
context::PipelineContext,
executor::NuExecutor,
graph::load_graph_from_dir,
runner::StageRunner,
};
use stratum_state::{InMemoryStateTracker, PipelineStatus, StateTracker};
use tokio::sync::RwLock;
use tracing::info;
/// Configuration for `PhaseOrchestrator`.
///
/// Consumed by `PhaseOrchestrator::from_config`, which creates the
/// `scripts_base` and `schema_dir` directories if missing.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OrchestratorConfig {
    /// Directory containing NuShell step scripts.
    pub scripts_base: PathBuf,
    /// Directory containing `.ncl` ActionGraph node definitions.
    pub graphs_dir: PathBuf,
    /// Directory containing capability `.ncl` schema files (used by `PipelineContext`).
    pub schema_dir: PathBuf,
    /// Enforce Cedar authorization on every pipeline node execution.
    pub cedar_required: bool,
    /// Directory containing `.cedar` policy files.
    ///
    /// Required only when `cedar_required = true`. When `cedar_required = false`
    /// a permissive allow-all policy is written to a temp directory automatically.
    pub cedar_policy_dir: Option<PathBuf>,
    /// HashiCorp Vault URL for credential injection.
    ///
    /// If empty and `cedar_required = false`, the Vault client is never invoked
    /// because no nodes are dispatched without a defined graph.
    #[serde(default)]
    pub vault_url: String,
    /// Vault token.
    #[serde(default)]
    pub vault_token: String,
}
impl OrchestratorConfig {
    /// In-process default for tests: ephemeral directories, no Cedar, no Vault calls.
    pub fn for_testing() -> Self {
        // All three working directories live under the OS temp dir.
        let tmp = std::env::temp_dir();
        Self {
            scripts_base: tmp.join("syntaxis-scripts"),
            graphs_dir: tmp.join("syntaxis-graphs"),
            schema_dir: tmp.join("syntaxis-schema"),
            cedar_required: false,
            cedar_policy_dir: None,
            vault_url: String::new(),
            vault_token: String::new(),
        }
    }
}
/// Orchestrator that maps syntaxis phase transitions to `StageRunner` pipelines.
///
/// Built via `from_config`; `on_phase_transition` is the single entry point.
pub struct PhaseOrchestrator {
    // Stratum pipeline runner holding graph, executor, state, auth, and vault.
    runner: StageRunner,
    // Capability schema directory handed to each `PipelineContext`.
    schema_dir: PathBuf,
}

impl std::fmt::Debug for PhaseOrchestrator {
    // `StageRunner` is not printed; only the schema dir is shown, then `..`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PhaseOrchestrator")
            .field("schema_dir", &self.schema_dir)
            .finish_non_exhaustive()
    }
}
impl PhaseOrchestrator {
/// Construct from config, building all internal components.
///
/// When `cedar_required = false` and `cedar_policy_dir` is absent, a
/// permissive allow-all `.cedar` policy is written to a temp directory so
/// `CedarAuthorizer::load_from_dir` can succeed without a real policy set.
pub async fn from_config(
cfg: &OrchestratorConfig,
state: Arc<dyn StateTracker>,
) -> Result<Self> {
// Load ActionGraph (empty dir produces an empty graph with a warning).
let graph = if cfg.graphs_dir.exists() {
load_graph_from_dir(&cfg.graphs_dir)?
} else {
ActionGraph::from_nodes(vec![])?
};
// Build Cedar authorizer.
let cedar_dir = match &cfg.cedar_policy_dir {
Some(dir) => dir.clone(),
None => {
// Write a permissive allow-all policy to a stable temp path.
let tmp = std::env::temp_dir().join("syntaxis-cedar-permissive");
std::fs::create_dir_all(&tmp)?;
let policy_path = tmp.join("allow-all.cedar");
std::fs::write(
&policy_path,
r#"permit(principal, action, resource);"#,
)?;
tmp
}
};
let cedar = CedarAuthorizer::load_from_dir(&cedar_dir)?;
let vault = VaultClient::new(VaultConfig {
url: cfg.vault_url.clone(),
token: cfg.vault_token.clone(),
});
// Ensure script base dir exists so NuExecutor can locate scripts.
std::fs::create_dir_all(&cfg.scripts_base)?;
std::fs::create_dir_all(&cfg.schema_dir)?;
let runner = StageRunner {
graph: Arc::new(RwLock::new(graph)),
executor: Arc::new(NuExecutor::new(cfg.scripts_base.clone())),
state,
auth: Arc::new(cedar),
vault: Arc::new(vault),
cedar_required: cfg.cedar_required,
};
Ok(Self {
runner,
schema_dir: cfg.schema_dir.clone(),
})
}
/// Build with an `InMemoryStateTracker` — intended for tests.
pub async fn for_testing() -> Result<Self> {
let cfg = OrchestratorConfig::for_testing();
let state = Arc::new(InMemoryStateTracker::new()) as Arc<dyn StateTracker>;
Self::from_config(&cfg, state).await
}
/// Called on every successful syntaxis phase transition.
///
/// Publishes to subject `syntaxis.phase.<from>.<to>`, deposits `project_id`
/// as the trigger payload, and runs the matching pipeline.
///
/// Returns `PipelineStatus::Success` when there are no graph nodes to
/// execute (empty or unmatched graph).
pub async fn on_phase_transition(
&self,
project_id: &str,
from_phase: &str,
to_phase: &str,
) -> Result<PipelineStatus> {
let subject = format!("syntaxis.phase.{from_phase}.{to_phase}");
let payload = serde_json::json!({ "project_id": project_id });
info!(
project_id,
from_phase,
to_phase,
subject = %subject,
"phase transition — dispatching pipeline"
);
let ctx = Arc::new(
PipelineContext::new(
subject,
payload,
Arc::clone(&self.runner.state),
self.schema_dir.clone(),
)
.await?,
);
self.runner.run_pipeline(ctx).await
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use stratum_state::PipelineStatus;

    #[tokio::test]
    async fn test_orchestrator_empty_graph_returns_success() {
        // An empty ActionGraph dispatches no nodes → the pipeline reports Success.
        let orchestrator = PhaseOrchestrator::for_testing()
            .await
            .expect("orchestrator construction");
        let outcome = orchestrator
            .on_phase_transition("proj-001", "create", "devel")
            .await
            .expect("phase transition");
        assert_eq!(outcome, PipelineStatus::Success);
    }

    #[tokio::test]
    async fn test_orchestrator_custom_state_tracker() {
        // Same path, but the caller supplies the StateTracker explicitly.
        let tracker: Arc<dyn StateTracker> = Arc::new(InMemoryStateTracker::new());
        let cfg = OrchestratorConfig::for_testing();
        let orchestrator = PhaseOrchestrator::from_config(&cfg, Arc::clone(&tracker))
            .await
            .expect("orchestrator from config");
        let outcome = orchestrator
            .on_phase_transition("proj-002", "devel", "publish")
            .await
            .expect("phase transition");
        assert_eq!(outcome, PipelineStatus::Success);
    }
}

View File

@ -42,7 +42,7 @@ default = "sqlite"
type = "sqlite" type = "sqlite"
name = "SQLite" name = "SQLite"
description = "File-based, no server" description = "File-based, no server"
platforms = [linux, macos, windows] platforms = ["linux", "macos", "windows"]
# Installation steps/checklist # Installation steps/checklist
[checklist] [checklist]

View File

@ -17,7 +17,9 @@ path = "examples/config_discovery.rs"
[dependencies] [dependencies]
anyhow = "1.0" anyhow = "1.0"
serde = { version = "1.0", features = ["derive"], optional = true } serde = { version = "1.0", features = ["derive"], optional = true }
serde_json = { version = "1.0", optional = true }
toml = { version = "0.9", optional = true } toml = { version = "0.9", optional = true }
tokio = { version = "1", features = ["process", "io-util"], optional = true }
dirs = "6.0" dirs = "6.0"
inquire = { version = "0.9", optional = true } inquire = { version = "0.9", optional = true }
@ -29,6 +31,7 @@ default = ["config-discovery", "manifest"]
config-discovery = [] config-discovery = []
manifest = ["serde", "toml"] manifest = ["serde", "toml"]
interactive = ["inquire"] interactive = ["inquire"]
nickel = ["dep:tokio", "dep:serde_json", "serde"]
[[example]] [[example]]
name = "manifest_usage" name = "manifest_usage"

View File

@ -73,6 +73,9 @@ pub mod xdg;
#[cfg(feature = "manifest")] #[cfg(feature = "manifest")]
pub mod manifest_manager; pub mod manifest_manager;
#[cfg(feature = "nickel")]
pub mod nickel;
// Re-export commonly used items from config_finder // Re-export commonly used items from config_finder
pub use config_finder::{ pub use config_finder::{
find_config_path, find_config_path_or, find_config_path_warn_conflicts, find_config_path, find_config_path_or, find_config_path_warn_conflicts,

118
shared/rust/nickel.rs Normal file
View File

@ -0,0 +1,118 @@
//! Nickel configuration loader.
//!
//! Evaluates `.ncl` files to JSON via the `nickel export` CLI.
//! Optionally resolves OCI imports via `ncl-import-resolver` when a
//! `resolver-manifest.json` file is present alongside the config.
//!
//! # Requirements
//!
//! - `nickel` must be on `PATH`.
//! - `ncl-import-resolver` must be on `PATH` when OCI imports are used.
use std::path::Path;
use anyhow::{anyhow, Context};
use serde::de::DeserializeOwned;
use tokio::process::Command;
/// Evaluate a Nickel configuration file and deserialize the result into `T`.
///
/// Steps:
/// 1. If a `resolver-manifest.json` exists beside the `.ncl` file, run
///    `ncl-import-resolver <manifest>` to resolve OCI imports.
/// 2. Run `nickel export --format json <ncl_path>` and capture stdout.
/// 3. Deserialize the JSON output via `serde_json`.
///
/// # Errors
///
/// Returns an error if:
/// - `ncl-import-resolver` exits non-zero (when `resolver-manifest.json` exists).
/// - `nickel export` exits non-zero or cannot be found.
/// - The JSON output cannot be deserialized into `T`.
pub async fn load_nickel_config<T: DeserializeOwned>(ncl_path: &Path) -> anyhow::Result<T> {
    // Step 1: OCI import resolution, only when a manifest sits next to the config.
    let manifest = ncl_path
        .parent()
        .map(|dir| dir.join("resolver-manifest.json"));
    if let Some(manifest) = manifest.filter(|m| m.exists()) {
        let status = Command::new("ncl-import-resolver")
            .arg(&manifest)
            .status()
            .await
            .context("launching ncl-import-resolver")?;
        if !status.success() {
            return Err(anyhow!(
                "ncl-import-resolver failed (exit {:?}) on '{}'",
                status.code(),
                manifest.display()
            ));
        }
    }

    // Step 2: evaluate the config to JSON on stdout.
    let export = Command::new("nickel")
        .args(["export", "--format", "json"])
        .arg(ncl_path)
        .output()
        .await
        .with_context(|| format!("launching `nickel export` on '{}'", ncl_path.display()))?;
    if !export.status.success() {
        let stderr = String::from_utf8_lossy(&export.stderr);
        return Err(anyhow!(
            "`nickel export` failed on '{}': {}",
            ncl_path.display(),
            stderr.trim()
        ));
    }

    // Step 3: decode the exported JSON into the caller's type.
    serde_json::from_slice::<T>(&export.stdout).with_context(|| {
        format!(
            "deserializing Nickel output from '{}' into {}",
            ncl_path.display(),
            std::any::type_name::<T>()
        )
    })
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde::Deserialize;
    // NOTE: the previous `use std::io::Write;` was unused — the tests call the
    // free function `std::fs::write`, not the `Write` trait — and tripped the
    // `unused_imports` lint under `clippy -D warnings`. Removed.

    /// Minimal deserialization target for exercising the loader.
    #[derive(Debug, Deserialize, PartialEq)]
    struct Simple {
        name: String,
        value: u32,
    }

    /// Verifies the error path when `nickel` is not installed or the file
    /// doesn't exist (CI without nickel will hit this branch).
    #[tokio::test]
    async fn test_load_nickel_config_missing_file() {
        let result = load_nickel_config::<Simple>(Path::new("/nonexistent/path.ncl")).await;
        assert!(
            result.is_err(),
            "expected error for non-existent .ncl file"
        );
    }

    /// Verifies resolver-manifest detection: if no manifest exists alongside the
    /// config file, resolver is skipped (only nickel export runs).
    #[tokio::test]
    async fn test_load_nickel_config_no_manifest_skips_resolver() {
        let dir = tempfile::tempdir().unwrap();
        let ncl_path = dir.path().join("config.ncl");
        std::fs::write(&ncl_path, r#"{ name = "test", value = 42 }"#).unwrap();
        // nickel may or may not be installed; either way, no manifest → no resolver invoked.
        let _ = load_nickel_config::<Simple>(&ncl_path).await;
    }
}