chore: ontology sync + 4 NCL ADRs + landing page update
Some checks failed
Documentation Lint & Validation / Markdown Linting (push) Has been cancelled
Documentation Lint & Validation / Validate mdBook Configuration (push) Has been cancelled
Documentation Lint & Validation / Content & Structure Validation (push) Has been cancelled
mdBook Build & Deploy / Build mdBook (push) Has been cancelled
Nickel Type Check / Nickel Type Checking (push) Has been cancelled
Rust CI / Security Audit (push) Has been cancelled
Rust CI / Check + Test + Lint (nightly) (push) Has been cancelled
Rust CI / Check + Test + Lint (stable) (push) Has been cancelled
Documentation Lint & Validation / Lint & Validation Summary (push) Has been cancelled
mdBook Build & Deploy / Documentation Quality Check (push) Has been cancelled
mdBook Build & Deploy / Deploy to GitHub Pages (push) Has been cancelled
mdBook Build & Deploy / Notification (push) Has been cancelled
Some checks failed
Documentation Lint & Validation / Markdown Linting (push) Has been cancelled
Documentation Lint & Validation / Validate mdBook Configuration (push) Has been cancelled
Documentation Lint & Validation / Content & Structure Validation (push) Has been cancelled
mdBook Build & Deploy / Build mdBook (push) Has been cancelled
Nickel Type Check / Nickel Type Checking (push) Has been cancelled
Rust CI / Security Audit (push) Has been cancelled
Rust CI / Check + Test + Lint (nightly) (push) Has been cancelled
Rust CI / Check + Test + Lint (stable) (push) Has been cancelled
Documentation Lint & Validation / Lint & Validation Summary (push) Has been cancelled
mdBook Build & Deploy / Documentation Quality Check (push) Has been cancelled
mdBook Build & Deploy / Deploy to GitHub Pages (push) Has been cancelled
mdBook Build & Deploy / Notification (push) Has been cancelled
on+re:
- core.ncl: 4 new Practice nodes (notification-channels,
vapora-capabilities, agent-hot-reload-stable-identity,
merkle-audit-trail) + 5 new edges;
knowledge-graph-execution-history updated with HNSW+BM25+RRF
- state.ncl: production-readiness blocker/catalyst updated (hot-reload
complete, BudgetManager/LLMRouter still require restart);
ontoref-integration catalyst updated (vapora-ontology/reflection
crates, api-catalog.json, nickel contracts)
ADRs (NCL):
- adr-013: KG hybrid search — HNSW+BM25+RRF, rejected in-process scan
- adr-014: capability packages — AgentDefinition→vapora-shared,
DashMap shard-before-await constraint
- adr-015: Merkle audit trail — SHA-256 hash chain, rejected HMAC
- adr-016: agent hot-reload — stable_id=role, learning_profiles survive
drain, BudgetManager excluded from reload scope
landing page:
- 2 new feature boxes: VCS-Agnostic Worktree (jj/git), Ontology Protocol
- KG box: 20→28 tests, HNSW+BM25+RRF description
- Agents box: 71→82 tests, hot-reload + stable_id
- tech stack: Rust 21→23 crates, added jj, Radicle, ontoref badges
- status badge: 620→691 tests
This commit is contained in:
parent
c5f4caa2ab
commit
75e5ebd9a2
88 changed files with 8450 additions and 378 deletions
22
.ontology/connections.ncl
Normal file
22
.ontology/connections.ncl
Normal file
|
|
@@ -0,0 +1,22 @@
|
|||
let s = import "reflection/schemas/connections.ncl" in
|
||||
|
||||
{
|
||||
upstream = [
|
||||
{
|
||||
project = "ontoref",
|
||||
kind = 'LibraryDependency,
|
||||
node = "protocol-as-standalone",
|
||||
via = "local",
|
||||
note = "Ontoref protocol: .ontology/ schemas, ADR lifecycle, reflection modes, daemon sync.",
|
||||
},
|
||||
{
|
||||
project = "kogral",
|
||||
kind = 'DataSource,
|
||||
node = "",
|
||||
via = "local",
|
||||
note = "Context enrichment for workflow engine: guidelines, patterns, and ADRs read from KOGRAL_PATH env var (filesystem, external to vapora repo). Used by WorkflowOrchestrator::enrich_context_from_kogral and vapora-cli --kogral flag.",
|
||||
},
|
||||
],
|
||||
downstream = [],
|
||||
peers = [],
|
||||
} | s.Connections
|
||||
407
.ontology/core.ncl
Normal file
407
.ontology/core.ncl
Normal file
|
|
@@ -0,0 +1,407 @@
|
|||
let d = import "defaults/core.ncl" in
|
||||
|
||||
{
|
||||
nodes = [
|
||||
|
||||
# ── Axioms (invariant = true) ─────────────────────────────────────────────
|
||||
|
||||
d.make_node {
|
||||
id = "async-first",
|
||||
name = "Async-First Architecture",
|
||||
pole = 'Yang,
|
||||
level = 'Axiom,
|
||||
description = "All I/O is non-blocking. Tokio is the only async runtime. Sync operations use spawn_blocking. No blocking calls in async context, no sleep-based coordination.",
|
||||
invariant = true,
|
||||
artifact_paths = ["crates/vapora-backend/src/main.rs", "crates/vapora-agents/src/coordinator.rs"],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "cost-aware-routing",
|
||||
name = "Cost-Aware LLM Routing",
|
||||
pole = 'Yang,
|
||||
level = 'Axiom,
|
||||
description = "Every LLM call is subject to per-role budget enforcement. Budget limits are non-negotiable — calls that exceed budget fall back to cheaper providers or are rejected. Cost tracking is per provider, task type, and token count.",
|
||||
invariant = true,
|
||||
artifact_paths = [
|
||||
"crates/vapora-llm-router/src/budget.rs",
|
||||
"crates/vapora-llm-router/src/cost_tracker.rs",
|
||||
"crates/vapora-llm-router/src/cost_ranker.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "multi-tenant-isolation",
|
||||
name = "Multi-Tenant Isolation",
|
||||
pole = 'Yang,
|
||||
level = 'Axiom,
|
||||
description = "All data is scoped per workspace/tenant via SurrealDB scopes. No query may access records outside the authenticated scope. RBAC via Cedar policies.",
|
||||
invariant = true,
|
||||
artifact_paths = [
|
||||
"crates/vapora-backend/src/services/",
|
||||
"crates/vapora-backend/src/audit.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "learning-based-selection",
|
||||
name = "Learning-Based Agent Selection",
|
||||
pole = 'Yang,
|
||||
level = 'Axiom,
|
||||
description = "Agent selection is driven by expertise profiles built from execution history with recency bias (7-day window, 3x weight). Scoring: 0.3*load + 0.5*expertise + 0.2*confidence. Cold-start agents receive neutral confidence to prevent overfitting on small samples.",
|
||||
invariant = true,
|
||||
artifact_paths = [
|
||||
"crates/vapora-agents/src/learning_profile.rs",
|
||||
"crates/vapora-agents/src/scoring.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "message-based-coordination",
|
||||
name = "Message-Based Agent Coordination",
|
||||
pole = 'Yang,
|
||||
level = 'Axiom,
|
||||
description = "Agents never call each other directly. All coordination is via NATS JetStream messages. Backend never calls agent methods directly — only enqueues jobs via AgentCoordinator.",
|
||||
invariant = true,
|
||||
artifact_paths = [
|
||||
"crates/vapora-agents/src/coordinator.rs",
|
||||
"crates/vapora-agents/src/messages.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "surreal-persistence",
|
||||
name = "SurrealDB as Single Source of Truth",
|
||||
pole = 'Yang,
|
||||
level = 'Axiom,
|
||||
description = "All persistent state lives in SurrealDB. In-memory structures (DashMap, Arc<RwLock>) are caches only. Any data that must survive restart must be written to SurrealDB with parameterized bindings.",
|
||||
invariant = true,
|
||||
artifact_paths = [
|
||||
"crates/vapora-backend/src/services/",
|
||||
"migrations/",
|
||||
],
|
||||
},
|
||||
|
||||
# ── Tensions ──────────────────────────────────────────────────────────────
|
||||
|
||||
d.make_node {
|
||||
id = "cost-vs-capability",
|
||||
name = "Cost vs Capability",
|
||||
pole = 'Spiral,
|
||||
level = 'Tension,
|
||||
description = "Higher-capability models produce better results but consume budget faster. Budget enforcement resolves this by degrading gracefully to cheaper providers when limits approach — capability is sacrificed before budget is breached.",
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "learning-convergence-vs-freshness",
|
||||
name = "Learning Convergence vs Freshness",
|
||||
pole = 'Spiral,
|
||||
level = 'Tension,
|
||||
description = "More execution history improves expertise scoring accuracy but introduces staleness. The 7-day recency window with 3x bias is the resolution: distant history is not discarded but is outweighted by recent performance.",
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "agent-autonomy-vs-budget-control",
|
||||
name = "Agent Autonomy vs Budget Control",
|
||||
pole = 'Spiral,
|
||||
level = 'Tension,
|
||||
description = "Agents need to select the best provider for a task (autonomy) but operators need guaranteed cost boundaries (control). Three-tier enforcement (normal -> near-threshold -> exceeded) is the resolution: agents operate freely until approaching limits, then are constrained.",
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "wasm-isolation-vs-ssr",
|
||||
name = "WASM Isolation vs SSR Capability",
|
||||
pole = 'Spiral,
|
||||
level = 'Tension,
|
||||
description = "Leptos CSR-only mode provides clean WASM isolation with no SSR complexity but sacrifices initial load performance and SEO. Current resolution: CSR-only. SSR is not in scope unless Leptos hydration issues are resolved.",
|
||||
},
|
||||
|
||||
# ── Practices ─────────────────────────────────────────────────────────────
|
||||
|
||||
d.make_node {
|
||||
id = "knowledge-graph-execution-history",
|
||||
name = "Knowledge Graph as Execution Memory",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "All agent executions are recorded as temporal nodes in the knowledge graph. Learning curves are computed from daily-windowed aggregations. Similarity search uses a hybrid retrieval pipeline: HNSW (SurrealDB 3 native ANN) + BM25 full-text search fused via Reciprocal Rank Fusion (k=60). Pure HNSW was rejected because it misses exact keyword matches (crate names, error codes); pure BM25 was rejected because it ignores semantic proximity. Migration 012 adds the required HNSW and full-text indexes and fixes a pre-existing SCHEMAFULL schema gap that caused silent deserialization failures.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-knowledge-graph/src/learning.rs",
|
||||
"crates/vapora-knowledge-graph/src/persistence.rs",
|
||||
"migrations/012_kg_hybrid_search.surql",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "swarm-load-balanced-assignment",
|
||||
name = "Swarm Load-Balanced Task Assignment",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "Swarm assigns tasks via capability-based filtering then load-balanced scoring: success_rate / (1 + load). NATS JetStream is optional -- swarm degrades gracefully if unavailable.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-swarm/src/coordinator.rs",
|
||||
"crates/vapora-swarm/src/metrics.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "provider-abstraction",
|
||||
name = "LLMClient Trait for Provider Abstraction",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "All LLM calls go through the LLMClient trait. No crate may call provider APIs directly -- only via the router. This is the enforcement point for cost tracking, fallback chains, and provider substitution.",
|
||||
artifact_paths = ["crates/vapora-llm-router/src/providers.rs"],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "ontoref-protocol-adoption",
|
||||
name = "Ontoref Protocol Adoption",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "Vapora uses the ontoref protocol for self-description: ontology, ADRs, reflection modes, and operational state. The ontoref CLI is the single entry point for on+re operations. Supersedes the previous stratumiops-based reflection mode imports.",
|
||||
artifact_paths = [
|
||||
".ontology/",
|
||||
"adrs/",
|
||||
"reflection/",
|
||||
".ontoref/config.ncl",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "workflow-orchestration",
|
||||
name = "Multi-Stage Workflow Orchestration",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "Workflows are NCL-defined DAGs of stages with typed artifacts and approval gates. NATS JetStream drives stage progression. Short-lived agent contexts (95% cache token reduction) are enforced per stage. Four built-in templates: feature_development, bugfix, documentation_update, security_audit.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-workflow-engine/",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "a2a-protocol",
|
||||
name = "Agent-to-Agent Protocol",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "A2A server exposes a standardized protocol for external agents and systems to interact with vapora agents. A2A client provides the counterpart library. Both communicate via NATS JetStream and HTTP. Enables federation with external agent ecosystems.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-a2a/",
|
||||
"crates/vapora-a2a-client/",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "mcp-gateway",
|
||||
name = "MCP Protocol Gateway",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "MCP server bridges the Model Context Protocol to vapora's agent runtime. Exposes vapora capabilities as MCP tools consumable by Claude Code and other MCP-aware clients. Plugin mode enables embedding in documentation lifecycle workflows.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-mcp-server/",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "cedar-rbac",
|
||||
name = "Cedar Policy Engine for Workflow Authorization",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "CedarAuthorizer in vapora-workflow-engine loads .cedar policy files at startup and evaluates every stage execution request before dispatch. Policies are version-controlled in the repo. No ad-hoc role checks in stage execution code — all authorization decisions go through Cedar.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-workflow-engine/src/auth.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "security-api-boundary",
|
||||
name = "Security Scanning at API Boundary",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "SSRF protection (ssrf.rs) validates all outbound URLs against private/reserved address ranges before dispatch. Prompt injection scanning (prompt_injection.rs) rejects known injection payloads at the API boundary before user input reaches any LLM provider. Security rejections return 400 Bad Request. Channel webhook URLs with SSRF-risky targets are dropped at startup, not registered with a warning.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-backend/src/security/ssrf.rs",
|
||||
"crates/vapora-backend/src/security/prompt_injection.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "notification-channels",
|
||||
name = "Notification Channels with Agent-Inactive Alerts",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "ChannelRegistry manages webhook-based notification channels (Slack, generic HTTP) resolved at startup. SSRF validation drops unsafe URLs before registration. NotificationService triggers agent-inactive alerts when no heartbeat is received within the threshold window. Channels are first-class entities stored in SurrealDB; notification config is version-controlled as NCL contracts in nickel/channels/.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-backend/src/api/channels.rs",
|
||||
"nickel/channels/contracts.ncl",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "vapora-capabilities",
|
||||
name = "Capability Packages — Zero-Config Agent Bundles",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "vapora-capabilities crate provides CapabilityRegistry, CapabilityLoader, and built-in Capability implementations (CodeReviewer, DocGenerator, PRMonitor). AgentDefinition was relocated to vapora-shared to break the circular dependency that would exist if vapora-capabilities imported vapora-agents. AgentCoordinator gained in-process executor dispatch via DashMap<String, Sender<TaskAssignment>> — the shard lock is released before .await by cloning the Sender out of the map.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-capabilities/src/lib.rs",
|
||||
"crates/vapora-shared/src/agent_definition.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "agent-hot-reload-stable-identity",
|
||||
name = "Agent Hot-Reload — Stable Role Identity",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "AgentMetadata.stable_id is set to role.clone() at construction — before the role field is moved. All learning_profile keys and KG execution records use stable_id_or_role() instead of the ephemeral UUID id. drain_role + re-spawn sequence implements zero-downtime config reload: learning profiles survive the drain because the DashMap key (role) is unchanged. SIGHUP and POST /reload both invoke reload_agents. BudgetManager and LLMRouter are not reloaded by SIGHUP; process restart is required for those.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-agents/src/registry.rs",
|
||||
"crates/vapora-agents/src/coordinator.rs",
|
||||
"crates/vapora-agents/src/server.rs",
|
||||
],
|
||||
},
|
||||
|
||||
d.make_node {
|
||||
id = "merkle-audit-trail",
|
||||
name = "Merkle Hash-Chain Audit Trail",
|
||||
pole = 'Yin,
|
||||
level = 'Practice,
|
||||
description = "audit/mod.rs replaces append-only log with a Merkle hash-chain: block_hash = SHA-256(prev_hash|seq|entry_id|timestamp_rfc3339|workflow_id|event_type|actor|details_json). Genesis entry uses GENESIS_HASH (64 zeros). write_lock: Arc<Mutex<()>> serializes append calls within the process. verify_integrity(workflow_id) recomputes every block hash and returns IntegrityReport{valid, total_entries, first_tampered_seq}. Modifying any covered field in entry N invalidates N and every subsequent entry.",
|
||||
artifact_paths = [
|
||||
"crates/vapora-backend/src/audit/mod.rs",
|
||||
"migrations/013_audit_merkle.surql",
|
||||
],
|
||||
},
|
||||
|
||||
],
|
||||
|
||||
edges = [
|
||||
|
||||
d.make_edge {
|
||||
from = "cost-aware-routing",
|
||||
to = "cost-vs-capability",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "Budget enforcement with three-tier degradation is the concrete resolution of the cost/capability tension.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "learning-based-selection",
|
||||
to = "learning-convergence-vs-freshness",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "7-day recency window with 3x bias resolves convergence vs freshness by outweighting stale data without discarding it.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "cost-aware-routing",
|
||||
to = "agent-autonomy-vs-budget-control",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "Three-tier enforcement (normal/near-threshold/exceeded) allows agent autonomy until budget boundaries are approached.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "knowledge-graph-execution-history",
|
||||
to = "learning-based-selection",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "Execution history recorded in the KG feeds the learning profile scorer for agent selection.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "message-based-coordination",
|
||||
to = "async-first",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "NATS JetStream is the async coordination primitive -- agents never block on each other.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "provider-abstraction",
|
||||
to = "cost-aware-routing",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "LLMClient trait is the enforcement point for routing rules and budget checks.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "surreal-persistence",
|
||||
to = "multi-tenant-isolation",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "SurrealDB scopes are the enforcement mechanism for tenant isolation.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "workflow-orchestration",
|
||||
to = "message-based-coordination",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "Workflow stage progression is driven by NATS JetStream events — no direct inter-stage calls.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "a2a-protocol",
|
||||
to = "message-based-coordination",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 2,
|
||||
note = "A2A server uses NATS JetStream for async agent communication across the federation boundary.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "mcp-gateway",
|
||||
to = "provider-abstraction",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 2,
|
||||
note = "MCP gateway routes tool calls through the LLMClient trait, keeping provider substitution possible.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "cedar-rbac",
|
||||
to = "multi-tenant-isolation",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 2,
|
||||
note = "Cedar policies enforce per-principal authorization constraints on workflow stage execution, complementing SurrealDB scope-based tenant isolation.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "security-api-boundary",
|
||||
to = "multi-tenant-isolation",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 2,
|
||||
note = "SSRF and prompt injection scanning protect the API surface that tenant data flows through, preventing exfiltration via LLM prompts or misconfigured outbound channels.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "notification-channels",
|
||||
to = "security-api-boundary",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 2,
|
||||
note = "Channel webhook URL validation is the first consumer of ssrf.rs; the SSRF validator was introduced to fix the warn-and-register channel bug.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "agent-hot-reload-stable-identity",
|
||||
to = "learning-based-selection",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 3,
|
||||
note = "Stable identity (role as stable_id) is the prerequisite for learning profiles surviving restarts — without it, every reload orphaned all accumulated expertise.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "vapora-capabilities",
|
||||
to = "learning-based-selection",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 2,
|
||||
note = "Capability bundles provide the agent definitions that the learning scorer operates on — built-in capabilities bootstrap the learning system without manual configuration.",
|
||||
},
|
||||
|
||||
d.make_edge {
|
||||
from = "merkle-audit-trail",
|
||||
to = "multi-tenant-isolation",
|
||||
kind = 'ManifestsIn,
|
||||
weight = 2,
|
||||
note = "Tamper-evident audit log ensures that per-tenant audit records cannot be silently modified post-hoc, satisfying compliance requirements (SOC 2, ISO 27001) for multi-tenant deployments.",
|
||||
},
|
||||
|
||||
],
|
||||
}
|
||||
64
.ontology/gate.ncl
Normal file
64
.ontology/gate.ncl
Normal file
|
|
@@ -0,0 +1,64 @@
|
|||
let d = import "defaults/gate.ncl" in
|
||||
|
||||
{
|
||||
membranes = [
|
||||
|
||||
d.make_membrane {
|
||||
id = "budget-enforcement-gate",
|
||||
name = "Budget Enforcement Gate",
|
||||
description = "Controls whether an LLM request is allowed to proceed. Enforced by BudgetEnforcer middleware in every LLM call path.",
|
||||
permeability = 'Medium,
|
||||
accepts = ['EcosystemRelevance],
|
||||
protects = ["per-role monthly budget limits", "fallback chain integrity"],
|
||||
opening_condition = {
|
||||
max_tension_dimensions = 1,
|
||||
pending_transitions = 0,
|
||||
core_stable = true,
|
||||
description = "Request is allowed when current role spend is below the near-threshold limit.",
|
||||
},
|
||||
closing_condition = "Role budget reaches near-threshold (80%) — gate moves to degraded mode, requests route to cheaper provider. At exceeded (100%), gate closes and requests are rejected.",
|
||||
max_duration = 'Indefinite,
|
||||
protocol = 'Absorb,
|
||||
active = true,
|
||||
},
|
||||
|
||||
d.make_membrane {
|
||||
id = "agent-capability-gate",
|
||||
name = "Agent Capability Gate",
|
||||
description = "Controls whether a task signal type is accepted by the agent dispatcher. Checked before dispatch in create_agent_task reflection mode.",
|
||||
permeability = 'Medium,
|
||||
accepts = ['EcosystemRelevance, 'DepthDemonstrated],
|
||||
protects = ["agent workload boundaries", "capability contract per role"],
|
||||
opening_condition = {
|
||||
max_tension_dimensions = 2,
|
||||
pending_transitions = 1,
|
||||
core_stable = true,
|
||||
description = "Signal type is in the agent's declared capability set and the agent is not at max load.",
|
||||
},
|
||||
closing_condition = "Agent load exceeds threshold or signal type not in capability set.",
|
||||
max_duration = 'Indefinite,
|
||||
protocol = 'Challenge,
|
||||
active = true,
|
||||
},
|
||||
|
||||
d.make_membrane {
|
||||
id = "external-release-gate",
|
||||
name = "External Release Gate",
|
||||
description = "Controls when vapora is ready for external consumers beyond the current ecosystem. Not active until ecosystem-level stability is reached.",
|
||||
permeability = 'Low,
|
||||
accepts = ['EcosystemRelevance, 'OpportunityAlignment, 'DepthDemonstrated],
|
||||
protects = ["API stability contract", "A2A protocol versioning"],
|
||||
opening_condition = {
|
||||
max_tension_dimensions = 2,
|
||||
pending_transitions = 2,
|
||||
core_stable = true,
|
||||
description = "All integration tests passing in CI with live services; at least 2 external consumers onboarded; A2A protocol versioned.",
|
||||
},
|
||||
closing_condition = "Axiom-level breaking change introduced without ADR; integration tests regress.",
|
||||
max_duration = 'Indefinite,
|
||||
protocol = 'Observe,
|
||||
active = false,
|
||||
},
|
||||
|
||||
],
|
||||
}
|
||||
391
.ontology/manifest.ncl
Normal file
391
.ontology/manifest.ncl
Normal file
|
|
@@ -0,0 +1,391 @@
|
|||
let m = import "defaults/manifest.ncl" in
|
||||
|
||||
m.make_manifest {
|
||||
project = "vapora",
|
||||
repo_kind = 'Service,
|
||||
description = "18-crate Rust workspace delivering an intelligent development orchestration platform: specialized AI agents with learning-based selection, cost-aware multi-provider LLM routing, temporal knowledge graph, multi-stage workflow orchestration, Agent-to-Agent protocol, MCP gateway, and a Leptos WASM frontend. Self-hosted on Kubernetes.",
|
||||
|
||||
capabilities = [
|
||||
|
||||
m.make_capability {
|
||||
id = "agent-orchestration",
|
||||
name = "Learning-Based Agent Orchestration",
|
||||
summary = "Assigns tasks to agents using expertise profiles built from execution history with recency bias.",
|
||||
rationale = "Static round-robin assignment wastes budget on suboptimal providers. Learning profiles route tasks to the agent with the highest demonstrated success rate for that task type, while confidence weighting prevents overfitting on small samples.",
|
||||
how = "AgentRegistry maintains per-agent state. AgentCoordinator enqueues jobs via NATS JetStream. Swarm scores candidates: 0.3*load + 0.5*expertise + 0.2*confidence. LearningProfile tracks 7-day windowed execution history with 3x recency bias.",
|
||||
artifacts = [
|
||||
"crates/vapora-agents/src/learning_profile.rs",
|
||||
"crates/vapora-agents/src/scoring.rs",
|
||||
"crates/vapora-agents/src/coordinator.rs",
|
||||
"crates/vapora-swarm/src/coordinator.rs",
|
||||
],
|
||||
nodes = ["learning-based-selection", "swarm-load-balanced-assignment", "knowledge-graph-execution-history"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "llm-routing",
|
||||
name = "Cost-Aware Multi-Provider LLM Routing",
|
||||
summary = "Routes LLM calls to Claude/OpenAI/Gemini/Ollama with per-role budget enforcement and automatic fallback.",
|
||||
rationale = "Direct provider calls bypass cost tracking and make provider substitution impossible. The LLMClient trait is the single enforcement point for routing rules, budget limits, and fallback chains. Three-tier enforcement (normal/near-threshold/exceeded) allows agent autonomy until budget boundaries are approached.",
|
||||
how = "LLMRouter selects provider via routing rules + dynamic scoring. CostTracker maintains per-provider token counts. BudgetEnforcer applies monthly/weekly limits per role. Fallback chains defined in llm-router.toml. All calls go through the LLMClient trait — no crate calls provider APIs directly.",
|
||||
artifacts = [
|
||||
"crates/vapora-llm-router/src/router.rs",
|
||||
"crates/vapora-llm-router/src/budget.rs",
|
||||
"crates/vapora-llm-router/src/cost_tracker.rs",
|
||||
"crates/vapora-llm-router/src/providers.rs",
|
||||
],
|
||||
nodes = ["cost-aware-routing", "provider-abstraction"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "knowledge-graph",
|
||||
name = "Temporal Knowledge Graph",
|
||||
summary = "Records agent execution history as temporal nodes; computes learning curves and recommends solutions via similarity search.",
|
||||
rationale = "Agents need institutional memory across executions to improve selection accuracy and reuse successful patterns. A temporal graph with causal relationships enables learning curves and similarity-based solution retrieval, which flat logs cannot provide.",
|
||||
how = "Execution results are persisted as SurrealDB graph nodes. LearningCurve computed from daily-windowed aggregations. SimilaritySearch uses cosine similarity over task embeddings. All KG queries go through the persistence layer — no direct SurrealDB calls from scoring code.",
|
||||
artifacts = [
|
||||
"crates/vapora-knowledge-graph/src/learning.rs",
|
||||
"crates/vapora-knowledge-graph/src/persistence.rs",
|
||||
],
|
||||
nodes = ["knowledge-graph-execution-history"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "workflow-engine",
|
||||
name = "Multi-Stage Workflow Orchestration",
|
||||
summary = "Executes multi-stage agent pipelines with typed artifact passing, approval gates, and Kogral context enrichment.",
|
||||
rationale = "Ad-hoc agent invocations have no visibility into pipeline state, no artifact provenance, and no way to pause for human approval. Typed workflows make each stage's inputs and outputs explicit, enable audit trails, and enforce approval gates before irreversible stages (e.g. deployment).",
|
||||
how = "WorkflowEngine executes stage DAGs. NATS JetStream drives stage progression. Artifacts (ADR, Code, TestResults, Review, Documentation) are typed and passed between stages. KogralContextProvider enriches agent prompts from .kogral/ filesystem at each stage.",
|
||||
artifacts = [
|
||||
"crates/vapora-workflow-engine/",
|
||||
],
|
||||
adrs = [],
|
||||
nodes = ["workflow-orchestration"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "project-management-api",
|
||||
name = "Project and Task Management REST API",
|
||||
summary = "40+ Axum endpoints for multi-tenant project/task management with real-time WebSocket updates and audit logging.",
|
||||
rationale = "Teams need a unified surface for project tracking, agent job dispatch, and deployment visibility. Multi-tenancy via SurrealDB scopes ensures workspace isolation without application-layer filtering.",
|
||||
how = "Axum router with ~40 handlers across projects, tasks, agents, workflows, swarm, and analytics. SurrealDB services layer handles all persistence. Cedar RBAC enforces fine-grained access. AuditTrail records all state mutations. Prometheus metrics at /metrics.",
|
||||
artifacts = [
|
||||
"crates/vapora-backend/src/api/",
|
||||
"crates/vapora-backend/src/services/",
|
||||
"crates/vapora-backend/src/audit.rs",
|
||||
],
|
||||
nodes = ["multi-tenant-isolation", "surreal-persistence", "async-first"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "a2a-protocol",
|
||||
name = "Agent-to-Agent Protocol",
|
||||
summary = "Standardized A2A server + client for federation with external agent ecosystems.",
|
||||
rationale = "Vapora agents need to interoperate with Claude Code, external CI agents, and other orchestrators without tight coupling. The A2A protocol provides a stable contract that decouples vapora's internal agent model from external consumers.",
|
||||
how = "vapora-a2a exposes an A2A server over HTTP + NATS. vapora-a2a-client provides the Rust client library. Messages are typed via the A2A schema. 7 E2E integration tests (require live SurrealDB + NATS; marked #[ignore] in CI without services).",
|
||||
artifacts = [
|
||||
"crates/vapora-a2a/src/main.rs",
|
||||
"crates/vapora-a2a-client/",
|
||||
],
|
||||
nodes = ["a2a-protocol", "message-based-coordination"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "mcp-gateway",
|
||||
name = "MCP Protocol Gateway",
|
||||
summary = "MCP server bridging the Model Context Protocol to vapora agent runtime, consumable by Claude Code and other MCP clients.",
|
||||
rationale = "Claude Code and other AI tools speak MCP. Without a gateway, vapora capabilities are invisible to MCP-aware clients. The gateway exposes vapora as a first-class MCP tool provider without modifying the core agent model.",
|
||||
how = "vapora-mcp-server implements the MCP protocol server, translating MCP tool calls into vapora AgentCoordinator invocations. Plugin mode enables embedding inside vapora-doc-lifecycle for documentation workflows.",
|
||||
artifacts = ["crates/vapora-mcp-server/"],
|
||||
nodes = ["mcp-gateway"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "ontoref-protocol",
|
||||
name = "Ontoref Protocol Adoption",
|
||||
summary = "Vapora is a fully adopted ontoref consumer: typed ontology, ADR lifecycle, reflection modes, API catalog surface.",
|
||||
rationale = "Self-description via ontoref provides machine-readable architectural context that agents, CI systems, and contributors can query without reading code. The typed ADR lifecycle enforces architectural decision provenance.",
|
||||
how = "5 .ontology/ files (core, state, gate, manifest, connections) with typed contracts. NCL ADRs with typed constraints and constraint checks. API catalog surface in crates/vapora-backend/src/api/catalog.rs. config_surface declared in manifest.ncl. Git hooks (post-commit, post-merge) notify ontoref daemon of NCL file changes.",
|
||||
artifacts = [".ontology/", "adrs/", "crates/vapora-backend/src/api/catalog.rs"],
|
||||
nodes = ["ontoref-protocol-adoption"],
|
||||
},
|
||||
|
||||
m.make_capability {
|
||||
id = "frontend-ui",
|
||||
name = "Leptos WASM Frontend",
|
||||
summary = "Reactive Kanban board and agent management UI with glassmorphism aesthetics, built entirely in Rust/WASM.",
|
||||
rationale = "Full-stack Rust eliminates the JS/Rust boundary and its associated serialization overhead, security surface, and type mismatch bugs. CSR-only Leptos is chosen over SSR to avoid Leptos hydration complexity at the cost of initial load performance.",
|
||||
how = "Leptos reactive components in CSR mode compiled to WASM via trunk. UnoCSS for atomic styling. Communicates with vapora-backend via Axum REST + WebSocket for real-time updates.",
|
||||
artifacts = [
|
||||
"crates/vapora-frontend/src/pages/",
|
||||
"crates/vapora-frontend/src/components/",
|
||||
],
|
||||
nodes = ["async-first", "wasm-isolation-vs-ssr"],
|
||||
},
|
||||
|
||||
],
|
||||
|
||||
requirements = [
|
||||
|
||||
m.make_requirement {
|
||||
id = "rust",
|
||||
name = "Rust toolchain",
|
||||
env = 'Development,
|
||||
kind = 'Tool,
|
||||
version = "1.75+",
|
||||
required = true,
|
||||
impact = "Cannot build any crate. WASM target requires rustup target add wasm32-unknown-unknown.",
|
||||
provision = "rustup install stable && rustup target add wasm32-unknown-unknown",
|
||||
},
|
||||
|
||||
m.make_requirement {
|
||||
id = "surrealdb",
|
||||
name = "SurrealDB",
|
||||
env = 'Both,
|
||||
kind = 'Service,
|
||||
version = "2.3+",
|
||||
required = true,
|
||||
impact = "All backend services fail on startup. All persistence, multi-tenancy, and knowledge graph queries unavailable.",
|
||||
provision = "docker run -d --name surrealdb -p 8000:8000 surrealdb/surrealdb:latest start --bind 0.0.0.0:8000 file://data/database.db",
|
||||
},
|
||||
|
||||
m.make_requirement {
|
||||
id = "nats",
|
||||
name = "NATS JetStream",
|
||||
env = 'Production,
|
||||
kind = 'Service,
|
||||
version = "2.x",
|
||||
required = false,
|
||||
impact = "Agent coordination degrades to polling. Workflow stage progression unavailable. A2A protocol federation unavailable. Swarm uses graceful fallback.",
|
||||
provision = "docker run -d --name nats -p 4222:4222 nats:latest -js",
|
||||
},
|
||||
|
||||
m.make_requirement {
|
||||
id = "trunk",
|
||||
name = "trunk (WASM bundler)",
|
||||
env = 'Development,
|
||||
kind = 'Tool,
|
||||
version = "",
|
||||
required = true,
|
||||
impact = "Cannot build or serve the Leptos WASM frontend.",
|
||||
provision = "cargo install trunk",
|
||||
},
|
||||
|
||||
m.make_requirement {
|
||||
id = "nickel",
|
||||
name = "Nickel",
|
||||
env = 'Both,
|
||||
kind = 'Tool,
|
||||
version = "",
|
||||
required = true,
|
||||
impact = "Ontoref ontology export, ADR validation, and config surface introspection unavailable.",
|
||||
provision = "cargo install nickel-lang-cli or https://nickel-lang.org/user-manual/installation",
|
||||
},
|
||||
|
||||
m.make_requirement {
|
||||
id = "anthropic-api-key",
|
||||
name = "ANTHROPIC_API_KEY",
|
||||
env = 'Both,
|
||||
kind = 'EnvVar,
|
||||
version = "",
|
||||
required = false,
|
||||
impact = "Claude provider unavailable. LLM router falls back to other configured providers.",
|
||||
provision = "Set ANTHROPIC_API_KEY=sk-ant-... in shell or K8s secret.",
|
||||
},
|
||||
|
||||
m.make_requirement {
|
||||
id = "openai-api-key",
|
||||
name = "OPENAI_API_KEY",
|
||||
env = 'Both,
|
||||
kind = 'EnvVar,
|
||||
version = "",
|
||||
required = false,
|
||||
impact = "OpenAI provider unavailable. LLM router falls back to other configured providers.",
|
||||
provision = "Set OPENAI_API_KEY=sk-... in shell or K8s secret.",
|
||||
},
|
||||
|
||||
],
|
||||
|
||||
critical_deps = [
|
||||
|
||||
m.make_critical_dep {
|
||||
id = "surrealdb-crate",
|
||||
name = "surrealdb",
|
||||
ref = "crates.io: surrealdb 2.3",
|
||||
used_for = "All persistence: projects, tasks, agents, knowledge graph, audit trail, multi-tenant scopes.",
|
||||
failure_impact = "Complete data loss of runtime state. Multi-tenancy enforcement collapses. Knowledge graph and learning curves unavailable. No persistent state survives restart.",
|
||||
mitigation = "SurrealDB 2.x has stable API. No feature-flag fallback — persistence is non-negotiable. Pin major version in Cargo.toml.",
|
||||
},
|
||||
|
||||
m.make_critical_dep {
|
||||
id = "async-nats",
|
||||
name = "async-nats",
|
||||
ref = "crates.io: async-nats 0.45",
|
||||
used_for = "Agent coordination (job dispatch, heartbeats), workflow stage progression, A2A protocol federation.",
|
||||
failure_impact = "Agent coordination degrades to polling. Workflow orchestration unavailable. A2A federation unavailable. Swarm falls back to direct assignment.",
|
||||
mitigation = "NATS connection is optional for most crates — swarm has graceful fallback. Workflow engine and A2A are blocked without NATS.",
|
||||
},
|
||||
|
||||
m.make_critical_dep {
|
||||
id = "axum",
|
||||
name = "axum",
|
||||
ref = "crates.io: axum 0.8.6",
|
||||
used_for = "REST API (40+ endpoints), WebSocket real-time updates, Prometheus metrics endpoint.",
|
||||
failure_impact = "Entire HTTP surface unavailable. Frontend cannot communicate with backend. No agent job submission, no project management, no monitoring.",
|
||||
mitigation = "Axum 0.8 has stable API surface. No fallback HTTP framework. Router composition is the only external surface — internal services are framework-agnostic.",
|
||||
},
|
||||
|
||||
m.make_critical_dep {
|
||||
id = "rig-core",
|
||||
name = "rig-core",
|
||||
ref = "crates.io: rig-core 0.15",
|
||||
used_for = "LLM agent framework: tool calling, streaming, provider abstractions for Claude/OpenAI/Gemini.",
|
||||
failure_impact = "All LLM provider integrations fail. Agent execution unavailable. LLM router cannot dispatch to any provider.",
|
||||
mitigation = "LLMClient trait abstracts rig-core. In principle substitutable but requires reimplementing provider adapters. Monitor rig-core breaking changes closely.",
|
||||
},
|
||||
|
||||
m.make_critical_dep {
|
||||
id = "leptos",
|
||||
name = "leptos",
|
||||
ref = "crates.io: leptos 0.8.12",
|
||||
used_for = "WASM frontend: Kanban board, agent management UI, real-time updates.",
|
||||
failure_impact = "Frontend build fails entirely. No web UI available. Users must fall back to CLI or raw API.",
|
||||
mitigation = "CSR-only mode minimizes Leptos surface (no hydration, no SSR). Frontend is a separate crate — backend continues operating without it.",
|
||||
},
|
||||
|
||||
],
|
||||
|
||||
config_surface = m.make_config_surface {
|
||||
config_root = "config/",
|
||||
entry_point = "config.ncl",
|
||||
kind = 'NclMerge,
|
||||
contracts_path = "nickel",
|
||||
overrides_dir = "config/",
|
||||
sections = [
|
||||
m.make_config_section {
|
||||
id = "server",
|
||||
file = "server.ncl",
|
||||
contract = "vapora/contracts.ncl",
|
||||
description = "Core server settings — host, port, TLS, database URL, NATS connection, JWT auth, logging, metrics.",
|
||||
rationale = "All platform services share these connection parameters. Centralizing them prevents per-service drift and ensures a single source of truth for database and messaging topology.",
|
||||
consumers = [
|
||||
m.make_config_consumer { id = "vapora-backend", kind = 'RustStruct, ref = "vapora_backend::config::Config", fields = ["server", "database", "nats", "auth", "logging", "metrics"] },
|
||||
],
|
||||
},
|
||||
m.make_config_section {
|
||||
id = "llm-router",
|
||||
file = "llm-router.ncl",
|
||||
contract = "llm-router/contracts.ncl",
|
||||
description = "LLM provider routing — default provider selection, cost tracking, fallback chains, per-provider API keys and model configs.",
|
||||
rationale = "Routing rules and provider credentials are operator concerns that must be externalized from the binary. NCL validation catches invalid provider combinations and budget inconsistencies before runtime.",
|
||||
consumers = [
|
||||
m.make_config_consumer { id = "vapora-llm-router", kind = 'RustStruct, ref = "vapora_llm_router::config::RouterConfig", fields = ["routing", "providers"] },
|
||||
],
|
||||
},
|
||||
m.make_config_section {
|
||||
id = "agents",
|
||||
file = "agents.ncl",
|
||||
contract = "agents/contracts.ncl",
|
||||
description = "Agent registry and definitions — per-role LLM model selection, capabilities, parallelism, priority.",
|
||||
rationale = "Agent definitions are operational configuration, not code. Externalizing them allows role reconfiguration (e.g. swapping claude-opus for claude-sonnet on a role) without recompilation.",
|
||||
consumers = [
|
||||
m.make_config_consumer { id = "vapora-agents", kind = 'RustStruct, ref = "vapora_agents::config::AgentsConfig", fields = ["registry", "agents"] },
|
||||
],
|
||||
},
|
||||
m.make_config_section {
|
||||
id = "budgets",
|
||||
file = "budgets.ncl",
|
||||
contract = "budgets/contracts.ncl",
|
||||
description = "Per-role LLM spend limits — monthly/weekly cost caps, alert thresholds, fallback provider on breach.",
|
||||
rationale = "Budget enforcement without configuration externalisation would require code changes for every limit adjustment. NCL contracts validate that alert_threshold is in [0,1] and fallback_provider is a known provider.",
|
||||
consumers = [
|
||||
m.make_config_consumer { id = "vapora-llm-router", kind = 'RustStruct, ref = "vapora_llm_router::budget::BudgetConfig", fields = ["budgets"] },
|
||||
],
|
||||
},
|
||||
m.make_config_section {
|
||||
id = "workflows",
|
||||
file = "workflows.ncl",
|
||||
contract = "workflows/contracts.ncl",
|
||||
description = "Workflow engine config and workflow definitions — max parallelism, timeout, approval gates, stage DAGs.",
|
||||
rationale = "Workflow definitions are the primary extension point for adding new orchestration patterns. Keeping them in NCL allows adding workflows without touching Rust and validates stage dependencies before execution.",
|
||||
consumers = [
|
||||
m.make_config_consumer { id = "vapora-workflow-engine", kind = 'RustStruct, ref = "vapora_workflow_engine::config::WorkflowConfig", fields = ["engine", "workflows"] },
|
||||
],
|
||||
},
|
||||
m.make_config_section {
|
||||
id = "channels",
|
||||
file = "channels.ncl",
|
||||
contract = "channels/contracts.ncl",
|
||||
description = "Outbound notification channels — Slack, Telegram, and other destinations with per-event routing.",
|
||||
rationale = "Channel credentials and event-to-channel mappings change with team structure. NCL contracts validate that referenced channel names resolve and prevent orphaned notification routes.",
|
||||
consumers = [
|
||||
m.make_config_consumer { id = "vapora-backend", kind = 'RustStruct, ref = "vapora_backend::config::Config", fields = ["channels", "notifications"] },
|
||||
m.make_config_consumer { id = "vapora-channels", kind = 'RustStruct, ref = "vapora_channels::config::ChannelRegistryConfig", fields = ["channels"] },
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
consumption_modes = [
|
||||
m.make_consumption_mode {
|
||||
consumer = 'Developer,
|
||||
needs = ['OntologyExport],
|
||||
audit_level = 'Standard,
|
||||
description = "Clones repo, runs cargo build/test. Uses ontoref CLI to query ontology, run reflection modes, and track ADRs.",
|
||||
},
|
||||
m.make_consumption_mode {
|
||||
consumer = 'Agent,
|
||||
needs = ['OntologyExport, 'JsonSchema],
|
||||
audit_level = 'Quick,
|
||||
description = "Reads .ontology/core.ncl via nickel export. Checks axioms and gates before acting. Uses reflection modes for task dispatch and deployment.",
|
||||
},
|
||||
],
|
||||
|
||||
layers = [
|
||||
m.make_layer {
|
||||
id = "implementation",
|
||||
paths = [".ontology/", "adrs/", "reflection/"],
|
||||
committed = true,
|
||||
description = "Vapora self-description: ontology nodes/edges, ADRs, and reflection modes.",
|
||||
},
|
||||
m.make_layer {
|
||||
id = "crates",
|
||||
paths = ["crates/", "Cargo.toml", "Cargo.lock"],
|
||||
committed = true,
|
||||
description = "17-crate Rust workspace: backend, agents, llm-router, swarm, knowledge-graph, frontend, and supporting crates.",
|
||||
},
|
||||
m.make_layer {
|
||||
id = "infrastructure",
|
||||
paths = ["kubernetes/", "provisioning/", "migrations/", "docker-compose.yml"],
|
||||
committed = true,
|
||||
description = "Deployment manifests, KCL provisioning, SurrealDB migrations, and Docker Compose.",
|
||||
},
|
||||
m.make_layer {
|
||||
id = "ontoref-framework",
|
||||
paths = ["/Users/Akasha/Development/ontoref/.ontology/"],
|
||||
committed = false,
|
||||
description = "Ontoref framework ontology — protocol axioms, practices, and patterns visible in ontoref-browse mode.",
|
||||
},
|
||||
m.make_layer {
|
||||
id = "process",
|
||||
paths = [".coder/"],
|
||||
committed = false,
|
||||
description = "Session artifacts: plans, investigations, summaries. Process memory for actors.",
|
||||
},
|
||||
],
|
||||
|
||||
operational_modes = [
|
||||
m.make_op_mode {
|
||||
id = "dev",
|
||||
description = "Standard development mode — vapora implementation layer only.",
|
||||
visible_layers = ["implementation", "crates", "infrastructure"],
|
||||
audit_level = 'Standard,
|
||||
},
|
||||
m.make_op_mode {
|
||||
id = "ontoref-browse",
|
||||
description = "Browse ontoref framework capabilities and patterns available to vapora.",
|
||||
visible_layers = ["implementation", "ontoref-framework"],
|
||||
audit_level = 'Quick,
|
||||
},
|
||||
],
|
||||
}
|
||||
87
.ontology/state.ncl
Normal file
87
.ontology/state.ncl
Normal file
|
|
@ -0,0 +1,87 @@
|
|||
let d = import "defaults/state.ncl" in
|
||||
|
||||
{
|
||||
dimensions = [
|
||||
|
||||
d.make_dimension {
|
||||
id = "production-readiness",
|
||||
name = "Production Readiness",
|
||||
description = "Overall production readiness of the vapora platform across all 23 crates.",
|
||||
current_state = "production-ready",
|
||||
desired_state = "stable-ecosystem",
|
||||
horizon = 'Months,
|
||||
states = [],
|
||||
transitions = [
|
||||
{
|
||||
from = "production-ready",
|
||||
to = "stable-ecosystem",
|
||||
condition = "At least 2 external consumer projects onboarded, ecosystem NATS topology active, kogral knowledge graph integrated.",
|
||||
catalyst = "Agent hot-reload + stable identity complete (learning loss on restart eliminated); capability packages provide zero-config onboarding; A2A protocol ready for external consumers.",
|
||||
blocker = "No external consumers yet. A2A integration tests still require SurrealDB + NATS in CI (#[ignore]). BudgetManager/LLMRouter not hot-reloadable (process restart required for config changes to those subsystems).",
|
||||
horizon = 'Months,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
d.make_dimension {
|
||||
id = "test-coverage",
|
||||
name = "Test Coverage",
|
||||
description = "Workspace test coverage and quality across 17 crates.",
|
||||
current_state = "full-pass",
|
||||
desired_state = "integration-verified",
|
||||
horizon = 'Months,
|
||||
states = [],
|
||||
transitions = [
|
||||
{
|
||||
from = "full-pass",
|
||||
to = "integration-verified",
|
||||
condition = "Integration tests for vapora-a2a passing against live SurrealDB + NATS (currently marked #[ignore]).",
|
||||
catalyst = "CI environment with SurrealDB and NATS services configured.",
|
||||
blocker = "External service dependencies not available in current CI.",
|
||||
horizon = 'Months,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
d.make_dimension {
|
||||
id = "frontend-maturity",
|
||||
name = "Frontend Maturity",
|
||||
description = "Maturity of the Leptos WASM frontend (Kanban board, agent management UI).",
|
||||
current_state = "functional",
|
||||
desired_state = "polished",
|
||||
horizon = 'Months,
|
||||
states = [],
|
||||
transitions = [
|
||||
{
|
||||
from = "functional",
|
||||
to = "polished",
|
||||
condition = "Real-time WebSocket updates fully integrated in UI, agent status live-updating, cost dashboard rendered.",
|
||||
catalyst = "Backend WebSocket endpoint stable; UnoCSS build pipeline finalized.",
|
||||
blocker = "Leptos CSR-only restriction limits SSR-based optimizations.",
|
||||
horizon = 'Months,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
d.make_dimension {
|
||||
id = "ontoref-integration",
|
||||
name = "Ontoref Integration Depth",
|
||||
description = "How deeply vapora is integrated with the ontoref protocol.",
|
||||
current_state = "onboarded",
|
||||
desired_state = "drift-monitored",
|
||||
horizon = 'Months,
|
||||
states = [],
|
||||
transitions = [
|
||||
{
|
||||
from = "onboarded",
|
||||
to = "drift-monitored",
|
||||
condition = "Ontoref daemon running, ontology sync active, drift detection passing on CI.",
|
||||
catalyst = "vapora-ontology and vapora-reflection crates added — ontology is now machine-consumable from Rust (VaporaOntology::load, reload). api-catalog.json exists and is referenced by ontoref. Nickel contracts in nickel/ cover agents, budgets, channels, llm-router, vapora core, workflows.",
|
||||
blocker = "ONTOREF_DAEMON_URL not configured in vapora CI. ontoref sync diff --docs check not wired to pre-commit or CI. vapora-ontology and vapora-reflection have no tests yet.",
|
||||
horizon = 'Months,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
],
|
||||
}
|
||||
36
.ontoref/config.ncl
Normal file
36
.ontoref/config.ncl
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
# .ontoref/config.ncl — ontoref configuration for vapora
|
||||
# Place this file at <project_root>/.ontoref/config.ncl
|
||||
|
||||
{
|
||||
nickel_import_paths = [".", ".ontology", "ontology/schemas", "adrs", "reflection/requirements", "reflection/schemas"],
|
||||
|
||||
log = {
|
||||
level = "info",
|
||||
path = ".ontoref/logs",
|
||||
rotation = "daily",
|
||||
compress = false,
|
||||
archive = ".ontoref/logs/archive",
|
||||
max_files = 7,
|
||||
},
|
||||
|
||||
mode_run = {
|
||||
rules = [
|
||||
{ when = { mode_id = "validate-ontology" }, allow = true, reason = "validation always allowed" },
|
||||
{ when = { actor = "agent" }, allow = true, reason = "agent actor always allowed" },
|
||||
{ when = { actor = "ci" }, allow = true, reason = "ci actor always allowed" },
|
||||
],
|
||||
},
|
||||
|
||||
nats_events = {
|
||||
enabled = false,
|
||||
url = "nats://localhost:4222",
|
||||
emit = [],
|
||||
subscribe = [],
|
||||
handlers_dir = "reflection/handlers",
|
||||
},
|
||||
ui = {
|
||||
logo = "vapora.svg",
|
||||
},
|
||||
|
||||
card = import "../card.ncl",
|
||||
}
|
||||
17
.ontoref/logs
Normal file
17
.ontoref/logs
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
{"ts":"2026-03-14T13:32:30+0000","author":"unknown","actor":"agent","level":"write","action":"setup"}
|
||||
{"ts":"2026-03-14T13:34:25+0000","author":"unknown","actor":"agent","level":"write","action":"setup"}
|
||||
{"ts":"2026-03-14T13:41:25+0000","author":"unknown","actor":"agent","level":"write","action":"setup"}
|
||||
{"ts":"2026-03-14T13:44:26+0000","author":"unknown","actor":"agent","level":"write","action":"setup"}
|
||||
{"ts":"2026-03-14T13:47:57+0000","author":"unknown","actor":"agent","level":"write","action":"hooks-install /Users/Akasha/Development/vapora"}
|
||||
{"ts":"2026-03-14T13:48:40+0000","author":"unknown","actor":"agent","level":"write","action":"hooks-install /Users/Akasha/Development/vapora"}
|
||||
{"ts":"2026-03-14T14:26:56+0000","author":"unknown","actor":"agent","level":"read","action":"describe project"}
|
||||
{"ts":"2026-03-14T14:27:03+0000","author":"unknown","actor":"agent","level":"read","action":"adr list"}
|
||||
{"ts":"2026-03-14T14:27:26+0000","author":"unknown","actor":"agent","level":"read","action":"adr list"}
|
||||
{"ts":"2026-03-14T14:28:15+0000","author":"unknown","actor":"agent","level":"read","action":"adr list"}
|
||||
{"ts":"2026-03-14T14:35:29+0000","author":"unknown","actor":"agent","level":"read","action":"adr list"}
|
||||
{"ts":"2026-03-14T14:39:26+0000","author":"unknown","actor":"developer","level":"read","action":"adr list"}
|
||||
{"ts":"2026-03-14T18:29:02+0000","author":"unknown","actor":"agent","level":"read","action":"describe project"}
|
||||
{"ts":"2026-03-14T18:29:17+0000","author":"unknown","actor":"agent","level":"read","action":"describe project"}
|
||||
{"ts":"2026-03-14T18:29:31+0000","author":"unknown","actor":"agent","level":"read","action":"describe capabilities"}
|
||||
{"ts":"2026-03-14T18:29:32+0000","author":"unknown","actor":"agent","level":"read","action":"constraint"}
|
||||
{"ts":"2026-03-14T23:56:57+0000","author":"unknown","actor":"agent","level":"read","action":"adr list"}
|
||||
4
.ontoref/mode.lock
Normal file
4
.ontoref/mode.lock
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
"mode": "local",
|
||||
"since": "2026-03-14T13:34:25Z"
|
||||
}
|
||||
22
.ontoref/project.ncl
Normal file
22
.ontoref/project.ncl
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
# .ontoref/project.ncl — project self-registration for ontoref-daemon.
|
||||
# Copy to your project's .ontoref/project.ncl and fill in the fields.
|
||||
# Register: ontoref project-add /path/to/your/project
|
||||
|
||||
let s = import "ontoref-project.ncl" in
|
||||
|
||||
s.make_project {
|
||||
slug = "vapora",
|
||||
root = "/Users/Akasha/Development/vapora",
|
||||
|
||||
# Paths passed as NICKEL_IMPORT_PATH when exporting NCL files from this project.
|
||||
nickel_import_paths = [
|
||||
"/Users/Akasha/Development/vapora",
|
||||
"/Users/Akasha/Development/ontoref",
|
||||
"/Users/Akasha/Development/ontoref/ontology",
|
||||
],
|
||||
|
||||
# Auth keys. Generate hash: ontoref-daemon.bin --hash-password <password>
|
||||
keys = [
|
||||
# { role = 'admin, hash = "$argon2id$v=19$..." },
|
||||
],
|
||||
}
|
||||
|
|
@ -35,6 +35,20 @@ repos:
|
|||
pass_filenames: false
|
||||
stages: [pre-push]
|
||||
|
||||
- id: manifest-coverage
|
||||
name: Manifest capability completeness
|
||||
entry: >-
|
||||
bash -c
|
||||
'test -f ./reflection/modules/sync.nu &&
|
||||
ONTOREF_ROOT="$(pwd)" ONTOREF_PROJECT_ROOT="$(pwd)"
|
||||
nu --no-config-file -c
|
||||
"use ./reflection/modules/sync.nu *; sync manifest-check"
|
||||
|| true'
|
||||
language: system
|
||||
files: (\.ontology/|reflection/modes/|reflection/forms/).*\.ncl$
|
||||
pass_filenames: false
|
||||
stages: [pre-commit]
|
||||
|
||||
# ============================================================================
|
||||
# Nushell Hooks (optional - enable if using Nushell)
|
||||
# ============================================================================
|
||||
|
|
|
|||
78
.woodpecker.yaml
Normal file
78
.woodpecker.yaml
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
when:
|
||||
event: [push, tag, manual]
|
||||
branch: [main, "agent/**"]
|
||||
|
||||
steps:
|
||||
- name: test
|
||||
image: rust:1.85
|
||||
commands:
|
||||
- cargo test --workspace
|
||||
|
||||
- name: lint
|
||||
image: rust:1.85
|
||||
commands:
|
||||
- cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
- name: ontoref-validate
|
||||
image: ${CI_REGISTRY}/vapora-ci:latest
|
||||
environment:
|
||||
ONTOREF_ROOT: /workspace/.ontoref
|
||||
commands:
|
||||
- ontoref validate check-all --fmt json
|
||||
- ontoref sync diff --fail-on-drift
|
||||
|
||||
- name: ontoref-gate
|
||||
image: ${CI_REGISTRY}/vapora-ci:latest
|
||||
environment:
|
||||
ONTOREF_ROOT: /workspace/.ontoref
|
||||
commands:
|
||||
- |
|
||||
ontoref describe state --fmt json | nu -c '
|
||||
$in | from json | get dimensions
|
||||
| where current_state != desired_state
|
||||
| each { |d| print $"WARNING: ($d.name) not at desired state: ($d.current_state) → ($d.desired_state)" }
|
||||
'
|
||||
|
||||
- name: build-release
|
||||
image: rust:1.85
|
||||
commands:
|
||||
- cargo build --release
|
||||
when:
|
||||
branch: main
|
||||
|
||||
# RAD_RUN_ID is injected by radicle-ci-broker from the RunResponse.run_id
|
||||
# written by radicle-nats-adapter to stdout when the pipeline is triggered.
|
||||
# The adapter subscribes to radicle.ci.result.<RAD_RUN_ID> — this subject
|
||||
# must match exactly or the adapter times out waiting for a result.
|
||||
# Configure the ci-broker to inject RAD_RUN_ID via the webhook env vars.
|
||||
- name: publish-result
|
||||
image: natsio/nats-box:latest
|
||||
secrets:
|
||||
- source: nats_auth_token
|
||||
target: NATS_AUTH_TOKEN
|
||||
environment:
|
||||
NATS_URL: nats://nats:4222
|
||||
commands:
|
||||
- |
|
||||
nats pub --server "${NATS_URL}" --creds /dev/stdin \
|
||||
"radicle.ci.result.${RAD_RUN_ID}" \
|
||||
"{\"run_id\":\"${RAD_RUN_ID}\",\"status\":\"passed\",\"url\":\"${CI_BUILD_LINK}\"}" \
|
||||
<<< "token:${NATS_AUTH_TOKEN}"
|
||||
when:
|
||||
status: success
|
||||
|
||||
- name: publish-failure
|
||||
image: natsio/nats-box:latest
|
||||
secrets:
|
||||
- source: nats_auth_token
|
||||
target: NATS_AUTH_TOKEN
|
||||
environment:
|
||||
NATS_URL: nats://nats:4222
|
||||
commands:
|
||||
- |
|
||||
nats pub --server "${NATS_URL}" --creds /dev/stdin \
|
||||
"radicle.ci.result.${RAD_RUN_ID}" \
|
||||
"{\"run_id\":\"${RAD_RUN_ID}\",\"status\":\"failed\",\"url\":\"${CI_BUILD_LINK}\"}" \
|
||||
<<< "token:${NATS_AUTH_TOKEN}"
|
||||
when:
|
||||
status: failure
|
||||
203
Cargo.lock
generated
203
Cargo.lock
generated
|
|
@ -3252,9 +3252,9 @@ checksum = "9afc2bd4d5a73106dd53d10d73d3401c2f32730ba2c0b93ddb888a8983680471"
|
|||
|
||||
[[package]]
|
||||
name = "fastembed"
|
||||
version = "5.11.0"
|
||||
version = "5.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4339d45a80579ab8305616a501eacdbf18fb0f7def7fa6e4c0b75941416d5b0"
|
||||
checksum = "3688aa7e02113db24e0f83aba1edee912f36f515b52cffc9b3c550bbfc3eab87"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"hf-hub",
|
||||
|
|
@ -3872,11 +3872,24 @@ dependencies = [
|
|||
"cfg-if",
|
||||
"js-sys",
|
||||
"libc",
|
||||
"r-efi",
|
||||
"r-efi 5.3.0",
|
||||
"wasip2",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.4.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
"r-efi 6.0.0",
|
||||
"wasip2",
|
||||
"wasip3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ghash"
|
||||
version = "0.5.1"
|
||||
|
|
@ -6569,6 +6582,28 @@ dependencies = [
|
|||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ontoref-derive"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.114",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ontoref-ontology"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"inventory",
|
||||
"ontoref-derive",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"thiserror 2.0.18",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "oorandom"
|
||||
version = "11.1.5"
|
||||
|
|
@ -7829,6 +7864,12 @@ version = "5.3.0"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
|
||||
|
||||
[[package]]
|
||||
name = "r-efi"
|
||||
version = "6.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf"
|
||||
|
||||
[[package]]
|
||||
name = "radium"
|
||||
version = "0.7.0"
|
||||
|
|
@ -9053,6 +9094,9 @@ dependencies = [
|
|||
"hkdf",
|
||||
"hyper",
|
||||
"hyper-util",
|
||||
"inventory",
|
||||
"ontoref-derive",
|
||||
"ontoref-ontology",
|
||||
"openssl",
|
||||
"oqs",
|
||||
"rand 0.9.2",
|
||||
|
|
@ -11247,9 +11291,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.49.0"
|
||||
version = "1.50.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86"
|
||||
checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"libc",
|
||||
|
|
@ -11927,6 +11971,9 @@ dependencies = [
|
|||
"dialoguer",
|
||||
"dirs 6.0.0",
|
||||
"futures",
|
||||
"inventory",
|
||||
"ontoref-derive",
|
||||
"ontoref-ontology",
|
||||
"reqwest 0.13.1",
|
||||
"serde",
|
||||
"serde_json",
|
||||
|
|
@ -12273,11 +12320,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
|
|||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "1.20.0"
|
||||
version = "1.22.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f"
|
||||
checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37"
|
||||
dependencies = [
|
||||
"getrandom 0.3.4",
|
||||
"getrandom 0.4.2",
|
||||
"js-sys",
|
||||
"serde_core",
|
||||
"wasm-bindgen",
|
||||
|
|
@ -12412,10 +12459,13 @@ dependencies = [
|
|||
"futures",
|
||||
"hex",
|
||||
"http",
|
||||
"inventory",
|
||||
"jsonwebtoken 10.3.0",
|
||||
"lazy_static",
|
||||
"mockall",
|
||||
"once_cell",
|
||||
"ontoref-derive",
|
||||
"ontoref-ontology",
|
||||
"prometheus",
|
||||
"regex",
|
||||
"rustls",
|
||||
|
|
@ -12859,7 +12909,16 @@ version = "1.0.1+wasi-0.2.4"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
|
||||
dependencies = [
|
||||
"wit-bindgen",
|
||||
"wit-bindgen 0.46.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasip3"
|
||||
version = "0.4.0+wasi-0.3.0-rc-2026-01-06"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5"
|
||||
dependencies = [
|
||||
"wit-bindgen 0.51.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -12978,6 +13037,16 @@ dependencies = [
|
|||
"wasmparser 0.219.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-encoder"
|
||||
version = "0.244.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319"
|
||||
dependencies = [
|
||||
"leb128fmt",
|
||||
"wasmparser 0.244.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-encoder"
|
||||
version = "0.245.1"
|
||||
|
|
@ -12988,6 +13057,18 @@ dependencies = [
|
|||
"wasmparser 0.245.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-metadata"
|
||||
version = "0.244.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"indexmap 2.13.0",
|
||||
"wasm-encoder 0.244.0",
|
||||
"wasmparser 0.244.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-streams"
|
||||
version = "0.4.2"
|
||||
|
|
@ -13037,6 +13118,18 @@ dependencies = [
|
|||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasmparser"
|
||||
version = "0.244.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe"
|
||||
dependencies = [
|
||||
"bitflags 2.10.0",
|
||||
"hashbrown 0.15.5",
|
||||
"indexmap 2.13.0",
|
||||
"semver",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasmparser"
|
||||
version = "0.245.1"
|
||||
|
|
@ -13157,7 +13250,7 @@ dependencies = [
|
|||
"syn 2.0.114",
|
||||
"wasmtime-component-util",
|
||||
"wasmtime-wit-bindgen",
|
||||
"wit-parser",
|
||||
"wit-parser 0.219.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -13329,7 +13422,7 @@ dependencies = [
|
|||
"anyhow",
|
||||
"heck",
|
||||
"indexmap 2.13.0",
|
||||
"wit-parser",
|
||||
"wit-parser 0.219.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -14111,6 +14204,76 @@ version = "0.46.0"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen"
|
||||
version = "0.51.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5"
|
||||
dependencies = [
|
||||
"wit-bindgen-rust-macro",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-core"
|
||||
version = "0.51.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"heck",
|
||||
"wit-parser 0.244.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rust"
|
||||
version = "0.51.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"heck",
|
||||
"indexmap 2.13.0",
|
||||
"prettyplease",
|
||||
"syn 2.0.114",
|
||||
"wasm-metadata",
|
||||
"wit-bindgen-core",
|
||||
"wit-component",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen-rust-macro"
|
||||
version = "0.51.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"prettyplease",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.114",
|
||||
"wit-bindgen-core",
|
||||
"wit-bindgen-rust",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-component"
|
||||
version = "0.244.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bitflags 2.10.0",
|
||||
"indexmap 2.13.0",
|
||||
"log",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"wasm-encoder 0.244.0",
|
||||
"wasm-metadata",
|
||||
"wasmparser 0.244.0",
|
||||
"wit-parser 0.244.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-parser"
|
||||
version = "0.219.2"
|
||||
|
|
@ -14129,6 +14292,24 @@ dependencies = [
|
|||
"wasmparser 0.219.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wit-parser"
|
||||
version = "0.244.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"id-arena",
|
||||
"indexmap 2.13.0",
|
||||
"log",
|
||||
"semver",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"serde_json",
|
||||
"unicode-xid",
|
||||
"wasmparser 0.244.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "witx"
|
||||
version = "0.9.1"
|
||||
|
|
|
|||
|
|
@ -3,6 +3,8 @@
|
|||
resolver = "2"
|
||||
|
||||
members = [
|
||||
# "crates/vapora-ontology", # stratum-ontology-core not in stratumiops checkout
|
||||
# "crates/vapora-reflection", # stratum-reflection-core not in stratumiops checkout
|
||||
"crates/vapora-capabilities",
|
||||
"crates/vapora-channels",
|
||||
"crates/vapora-backend",
|
||||
|
|
@ -53,6 +55,8 @@ vapora-telemetry = { path = "crates/vapora-telemetry" }
|
|||
vapora-workflow-engine = { path = "crates/vapora-workflow-engine" }
|
||||
vapora-a2a = { path = "crates/vapora-a2a" }
|
||||
vapora-rlm = { path = "crates/vapora-rlm" }
|
||||
vapora-ontology = { path = "crates/vapora-ontology" }
|
||||
vapora-reflection = { path = "crates/vapora-reflection" }
|
||||
|
||||
# SecretumVault - Post-quantum secrets management
|
||||
secretumvault = { path = "../secretumvault", default-features = true }
|
||||
|
|
@ -61,6 +65,8 @@ secretumvault = { path = "../secretumvault", default-features = true }
|
|||
# Stratumiops — shared graph, state and embedding primitives
|
||||
stratum-graph = { path = "../stratumiops/crates/stratum-graph" }
|
||||
stratum-state = { path = "../stratumiops/crates/stratum-state", features = ["mem-store"] }
|
||||
stratum-ontology-core = { path = "../stratumiops/crates/stratum-ontology-core" }
|
||||
stratum-reflection-core = { path = "../stratumiops/crates/stratum-reflection-core" }
|
||||
stratum-embeddings = { path = "../stratumiops/crates/stratum-embeddings", features = ["openai-provider", "ollama-provider", "fastembed-provider", "huggingface-provider", "memory-cache", "persistent-cache", "surrealdb-store"] }
|
||||
stratum-llm = { path = "../stratumiops/crates/stratum-llm", features = ["anthropic", "openai", "ollama"] }
|
||||
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@
|
|||
[](https://www.rust-lang.org)
|
||||
[](https://kubernetes.io)
|
||||
[](https://istio.io)
|
||||
[](crates/)
|
||||
[](crates/)
|
||||
|
||||
[Features](#features) • [Quick Start](#quick-start) • [Architecture](#architecture) • [Docs](docs/) • [Contributing](#contributing)
|
||||
|
||||
|
|
@ -30,9 +30,9 @@
|
|||
Where ideas vaporize into reality
|
||||
```
|
||||
|
||||
## 🌟 What is Vapora v1.2?
|
||||
## 🌟 What is Vapora?
|
||||
|
||||
**VAPORA** is a **18-crate Rust workspace** (354 tests, 100% pass rate) delivering an **intelligent development orchestration platform** where teams and AI agents collaborate seamlessly to solve the 4 critical problems in parallel:
|
||||
**VAPORA** is a **23-crate Rust workspace** (691 tests, 100% pass rate) delivering an **intelligent development orchestration platform** where teams and AI agents collaborate seamlessly to solve the 4 critical problems in parallel:
|
||||
|
||||
- ✅ **Context Switching** (Developers unified in one system instead of jumping between tools)
|
||||
- ✅ **Knowledge Fragmentation** (Team decisions, code, and docs discoverable with RAG)
|
||||
|
|
@ -445,7 +445,7 @@ vapora/
|
|||
├── features/ # Feature documentation
|
||||
└── setup/ # Installation and CLI guides
|
||||
|
||||
# Total: 18 crates, 354 tests (100% pass rate)
|
||||
# Total: 23 crates, 691 tests (100% pass rate)
|
||||
```
|
||||
|
||||
---
|
||||
|
|
|
|||
51
adrs/_template.ncl
Normal file
51
adrs/_template.ncl
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
# ADR template — plain record for typedialog roundtrip input.
|
||||
# No contracts applied here; contracts are enforced in the Jinja2 output template.
|
||||
#
|
||||
# Usage:
|
||||
# typedialog nickel-roundtrip \
|
||||
# --input adrs/_template.ncl \
|
||||
# --form reflection/forms/new_adr.ncl \
|
||||
# --output adrs/adr-NNN-title.ncl \
|
||||
# --ncl-template reflection/templates/adr.ncl.j2
|
||||
|
||||
{
|
||||
id = "adr-000",
|
||||
title = "",
|
||||
status = "Proposed",
|
||||
date = "2026-03",
|
||||
|
||||
context = "",
|
||||
decision = "",
|
||||
|
||||
rationale = [
|
||||
{ claim = "", detail = "" },
|
||||
],
|
||||
|
||||
consequences = {
|
||||
positive = [""],
|
||||
negative = [""],
|
||||
},
|
||||
|
||||
alternatives_considered = [
|
||||
{ option = "", why_rejected = "" },
|
||||
],
|
||||
|
||||
constraints = [
|
||||
{
|
||||
id = "",
|
||||
claim = "",
|
||||
scope = "",
|
||||
severity = "Hard",
|
||||
check_hint = "",
|
||||
rationale = "",
|
||||
},
|
||||
],
|
||||
|
||||
related_adrs = [],
|
||||
|
||||
ontology_check = {
|
||||
decision_string = "",
|
||||
invariants_at_risk = [],
|
||||
verdict = "Safe",
|
||||
},
|
||||
}
|
||||
79
adrs/adr-001-ontoref-adoption.ncl
Normal file
79
adrs/adr-001-ontoref-adoption.ncl
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
let d = import "adr-defaults.ncl" in
|
||||
|
||||
d.make_adr {
|
||||
id = "adr-001",
|
||||
title = "Adopt Ontoref Protocol for Vapora Self-Description",
|
||||
status = 'Accepted,
|
||||
date = "2026-03-14",
|
||||
|
||||
context = "Vapora had reflection modes (reflection/modes/) that imported schema and defaults from stratumiops via relative paths (../../../../stratumiops/reflection/). The ontology/reflection patterns originated in stratumiops as self-description tooling. Stratumiops was subsequently migrated and the protocol extracted into ontoref as a standalone project with independent versioning. Vapora needed to migrate its imports and formally onboard to the ontoref protocol to receive future schema updates, ADR tooling, and daemon-based ontology sync.",
|
||||
|
||||
decision = "Vapora adopts the ontoref protocol as its single on+re tooling reference. All reflection mode imports are updated from stratumiops to ontoref relative paths. Vapora is onboarded via ontoref setup (creates .ontology/, adrs/, .ontoref/config.ncl, reflection support files, git hooks). The ONTOREF_PROJECT_ROOT env var is used to invoke ontoref setup before .ontology/ exists. Ontology defaults are imported via relative paths from ontoref's ontology/defaults/. The ontoref CLI at ~/.local/bin/ontoref is the canonical entry point.",
|
||||
|
||||
rationale = [
|
||||
{
|
||||
claim = "Stratumiops is no longer the protocol owner",
|
||||
detail = "The ontology/reflection protocol was extracted to ontoref (ADR-001 in ontoref). Vapora continuing to import from stratumiops would target a deprecated path and miss all future protocol evolution.",
|
||||
},
|
||||
{
|
||||
claim = "Ontoref setup is idempotent and safe to re-run",
|
||||
detail = "The setup command creates missing artifacts and skips existing ones. Re-running after install-daemon syncs any new templates. This makes the adoption reversible and incrementally completable.",
|
||||
},
|
||||
{
|
||||
claim = "Relative imports from ontoref are stable under single-machine development",
|
||||
detail = "Both vapora and ontoref live under ~/Development/. Relative paths (../../ontoref/...) are consistent across the local checkout structure. The nickel_import_paths in .ontoref/project.ncl can be extended if the layout changes.",
|
||||
},
|
||||
],
|
||||
|
||||
consequences = {
|
||||
positive = [
|
||||
"Vapora receives ontoref protocol updates (schema changes, new defaults, new ADR fields) via repo pull",
|
||||
"Ontoref daemon can sync vapora ontology once NATS is running and ONTOREF_TOKEN is set",
|
||||
"Git hooks (post-commit, post-merge) notify daemon of NCL file changes automatically",
|
||||
"adrs/ directory now follows the ontoref ADR lifecycle schema (Proposed/Accepted/Superseded)",
|
||||
"Reflection modes are schema-validated against ontoref's typed schema.ncl",
|
||||
],
|
||||
negative = [
|
||||
"Relative imports require both repos checked out under the same parent directory",
|
||||
"nickel_import_paths in project.ncl must be updated if the checkout layout changes",
|
||||
],
|
||||
},
|
||||
|
||||
alternatives_considered = [
|
||||
{
|
||||
option = "Keep importing from stratumiops with updated paths after stratumiops migration",
|
||||
why_rejected = "Stratumiops no longer owns the protocol. Importing from it would target stale schemas and miss ontoref-specific features (daemon, ADR lifecycle, gate membranes).",
|
||||
},
|
||||
{
|
||||
option = "Copy ontoref schema files into vapora/adrs/ and vapora/.ontology/",
|
||||
why_rejected = "Creates schema drift. Any ontoref protocol update would require a manual copy to vapora. The relative import approach gives vapora the latest protocol via a single git pull in ontoref.",
|
||||
},
|
||||
],
|
||||
|
||||
constraints = [
|
||||
{
|
||||
id = "no-stratumiops-reflection-imports",
|
||||
claim = "No file in vapora may import from stratumiops/reflection/ paths",
|
||||
scope = "vapora (reflection/modes/, .ontology/)",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "stratumiops/reflection", paths = ["reflection/", ".ontology/"], must_be_empty = true },
|
||||
rationale = "Stratumiops is no longer the protocol owner. All reflection and ontology schemas come from ontoref.",
|
||||
},
|
||||
{
|
||||
id = "ontoref-single-entry-point",
|
||||
claim = "All on+re operations must go through the ontoref CLI (~/.local/bin/ontoref or scripts/ontoref wrapper)",
|
||||
scope = "vapora (all actors)",
|
||||
severity = 'Soft,
|
||||
check = { tag = 'FileExists, path = ".ontoref/config.ncl", present = true },
|
||||
rationale = "Direct nickel invocations bypass the daemon notification, log, and lock mechanisms.",
|
||||
},
|
||||
],
|
||||
|
||||
related_adrs = [],
|
||||
|
||||
ontology_check = {
|
||||
decision_string = "vapora adopts ontoref protocol; reflection imports migrated from stratumiops to ontoref; .ontology/ and adrs/ managed via ontoref CLI",
|
||||
invariants_at_risk = ["ontoref-protocol-adoption"],
|
||||
verdict = 'Safe,
|
||||
},
|
||||
}
|
||||
86
adrs/adr-002-cargo-workspace.ncl
Normal file
86
adrs/adr-002-cargo-workspace.ncl
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
let d = import "adr-defaults.ncl" in
|
||||
|
||||
d.make_adr {
|
||||
id = "adr-002",
|
||||
title = "Single Cargo Workspace with Specialized Crates",
|
||||
status = 'Accepted,
|
||||
date = "2024-11-01",
|
||||
|
||||
context = "Vapora is a multi-domain platform spanning REST API, agent orchestration, LLM routing, knowledge graph, WASM frontend, and protocol servers. The initial 13-crate workspace (as of ADR creation) has grown to 17 crates. The cargo workspace monorepo approach centralizes dependency management, enables parallel test execution, and enforces explicit inter-crate boundaries via Cargo.toml dependencies.",
|
||||
|
||||
decision = "All vapora code lives in a single Cargo workspace. Each architectural layer is a separate crate under crates/. Shared types live in vapora-shared. Workspace-level dependency versions are pinned in the root Cargo.toml [workspace.dependencies] table. No crate may depend on another vapora crate not declared in Cargo.toml.",
|
||||
|
||||
rationale = [
|
||||
{
|
||||
claim = "Separate crates enforce architectural boundaries at the compiler level",
|
||||
detail = "Accidental coupling between (e.g.) vapora-frontend and vapora-agents is caught at compile time, not code review. This prevents the boundary erosion that happens in a single-crate monolith.",
|
||||
},
|
||||
{
|
||||
claim = "Centralized workspace dependency versions prevent version skew",
|
||||
detail = "[workspace.dependencies] in root Cargo.toml is the single source of truth for axum, surrealdb, tokio, rig-core versions. Individual crates inherit versions without pinning, making coordinated upgrades a single-file change.",
|
||||
},
|
||||
{
|
||||
claim = "vapora-shared as the single shared types boundary prevents circular deps",
|
||||
detail = "All domain models (Project, Task, Agent, etc.) live in vapora-shared. No domain crate depends on another domain crate — only on vapora-shared. This tree structure is enforced by the Cargo dependency graph.",
|
||||
},
|
||||
],
|
||||
|
||||
consequences = {
|
||||
positive = [
|
||||
"cargo test --workspace runs all 316 tests with full parallelism",
|
||||
"Inter-crate API changes surface as compile errors before runtime",
|
||||
"New crates added without modifying existing crates' Cargo.toml",
|
||||
"cargo build --release builds all crates with LTO across the entire workspace",
|
||||
],
|
||||
negative = [
|
||||
"Full workspace builds are slower than single-crate builds (partial mitigation via incremental compilation)",
|
||||
"Adding a new crate requires updating root Cargo.toml workspace.members",
|
||||
],
|
||||
},
|
||||
|
||||
alternatives_considered = [
|
||||
{
|
||||
option = "Single-crate monolith",
|
||||
why_rejected = "No compiler-enforced boundaries, inevitable coupling between layers, impossible to build only the backend binary without compiling frontend WASM dependencies.",
|
||||
},
|
||||
{
|
||||
option = "Multi-repository (separate Git repos per crate)",
|
||||
why_rejected = "Cross-crate refactors require multi-repo PRs. Integration testing requires local checkouts. Versioning inter-crate interfaces becomes a published API problem.",
|
||||
},
|
||||
],
|
||||
|
||||
constraints = [
|
||||
{
|
||||
id = "all-code-in-crates",
|
||||
claim = "All vapora source code must live under crates/ in the workspace",
|
||||
scope = "vapora (root Cargo.toml)",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'FileExists, path = "crates/vapora-shared/src/lib.rs", present = true },
|
||||
rationale = "Code outside the workspace cannot benefit from shared dependency versions, cross-crate type checking, or unified test runs.",
|
||||
},
|
||||
{
|
||||
id = "workspace-dep-versions",
|
||||
claim = "All shared dependency versions must be declared in root Cargo.toml [workspace.dependencies]",
|
||||
scope = "vapora (all crates)",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "workspace\\.dependencies", paths = ["Cargo.toml"], must_be_empty = false },
|
||||
rationale = "Per-crate version pinning leads to version skew and is the root cause of diamond dependency failures.",
|
||||
},
|
||||
{
|
||||
id = "no-direct-cross-domain-deps",
|
||||
claim = "Domain crates (vapora-backend, vapora-agents, vapora-llm-router) must not depend directly on each other; they share only through vapora-shared",
|
||||
scope = "vapora (all domain crates)",
|
||||
severity = 'Soft,
|
||||
check = { tag = 'NuCmd, cmd = "let r = (do { cargo tree -p vapora-backend } | complete); if $r.exit_code != 0 { exit 1 }; let lines = ($r.stdout | lines | where { |l| ($l | str contains 'vapora-agents') and not ($l | str contains 'vapora-shared') }); if ($lines | is-empty) { exit 0 } else { exit 1 }", expect_exit = 0 },
|
||||
rationale = "Direct cross-domain deps create coupling that prevents independent crate evolution and break the layered architecture.",
|
||||
},
|
||||
],
|
||||
|
||||
related_adrs = ["adr-001"],
|
||||
|
||||
ontology_check = {
|
||||
decision_string = "single cargo workspace; all code in crates/; shared deps in root Cargo.toml; vapora-shared as the single shared-types boundary",
|
||||
invariants_at_risk = [],
|
||||
verdict = 'Safe,
|
||||
},
|
||||
}
|
||||
78
adrs/adr-003-axum-backend.ncl
Normal file
78
adrs/adr-003-axum-backend.ncl
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
let d = import "adr-defaults.ncl" in
|
||||
|
||||
d.make_adr {
|
||||
id = "adr-003",
|
||||
title = "Axum as the Sole HTTP Framework for vapora-backend",
|
||||
status = 'Accepted,
|
||||
date = "2024-11-01",
|
||||
|
||||
context = "vapora-backend exposes 40+ REST endpoints plus WebSocket connections for real-time updates. The framework choice determines the async model, middleware composition, and extractor ergonomics for the entire API surface. As of 2026-03-27, axum 0.8.8 is in use (the markdown ADR references 0.8.6, which is stale). The Tower ecosystem is the chosen middleware stack.",
|
||||
|
||||
decision = "Axum is the only HTTP framework allowed in vapora-backend. No Actix-Web, Rocket, or Warp. All HTTP handlers use Axum extractors and Router composition. Middleware uses the Tower layer model. WebSocket upgrades use axum::extract::ws.",
|
||||
|
||||
rationale = [
|
||||
{
|
||||
claim = "Axum is Tokio-native with zero abstraction over the async runtime",
|
||||
detail = "vapora runs a Tokio multi-threaded runtime. Axum's Handler trait is directly implemented over Tokio futures — no bridging layer, no actor model overhead. This eliminates the async runtime impedance mismatch that Actix-Web introduces.",
|
||||
},
|
||||
{
|
||||
claim = "Tower middleware composes predictably with all Axum routes",
|
||||
detail = "CorsLayer, TraceLayer, CompressionLayer, and authentication middleware are composed via ServiceBuilder, not framework-specific macros. This means middleware ordering is explicit, testable, and portable to other Tower-based services.",
|
||||
},
|
||||
{
|
||||
claim = "Type-safe extractors eliminate runtime deserialization panics",
|
||||
detail = "Json<T>, Path<T>, State<T> extractors fail at compile time if the handler signature doesn't match — not at runtime. This catches API contract violations before deployment.",
|
||||
},
|
||||
],
|
||||
|
||||
consequences = {
|
||||
positive = [
|
||||
"All API surface is tested via axum-test's TestClient without a real TCP socket",
|
||||
"Tower middleware applies uniformly to all routes, including WebSocket upgrade paths",
|
||||
"Adding new endpoints requires only adding a handler fn and a route entry — no boilerplate registration",
|
||||
"IntoResponse impl on VaporaError provides consistent error serialization across all handlers",
|
||||
],
|
||||
negative = [
|
||||
"Axum's 0.8.x API introduced breaking changes from 0.7 (extractor signatures, Router typing) — upgrades require wholesale migration",
|
||||
"Axum lacks built-in request body size limiting — must be added via RequestBodyLimitLayer",
|
||||
],
|
||||
},
|
||||
|
||||
alternatives_considered = [
|
||||
{
|
||||
option = "Actix-Web",
|
||||
why_rejected = "Actor model adds coordination overhead not needed for stateless API handlers. Different async patterns from Tokio primitives make integration with NATS JetStream and SurrealDB clients awkward.",
|
||||
},
|
||||
{
|
||||
option = "Rocket",
|
||||
why_rejected = "Synchronous-first design. async support was added as an afterthought, leading to executor boundary issues in Tokio-native code.",
|
||||
},
|
||||
],
|
||||
|
||||
constraints = [
|
||||
{
|
||||
id = "axum-only-http-framework",
|
||||
claim = "vapora-backend must not import actix-web, rocket, or warp",
|
||||
scope = "vapora-backend",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Cargo, crate = "vapora-backend", forbidden_deps = ["actix-web", "rocket", "warp"] },
|
||||
rationale = "Multiple HTTP frameworks in one binary create conflicting async executor registrations, duplicated middleware chains, and inconsistent error serialization.",
|
||||
},
|
||||
{
|
||||
id = "all-handlers-via-axum-router",
|
||||
claim = "All HTTP endpoints must be registered via axum::Router — no raw hyper service registration",
|
||||
scope = "vapora-backend/src/main.rs, vapora-backend/src/api/",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "axum::Router", paths = ["crates/vapora-backend/src/main.rs"], must_be_empty = false },
|
||||
rationale = "Bypassing the Axum router skips middleware layers (tracing, CORS, auth) applied at the Router level.",
|
||||
},
|
||||
],
|
||||
|
||||
related_adrs = ["adr-002"],
|
||||
|
||||
ontology_check = {
|
||||
decision_string = "axum 0.8.x is the sole HTTP framework in vapora-backend; Tower middleware stack; no actix-web/rocket/warp",
|
||||
invariants_at_risk = [],
|
||||
verdict = 'Safe,
|
||||
},
|
||||
}
# --- new file: adrs/adr-004-surrealdb-database.ncl (87 lines, @ -0,0 +1,87) ---
# ADR-004: SurrealDB as the Sole Persistence Layer.
# Built via the shared constructor in adr-defaults.ncl, which supplies
# defaults and the record contract for all ADR files.
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-004",
  title = "SurrealDB as the Sole Persistence Layer",
  status = 'Accepted,
  date = "2024-11-01",

  context = "Vapora requires relational storage (projects, tasks), graph traversal (agent relationships, knowledge graph), and document storage (execution history, LLM outputs) — typically requiring three separate databases. As of 2026-03-27, surrealdb v3 is in use (the markdown ADR references 2.3, which is stale). The workspace root Cargo.toml pins `surrealdb = { version = \"3\", features = [\"protocol-ws\", \"rustls\"] }`.",

  decision = "SurrealDB is the only database engine in vapora. No PostgreSQL, no SQLite, no MongoDB, no Redis. All persistence goes through the SurrealDB client. Multi-tenancy is enforced via SurrealDB scopes — no application-layer tenant filtering may substitute for scope enforcement.",

  rationale = [
    {
      claim = "Single database eliminates cross-DB transaction complexity",
      detail = "If knowledge graph nodes and project tasks were in separate databases, any operation touching both (e.g. recording which task produced which KG node) would require distributed transactions or eventual consistency. SurrealDB handles both in one query.",
    },
    {
      claim = "SurrealDB scopes provide database-level tenant isolation",
      detail = "A query executed in scope workspace:X cannot access records in workspace:Y, regardless of application code. This means a bug in the service layer cannot cause a tenant data leak — the database rejects the query.",
    },
    {
      claim = "SurrealQL graph traversal replaces a separate graph database",
      detail = "Knowledge graph learning curves, agent relationship traversal, and causal execution chains are expressed as SurrealQL graph queries (->relation->). A separate Neo4j instance would require replication, synchronization, and schema management across two stores.",
    },
  ],

  consequences = {
    positive = [
      "Knowledge graph, task management, and audit trail share a single connection pool",
      "SurrealDB scope enforcement is the primary multi-tenancy guarantee",
      "All services use parameterized SurrealQL queries — no raw string interpolation into queries",
      "WebSocket protocol-ws enables real-time subscriptions from vapora-backend",
    ],
    negative = [
      "SurrealDB v3 is a major-version breaking change from v2 — all services must coordinate upgrade simultaneously",
      "SurrealDB lacks mature migration tooling compared to PostgreSQL — migrations are manual .surql files",
      "No read replica support in current deployment (single instance handles all reads and writes)",
    ],
  },

  alternatives_considered = [
    {
      option = "PostgreSQL + Neo4j",
      why_rejected = "Two database engines double operational burden. Cross-DB transactions require two-phase commit or saga patterns. Schema synchronization across both stores is error-prone.",
    },
    {
      option = "MongoDB",
      why_rejected = "No native graph traversal. Application code would need to implement graph traversal, duplicating logic that SurrealQL expresses natively. No built-in multi-tenancy scopes.",
    },
  ],

  constraints = [
    {
      id = "no-other-database-engines",
      claim = "No crate in the workspace may import postgresql, mongodb, sqlite, or redis client crates",
      scope = "vapora (all crates)",
      severity = 'Hard,
      # NOTE(review): the claim covers all workspace crates, but this check
      # only inspects vapora-backend — confirm the Cargo check DSL recurses
      # over the workspace, or add per-crate checks.
      check = { tag = 'Cargo, crate = "vapora-backend", forbidden_deps = ["sqlx", "sea-orm", "diesel", "mongodb", "redis"] },
      rationale = "Adding a second database engine introduces consistency gaps, split connection pools, and dual migration paths.",
    },
    {
      id = "surreal-scopes-for-tenancy",
      claim = "All multi-tenant queries must use SurrealDB scopes — no application-layer tenant_id filtering may be the sole isolation mechanism",
      scope = "vapora-backend/src/services/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "scope|NS|DB", paths = ["crates/vapora-backend/src/services/"], must_be_empty = false },
      rationale = "Application-layer filtering is the second defense layer, not the primary one. A service bug that drops a WHERE clause cannot bypass the DB scope.",
    },
    {
      id = "parameterized-queries-only",
      claim = "All SurrealQL queries must use parameterized bindings via .bind() — no string interpolation into query text",
      scope = "vapora (all crates using surrealdb)",
      severity = 'Hard,
      # FIX: the previous pattern "format!.*SELECT|INSERT|UPDATE|DELETE" had
      # unparenthesized alternation — regex '|' binds loosest, so it matched
      # any line containing a bare INSERT/UPDATE/DELETE (e.g. a legitimate
      # parameterized SurrealQL string), and with must_be_empty = true every
      # such line became a false violation. Grouping restricts the match to
      # format!-built query text.
      check = { tag = 'Grep, pattern = "format!.*(SELECT|INSERT|UPDATE|DELETE)", paths = ["crates/"], must_be_empty = true },
      rationale = "String-interpolated queries are vulnerable to SurrealQL injection, especially when tenant IDs or user-supplied values appear in query conditions.",
    },
  ],

  related_adrs = ["adr-002", "adr-011"],

  ontology_check = {
    decision_string = "surrealdb v3 is the sole database; scopes enforce multi-tenancy at DB level; no other DB engines; parameterized queries only",
    invariants_at_risk = ["surreal-persistence", "multi-tenant-isolation"],
    verdict = 'Safe,
  },
}
# --- new file: adrs/adr-005-nats-jetstream.ncl (90 lines, @ -0,0 +1,90) ---
# ADR-005: NATS JetStream for agent coordination and workflow progression.
# Record constructed through the shared make_adr helper from adr-defaults.ncl.
let d = import "adr-defaults.ncl" in

d.make_adr {
  # Identity of this decision record.
  id = "adr-005",
  title = "NATS JetStream for Agent Coordination and Workflow Progression",
  status = 'Accepted,
  date = "2024-11-01",

  # Why a persistent, at-least-once broker is needed at all.
  context = "Vapora agents are long-running tasks that may complete seconds or minutes after dispatch. The workflow engine needs reliable stage-to-stage progression. The A2A protocol requires async task completion notification. All of these require at-least-once delivery with persistence across restarts. As of 2026-03-27, async-nats 0.46 is in use (the markdown ADR references 0.45, which is stale). NATS is optional — all consumers implement graceful fallback when NATS is unavailable.",

  # The decision itself: NATS JetStream only, optional in development.
  decision = "NATS JetStream (via async-nats) is the message broker for agent task dispatch, heartbeat monitoring, workflow stage progression, and A2A completion notifications. No Redis Pub/Sub, no RabbitMQ, no Kafka. NATS is optional in development — all consumers check NATS availability at startup and degrade gracefully (polling fallback or sync execution) without crashing.",

  # Supporting claims, each with a concrete justification.
  rationale = [
    {
      claim = "JetStream at-least-once delivery prevents silent task loss",
      detail = "Redis Pub/Sub drops messages if no subscriber is listening at publish time. JetStream persists messages to a stream and delivers them when a consumer reconnects. Agent crashes during task execution result in redelivery, not silent loss.",
    },
    {
      claim = "NATS is lightweight with no external dependency beyond the server binary",
      detail = "The NATS server is a single Go binary with no external runtime dependencies. RabbitMQ requires Erlang runtime + management plugins. Kafka requires ZooKeeper or KRaft + JVM. For a self-hosted platform, NATS operational burden is an order of magnitude lower.",
    },
    {
      claim = "DashMap<String, oneshot::Sender> bridges NATS async replies to Tokio callers",
      detail = "When a caller dispatches a task and needs the result, it inserts a oneshot sender into a DashMap keyed by task_id, then awaits the receiver. A background NATS subscriber resolves the sender on completion. This is the pattern used in vapora-a2a/src/bridge.rs and orchestrator.rs.",
    },
  ],

  # Trade-offs accepted by this decision.
  consequences = {
    positive = [
      "Agent task dispatch is fire-and-forget from the caller's perspective — no blocking while the agent runs",
      "Workflow stage progression survives backend restarts — JetStream re-delivers pending stage triggers",
      "NATS subject hierarchy (vapora.tasks.*, vapora.agents.*) provides observable message topology",
      "Graceful fallback means local development works without a running NATS server",
    ],
    negative = [
      "JetStream stream configuration (max_age, max_msgs, storage) must be provisioned before first use",
      "The DashMap<String, oneshot::Sender> pattern leaks entries if the NATS completion message is never received — requires TTL cleanup",
    ],
  },

  # Rejected alternatives and the reason each was dropped.
  alternatives_considered = [
    {
      option = "Redis Pub/Sub",
      why_rejected = "No persistence — messages are dropped if the subscriber is offline when the publisher fires. Not viable for agent task coordination where agent restarts are expected.",
    },
    {
      option = "RabbitMQ",
      why_rejected = "Erlang runtime adds 200 MB+ to the container image. AMQP protocol is more complex than NATS. No meaningful capability advantage over JetStream for this use case.",
    },
    {
      option = "Database polling (SurrealDB LIVE queries)",
      why_rejected = "SurrealDB LIVE queries provide push notifications but are tightly coupled to the database connection lifecycle. NATS decouples the coordination bus from the persistence layer, allowing both to scale independently.",
    },
  ],

  # Machine-checkable constraints derived from this ADR.
  constraints = [
    {
      id = "nats-only-message-broker",
      claim = "No crate may import rabbitmq, kafka, or redis client crates for message brokering",
      scope = "vapora (all crates)",
      severity = 'Hard,
      # NOTE(review): claim covers all crates but the check inspects only
      # vapora-backend — confirm the check DSL covers the workspace.
      check = { tag = 'Cargo, crate = "vapora-backend", forbidden_deps = ["lapin", "rdkafka", "redis"] },
      rationale = "Multiple message brokers would split the coordination bus, requiring consumers to subscribe to multiple systems and introducing message ordering ambiguity.",
    },
    {
      id = "nats-graceful-fallback",
      claim = "All NATS consumers must implement graceful fallback — NATS unavailability must not crash the service",
      scope = "vapora (vapora-agents, vapora-workflow-engine, vapora-a2a)",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "nats.*error|warn.*nats|fallback", paths = ["crates/vapora-agents/src/", "crates/vapora-a2a/src/"], must_be_empty = false },
      rationale = "Development environments without a running NATS server should still allow agent execution in degraded mode. A panic on NATS connection failure would block all development.",
    },
    {
      id = "nats-subject-hierarchy",
      claim = "All NATS subjects must use the vapora.* namespace prefix",
      scope = "vapora (all NATS publishers and subscribers)",
      severity = 'Soft,
      check = { tag = 'Grep, pattern = "\"vapora\\.", paths = ["crates/"], must_be_empty = false },
      rationale = "A consistent subject hierarchy prevents collisions with other services sharing the same NATS cluster and enables subject-based access control in multi-tenant deployments.",
    },
  ],

  related_adrs = ["adr-002", "adr-012"],

  # Summary consumed by the ontology sync tooling.
  ontology_check = {
    decision_string = "async-nats 0.46 JetStream for agent coordination and workflow progression; graceful fallback mandatory; vapora.* subject prefix",
    invariants_at_risk = ["message-based-coordination"],
    verdict = 'Safe,
  },
}
# --- new file: adrs/adr-006-rig-framework.ncl (78 lines, @ -0,0 +1,78) ---
# ADR-006: rig-core as the LLM agent framework, with the LLMClient trait
# as the single abstraction boundary for provider calls.
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-006",
  title = "rig-core as LLM Agent Framework with LLMClient Abstraction Boundary",
  status = 'Accepted,
  date = "2024-11-01",

  # Problem statement: multi-provider LLM access from Rust behind one trait.
  context = "Vapora needs to call Claude, OpenAI, Gemini, and Ollama with tool calling and streaming support from Rust. As of 2026-03-27, rig-core 0.30 is in use (the markdown ADR references 0.15, which is stale). The critical architectural decision is not just which crate is used, but that the LLMClient trait in vapora-llm-router is the sole abstraction boundary — no crate calls provider APIs directly.",

  decision = "rig-core is the LLM agent framework for tool calling, streaming, and provider adapters. The LLMClient trait in vapora-llm-router is the only permitted interface through which any vapora crate invokes LLM providers. No crate may call Anthropic, OpenAI, Gemini, or Ollama APIs directly — all calls go through LLMClient implementations backed by rig-core provider adapters.",

  rationale = [
    {
      claim = "LLMClient trait decouples callers from rig-core's API surface",
      detail = "rig-core 0.15 to 0.30 introduced breaking API changes. Because all callers depend on LLMClient (not rig-core directly), the upgrade required changes only in vapora-llm-router's provider adapters, not in the 5 crates that invoke LLM providers.",
    },
    {
      claim = "rig-core provides Rust-native tool calling without a Python bridge",
      detail = "LangChain Python bridge requires IPC, a Python runtime in the container, and serialization overhead for every tool call. rig-core compiles tool schemas to JSON at build time via Rust proc macros — zero runtime overhead.",
    },
    {
      claim = "Cost tracking is only possible when all calls funnel through a single interface",
      detail = "BudgetEnforcer and CostTracker in vapora-llm-router can only count every token if every LLM call passes through LLMClient. Direct provider calls bypass cost tracking and invalidate budget enforcement.",
    },
  ],

  consequences = {
    positive = [
      "Provider substitution (swap Claude for Gemini for a role) is a config change in llm-router.toml, not a code change",
      "BudgetEnforcer sees every LLM call — no calls escape cost tracking",
      "Streaming and tool calling are abstracted — callers are provider-agnostic",
      "rig-core version upgrades affect only vapora-llm-router provider adapters",
    ],
    negative = [
      "rig-core is a relatively young crate — breaking API changes between minor versions have occurred",
      "The LLMClient abstraction hides provider-specific capabilities (e.g. Claude extended thinking, OpenAI structured outputs) that can only be accessed by downgrading to rig-core directly",
    ],
  },

  alternatives_considered = [
    {
      option = "LangChain via Python bridge",
      why_rejected = "Python runtime in container, IPC overhead, serialization for every call. Eliminates the full-stack Rust advantage and introduces a Python/Rust type boundary.",
    },
    {
      option = "Direct provider SDKs (async-anthropic, openai-rust, etc.)",
      why_rejected = "Each provider SDK has a different async interface, error type, and streaming API. Implementing cost tracking, fallback chains, and budget enforcement across N different APIs multiplies maintenance burden by N.",
    },
  ],

  constraints = [
    {
      id = "no-direct-provider-calls",
      claim = "No crate outside vapora-llm-router may import anthropic, openai, or gemini client crates directly",
      scope = "vapora (all crates except vapora-llm-router)",
      severity = 'Hard,
      # NOTE(review): the claim excludes only vapora-llm-router, yet the
      # check inspects only vapora-backend — verify whether the Cargo check
      # needs one entry per crate.
      check = { tag = 'Cargo, crate = "vapora-backend", forbidden_deps = ["async-anthropic", "openai", "google-generativeai"] },
      rationale = "Direct provider calls bypass BudgetEnforcer and CostTracker, making cost enforcement impossible. They also bypass fallback chains, causing provider failures to surface as hard errors instead of automatic fallback.",
    },
    {
      id = "llm-client-trait-boundary",
      claim = "All LLM invocations in vapora-agents must go through the LLMClient trait",
      scope = "vapora-agents (all executor and coordinator code)",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "LLMClient", paths = ["crates/vapora-agents/src/"], must_be_empty = false },
      rationale = "The trait is the enforcement point for routing rules, budget limits, and provider abstraction. Code that bypasses it defeats all three.",
    },
  ],

  related_adrs = ["adr-002", "adr-009"],

  ontology_check = {
    decision_string = "rig-core 0.30 for LLM agent framework; LLMClient trait is the sole abstraction boundary; no direct provider API calls outside vapora-llm-router",
    invariants_at_risk = ["provider-abstraction", "cost-aware-routing"],
    verdict = 'Safe,
  },
}
# --- new file: adrs/adr-007-cedar-authorization.ncl (79 lines, @ -0,0 +1,79) ---
# ADR-007: Cedar policy engine for declarative authorization of workflow
# stage execution (implemented in vapora-workflow-engine, not the backend).
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-007",
  title = "Cedar Policy Engine for Declarative Authorization",
  status = 'Accepted,
  date = "2024-11-01",

  # Notes where Cedar actually lives today vs. what the markdown ADR said.
  context = "Vapora needs per-stage execution control in the workflow engine and fine-grained access control at the API layer. The markdown ADR describes Cedar in vapora-backend, but as of 2026-03-27, Cedar is implemented in vapora-workflow-engine/src/auth.rs (CedarAuthorizer, loaded from .cedar policy files) — not in the vapora-backend API handlers directly. Cedar policy evaluation happens before workflow stage execution, controlling which principals may trigger which actions on which resources.",

  decision = "Cedar (via cedar-policy crate) is the policy engine for authorization in vapora-workflow-engine. CedarAuthorizer loads .cedar policy files from a configurable directory at startup. All workflow stage execution requests are evaluated against the loaded policy set before the stage executes. Policy files are version-controlled in the project repo.",

  rationale = [
    {
      claim = "Declarative policies are auditable and reviewable without reading Rust code",
      detail = "A Cedar policy file expressing 'Architect role may trigger Deploy actions on any workflow' is readable by non-Rust engineers, auditable by security reviewers, and versionable in Git. An equivalent RBAC check buried in match statements is none of these.",
    },
    {
      claim = "Cedar's formal verification model prevents policy logic errors",
      detail = "Cedar policies are formally specified — the evaluator is proven to be sound and complete. Custom RBAC implementations in Rust carry no such guarantees and have historically introduced privilege escalation via logic errors in compound conditions.",
    },
    {
      claim = "Policy changes do not require recompilation",
      detail = "CedarAuthorizer loads .cedar files at startup from a configurable directory. Updating authorization rules is a file change + restart, not a code change + deploy. This enables security patches to access control without a full release.",
    },
  ],

  consequences = {
    positive = [
      "Authorization rules for workflow stage execution are version-controlled separately from Rust logic",
      "Adding a new stage or role requires only a .cedar policy addition, not a Rust change",
      "CedarAuthorizer fails closed — if no .cedar files are found, startup fails rather than allowing all requests",
      "Policy evaluation is synchronous and sub-millisecond — no async overhead in the authorization hot path",
    ],
    negative = [
      "Cedar policy language has a learning curve for engineers unfamiliar with it",
      "Entity/action/resource schema must be kept synchronized between Cedar policies and the Rust types they authorize",
      "The .cedar policy directory path must be configured correctly — misconfiguration causes startup failure",
    ],
  },

  alternatives_considered = [
    {
      option = "Custom RBAC in Rust (match on roles + permissions)",
      why_rejected = "Custom RBAC is not auditable, not formally verified, and grows in complexity as role/permission combinations increase. Authorization bugs in custom code have caused data leaks in several production incidents.",
    },
    {
      option = "Casbin policy engine",
      why_rejected = "Casbin's Rust implementation is less mature than cedar-policy. Cedar has formal verification backing and is used in production at AWS scale. The correctness guarantee is the primary selection criterion.",
    },
  ],

  constraints = [
    {
      id = "cedar-authorizer-in-workflow-engine",
      claim = "vapora-workflow-engine must use CedarAuthorizer for stage execution authorization — no ad-hoc role checks in stage execution code",
      scope = "vapora-workflow-engine/src/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "CedarAuthorizer", paths = ["crates/vapora-workflow-engine/src/"], must_be_empty = false },
      rationale = "Ad-hoc role checks inside stage execution logic bypass the audit trail and cannot be updated without recompilation.",
    },
    {
      id = "cedar-policy-files-in-repo",
      claim = "Cedar .cedar policy files must be version-controlled in the project repo",
      scope = "vapora (repo root or crates/vapora-workflow-engine/)",
      severity = 'Soft,
      # Nushell one-liner: fails (exit 1) when no *.cedar files exist in the
      # repo; succeeds (exit 0) when at least one is found.
      check = { tag = 'NuCmd, cmd = "let r = (do { glob '**/*.cedar' } | complete); if $r.exit_code != 0 { exit 1 }; let files = ($r.stdout | lines | where { |l| ($l | str trim | is-not-empty) }); if ($files | is-empty) { exit 1 } else { exit 0 }", expect_exit = 0 },
      rationale = "Policy files outside version control cannot be audited, rolled back, or reviewed in PRs. A misconfigured out-of-band policy file is a security incident waiting to happen.",
    },
  ],

  related_adrs = ["adr-002", "adr-004"],

  ontology_check = {
    decision_string = "cedar-policy for workflow stage authorization in vapora-workflow-engine; CedarAuthorizer loads .cedar files at startup; no ad-hoc role checks in stage code",
    invariants_at_risk = [],
    verdict = 'Safe,
  },
}
# --- new file: adrs/adr-008-llm-routing-tiers.ncl (86 lines, @ -0,0 +1,86) ---
# ADR-008: three-tier LLM routing (rules -> dynamic -> override) with
# budget enforcement before any provider dispatch.
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-008",
  title = "Three-Tier LLM Routing: Rules → Dynamic → Override with Budget Enforcement",
  status = 'Accepted,
  date = "2024-11-01",

  context = "vapora-llm-router must select a provider for every LLM call. The selection must be deterministic for known task patterns (architecture tasks always go to the most capable model), adaptive to runtime conditions (provider outage, budget exhaustion), and overridable for debugging. Budget enforcement is a separate but tightly coupled concern: the router must refuse calls that would exceed per-role budget limits.",

  decision = "The LLMRouter implements three-tier selection: (1) Rules tier — pattern-matched routing rules from llm-router.toml (e.g. architecture tasks → Claude); (2) Dynamic tier — runtime scoring based on availability, latency history, and current load when no rule matches; (3) Override tier — explicit provider specification with audit log entry. BudgetEnforcer runs before the router returns a provider: if the selected provider would breach the per-role budget, it falls back to the cheapest provider in the fallback chain. If all providers are over budget, the call is rejected with a BudgetExceeded error.",

  rationale = [
    {
      claim = "Rules tier provides deterministic routing for known patterns",
      detail = "Architecture tasks that always benefit from the most capable model should not be subject to dynamic scoring variability. Static rules give operators predictable routing behavior for their most important task types.",
    },
    {
      claim = "Dynamic tier enables automatic recovery from provider failures",
      detail = "When a provider has elevated error rates or latency, the dynamic scoring de-ranks it without operator intervention. Static-only routing would require a manual config change to route around an incident.",
    },
    {
      claim = "Budget enforcement at the router layer is the only viable enforcement point",
      detail = "BudgetEnforcer must see every token before it's spent. The LLMRouter is the single chokepoint — all LLM calls go through it (see ADR-006). This makes the router the correct enforcement point, not individual agent implementations.",
    },
  ],

  consequences = {
    positive = [
      "Routing rules and budget limits are in llm-router.toml — no code changes for common configuration updates",
      "BudgetEnforcer prevents runaway spending even if an agent is stuck in a loop",
      "Override tier with audit logging enables debugging without disabling enforcement for other roles",
      "Fallback chains ensure graceful degradation: Claude → GPT-4 → Gemini → Ollama",
    ],
    negative = [
      "Three-tier selection adds latency to the provider selection path (~1ms) — acceptable but measurable",
      "Budget limits must be set conservatively to avoid rejecting legitimate calls near period boundaries",
    ],
  },

  alternatives_considered = [
    {
      option = "Static rules only",
      why_rejected = "No adaptation to provider failures. Budget enforcement would require a separate service. Provider outages would surface as errors instead of transparent fallback.",
    },
    {
      option = "Dynamic only (no static rules)",
      why_rejected = "Cold-start problem: no execution history to score providers on. Determinism guarantee lost — debugging routing decisions requires tracing the scoring algorithm.",
    },
  ],

  constraints = [
    {
      id = "budget-enforcer-runs-before-dispatch",
      claim = "BudgetEnforcer must be invoked before any provider receives a token",
      scope = "vapora-llm-router/src/router.rs",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "BudgetEnforcer", paths = ["crates/vapora-llm-router/src/router.rs"], must_be_empty = false },
      rationale = "Post-dispatch budget checks cannot prevent overspending — tokens are already spent when the response arrives.",
    },
    {
      id = "routing-rules-in-config",
      claim = "All routing rules must be declared in llm-router.toml — no hardcoded provider names in agent code",
      scope = "vapora (all crates calling LLMClient)",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "claude-opus|gpt-4|gemini-pro", paths = ["crates/vapora-agents/src/"], must_be_empty = true },
      rationale = "Hardcoded provider names in agent code bypass the routing tier entirely and make provider substitution impossible without code changes.",
    },
    {
      id = "override-tier-audit-log",
      claim = "Provider override requests must produce an audit log entry",
      scope = "vapora-llm-router/src/router.rs",
      severity = 'Soft,
      check = { tag = 'Grep, pattern = "override|audit", paths = ["crates/vapora-llm-router/src/router.rs"], must_be_empty = false },
      rationale = "Override bypasses the rules and dynamic tiers. Without an audit trail, debugging unexpected provider selection is difficult in production.",
    },
  ],

  related_adrs = ["adr-006", "adr-009"],

  ontology_check = {
    decision_string = "three-tier LLM routing (rules/dynamic/override); BudgetEnforcer runs before dispatch; routing rules in llm-router.toml; no hardcoded provider names in agents",
    invariants_at_risk = ["cost-aware-routing", "provider-abstraction"],
    verdict = 'Safe,
  },
}
# --- new file: adrs/adr-009-learning-profiles.ncl (86 lines, @ -0,0 +1,86) ---
# ADR-009: per-task-type learning profiles with recency bias, used by the
# swarm coordinator to score and select agents.
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-009",
  title = "Per-Task-Type Learning Profiles with Recency Bias for Agent Selection",
  status = 'Accepted,
  date = "2024-11-01",

  context = "The swarm coordinator assigns tasks to agents. Naive round-robin wastes budget on agents that have poor track records for a specific task type. The agent selection scoring formula must balance current load (avoid overloading), expertise (prefer agents with high success rates for this task type), and confidence (discount scores based on small sample sizes). The recency bias addresses the reality that agent performance changes: a model update or config change can quickly improve or degrade an agent's capability on a specific task type.",

  decision = "Each agent maintains a per-task-type LearningProfile in vapora-agents/src/learning_profile.rs. The swarm scoring formula is: `score = 0.3*load_factor + 0.5*expertise_score + 0.2*confidence_weight`. The last 7 days of execution history are weighted 3x relative to older executions. Confidence weighting is applied when an agent has fewer than 20 executions for a task type: `confidence_weight = min(executions / 20, 1.0)`. Profiles are stored in SurrealDB and survive agent restarts.",

  rationale = [
    {
      claim = "Recency bias reflects that agent performance changes over time",
      detail = "An all-time average treats a task succeeded 6 months ago equally to one succeeded yesterday. If an agent's model was updated or its config was tuned last week, the all-time average undersells current capability. A 7-day window with 3x weighting surfaces recent performance changes within days.",
    },
    {
      claim = "Confidence weighting prevents the cold-start exploitation problem",
      detail = "A new agent with 2 successful executions would score 100% expertise without confidence weighting, outranking a veteran with 200 executions and 90% success rate. The min(n/20, 1.0) factor ensures new agents are not over-promoted until their sample size is statistically meaningful.",
    },
    {
      claim = "The 0.3/0.5/0.2 weight distribution prioritizes expertise over load",
      detail = "An agent at 80% load but with 95% expertise beats an idle agent with 50% expertise. This reflects the reality that getting the task done correctly is more valuable than perfect load distribution — especially for expensive LLM tasks where failure costs money.",
    },
  ],

  consequences = {
    positive = [
      "Agent selection improves automatically over the first few weeks as profiles accumulate data",
      "Agents that consistently fail specific task types are automatically deprioritized without manual configuration",
      "The scoring formula is explicit and observable — selection decisions can be explained from profile data",
    ],
    negative = [
      "Cold-start period: new agents are ranked conservatively until 20 executions per task type accumulate",
      "7-day recency window means a one-week outage resets expertise scores for that period",
      "Profile data accumulation requires SurrealDB persistence — in-memory-only deployments lose learning across restarts",
    ],
  },

  alternatives_considered = [
    {
      option = "All-time average success rate",
      why_rejected = "Does not adapt to recent performance changes. An agent that improved last week still carries the drag of its earlier poor performance for months.",
    },
    {
      option = "Last-N sliding window",
      why_rejected = "Artificial cutoff — performance from execution N+1 is completely ignored. The exponential recency bias is a smoother approximation of 'recent performance matters more'.",
    },
  ],

  constraints = [
    {
      id = "scoring-formula-in-swarm",
      claim = "The agent selection scoring formula (0.3*load + 0.5*expertise + 0.2*confidence) must be implemented in vapora-swarm — not duplicated across multiple crates",
      scope = "vapora-swarm/src/coordinator.rs",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "0\\.3|0\\.5|expertise", paths = ["crates/vapora-swarm/src/"], must_be_empty = false },
      rationale = "If the scoring formula is duplicated, the two copies will diverge. Selection decisions will be inconsistent depending on which codepath selected the agent.",
    },
    {
      id = "profiles-persisted-to-surrealdb",
      claim = "LearningProfile data must be persisted to SurrealDB — no in-memory-only profile storage",
      scope = "vapora-agents/src/learning_profile.rs",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "db|surreal|persist", paths = ["crates/vapora-agents/src/learning_profile.rs"], must_be_empty = false },
      rationale = "In-memory profiles are lost on agent restart, resetting all accumulated expertise data. This effectively resets the learning system on every deployment.",
    },
    {
      id = "confidence-threshold-twenty",
      claim = "Confidence weighting must apply until an agent reaches 20 executions per task type",
      scope = "vapora-agents/src/learning_profile.rs, vapora-swarm/src/",
      severity = 'Soft,
      check = { tag = 'Grep, pattern = "20\\.0|min_executions|confidence", paths = ["crates/vapora-agents/src/", "crates/vapora-swarm/src/"], must_be_empty = false },
      rationale = "The threshold of 20 executions was tuned to balance cold-start speed against exploitation of new agents with small samples. Changing it without analysis risks either slow ramp-up or premature promotion.",
    },
  ],

  related_adrs = ["adr-006", "adr-008"],

  ontology_check = {
    decision_string = "per-task-type LearningProfile; scoring formula 0.3*load+0.5*expertise+0.2*confidence; 7-day recency bias 3x; confidence ramp to 20 executions; SurrealDB persistence",
    invariants_at_risk = ["learning-based-selection"],
    verdict = 'Safe,
  },
}
# --- new file: adrs/adr-010-multi-tenancy.ncl (85 lines, @ -0,0 +1,85) ---
# ADR-010: SurrealDB scope-based multi-tenancy with application-layer
# defense-in-depth. The record is evaluated through make_adr, which
# supplies defaults and enforces the ADR record shape.
let defaults = import "adr-defaults.ncl" in

defaults.make_adr {
  id = "adr-010",
  title = "SurrealDB Scope-Based Multi-Tenancy with Application-Layer Defense-in-Depth",
  status = 'Accepted,
  date = "2024-11-01",

  context = "Vapora serves multiple workspaces (tenants) from a single backend instance. Tenant isolation must guarantee that workspace A cannot read or write workspace B's data. The isolation mechanism must be enforced at a layer that application code bugs cannot bypass. SurrealDB scopes provide database-level isolation; application-layer tenant_id validation provides the defense-in-depth second layer.",

  decision = "Multi-tenancy is enforced at two layers: (1) SurrealDB scopes — all database connections and queries execute within a scope tied to the workspace; (2) Application services validate tenant_id in every write and read query as a redundant check. The SurrealDB scope is the primary isolation guarantee. Application-layer filtering is defense-in-depth only — it must never be the sole isolation mechanism.",

  # Each rationale entry pairs a one-line claim with supporting detail.
  rationale = [
    {
      claim = "Database-level scope enforcement cannot be bypassed by application code bugs",
      detail = "A service layer bug that omits a WHERE tenant_id = ? clause will still fail to return another tenant's data if the connection is scoped. The scope check runs in the database before the query result is assembled.",
    },
    {
      claim = "Application-layer validation catches bugs before they reach the database",
      detail = "If the SurrealDB scope configuration has an error, application-layer tenant_id checks provide a second line of defense. Defense-in-depth means no single failure mode causes a tenant data leak.",
    },
    {
      claim = "SurrealDB scopes are the most cost-effective isolation for a single-instance deployment",
      detail = "Hard partitioning (separate database per tenant) would require N database connections, N migration runs, and N monitoring streams. Scopes achieve logical isolation at the query level without multiplying operational burden.",
    },
  ],

  consequences = {
    positive = [
      "Cross-tenant data leaks require both a scope misconfiguration AND an application bug simultaneously",
      "Tenant onboarding is a scope creation operation — no schema migration or new instance needed",
      "All SurrealQL queries are tenant-scoped by default — queries that omit tenant context fail, not return all data",
    ],
    negative = [
      "Cross-tenant analytics queries (aggregate usage across all workspaces) require a superuser scope connection — this is a privileged operation that must be explicitly controlled",
      "SurrealDB scope token expiry handling must be implemented to prevent session leaks between tenant requests",
    ],
  },

  alternatives_considered = [
    {
      option = "Application-layer tenant_id filtering only",
      why_rejected = "A single WHERE clause omission leaks all tenants' data. Application code bugs are more common than database configuration errors.",
    },
    {
      option = "Separate database instance per tenant",
      why_rejected = "N databases × all maintenance operations. Initial vapora deployment targets small-to-medium teams; the operational overhead of per-tenant instances is disproportionate.",
    },
  ],

  # Machine-checkable constraints. Each check greps the given paths;
  # must_be_empty = false means matches are REQUIRED to exist.
  constraints = [
    {
      id = "no-application-only-isolation",
      claim = "No service may rely solely on application-layer tenant_id filtering for tenant isolation — SurrealDB scope must be the primary mechanism",
      scope = "vapora-backend/src/services/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "\\.signin|scope|Scope", paths = ["crates/vapora-backend/src/services/"], must_be_empty = false },
      rationale = "Application-layer-only filtering has caused multi-tenant data leaks in production systems. Database scope enforcement is not optional.",
    },
    {
      id = "tenant-id-in-all-writes",
      claim = "Every INSERT and UPDATE in service layer must include tenant_id binding",
      scope = "vapora-backend/src/services/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "tenant_id", paths = ["crates/vapora-backend/src/services/"], must_be_empty = false },
      rationale = "Defense-in-depth requires the application layer to enforce tenant context independently of the scope mechanism.",
    },
    {
      id = "no-cross-tenant-queries-without-superuser",
      claim = "Queries that aggregate across tenant boundaries must use an explicitly designated superuser scope — not the tenant session",
      scope = "vapora-backend/src/services/, vapora-analytics/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "superuser|admin_scope|root", paths = ["crates/vapora-analytics/"], must_be_empty = false },
      rationale = "A routine service accidentally executing with a superuser scope would leak data from all tenants in its result set.",
    },
  ],

  related_adrs = ["adr-004"],

  ontology_check = {
    decision_string = "SurrealDB scopes as primary tenant isolation; application tenant_id validation as defense-in-depth; no application-only isolation; no cross-tenant queries without superuser scope",
    invariants_at_risk = ["multi-tenant-isolation", "surreal-persistence"],
    verdict = 'Safe,
  },
}
|
||||
86
adrs/adr-011-a2a-protocol.ncl
Normal file
86
adrs/adr-011-a2a-protocol.ncl
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
# ADR-011: A2A protocol implementation — SurrealDB persistence plus
# NATS async coordination. Built via make_adr for defaults + shape checks.
let defaults = import "adr-defaults.ncl" in

defaults.make_adr {
  id = "adr-011",
  title = "A2A Protocol Implementation: SurrealDB Persistence + NATS Async Coordination",
  status = 'Accepted,
  date = "2026-02-07",

  context = "Vapora needs to interoperate with external agent ecosystems (Claude Code, Google ADK). The A2A (Agent-to-Agent) protocol provides a standardized interface. The implementation required learning from a prior failed attempt where task state was stored in a HashMap (lost on restart) and task completion was faked with tokio::sleep(5). The remediated implementation uses SurrealDB for persistence and real NATS subscribers for async coordination. As of 2026-03-27, vapora-a2a exposes an HTTP + NATS server and vapora-a2a-client provides the Rust client library with retry/backoff.",

  decision = "A2A is implemented as two crates: vapora-a2a (server) and vapora-a2a-client (client). Task state is persisted to SurrealDB (table: a2a_tasks, SCHEMAFULL, survives restarts). Async task completion uses real NATS subscribers on vapora.tasks.completed and vapora.tasks.failed — no polling, no sleep-based fake completion. The client implements exponential backoff with jitter for 5xx/network errors; 4xx errors are not retried.",

  # Each rationale entry pairs a one-line claim with supporting detail.
  rationale = [
    {
      claim = "SurrealDB persistence is mandatory — in-memory task state is not acceptable",
      detail = "A server restart under an in-memory HashMap would lose all in-flight task state. A2A clients waiting for task completion would hang indefinitely with no recoverable state. SurrealDB tasks survive restarts and remain queryable by their task_id.",
    },
    {
      claim = "Real NATS coordination eliminates the race condition in timeout-based fake completion",
      detail = "tokio::sleep(5) as a task completion mechanism is not async coordination — it is a lie with a timer. Real completion events from NATS subscribers deliver results within milliseconds of actual task completion and handle partial failures correctly.",
    },
    {
      claim = "Smart retry classification prevents infinite loops on client bugs",
      detail = "Retrying 4xx responses forever would mask client bugs (bad request format, missing auth) and cause a thundering herd on config errors. 5xx and network errors are genuinely transient; 4xx errors require caller intervention.",
    },
  ],

  consequences = {
    positive = [
      "A2A task state survives server restarts — clients can poll task_id for completion",
      "NATS-based completion delivers results without polling — O(1) completion latency regardless of task duration",
      "Client retry with backoff handles transient server errors transparently",
      "7 E2E integration tests (marked #[ignore]) verify the full task lifecycle with real SurrealDB + NATS",
    ],
    negative = [
      "Integration tests require live SurrealDB + NATS — they are marked #[ignore] in CI without service dependencies",
      "The DashMap<task_id, oneshot::Sender> in the NATS bridge leaks entries for tasks that never complete — requires TTL cleanup",
    ],
  },

  alternatives_considered = [
    {
      option = "gRPC instead of JSON-RPC 2.0",
      why_rejected = "HTTP/2 infrastructure required. More complex than JSON-RPC for the current load profile. A2A specification uses HTTP/1.1 + JSON — gRPC would require a protocol translation layer.",
    },
    {
      option = "PostgreSQL or SQLite for A2A task persistence",
      why_rejected = "SurrealDB already used in vapora. Adding a second database engine doubles operational burden with no architectural benefit for A2A's data model.",
    },
  ],

  # Machine-checkable constraints. Note the second check uses
  # must_be_empty = true: sleep-adjacent task code must NOT appear.
  constraints = [
    {
      id = "a2a-tasks-in-surrealdb",
      claim = "A2A task state must be persisted to the SurrealDB a2a_tasks table — no in-memory HashMap storage",
      scope = "vapora-a2a/src/task_manager.rs",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "a2a_tasks", paths = ["crates/vapora-a2a/src/"], must_be_empty = false },
      rationale = "In-memory storage was the root cause of the failed first implementation. This constraint is a hard lesson from a production incident.",
    },
    {
      id = "no-sleep-based-completion",
      claim = "No tokio::sleep call may substitute for real async task completion in vapora-a2a",
      scope = "vapora-a2a/src/bridge.rs",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "sleep.*task|task.*sleep", paths = ["crates/vapora-a2a/src/bridge.rs"], must_be_empty = true },
      rationale = "Sleep-based fake completion was the specific mechanism that made the first implementation fraudulent. It must never return.",
    },
    {
      id = "client-retry-policy",
      claim = "vapora-a2a-client must use RetryPolicy with exponential backoff — no fixed-interval retries or no-retry implementations",
      scope = "vapora-a2a-client/src/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "RetryPolicy", paths = ["crates/vapora-a2a-client/src/"], must_be_empty = false },
      rationale = "Fixed-interval retries cause thundering herds on server recovery. No-retry clients expose callers to transient failures. Exponential backoff with jitter is the correct policy.",
    },
  ],

  related_adrs = ["adr-004", "adr-005"],

  ontology_check = {
    decision_string = "A2A implemented in vapora-a2a + vapora-a2a-client; SurrealDB a2a_tasks table for persistence; NATS for async completion; exponential backoff in client",
    invariants_at_risk = ["a2a-protocol", "message-based-coordination"],
    verdict = 'Safe,
  },
}
|
||||
86
adrs/adr-012-ssrf-prompt-injection.ncl
Normal file
86
adrs/adr-012-ssrf-prompt-injection.ncl
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
# ADR-012: SSRF protection and prompt-injection scanning at the API
# boundary. Built via make_adr for defaults + shape checks.
#
# FIX: the two constraint grep patterns below previously used "\\|"
# (an escaped pipe). Every other check in this ADR set uses a bare "|"
# for alternation (ERE/ripgrep semantics); with "\\|" these
# required-match checks would search for literal pipe characters and
# never match. Normalized to bare "|" for consistency and correctness.
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-012",
  title = "SSRF Protection and Prompt Injection Scanning at API Boundary",
  status = 'Accepted,
  date = "2026-02-26",

  context = "Competitive analysis against OpenFang revealed that vapora had no defenses against SSRF via misconfigured webhook URLs and no prompt injection scanning before user input reached LLM providers. The original SSRF check in main.rs logged a warning but did NOT remove the unsafe channel from the registry — channels with SSRF-risky URLs were fully operational despite the log claiming 'channel will be disabled'. Both attack surfaces were confirmed exploitable before this ADR.",

  decision = "A security module (vapora-backend/src/security/) with two sub-modules: (1) ssrf.rs — validates outbound URLs against a deny list of private/reserved/cloud-metadata address ranges before any HTTP request is dispatched; (2) prompt_injection.rs — pattern-based scanner that rejects known injection payloads at the API boundary before input reaches an LLM provider. Four integration points: channel webhook URL filtering at startup, RLM endpoints (load/query/analyze), task creation/update (title and description fields). Security rejections return 400 Bad Request, not 500.",

  # Each rationale entry pairs a one-line claim with supporting detail.
  rationale = [
    {
      claim = "Channel SSRF must be enforced by dropping the channel, not logging a warning",
      detail = "The original warn!() + register pattern was a documentation bug masquerading as security. A warning that allows the operation to proceed is not a security control. Dropping unsafe channels before ChannelRegistry::from_map is the correct enforcement model.",
    },
    {
      claim = "Prompt injection must be scanned at the API boundary, not inside the LLM router",
      detail = "Scanning at the LLM router is too late — the payload has already been accepted, persisted to the task table, and is in motion. API boundary scanning rejects the request before any persistence occurs, which is the correct defense point.",
    },
    {
      claim = "400 Bad Request for security rejections prevents information disclosure",
      detail = "A 500 Internal Server Error on prompt injection detection reveals that injection scanning is present and active, giving attackers feedback to tune their payloads. 400 Bad Request is ambiguous — it could be any validation failure.",
    },
  ],

  consequences = {
    positive = [
      "Channel webhook URLs from compromised config are rejected at startup, not silently registered",
      "User-supplied text in RLM and task endpoints is scanned before reaching any LLM provider",
      "Security rejections are observable via 400 response codes and security audit log entries",
      "ssrf.rs and prompt_injection.rs are independently testable without spinning up the full Axum server",
    ],
    negative = [
      "Pattern-based prompt injection scanning has false positive and false negative rates — adversarial inputs may bypass regex patterns",
      "SSRF deny list must be maintained as cloud providers add new metadata endpoints (e.g. GCP 169.254.169.254, AWS 169.254.169.254, Azure 169.254.169.254)",
    ],
  },

  alternatives_considered = [
    {
      option = "WAF (Web Application Firewall) at the infrastructure layer",
      why_rejected = "An external WAF cannot inspect the semantic content of LLM prompts or validate webhook URLs against cloud metadata ranges. Application-level scanning is required for these semantically rich validations.",
    },
    {
      option = "Sandboxed agent execution",
      why_rejected = "Sandboxing prevents prompt injection effects from escaping the execution environment but does not prevent the injection from reaching the LLM provider. The attack surface (LLM prompt poisoning) requires input scanning, not output sandboxing.",
    },
  ],

  # Machine-checkable constraints; must_be_empty = false means the
  # pattern is REQUIRED to match somewhere under the listed paths.
  constraints = [
    {
      id = "ssrf-validator-before-channel-registry",
      claim = "Channel webhook URLs must be validated via ssrf.rs before ChannelRegistry::from_map is called — unsafe channels must be dropped, not registered with a warning",
      scope = "vapora-backend/src/main.rs",
      severity = 'Hard,
      # Bare "|" alternation, consistent with the other ADR checks.
      check = { tag = 'Grep, pattern = "ssrf|SsrfValidator|validate_url", paths = ["crates/vapora-backend/src/main.rs"], must_be_empty = false },
      rationale = "The original warn-and-register pattern was the vulnerability. This constraint ensures the fix stays in place.",
    },
    {
      id = "prompt-injection-scan-at-rlm-boundary",
      claim = "RLM endpoints must scan user-supplied content and query via prompt_injection.rs before indexing or dispatching to LLM",
      scope = "vapora-backend/src/api/rlm.rs",
      severity = 'Hard,
      # Bare "|" alternation, consistent with the other ADR checks.
      check = { tag = 'Grep, pattern = "prompt_injection|scan_for_injection|PromptInjection", paths = ["crates/vapora-backend/src/api/rlm.rs"], must_be_empty = false },
      rationale = "RLM is the primary injection surface — it accepts arbitrary text content and forwards it to LLM providers.",
    },
    {
      id = "security-rejections-return-400",
      claim = "All security validation failures must return 400 Bad Request via VaporaError::InvalidInput — not 500",
      scope = "vapora-backend/src/security/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "InvalidInput", paths = ["crates/vapora-backend/src/security/"], must_be_empty = false },
      rationale = "500 responses on security rejections reveal the presence and behavior of the security scanner to attackers.",
    },
  ],

  related_adrs = ["adr-003"],

  ontology_check = {
    decision_string = "SSRF protection in ssrf.rs + prompt injection scanning in prompt_injection.rs; channels with unsafe URLs dropped at startup; RLM and task endpoints scanned at API boundary; 400 on rejection",
    invariants_at_risk = [],
    verdict = 'Safe,
  },
}
|
||||
83
adrs/adr-013-kg-hybrid-search.ncl
Normal file
83
adrs/adr-013-kg-hybrid-search.ncl
Normal file
|
|
@ -0,0 +1,83 @@
|
|||
# ADR-013: Knowledge-graph hybrid search — HNSW + BM25 fused with RRF.
# Built via make_adr for defaults + shape checks.
#
# FIX: both constraint grep patterns below previously used "\\|"
# (an escaped pipe). Every other check in this ADR set uses a bare "|"
# for alternation (ERE/ripgrep semantics); with "\\|" these
# required-match checks would search for literal pipe characters and
# never match. Normalized to bare "|" for consistency and correctness.
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-013",
  title = "Knowledge Graph Hybrid Search — HNSW + BM25 + Reciprocal Rank Fusion",
  status = 'Accepted,
  date = "2026-02-26",

  context = "find_similar_executions in KGPersistence discarded its embedding argument entirely and returned N most-recent successful executions ordered by timestamp — a correctness bug masquerading as similarity search. Separately, the kg_executions table was declared SCHEMAFULL but three fields (agent_role, provider, cost_cents) used by PersistedExecution were missing from the schema. SurrealDB drops undefined fields on INSERT in SCHEMAFULL tables, causing every SELECT to return records that failed serde deserialization, silently swallowed by filter_map. stratum-embeddings SurrealDbStore was evaluated but rejected: it loads all records into memory and computes cosine similarity in-process — suitable for bounded document chunks, unsuitable for unbounded KG execution history.",

  decision = "Replace stub similarity functions with a hybrid retrieval pipeline: (1) HNSW (SurrealDB 3 native ANN vector index) over the embedding field for semantic proximity, (2) BM25 (SurrealDB 3 native full-text search) over task_description for exact lexical matches, (3) Reciprocal Rank Fusion (k=60) for scale-invariant score fusion. Add migration 012_kg_hybrid_search.surql: fix the SCHEMAFULL schema gap (add missing fields), define the HNSW index on embedding, define the full-text search index on task_description.",

  # Each rationale entry pairs a one-line claim with supporting detail.
  rationale = [
    {
      claim = "Hybrid retrieval is required because HNSW and BM25 cover disjoint failure modes",
      detail = "HNSW (semantic) misses exact keyword matches: 'cargo clippy warnings' may not find 'clippy deny warnings fix' if the embedding model compresses the phrase differently. BM25 (lexical) misses semantic proximity: a query about error handling may not match a record about exception management if terminology differs. RRF fuses both rank lists without requiring score normalization.",
    },
    {
      claim = "The schema bug must be fixed before the index can be created",
      detail = "HNSW index creation on a SCHEMAFULL table requires the indexed field to exist in the schema. The missing agent_role/provider/cost_cents fields also caused all SELECT results to fail deserialization — fixing the schema is a prerequisite for any query correctness, not just the new index.",
    },
    {
      claim = "RRF k=60 is the standard fusion constant and requires no tuning",
      detail = "k=60 was established by Cormack et al. (2009) as a robust default. Score-based fusion alternatives (linear combination, learned weights) require per-corpus calibration. RRF is rank-only and therefore insensitive to score scale differences between HNSW cosine similarity and BM25 TF-IDF.",
    },
  ],

  consequences = {
    positive = [
      "find_similar_executions and find_similar_rlm_tasks now use the embedding argument correctly",
      "SCHEMAFULL schema gap eliminated — all PersistedExecution fields are persisted and deserialized correctly",
      "Hybrid search handles both exact crate/error-code queries (BM25) and semantic task similarity (HNSW)",
      "HNSW ANN search is sub-linear in the number of records — query time does not degrade with accumulation",
    ],
    negative = [
      "SurrealDB 3 native HNSW requires SurrealDB >= 3.0 at runtime; earlier versions will fail the migration",
      "RRF does not expose relevance scores to callers — ranking is ordinal only",
      "Embedding dimension is fixed at creation time; changing the embedding model requires dropping and rebuilding the HNSW index",
    ],
  },

  alternatives_considered = [
    {
      option = "stratum-embeddings SurrealDbStore in-process cosine similarity",
      why_rejected = "Loads all records into memory for every query. Acceptable for bounded document chunk sets; unacceptable for KG execution history that accumulates unbounded records across all agents and tasks over time.",
    },
    {
      option = "Pure HNSW semantic search",
      why_rejected = "Misses exact keyword matches for crate names, error codes, and specific command strings that are semantically compressed by embedding models.",
    },
    {
      option = "Pure BM25 lexical search",
      why_rejected = "Misses semantic equivalence for concept-level queries where terminology varies between the query and the stored record.",
    },
  ],

  # Machine-checkable constraints; must_be_empty = false means the
  # pattern is REQUIRED to match somewhere under the listed paths.
  constraints = [
    {
      id = "hnsw-index-required-for-kg",
      claim = "kg_executions must have an HNSW index on the embedding field — brute-force in-process vector search is not permitted for this table",
      scope = "migrations/012_kg_hybrid_search.surql",
      severity = 'Hard,
      # Bare "|" alternation, consistent with the other ADR checks.
      check = { tag = 'Grep, pattern = "HNSW|hnsw", paths = ["migrations/012_kg_hybrid_search.surql"], must_be_empty = false },
      rationale = "In-process similarity search over an unbounded table is the rejected alternative. The HNSW index must exist before production queries are issued.",
    },
    {
      id = "hybrid-rrf-fusion",
      claim = "KGPersistence similarity queries must fuse HNSW and BM25 results via RRF — no single-strategy retrieval",
      scope = "crates/vapora-knowledge-graph/src/persistence.rs",
      severity = 'Hard,
      # Bare "|" alternation, consistent with the other ADR checks.
      check = { tag = 'Grep, pattern = "rrf|reciprocal_rank|BM25|full_text", paths = ["crates/vapora-knowledge-graph/src/persistence.rs"], must_be_empty = false },
      rationale = "Reverting to single-strategy retrieval silently degrades search quality without any compile-time signal.",
    },
  ],

  related_adrs = ["adr-009"],

  ontology_check = {
    decision_string = "HNSW + BM25 + RRF hybrid search in KGPersistence; migration 012 fixes SCHEMAFULL gap and creates indexes; stratum-embeddings in-process scan rejected",
    invariants_at_risk = [],
    verdict = 'Safe,
  },
}
|
||||
78
adrs/adr-014-capability-packages.ncl
Normal file
78
adrs/adr-014-capability-packages.ncl
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
# ADR-014: Capability packages — vapora-capabilities crate with
# in-process executor dispatch. Built via make_adr for defaults + shape
# checks.
#
# FIX: the dashmap constraint grep pattern below previously used "\\|"
# (an escaped pipe). Every other check in this ADR set uses a bare "|"
# for alternation (ERE/ripgrep semantics); with "\\|" this
# required-match check would search for a literal pipe character and
# never match. Normalized to bare "|" for consistency and correctness.
let d = import "adr-defaults.ncl" in

d.make_adr {
  id = "adr-014",
  title = "Capability Packages — vapora-capabilities Crate with In-Process Executor Dispatch",
  status = 'Accepted,
  date = "2026-02-26",

  context = "VAPORA agent roles (Developer, Reviewer, Architect) were enum variants with no attached system prompt or model preference. Every new deployment required manual agents.toml editing before agents produced domain-appropriate responses. AgentDefinition lived in vapora-agents::config; any capability crate that wanted to produce AgentDefinitions would have to import vapora-agents, and if vapora-agents also imported vapora-capabilities for built-in capability loading, a compile-time circular dependency would result. Additionally, AgentCoordinator dispatched tasks by serializing them to NATS JetStream and waiting for an external process — no path existed for in-process executor dispatch needed by capability built-ins.",

  decision = "Introduce vapora-capabilities crate exposing Capability trait, CapabilitySpec, CapabilityRegistry, and CapabilityLoader. Relocate AgentDefinition from vapora-agents::config to vapora-shared to break the circular dependency. Add AgentExecutor::with_router(Arc<LLMRouter>) builder. Add AgentCoordinator in-process executor dispatch via DashMap<String, Sender<TaskAssignment>> — shard lock released before .await by cloning the Sender out of the map. Built-in implementations: CodeReviewer, DocGenerator, PRMonitor.",

  # Each rationale entry pairs a one-line claim with supporting detail.
  rationale = [
    {
      claim = "AgentDefinition belongs in vapora-shared — it is a plain data-transfer type with no orchestration logic",
      detail = "AgentDefinition contains role, provider, model, system_prompt — no async traits, no runtime state, no I/O. Its presence in vapora-agents::config was an artifact of where it was first needed, not where it conceptually belongs. Moving it to vapora-shared eliminates the circular dependency without changing any observable behavior. vapora-agents re-exports it for backward compatibility.",
    },
    {
      claim = "In-process executor dispatch requires releasing the DashMap shard lock before .await",
      detail = "DashMap shard entries hold a read/write guard. If the guard is held across an .await point, the executor that eventually processes the task cannot re-enter the same shard to update state — deadlock. The fix: clone the Sender<TaskAssignment> out of the map (releases the guard), then call sender.send(assignment).await. This is a Rust async correctness constraint, not a design preference.",
    },
    {
      claim = "Capability bundles ship a system prompt, not a crate — operators activate by name, not by file",
      detail = "Manual agents.toml prompt engineering was the alternative. It required locating the file, writing a semantically correct system prompt, choosing a model, and restarting the server. CapabilityLoader resolves built-ins by name ('code-reviewer') with TOML override support — zero-config activation, operator-override when needed.",
    },
  ],

  consequences = {
    positive = [
      "New agent type activation requires a single config entry; system prompt and model default ship with the capability",
      "AgentDefinition circular dependency eliminated at the Cargo level — cargo check catches any regression immediately",
      "In-process executor dispatch avoids NATS round-trip for capability-backed agents, reducing task latency",
      "reload_agents uses CapabilityRegistry to re-spawn built-in executors after hot-reload",
    ],
    negative = [
      "AgentDefinition in vapora-shared means vapora-shared now has awareness of capability/agent concepts — previously it was pure data types",
      "In-process dispatch bypasses NATS audit trail for capability tasks — task events are not published to JetStream for capability-dispatched work",
    ],
  },

  alternatives_considered = [
    {
      option = "Keep AgentDefinition in vapora-agents, use trait objects to break the cycle",
      why_rejected = "Trait-object indirection to break a circular dependency adds abstraction without adding value. Moving the struct is simpler, traceable, and requires fewer files.",
    },
    {
      option = "External NATS dispatch only — no in-process executor channel",
      why_rejected = "NATS dispatch requires a running NATS server. Capability built-ins need to function in environments without NATS (local dev, test). In-process dispatch with NATS graceful fallback is the correct model.",
    },
  ],

  # Machine-checkable constraints; must_be_empty = false means the
  # pattern is REQUIRED to match somewhere under the listed paths.
  constraints = [
    {
      id = "agent-definition-in-shared",
      claim = "AgentDefinition must live in vapora-shared, not vapora-agents::config — vapora-capabilities must not import vapora-agents",
      scope = "crates/vapora-shared/src/",
      severity = 'Hard,
      check = { tag = 'Grep, pattern = "AgentDefinition", paths = ["crates/vapora-shared/src/"], must_be_empty = false },
      rationale = "Moving AgentDefinition back to vapora-agents recreates the circular dependency that this ADR was written to prevent.",
    },
    {
      id = "dashmap-shard-released-before-await",
      claim = "DashMap shard guards must not be held across .await points in AgentCoordinator dispatch — clone the Sender before awaiting",
      scope = "crates/vapora-agents/src/coordinator.rs",
      severity = 'Hard,
      # Bare "|" alternation, consistent with the other ADR checks.
      check = { tag = 'Grep, pattern = "executor_channels.*get|sender.*clone", paths = ["crates/vapora-agents/src/coordinator.rs"], must_be_empty = false },
      rationale = "Holding a DashMap guard across .await deadlocks re-entrant shard access from the receiving executor.",
    },
  ],

  related_adrs = ["adr-009", "adr-005"],

  ontology_check = {
    decision_string = "vapora-capabilities crate; AgentDefinition relocated to vapora-shared; in-process executor dispatch via DashMap<String, Sender>; shard lock released before .await",
    invariants_at_risk = ["message-based-coordination"],
    verdict = 'Safe,
  },
}
|
||||
79
adrs/adr-015-merkle-audit-trail.ncl
Normal file
79
adrs/adr-015-merkle-audit-trail.ncl
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
let d = import "adr-defaults.ncl" in
|
||||
|
||||
d.make_adr {
|
||||
id = "adr-015",
|
||||
title = "Tamper-Evident Audit Trail — Merkle Hash Chain",
|
||||
status = 'Accepted,
|
||||
date = "2026-02-26",
|
||||
|
||||
context = "VAPORA's audit.rs stored workflow audit entries as a simple append-only log: seq, entry_id, timestamp, workflow_id, event_type, actor, details — no integrity metadata. Append-only prevents deletion (assuming no DELETE privilege) but does not prevent modification: an attacker with write access could UPDATE any row's event_type, actor, or details fields without leaving any detectable trace. Enterprise compliance frameworks (SOC 2, ISO 27001, HIPAA) require tamper-evident audit logs where post-hoc modification is provably detectable by the application, not just by database access logs.",
|
||||
|
||||
decision = "Replace the append-only audit log in vapora-backend/src/audit/mod.rs with a Merkle hash-chain. Each entry stores prev_hash (block_hash of the immediately preceding entry; GENESIS_HASH = 64 zeros for the first entry) and block_hash = SHA-256(prev_hash|seq|entry_id|timestamp_rfc3339|workflow_id|event_type|actor|details_json). write_lock: Arc<Mutex<()>> serializes all append calls within the process. verify_integrity(workflow_id) recomputes every block hash from stored fields and returns IntegrityReport{valid, total_entries, first_tampered_seq: Option<i64>}.",
|
||||
|
||||
rationale = [
|
||||
{
|
||||
claim = "Modification of any covered field in entry N propagates invalidation to all subsequent entries",
|
||||
detail = "Because prev_hash in entry N+1 commits to block_hash of entry N, modifying entry N changes its block_hash, which no longer matches prev_hash stored in N+1. The mismatch propagates: N+1's block_hash (which commits to its own prev_hash) is now also wrong, and so on through the chain. The attacker must recompute every subsequent hash to cover the modification — this is detectable because verify_integrity recomputes independently.",
|
||||
},
|
||||
{
|
||||
claim = "Process-level Mutex is sufficient for single-process VAPORA deployments",
|
||||
detail = "The write_lock serializes the read-prev-hash + append operation. A single-process backend cannot have two concurrent appends from different nodes. Multi-node deployments would require a distributed lock (e.g., SurrealDB UPDATE ... IF locked IS NONE CAS, as used by the autonomous scheduler). Single-process first; distributed lock deferred until multi-node deployment is active.",
|
||||
},
|
||||
{
|
||||
claim = "SHA-256 over explicit field concatenation is auditable without a key",
|
||||
detail = "HMAC would prevent external verification without the signing key. SHA-256 over deterministic field concatenation allows any party with read access to audit_entries to independently verify integrity. The field ordering in the hash input is fixed and documented — the hash function is the contract.",
|
||||
},
|
||||
],
|
||||
|
||||
consequences = {
|
||||
positive = [
|
||||
"Any modification to a covered field in any entry is detectable via verify_integrity",
|
||||
"verify_integrity returns first_tampered_seq — forensic analysis can pinpoint the modified entry",
|
||||
"No external service dependency — SHA-256 is in std (via sha2 crate), no KMS or HSM required",
|
||||
"Backward-compatible: legacy entries without prev_hash/block_hash are treated as genesis entries on first verify run",
|
||||
],
|
||||
negative = [
|
||||
"Truncation attack: an attacker who can DELETE the suffix of the chain after a modified entry can hide the modification — the chain appears valid up to the last entry",
|
||||
"write_lock is process-local: multi-node deployments with concurrent writes to audit_entries from different processes can produce an inconsistent chain",
|
||||
"No HMAC: an attacker who can recompute SHA-256 can fabricate a valid chain — hash-chain proves consistency, not authenticity",
|
||||
],
|
||||
},
|
||||
|
||||
alternatives_considered = [
|
||||
{
|
||||
option = "NATS JetStream append-only subject as audit log",
|
||||
why_rejected = "NATS JetStream provides message-level immutability but requires NATS to be running. audit.rs must function when NATS is unavailable (NATS is always optional in VAPORA). SurrealDB-backed chain is the correct choice for a SurrealDB-first platform.",
|
||||
},
|
||||
{
|
||||
option = "HMAC-signed entries with a per-tenant key",
|
||||
why_rejected = "HMAC prevents external verification without the key. Compliance use cases require that any authorized auditor can verify integrity without accessing application secrets. SHA-256 chain is verifiable by anyone with DB read access.",
|
||||
},
|
||||
],
|
||||
|
||||
constraints = [
|
||||
{
|
||||
id = "audit-entry-block-hash",
|
||||
claim = "Every audit entry must have prev_hash and block_hash fields; append must compute block_hash = SHA-256(prev_hash|seq|entry_id|timestamp_rfc3339|workflow_id|event_type|actor|details_json)",
|
||||
scope = "crates/vapora-backend/src/audit/mod.rs",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "block_hash\\|prev_hash\\|compute_block_hash", paths = ["crates/vapora-backend/src/audit/mod.rs"], must_be_empty = false },
|
||||
rationale = "Entries without block_hash are not tamper-evident; the audit trail guarantee is void.",
|
||||
},
|
||||
{
|
||||
id = "audit-write-serialized",
|
||||
claim = "All audit append calls must hold write_lock before reading prev_hash and writing the new entry",
|
||||
scope = "crates/vapora-backend/src/audit/mod.rs",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "write_lock\\|Mutex", paths = ["crates/vapora-backend/src/audit/mod.rs"], must_be_empty = false },
|
||||
rationale = "Concurrent appends without serialization produce a forked chain — two entries with the same prev_hash — which verify_integrity would report as tampered.",
|
||||
},
|
||||
],
|
||||
|
||||
related_adrs = ["adr-003", "adr-012"],
|
||||
|
||||
ontology_check = {
|
||||
decision_string = "SHA-256 Merkle hash-chain in audit/mod.rs; write_lock Arc<Mutex<()>> serializes appends; verify_integrity returns IntegrityReport; HMAC and NATS alternatives rejected",
|
||||
invariants_at_risk = [],
|
||||
verdict = 'Safe,
|
||||
},
|
||||
}
|
||||
91
adrs/adr-016-agent-hot-reload-stable-identity.ncl
Normal file
91
adrs/adr-016-agent-hot-reload-stable-identity.ncl
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
let d = import "adr-defaults.ncl" in
|
||||
|
||||
d.make_adr {
|
||||
id = "adr-016",
|
||||
title = "Agent Hot-Reload — Stable Role Identity and Zero-Downtime Config Reload",
|
||||
status = 'Accepted,
|
||||
date = "2026-03-02",
|
||||
|
||||
context = "AgentMetadata::id was Uuid::new_v4() generated at startup. learning_profiles in AgentCoordinator and agent_id in KGPersistence execution records used this UUID as their key. Every process restart or config reload rotated all UUIDs, orphaning all accumulated expertise profiles. An agent that had processed 500 tasks and learned optimal patterns for its role would reset to zero on the next deploy. VAPORA's learning-based agent selection (ADR-009) provides no value if learning state is ephemeral.",
|
||||
|
||||
decision = "Add stable_id: String to AgentMetadata, computed as role.clone() at construction before role is moved. Switch all learning_profiles keys and KG execution records from ephemeral id (UUID) to stable_id_or_role(). Implement hot-reload: drain_role(role) removes agents from registry + drops executor_channels Senders (channel closure causes executor loops to exit cleanly); reload_agents re-spawns capability and config agents; learning_profiles DashMap is untouched throughout. SIGHUP handler and POST /reload endpoint both call reload_agents.",
|
||||
|
||||
rationale = [
|
||||
{
|
||||
claim = "role is the correct stable identity granularity for learning profiles",
|
||||
detail = "AgentScoringService ranks agents at the role level: it receives Vec<(agent_id, Option<LearningProfile>)> where multiple agents of the same role compete. The profile that matters is role-level expertise (how well 'developer' handles 'coding' tasks), not per-instance expertise. Using role as stable_id aggregates learning across all instances of the same role, is deterministic across restarts, and requires no UUID→role mapping table.",
|
||||
},
|
||||
{
|
||||
claim = "Dropping the Sender is the correct shutdown signal for executor loops",
|
||||
detail = "Each executor runs while let Some(task) = rx.recv().await. When the Sender is dropped (by removing it from executor_channels), the channel closes and recv() returns None, causing the loop to exit. No explicit shutdown token or cancellation signal is needed. The loop drains buffered messages before exiting — in-flight tasks complete normally.",
|
||||
},
|
||||
{
|
||||
claim = "BudgetManager and LLMRouter are deliberately excluded from hot-reload scope",
|
||||
detail = "BudgetManager holds per-role budget state accumulated since last process start. Reloading it mid-flight would reset budget counters, potentially allowing over-budget LLM calls that were blocked before the reload. LLMRouter routing rules could change provider selection in ways incompatible with ongoing workflow stages. Both require process restart for config changes — this is a documented limitation, not an oversight.",
|
||||
},
|
||||
],
|
||||
|
||||
consequences = {
|
||||
positive = [
|
||||
"Learning expertise accumulated over any number of restarts and hot-reloads is preserved",
|
||||
"KG execution records are partitioned by role (stable_id) — historical records and new records share the same key space",
|
||||
"SIGHUP reload is sub-millisecond for the drain+re-spawn sequence; brief NoAvailableAgent window is documented for callers",
|
||||
"POST /reload enables operator-triggered and CI-triggered config updates without process restart",
|
||||
],
|
||||
negative = [
|
||||
"Brief availability window between drain and re-registration: assign_task returns NoAvailableAgent. Callers must implement retry.",
|
||||
"BudgetManager and LLMRouter config changes require process restart — hot-reload does not cover the full config surface.",
|
||||
"stable_id = role means two agents of the same role share learning history — per-instance specialization within a role is not supported.",
|
||||
],
|
||||
},
|
||||
|
||||
alternatives_considered = [
|
||||
{
|
||||
option = "Persist UUID→stable_id mapping in SurrealDB",
|
||||
why_rejected = "Adds a mapping table, a migration, and a read on every profile lookup. role is already available at construction time and is deterministic — no persistence needed.",
|
||||
},
|
||||
{
|
||||
option = "Include BudgetManager in hot-reload",
|
||||
why_rejected = "Resetting budget counters mid-flight allows LLM calls that were correctly blocked (budget exceeded) to proceed after reload. Budget state must be continuous across config changes.",
|
||||
},
|
||||
{
|
||||
option = "Use a shutdown token (CancellationToken) instead of Sender drop for executor cleanup",
|
||||
why_rejected = "CancellationToken requires propagation through all executor spawn sites and cooperative check points in the task loop. Channel closure is implicit and automatic — every recv() point is already a shutdown check point.",
|
||||
},
|
||||
],
|
||||
|
||||
constraints = [
|
||||
{
|
||||
id = "stable-id-is-role",
|
||||
claim = "AgentMetadata::stable_id must be set to role.clone() before role is moved at construction — no UUID, no random suffix",
|
||||
scope = "crates/vapora-agents/src/registry.rs",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "stable_id.*role\\|role.*clone.*stable", paths = ["crates/vapora-agents/src/registry.rs"], must_be_empty = false },
|
||||
rationale = "Any non-role value for stable_id defeats the learning profile persistence guarantee.",
|
||||
},
|
||||
{
|
||||
id = "profile-key-uses-stable-id",
|
||||
claim = "learning_profiles lookups and KG execution record agent_id must use stable_id_or_role(), never the ephemeral UUID id",
|
||||
scope = "crates/vapora-agents/src/coordinator.rs",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "stable_id_or_role", paths = ["crates/vapora-agents/src/coordinator.rs", "crates/vapora-agents/src/executor.rs"], must_be_empty = false },
|
||||
rationale = "Using UUID as the profile key is the original bug — learning profiles would be orphaned on every restart.",
|
||||
},
|
||||
{
|
||||
id = "learning-profiles-survive-drain",
|
||||
claim = "drain_role must not clear learning_profiles — only registry and executor_channels entries are removed",
|
||||
scope = "crates/vapora-agents/src/coordinator.rs",
|
||||
severity = 'Hard,
|
||||
check = { tag = 'Grep, pattern = "drain_role", paths = ["crates/vapora-agents/src/coordinator.rs"], must_be_empty = false },
|
||||
rationale = "Clearing learning_profiles in drain_role would silently reset the learning system on every hot-reload, defeating the purpose of this ADR.",
|
||||
},
|
||||
],
|
||||
|
||||
related_adrs = ["adr-009", "adr-014"],
|
||||
|
||||
ontology_check = {
|
||||
decision_string = "stable_id = role.clone() on AgentMetadata; profile keys + KG records use stable_id_or_role(); drain_role + re-spawn hot-reload; learning_profiles untouched; SIGHUP + POST /reload endpoints",
|
||||
invariants_at_risk = ["learning-based-selection"],
|
||||
verdict = 'Safe,
|
||||
},
|
||||
}
|
||||
51
adrs/adr-constraints.ncl
Normal file
51
adrs/adr-constraints.ncl
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
let _adr_id_format = std.contract.custom (
|
||||
fun label =>
|
||||
fun value =>
|
||||
if std.string.is_match "^adr-[0-9]{3}$" value then
|
||||
'Ok value
|
||||
else
|
||||
'Error {
|
||||
message = "ADR id must match 'adr-NNN' format (e.g. 'adr-001'), got: '%{value}'"
|
||||
}
|
||||
) in
|
||||
|
||||
let _non_empty_constraints = std.contract.custom (
|
||||
fun label =>
|
||||
fun value =>
|
||||
if std.array.length value == 0 then
|
||||
'Error {
|
||||
message = "constraints must not be empty — an ADR with no constraints is passive documentation, not an active constraint"
|
||||
}
|
||||
else
|
||||
'Ok value
|
||||
) in
|
||||
|
||||
let _non_empty_negative = std.contract.custom (
|
||||
fun label =>
|
||||
fun value =>
|
||||
if std.array.length value.negative == 0 then
|
||||
'Error {
|
||||
message = "consequences.negative must not be empty on id='%{value.id}' — an ADR with no negative consequences is incomplete"
|
||||
}
|
||||
else
|
||||
'Ok value
|
||||
) in
|
||||
|
||||
let _requires_justification = std.contract.custom (
|
||||
fun label =>
|
||||
fun value =>
|
||||
if value.ontology_check.verdict == 'RequiresJustification
|
||||
&& !(std.record.has_field "invariant_justification" value) then
|
||||
'Error {
|
||||
message = "ADR '%{value.id}': ontology_check.verdict = 'RequiresJustification but invariant_justification field is missing"
|
||||
}
|
||||
else
|
||||
'Ok value
|
||||
) in
|
||||
|
||||
{
|
||||
AdrIdFormat = _adr_id_format,
|
||||
NonEmptyConstraints = _non_empty_constraints,
|
||||
NonEmptyNegativeConsequences = _non_empty_negative,
|
||||
RequiresJustificationWhenRisky = _requires_justification,
|
||||
}
|
||||
16
adrs/adr-defaults.ncl
Normal file
16
adrs/adr-defaults.ncl
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
let s = import "adr-schema.ncl" in
|
||||
let c = import "adr-constraints.ncl" in
|
||||
|
||||
{
|
||||
# RequiresJustificationWhenRisky is a cross-field contract (reads both
|
||||
# ontology_check.verdict and invariant_justification) — applied here after
|
||||
# the schema merge so both fields are visible in the same record.
|
||||
make_adr = fun data =>
|
||||
let result | c.RequiresJustificationWhenRisky = s.Adr & data in
|
||||
result,
|
||||
make_constraint = fun data => s.Constraint & data,
|
||||
|
||||
Adr = s.Adr,
|
||||
Constraint = s.Constraint,
|
||||
OntologyCheck = s.OntologyCheck,
|
||||
}
|
||||
97
adrs/adr-schema.ncl
Normal file
97
adrs/adr-schema.ncl
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
let c = import "adr-constraints.ncl" in
|
||||
|
||||
let status_type = [| 'Proposed, 'Accepted, 'Superseded, 'Deprecated |] in
|
||||
let severity_type = [| 'Hard, 'Soft |] in
|
||||
let verdict_type = [| 'Safe, 'RequiresJustification |] in
|
||||
|
||||
let rationale_entry_type = {
|
||||
claim | String,
|
||||
detail | String,
|
||||
} in
|
||||
|
||||
let alternative_type = {
|
||||
option | String,
|
||||
why_rejected | String,
|
||||
} in
|
||||
|
||||
# Tag discriminant for typed constraint checks.
|
||||
let check_tag_type = [|
|
||||
'Cargo,
|
||||
'Grep,
|
||||
'NuCmd,
|
||||
'ApiCall,
|
||||
'FileExists,
|
||||
|] in
|
||||
|
||||
# Typed constraint check: a tagged record, JSON-serializable.
|
||||
# 'Cargo -> crate : String, forbidden_deps : Array String
|
||||
# 'Grep -> pattern : String, paths : Array String, must_be_empty : Bool
|
||||
# 'NuCmd -> cmd : String, expect_exit : Number
|
||||
# 'ApiCall -> endpoint : String, json_path : String, expected : Dyn
|
||||
# 'FileExists-> path : String, present : Bool
|
||||
let constraint_check_type = {
|
||||
tag | check_tag_type,
|
||||
..
|
||||
} in
|
||||
|
||||
let constraint_type = {
|
||||
id | String,
|
||||
claim | String,
|
||||
scope | String,
|
||||
severity | severity_type,
|
||||
# Transition period: one of check or check_hint must be present.
|
||||
# check_hint is deprecated — migrate existing ADRs to typed check variants.
|
||||
check_hint | String | optional,
|
||||
check | constraint_check_type | optional,
|
||||
rationale | String,
|
||||
} in
|
||||
|
||||
let ontology_check_type = {
|
||||
decision_string | String,
|
||||
invariants_at_risk | Array String,
|
||||
verdict | verdict_type,
|
||||
} in
|
||||
|
||||
let invariant_justification_type = {
|
||||
invariant | String,
|
||||
claim | String,
|
||||
mitigation | String,
|
||||
} in
|
||||
|
||||
let consequences_type = {
|
||||
positive | Array String,
|
||||
negative | Array String,
|
||||
} in
|
||||
|
||||
let adr_type = {
|
||||
id | String | c.AdrIdFormat,
|
||||
title | String,
|
||||
status | status_type,
|
||||
date | String,
|
||||
|
||||
context | String,
|
||||
decision | String,
|
||||
rationale | Array rationale_entry_type,
|
||||
consequences | consequences_type,
|
||||
alternatives_considered | Array alternative_type,
|
||||
|
||||
constraints | Array constraint_type | c.NonEmptyConstraints,
|
||||
ontology_check | ontology_check_type,
|
||||
|
||||
related_adrs | Array String | default = [],
|
||||
supersedes | String | optional,
|
||||
superseded_by | String | optional,
|
||||
invariant_justification | invariant_justification_type | optional,
|
||||
} in
|
||||
|
||||
{
|
||||
AdrStatus = status_type,
|
||||
Severity = severity_type,
|
||||
Verdict = verdict_type,
|
||||
Constraint = constraint_type,
|
||||
RationaleEntry = rationale_entry_type,
|
||||
Alternative = alternative_type,
|
||||
OntologyCheck = ontology_check_type,
|
||||
InvariantJustification = invariant_justification_type,
|
||||
Adr = adr_type,
|
||||
}
|
||||
1825
api-catalog.json
Normal file
1825
api-catalog.json
Normal file
File diff suppressed because it is too large
Load diff
200
assets/w-vapora.svg
Normal file
200
assets/w-vapora.svg
Normal file
|
|
@ -0,0 +1,200 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="170 40 590 300" width="100%" height="100%" preserveAspectRatio="xMidYMid meet">
|
||||
<defs>
|
||||
<!-- Google Fonts import -->
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@800&display=swap');
|
||||
</style>
|
||||
|
||||
<!-- Gradiente principal -->
|
||||
<linearGradient id="techGrad" x1="0%" y1="0%" x2="100%" y2="0%">
|
||||
<stop offset="0%" style="stop-color:#22d3ee;stop-opacity:1"/>
|
||||
<stop offset="50%" style="stop-color:#a855f7;stop-opacity:1"/>
|
||||
<stop offset="100%" style="stop-color:#ec4899;stop-opacity:1"/>
|
||||
</linearGradient>
|
||||
|
||||
<!-- Gradiente vertical -->
|
||||
<linearGradient id="vertGrad" x1="0%" y1="100%" x2="0%" y2="0%">
|
||||
<stop offset="0%" style="stop-color:#22d3ee;stop-opacity:1"/>
|
||||
<stop offset="50%" style="stop-color:#a855f7;stop-opacity:0.8"/>
|
||||
<stop offset="100%" style="stop-color:#ec4899;stop-opacity:0.4"/>
|
||||
</linearGradient>
|
||||
|
||||
<!-- Filtro glow tech -->
|
||||
<filter id="techGlow">
|
||||
<feGaussianBlur stdDeviation="2" result="coloredBlur"/>
|
||||
<feMerge>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
|
||||
<!-- Filtro glow fuerte -->
|
||||
<filter id="strongGlow">
|
||||
<feGaussianBlur stdDeviation="4" result="coloredBlur"/>
|
||||
<feMerge>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
|
||||
<!-- Filtro glass -->
|
||||
<filter id="glass">
|
||||
<feGaussianBlur in="SourceGraphic" stdDeviation="0.5" result="blur"/>
|
||||
<feColorMatrix in="blur" type="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7" result="goo"/>
|
||||
<feBlend in="SourceGraphic" in2="goo"/>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<!-- Fondo blanco -->
|
||||
<rect width="800" height="400" fill="#ffffff"/>
|
||||
|
||||
<!-- Grid de fondo tecnico sutil -->
|
||||
<g opacity="0.03" stroke="#94a3b8" stroke-width="0.5">
|
||||
<line x1="0" y1="133" x2="800" y2="133"/>
|
||||
<line x1="0" y1="200" x2="800" y2="200"/>
|
||||
<line x1="0" y1="267" x2="800" y2="267"/>
|
||||
<line x1="133" y1="0" x2="133" y2="400"/>
|
||||
<line x1="267" y1="0" x2="267" y2="400"/>
|
||||
<line x1="400" y1="0" x2="400" y2="400"/>
|
||||
<line x1="533" y1="0" x2="533" y2="400"/>
|
||||
<line x1="667" y1="0" x2="667" y2="400"/>
|
||||
</g>
|
||||
|
||||
<!-- Simbolo tecnico: flujo de datos ascendente -->
|
||||
<g transform="translate(267, 280)">
|
||||
<!-- Base: plataforma -->
|
||||
<rect x="-25" y="0" width="50" height="6.67" fill="url(#techGrad)" opacity="0.9" rx="3.33"/>
|
||||
|
||||
<!-- Stream principal -->
|
||||
<path d="M 0 0 L 0 -50 L 8.33 -58 L -8.33 -75 L 8.33 -92 L -8.33 -108 L 8.33 -125 L 0 -133 L 0 -200" stroke="url(#vertGrad)" stroke-width="5" fill="none" stroke-linecap="round" stroke-linejoin="round" filter="url(#techGlow)">
|
||||
<animate attributeName="stroke-dasharray" values="0,500;500,0;0,500" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0.8;1;0.8" dur="2s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream izquierdo 1 -->
|
||||
<path d="M -33 0 L -33 -42 L -30 -58 L -37 -75 L -30 -92 L -37 -108 L -33 -125 L -33 -167" stroke="#0891b2" stroke-width="3.33" fill="none" stroke-linecap="round" opacity="0.7" filter="url(#techGlow)">
|
||||
<animate attributeName="stroke-dasharray" values="0,417;417,0;0,417" dur="4.5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream izquierdo 2 -->
|
||||
<path d="M -58 0 L -58 -33 L -53 -50 L -63 -67 L -53 -83 L -63 -100 L -58 -117 L -58 -142" stroke="#7c3aed" stroke-width="2.5" fill="none" stroke-linecap="round" opacity="0.6">
|
||||
<animate attributeName="stroke-dasharray" values="0,333;333,0;0,333" dur="5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream derecho 1 -->
|
||||
<path d="M 33 0 L 33 -42 L 30 -58 L 37 -75 L 30 -92 L 37 -108 L 33 -125 L 33 -167" stroke="#db2777" stroke-width="3.33" fill="none" stroke-linecap="round" opacity="0.7" filter="url(#techGlow)">
|
||||
<animate attributeName="stroke-dasharray" values="0,417;417,0;0,417" dur="4.2s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream derecho 2 -->
|
||||
<path d="M 58 0 L 58 -33 L 53 -50 L 63 -67 L 53 -83 L 63 -100 L 58 -117 L 58 -142" stroke="#0891b2" stroke-width="2.5" fill="none" stroke-linecap="round" opacity="0.6">
|
||||
<animate attributeName="stroke-dasharray" values="0,333;333,0;0,333" dur="5.5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Nodos de datos -->
|
||||
<circle cx="0" cy="-67" r="5" fill="#0891b2" filter="url(#strongGlow)">
|
||||
<animate attributeName="cy" values="-67;-183;-67" dur="3s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="3s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="0" cy="-100" r="4.17" fill="#7c3aed" filter="url(#strongGlow)">
|
||||
<animate attributeName="cy" values="-100;-217;-100" dur="3.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="3.5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="0" cy="-133" r="3.33" fill="#db2777" filter="url(#strongGlow)">
|
||||
<animate attributeName="cy" values="-133;-233;-133" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="4s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<!-- Particulas laterales -->
|
||||
<circle cx="-33" cy="-83" r="3.33" fill="#0891b2" opacity="0.8">
|
||||
<animate attributeName="cy" values="-83;-175;-83" dur="3.8s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.8;0" dur="3.8s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="33" cy="-92" r="3.33" fill="#db2777" opacity="0.8">
|
||||
<animate attributeName="cy" values="-92;-175;-92" dur="4.2s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.8;0" dur="4.2s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="-58" cy="-58" r="2.5" fill="#7c3aed" opacity="0.6">
|
||||
<animate attributeName="cy" values="-58;-142;-58" dur="4.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="4.5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="58" cy="-67" r="2.5" fill="#0891b2" opacity="0.6">
|
||||
<animate attributeName="cy" values="-67;-142;-67" dur="5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<!-- Hexagonos tecnicos flotantes -->
|
||||
<polygon points="0,-158 5,-162 5,-167 0,-170 -5,-167 -5,-162" stroke="#0891b2" fill="none" stroke-width="1.67" opacity="0.7">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-50);translate(0,0)" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.7;0" dur="4s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<polygon points="-42,-117 -37,-120 -37,-125 -42,-128 -47,-125 -47,-120" stroke="#7c3aed" fill="none" stroke-width="1.67" opacity="0.6">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-42);translate(0,0)" dur="4.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="4.5s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<polygon points="42,-125 47,-128 47,-133 42,-137 37,-133 37,-128" stroke="#db2777" fill="none" stroke-width="1.67" opacity="0.6">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-33);translate(0,0)" dur="5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="5s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<!-- Lineas de conexion horizontales -->
|
||||
<line x1="-33" y1="-100" x2="-8" y2="-100" stroke="#0891b2" stroke-width="0.83" opacity="0.5">
|
||||
<animate attributeName="opacity" values="0;0.5;0" dur="2s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="8" y1="-117" x2="33" y2="-117" stroke="#db2777" stroke-width="0.83" opacity="0.5">
|
||||
<animate attributeName="opacity" values="0;0.5;0" dur="2.5s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="-58" y1="-83" x2="-37" y2="-83" stroke="#7c3aed" stroke-width="0.83" opacity="0.4">
|
||||
<animate attributeName="opacity" values="0;0.4;0" dur="3s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="37" y1="-92" x2="58" y2="-92" stroke="#0891b2" stroke-width="0.83" opacity="0.4">
|
||||
<animate attributeName="opacity" values="0;0.4;0" dur="3.5s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
</g>
|
||||
|
||||
<!-- Texto VAPORA -->
|
||||
<g filter="url(#glass)">
|
||||
<text x="550" y="207" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="90" font-weight="800" fill="url(#techGrad)" letter-spacing="5" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
|
||||
<text x="550" y="207" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="90" font-weight="800" fill="none" stroke="rgba(0,0,0,0.08)" stroke-width="0.83" letter-spacing="5" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
</g>
|
||||
|
||||
<!-- Glow en texto -->
|
||||
<text x="550" y="207" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="90" font-weight="800" fill="url(#techGrad)" letter-spacing="5" filter="url(#techGlow)" opacity="0.2" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
|
||||
<!-- Tagline -->
|
||||
<text x="550" y="240" font-family="'Inter', sans-serif" font-size="20" fill="#7c3aed" opacity="0.9" letter-spacing="0.25em" text-anchor="middle">
|
||||
Evaporate complexity
|
||||
</text>
|
||||
|
||||
<!-- Indicador tecnico decorativo -->
|
||||
<g transform="translate(550, 280)">
|
||||
<rect x="0" y="0" width="2" height="13.33" fill="#0891b2" opacity="0.7">
|
||||
<animate attributeName="height" values="13.33;20;13.33" dur="1.5s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
<rect x="6.67" y="0" width="2" height="16.67" fill="#7c3aed" opacity="0.7">
|
||||
<animate attributeName="height" values="16.67;23.33;16.67" dur="1.8s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
<rect x="13.33" y="0" width="2" height="10" fill="#db2777" opacity="0.7">
|
||||
<animate attributeName="height" values="10;16.67;10" dur="1.3s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
</g>
|
||||
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 9.9 KiB |
197
assets/w-vapora_v.svg
Normal file
197
assets/w-vapora_v.svg
Normal file
|
|
@ -0,0 +1,197 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 150 200" width="100%" height="100%" preserveAspectRatio="xMidYMid meet">
|
||||
<defs>
|
||||
<!-- Google Fonts import -->
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@800&display=swap');
|
||||
</style>
|
||||
<!-- Gradiente principal -->
|
||||
<linearGradient id="techGradV" x1="0%" y1="0%" x2="100%" y2="0%">
|
||||
<stop offset="0%" style="stop-color:#22d3ee;stop-opacity:1"/>
|
||||
<stop offset="50%" style="stop-color:#a855f7;stop-opacity:1"/>
|
||||
<stop offset="100%" style="stop-color:#ec4899;stop-opacity:1"/>
|
||||
</linearGradient>
|
||||
|
||||
<!-- Gradiente vertical -->
|
||||
<linearGradient id="vertGradV" x1="0%" y1="100%" x2="0%" y2="0%">
|
||||
<stop offset="0%" style="stop-color:#22d3ee;stop-opacity:1"/>
|
||||
<stop offset="50%" style="stop-color:#a855f7;stop-opacity:0.8"/>
|
||||
<stop offset="100%" style="stop-color:#ec4899;stop-opacity:0.4"/>
|
||||
</linearGradient>
|
||||
|
||||
<!-- Filtro glow tech -->
|
||||
<filter id="techGlowV">
|
||||
<feGaussianBlur stdDeviation="2" result="coloredBlur"/>
|
||||
<feMerge>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
|
||||
<!-- Filtro glow fuerte -->
|
||||
<filter id="strongGlowV">
|
||||
<feGaussianBlur stdDeviation="4" result="coloredBlur"/>
|
||||
<feMerge>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
|
||||
<!-- Filtro glass -->
|
||||
<filter id="glassV">
|
||||
<feGaussianBlur in="SourceGraphic" stdDeviation="0.5" result="blur"/>
|
||||
<feColorMatrix in="blur" type="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7" result="goo"/>
|
||||
<feBlend in="SourceGraphic" in2="goo"/>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<!-- Fondo blanco -->
|
||||
<rect width="150" height="200" fill="#ffffff"/>
|
||||
|
||||
<!-- Grid de fondo apenas perceptible -->
|
||||
<g opacity="0.03" stroke="#94a3b8" stroke-width="0.5">
|
||||
<line x1="0" y1="50" x2="150" y2="50"/>
|
||||
<line x1="0" y1="100" x2="150" y2="100"/>
|
||||
<line x1="0" y1="150" x2="150" y2="150"/>
|
||||
<line x1="37" y1="0" x2="37" y2="200"/>
|
||||
<line x1="75" y1="0" x2="75" y2="200"/>
|
||||
<line x1="112" y1="0" x2="112" y2="200"/>
|
||||
</g>
|
||||
|
||||
<!-- Flujo de datos ascendente -->
|
||||
<g transform="translate(75, 90) scale(0.33)">
|
||||
<!-- Base: plataforma -->
|
||||
<rect x="-25" y="0" width="50" height="6.67" fill="url(#techGradV)" opacity="0.9" rx="3.33"/>
|
||||
|
||||
<!-- Stream principal -->
|
||||
<path d="M 0 0 L 0 -50 L 8.33 -58 L -8.33 -75 L 8.33 -92 L -8.33 -108 L 8.33 -125 L 0 -133 L 0 -200" stroke="url(#vertGradV)" stroke-width="5" fill="none" stroke-linecap="round" stroke-linejoin="round" filter="url(#techGlowV)">
|
||||
<animate attributeName="stroke-dasharray" values="0,500;500,0;0,500" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0.8;1;0.8" dur="2s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream izquierdo 1 -->
|
||||
<path d="M -33 0 L -33 -42 L -30 -58 L -37 -75 L -30 -92 L -37 -108 L -33 -125 L -33 -167" stroke="#0891b2" stroke-width="3.33" fill="none" stroke-linecap="round" opacity="0.7" filter="url(#techGlowV)">
|
||||
<animate attributeName="stroke-dasharray" values="0,417;417,0;0,417" dur="4.5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream izquierdo 2 -->
|
||||
<path d="M -58 0 L -58 -33 L -53 -50 L -63 -67 L -53 -83 L -63 -100 L -58 -117 L -58 -142" stroke="#7c3aed" stroke-width="2.5" fill="none" stroke-linecap="round" opacity="0.6">
|
||||
<animate attributeName="stroke-dasharray" values="0,333;333,0;0,333" dur="5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream derecho 1 -->
|
||||
<path d="M 33 0 L 33 -42 L 30 -58 L 37 -75 L 30 -92 L 37 -108 L 33 -125 L 33 -167" stroke="#db2777" stroke-width="3.33" fill="none" stroke-linecap="round" opacity="0.7" filter="url(#techGlowV)">
|
||||
<animate attributeName="stroke-dasharray" values="0,417;417,0;0,417" dur="4.2s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream derecho 2 -->
|
||||
<path d="M 58 0 L 58 -33 L 53 -50 L 63 -67 L 53 -83 L 63 -100 L 58 -117 L 58 -142" stroke="#0891b2" stroke-width="2.5" fill="none" stroke-linecap="round" opacity="0.6">
|
||||
<animate attributeName="stroke-dasharray" values="0,333;333,0;0,333" dur="5.5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Nodos de datos -->
|
||||
<circle cx="0" cy="-67" r="5" fill="#0891b2" filter="url(#strongGlowV)">
|
||||
<animate attributeName="cy" values="-67;-183;-67" dur="3s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="3s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="0" cy="-100" r="4.17" fill="#7c3aed" filter="url(#strongGlowV)">
|
||||
<animate attributeName="cy" values="-100;-217;-100" dur="3.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="3.5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="0" cy="-133" r="3.33" fill="#db2777" filter="url(#strongGlowV)">
|
||||
<animate attributeName="cy" values="-133;-233;-133" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="4s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<!-- Particulas laterales -->
|
||||
<circle cx="-33" cy="-83" r="3.33" fill="#0891b2" opacity="0.8">
|
||||
<animate attributeName="cy" values="-83;-175;-83" dur="3.8s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.8;0" dur="3.8s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="33" cy="-92" r="3.33" fill="#db2777" opacity="0.8">
|
||||
<animate attributeName="cy" values="-92;-175;-92" dur="4.2s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.8;0" dur="4.2s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="-58" cy="-58" r="2.5" fill="#7c3aed" opacity="0.6">
|
||||
<animate attributeName="cy" values="-58;-142;-58" dur="4.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="4.5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="58" cy="-67" r="2.5" fill="#0891b2" opacity="0.6">
|
||||
<animate attributeName="cy" values="-67;-142;-67" dur="5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<!-- Hexagonos flotantes -->
|
||||
<polygon points="0,-158 5,-162 5,-167 0,-170 -5,-167 -5,-162" stroke="#0891b2" fill="none" stroke-width="1.67" opacity="0.7">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-50);translate(0,0)" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.7;0" dur="4s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<polygon points="-42,-117 -37,-120 -37,-125 -42,-128 -47,-125 -47,-120" stroke="#7c3aed" fill="none" stroke-width="1.67" opacity="0.6">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-42);translate(0,0)" dur="4.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="4.5s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<polygon points="42,-125 47,-128 47,-133 42,-137 37,-133 37,-128" stroke="#db2777" fill="none" stroke-width="1.67" opacity="0.6">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-33);translate(0,0)" dur="5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="5s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<!-- Lineas de conexion horizontales -->
|
||||
<line x1="-33" y1="-100" x2="-8" y2="-100" stroke="#0891b2" stroke-width="0.83" opacity="0.5">
|
||||
<animate attributeName="opacity" values="0;0.5;0" dur="2s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="8" y1="-117" x2="33" y2="-117" stroke="#db2777" stroke-width="0.83" opacity="0.5">
|
||||
<animate attributeName="opacity" values="0;0.5;0" dur="2.5s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="-58" y1="-83" x2="-37" y2="-83" stroke="#7c3aed" stroke-width="0.83" opacity="0.4">
|
||||
<animate attributeName="opacity" values="0;0.4;0" dur="3s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="37" y1="-92" x2="58" y2="-92" stroke="#0891b2" stroke-width="0.83" opacity="0.4">
|
||||
<animate attributeName="opacity" values="0;0.4;0" dur="3.5s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
</g>
|
||||
|
||||
<!-- Texto VAPORA -->
|
||||
<g filter="url(#glassV)">
|
||||
<text x="75" y="135" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="32" font-weight="800" fill="url(#techGradV)" letter-spacing="2" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
|
||||
<text x="75" y="135" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="32" font-weight="800" fill="none" stroke="rgba(0,0,0,0.08)" stroke-width="0.3" letter-spacing="2" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
</g>
|
||||
|
||||
<!-- Glow en texto -->
|
||||
<text x="75" y="135" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="32" font-weight="800" fill="url(#techGradV)" letter-spacing="2" filter="url(#techGlowV)" opacity="0.2" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
|
||||
<!-- Tagline -->
|
||||
<text x="75" y="155" font-family="'Inter', sans-serif" font-size="8" fill="#7c3aed" opacity="0.9" letter-spacing="0.1em" text-anchor="middle">
|
||||
Evaporate complexity
|
||||
</text>
|
||||
|
||||
<!-- Indicador tecnico decorativo -->
|
||||
<g transform="translate(75, 170)">
|
||||
<rect x="0" y="0" width="1.5" height="10" fill="#0891b2" opacity="0.7">
|
||||
<animate attributeName="height" values="10;15;10" dur="1.5s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
<rect x="4" y="0" width="1.5" height="12" fill="#7c3aed" opacity="0.7">
|
||||
<animate attributeName="height" values="12;17;12" dur="1.8s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
<rect x="8" y="0" width="1.5" height="8" fill="#db2777" opacity="0.7">
|
||||
<animate attributeName="height" values="8;13;8" dur="1.3s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
</g>
|
||||
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 9.7 KiB |
|
|
@ -6,6 +6,7 @@
|
|||
<title
|
||||
data-en="Vapora - Intelligent Development Orchestration"
|
||||
data-es="Vapora - Orquestación Inteligente de Desarrollo"
|
||||
data-key="vapora-page-title"
|
||||
>
|
||||
Vapora
|
||||
</title>
|
||||
|
|
@ -510,14 +511,15 @@
|
|||
"
|
||||
data-en="🏗️ ARCHITECTURE"
|
||||
data-es="🏗️ ARQUITECTURA"
|
||||
data-key="vapora-nav-architecture"
|
||||
>🏗️ ARCHITECTURE</a
|
||||
>
|
||||
</div>
|
||||
|
||||
<div class="container">
|
||||
<header>
|
||||
<span class="status-badge" data-en="✅ v1.2.0 | 620 Tests | 100% Pass Rate" data-es="✅ v1.2.0 | 620 Tests | 100% Éxito"
|
||||
>✅ v1.2.0 | 620 Tests | 100% Pass Rate</span
|
||||
<span class="status-badge" data-en="✅ v1.2.0 | 691 Tests | 100% Pass Rate" data-es="✅ v1.2.0 | 691 Tests | 100% Éxito" data-key="vapora-header-badge"
|
||||
>✅ v1.2.0 | 691 Tests | 100% Pass Rate</span
|
||||
>
|
||||
<div class="logo-container">
|
||||
<img id="logo-dark" src="/vapora.svg" alt="Vapora - Development Orchestration" style="display: block;" />
|
||||
|
|
@ -527,6 +529,7 @@
|
|||
<h1
|
||||
data-en="Development Flows<br>When Teams and AI Orchestrate"
|
||||
data-es="El Desarrollo Fluye<br>Cuando los Equipos y la IA Orquestan"
|
||||
data-key="vapora-hero-title"
|
||||
>
|
||||
Development Flows
|
||||
</h1>
|
||||
|
|
@ -535,16 +538,18 @@
|
|||
class="highlight"
|
||||
data-en="Specialized agents"
|
||||
data-es="Agentes especializados"
|
||||
data-key="vapora-hero-subtitle-highlight"
|
||||
>Specialized agents</span
|
||||
>
|
||||
<span
|
||||
data-en=" orchestrate pipelines for design, implementation, testing, documentation and deployment. Agents learn from history and optimize costs automatically."
|
||||
data-es=" que orquestan pipelines para diseño, implementación, testing, documentación y deployment. Los agentes aprenden del historial y optimizan costos automáticamente."
|
||||
data-key="vapora-hero-subtitle-body"
|
||||
>orchestrate pipelines for design, implementation, testing,
|
||||
documentation and deployment. Agents learn from history and optimize
|
||||
costs automatically.
|
||||
</span>
|
||||
<br><span><strong data-en="100% self-hosted." data-es="100% self-hosted."
|
||||
<br><span><strong data-en="100% self-hosted." data-es="100% self-hosted." data-key="vapora-hero-self-hosted"
|
||||
>100% self-hosted.</strong
|
||||
>
|
||||
</span>
|
||||
|
|
@ -556,18 +561,20 @@
|
|||
<span
|
||||
data-en="The 4 Problems It Solves"
|
||||
data-es="Los 4 Problemas que Resuelve"
|
||||
data-key="vapora-problems-title"
|
||||
>The 4 Problems It Solves</span
|
||||
>
|
||||
</h2>
|
||||
<div class="problems-grid">
|
||||
<div class="problem-card">
|
||||
<div class="problem-number">01</div>
|
||||
<h3 data-en="Context Switching" data-es="Cambio de Contexto">
|
||||
<h3 data-en="Context Switching" data-es="Cambio de Contexto" data-key="vapora-problem-1-title">
|
||||
Context Switching
|
||||
</h3>
|
||||
<p
|
||||
data-en="Developers jump between tools constantly. Vapora unifies everything in one intelligent system where context flows."
|
||||
data-es="Los developers saltan constantemente entre herramientas. Vapora unifica todo en un sistema inteligente donde el contexto fluye."
|
||||
data-key="vapora-problem-1-desc"
|
||||
>
|
||||
Developers jump between tools constantly. Vapora unifies
|
||||
everything in one intelligent system where context flows.
|
||||
|
|
@ -578,24 +585,27 @@
|
|||
<h3
|
||||
data-en="Knowledge Fragmentation"
|
||||
data-es="Fragmentación de Conocimiento"
|
||||
data-key="vapora-problem-2-title"
|
||||
>
|
||||
Knowledge Fragmentation
|
||||
</h3>
|
||||
<p
|
||||
data-en="Decisions lost in threads, code scattered, docs unmaintained. RLM (Recursive Language Models) with hybrid search (BM25 + semantic) and chunking makes knowledge discoverable even in 100k+ token documents."
|
||||
data-es="Decisiones perdidas en threads, código disperso, docs desactualizadas. RLM (Recursive Language Models) con búsqueda híbrida (BM25 + semántica) y chunking hace el conocimiento visible incluso en documentos de 100k+ tokens."
|
||||
data-key="vapora-problem-2-desc"
|
||||
>
|
||||
Decisions lost in threads, code scattered, docs unmaintained. RLM (Recursive Language Models) with hybrid search (BM25 + semantic) and chunking makes knowledge discoverable even in 100k+ token documents.
|
||||
</p>
|
||||
</div>
|
||||
<div class="problem-card">
|
||||
<div class="problem-number">03</div>
|
||||
<h3 data-en="Manual Coordination" data-es="Coordinación Manual">
|
||||
<h3 data-en="Manual Coordination" data-es="Coordinación Manual" data-key="vapora-problem-3-title">
|
||||
Manual Coordination
|
||||
</h3>
|
||||
<p
|
||||
data-en="Orchestrating code review, testing, documentation and deployment manually creates bottlenecks. Multi-agent workflows solve this."
|
||||
data-es="Orquestar manualmente code review, testing, documentación y deployment crea cuellos. Los workflows multi-agente lo resuelven."
|
||||
data-key="vapora-problem-3-desc"
|
||||
>
|
||||
Orchestrating code review, testing, documentation and deployment
|
||||
manually creates bottlenecks. Multi-agent workflows solve this.
|
||||
|
|
@ -603,12 +613,13 @@
|
|||
</div>
|
||||
<div class="problem-card">
|
||||
<div class="problem-number">04</div>
|
||||
<h3 data-en="Dev-Ops Friction" data-es="Fricción Dev-Ops">
|
||||
<h3 data-en="Dev-Ops Friction" data-es="Fricción Dev-Ops" data-key="vapora-problem-4-title">
|
||||
Dev-Ops Friction
|
||||
</h3>
|
||||
<p
|
||||
data-en="Handoffs between developers and operations lack visibility and context. Vapora maintains unified deployment readiness."
|
||||
data-es="Los handoffs entre developers y operaciones carecen de visibilidad y contexto. Vapora mantiene unificada la deployment readiness."
|
||||
data-key="vapora-problem-4-desc"
|
||||
>
|
||||
Handoffs between developers and operations lack visibility and
|
||||
context. Vapora maintains unified deployment readiness.
|
||||
|
|
@ -620,6 +631,7 @@
|
|||
<section class="section">
|
||||
<h2 class="section-title">
|
||||
<span data-en="How It Works" data-es="Cómo Funciona"
|
||||
data-key="vapora-features-title"
|
||||
>How It Works</span
|
||||
>
|
||||
</h2>
|
||||
|
|
@ -630,15 +642,17 @@
|
|||
class="feature-title"
|
||||
data-en="Specialized Agents"
|
||||
data-es="Agentes Especializados"
|
||||
data-key="vapora-feature-1-title"
|
||||
>
|
||||
Specialized Agents
|
||||
</h3>
|
||||
<p
|
||||
class="feature-text"
|
||||
data-en="71 tests verify agent orchestration, learning profiles, and task assignment. Agents track expertise per task type with 7-day recency bias (3× weight). Real SurrealDB persistence + NATS coordination."
|
||||
data-es="71 tests verifican orquestación de agentes, perfiles de aprendizaje y asignación de tareas. Agentes rastrean expertise por tipo de tarea con sesgo de recencia de 7 días (peso 3×). Persistencia real SurrealDB + coordinación NATS."
|
||||
data-en="82 tests verify agent orchestration, learning profiles, and task assignment. stable_id = role ensures learning expertise survives restarts and hot-reloads. SIGHUP + POST /reload for zero-downtime config updates. Agents track expertise per task type with 7-day recency bias (3× weight)."
|
||||
data-es="82 tests verifican orquestación de agentes, perfiles de aprendizaje y asignación de tareas. stable_id = role garantiza que la expertise sobrevive reinicios y hot-reloads. SIGHUP + POST /reload para actualizaciones de configuración sin downtime. Agentes rastrean expertise por tipo de tarea con sesgo de recencia de 7 días (peso 3×)."
|
||||
data-key="vapora-feature-1-desc"
|
||||
>
|
||||
71 tests verify agent orchestration, learning profiles, and task assignment. Agents track expertise per task type with 7-day recency bias (3× weight). Real SurrealDB persistence + NATS coordination.
|
||||
82 tests verify agent orchestration, learning profiles, and task assignment. stable_id = role ensures learning expertise survives restarts and hot-reloads. SIGHUP + POST /reload for zero-downtime config updates. Agents track expertise per task type with 7-day recency bias (3× weight).
|
||||
</p>
|
||||
</div>
|
||||
<div class="feature-box" style="border-left-color: #a855f7">
|
||||
|
|
@ -648,6 +662,7 @@
|
|||
style="color: #a855f7"
|
||||
data-en="Intelligent Orchestration"
|
||||
data-es="Orquestación Inteligente"
|
||||
data-key="vapora-feature-2-title"
|
||||
>
|
||||
Intelligent Orchestration
|
||||
</h3>
|
||||
|
|
@ -655,6 +670,7 @@
|
|||
class="feature-text"
|
||||
data-en="53 tests verify multi-provider routing (Claude, OpenAI, Gemini, Ollama), per-role budget limits, cost tracking, and automatic fallback chains. Swarm coordination with load-balanced assignment using success_rate / (1 + load) formula."
|
||||
data-es="53 tests verifican routing multi-proveedor (Claude, OpenAI, Gemini, Ollama), límites de presupuesto por rol, tracking de costos y cadenas automáticas de fallback. Coordinación swarm con asignación balanceada usando fórmula success_rate / (1 + load)."
|
||||
data-key="vapora-feature-2-desc"
|
||||
>
|
||||
53 tests verify multi-provider routing (Claude, OpenAI, Gemini, Ollama), per-role budget limits, cost tracking, and automatic fallback chains. Swarm coordination with load-balanced assignment using success_rate / (1 + load) formula.
|
||||
</p>
|
||||
|
|
@ -666,6 +682,7 @@
|
|||
style="color: #ec4899"
|
||||
data-en="Recursive Language Models (RLM)"
|
||||
data-es="Recursive Language Models (RLM)"
|
||||
data-key="vapora-feature-3-title"
|
||||
>
|
||||
Recursive Language Models (RLM)
|
||||
</h3>
|
||||
|
|
@ -673,6 +690,7 @@
|
|||
class="feature-text"
|
||||
data-en="Process 100k+ token documents without context limits. Hybrid search combines BM25 (keywords) + semantic embeddings via RRF fusion. Intelligent chunking (Fixed/Semantic/Code) with SurrealDB persistence. Perfect for large codebases and documentation."
|
||||
data-es="Procesa documentos de 100k+ tokens sin límites de contexto. Búsqueda híbrida combina BM25 (keywords) + embeddings semánticos via fusión RRF. Chunking inteligente (Fixed/Semantic/Code) con persistencia SurrealDB. Perfecto para grandes codebases y documentación."
|
||||
data-key="vapora-feature-3-desc"
|
||||
>
|
||||
Process 100k+ token documents without context limits. Hybrid search combines BM25 (keywords) + semantic embeddings via RRF fusion. Intelligent chunking (Fixed/Semantic/Code) with SurrealDB persistence. Perfect for large codebases and documentation.
|
||||
</p>
|
||||
|
|
@ -684,6 +702,7 @@
|
|||
style="color: #f59e0b"
|
||||
data-en="Agent-to-Agent (A2A) Protocol"
|
||||
data-es="Protocolo Agent-to-Agent (A2A)"
|
||||
data-key="vapora-feature-4-title"
|
||||
>
|
||||
Agent-to-Agent (A2A) Protocol
|
||||
</h3>
|
||||
|
|
@ -691,6 +710,7 @@
|
|||
class="feature-text"
|
||||
data-en="Distributed agent coordination with task dispatch, status tracking, and result collection. Real SurrealDB persistence (no in-memory HashMap). NATS messaging for async completion. Exponential backoff retry with circuit breaker. 12 integration tests verify real behavior."
|
||||
data-es="Coordinación distribuida de agentes con despacho de tareas, seguimiento de estado y recolección de resultados. Persistencia real SurrealDB (sin HashMap en memoria). Mensajería NATS para completado asíncrono. Reintento con backoff exponencial y circuit breaker. 12 tests de integración verifican comportamiento real."
|
||||
data-key="vapora-feature-4-desc"
|
||||
>
|
||||
Distributed agent coordination with task dispatch, status tracking, and result collection. Real SurrealDB persistence (no in-memory HashMap). NATS messaging for async completion. Exponential backoff retry with circuit breaker. 12 integration tests verify real behavior.
|
||||
</p>
|
||||
|
|
@ -702,15 +722,17 @@
|
|||
style="color: #10b981"
|
||||
data-en="Knowledge Graph"
|
||||
data-es="Knowledge Graph"
|
||||
data-key="vapora-feature-5-title"
|
||||
>
|
||||
Knowledge Graph
|
||||
</h3>
|
||||
<p
|
||||
class="feature-text"
|
||||
data-en="Temporal execution history with causal relationships. Learning curves from daily windowed aggregations. Similarity search recommends solutions from past tasks. 20 tests verify graph persistence, learning profiles, and execution tracking."
|
||||
data-es="Historial de ejecución temporal con relaciones causales. Curvas de aprendizaje desde agregaciones diarias con ventana. Búsqueda de similitud recomienda soluciones de tareas pasadas. 20 tests verifican persistencia de grafo, perfiles de aprendizaje y tracking de ejecuciones."
|
||||
data-en="Temporal execution history with causal relationships. Hybrid search: HNSW (SurrealDB 3 native ANN) + BM25 full-text fused via Reciprocal Rank Fusion (k=60) — exact keyword matches and semantic proximity combined. Learning curves from daily windowed aggregations. 28 tests verify graph persistence, hybrid retrieval, and execution tracking."
|
||||
data-es="Historial de ejecución temporal con relaciones causales. Búsqueda híbrida: HNSW (ANN nativo SurrealDB 3) + BM25 texto completo fusionados via Reciprocal Rank Fusion (k=60) — coincidencias exactas y proximidad semántica combinadas. Curvas de aprendizaje desde agregaciones diarias. 28 tests verifican persistencia, recuperación híbrida y tracking."
|
||||
data-key="vapora-feature-5-desc"
|
||||
>
|
||||
Temporal execution history with causal relationships. Learning curves from daily windowed aggregations. Similarity search recommends solutions from past tasks. 20 tests verify graph persistence, learning profiles, and execution tracking.
|
||||
Temporal execution history with causal relationships. Hybrid search: HNSW (SurrealDB 3 native ANN) + BM25 full-text fused via Reciprocal Rank Fusion (k=60) — exact keyword matches and semantic proximity combined. Learning curves from daily windowed aggregations. 28 tests verify graph persistence, hybrid retrieval, and execution tracking.
|
||||
</p>
|
||||
</div>
|
||||
<div class="feature-box" style="border-left-color: #8b5cf6">
|
||||
|
|
@ -720,6 +742,7 @@
|
|||
style="color: #8b5cf6"
|
||||
data-en="NATS JetStream"
|
||||
data-es="NATS JetStream"
|
||||
data-key="vapora-feature-6-title"
|
||||
>
|
||||
NATS JetStream
|
||||
</h3>
|
||||
|
|
@ -727,6 +750,7 @@
|
|||
class="feature-text"
|
||||
data-en="Reliable message delivery for agent coordination. JetStream streams for workflow events, task completion, and status updates. Graceful fallback when NATS unavailable. Background subscribers with DashMap for async result delivery."
|
||||
data-es="Entrega confiable de mensajes para coordinación de agentes. Streams JetStream para eventos de workflow, completado de tareas y actualizaciones de estado. Fallback graceful cuando NATS no disponible. Suscriptores en background con DashMap para entrega asíncrona de resultados."
|
||||
data-key="vapora-feature-6-desc"
|
||||
>
|
||||
Reliable message delivery for agent coordination. JetStream streams for workflow events, task completion, and status updates. Graceful fallback when NATS unavailable. Background subscribers with DashMap for async result delivery.
|
||||
</p>
|
||||
|
|
@ -738,6 +762,7 @@
|
|||
style="color: #06b6d4"
|
||||
data-en="SurrealDB"
|
||||
data-es="SurrealDB"
|
||||
data-key="vapora-feature-7-title"
|
||||
>
|
||||
SurrealDB
|
||||
</h3>
|
||||
|
|
@ -745,6 +770,7 @@
|
|||
class="feature-text"
|
||||
data-en="Multi-model database with graph capabilities. Multi-tenant scopes for workspace isolation. Native graph relations for Knowledge Graph. All queries use parameterized bindings for security. SCHEMAFULL tables with explicit indexes."
|
||||
data-es="Base de datos multi-modelo con capacidades de grafo. Scopes multi-tenant para aislamiento de workspace. Relaciones de grafo nativas para Knowledge Graph. Todas las queries usan bindings parametrizados por seguridad. Tablas SCHEMAFULL con índices explícitos."
|
||||
data-key="vapora-feature-7-desc"
|
||||
>
|
||||
Multi-model database with graph capabilities. Multi-tenant scopes for workspace isolation. Native graph relations for Knowledge Graph. All queries use parameterized bindings for security. SCHEMAFULL tables with explicit indexes.
|
||||
</p>
|
||||
|
|
@ -756,6 +782,7 @@
|
|||
style="color: #14b8a6"
|
||||
data-en="Backend API & MCP Connectors"
|
||||
data-es="Backend API y Conectores MCP"
|
||||
data-key="vapora-feature-8-title"
|
||||
>
|
||||
Backend API & MCP Connectors
|
||||
</h3>
|
||||
|
|
@ -763,6 +790,7 @@
|
|||
class="feature-text"
|
||||
data-en="40+ REST endpoints (projects, tasks, agents, workflows, swarm). WebSocket real-time updates. MCP gateway for external tool integration and plugin system. Multi-tenant SurrealDB scopes. Prometheus metrics at /metrics. 161 tests verify API correctness."
|
||||
data-es="40+ endpoints REST (proyectos, tareas, agentes, workflows, swarm). Actualizaciones en tiempo real vía WebSocket. Gateway MCP para integración de herramientas externas y sistema de plugins. Scopes multi-tenant de SurrealDB. Métricas Prometheus en /metrics. 161 tests verifican corrección de API."
|
||||
data-key="vapora-feature-8-desc"
|
||||
>
|
||||
40+ REST endpoints (projects, tasks, agents, workflows, swarm). WebSocket real-time updates. MCP gateway for external tool integration and plugin system. Multi-tenant SurrealDB scopes. Prometheus metrics at /metrics. 161 tests verify API correctness.
|
||||
</p>
|
||||
|
|
@ -774,6 +802,7 @@
|
|||
style="color: #22d3ee"
|
||||
data-en="Cloud-Native & Self-Hosted"
|
||||
data-es="Cloud-Native y Self-Hosted"
|
||||
data-key="vapora-feature-9-title"
|
||||
>
|
||||
Cloud-Native & Self-Hosted
|
||||
</h3>
|
||||
|
|
@ -781,6 +810,7 @@
|
|||
class="feature-text"
|
||||
data-en="161 backend tests + K8s manifests with Kustomize overlays. Health checks, Prometheus metrics (/metrics endpoint), StatefulSets with anti-affinity. Local Docker Compose for development. Zero vendor lock-in."
|
||||
data-es="161 tests de backend + manifests K8s con overlays Kustomize. Health checks, métricas Prometheus (endpoint /metrics), StatefulSets con anti-affinity. Docker Compose local para desarrollo. Sin vendor lock-in."
|
||||
data-key="vapora-feature-9-desc"
|
||||
>
|
||||
161 backend tests + K8s manifests with Kustomize overlays. Health checks, Prometheus metrics (/metrics endpoint), StatefulSets with anti-affinity. Local Docker Compose for development. Zero vendor lock-in.
|
||||
</p>
|
||||
|
|
@ -792,6 +822,7 @@
|
|||
style="color: #f97316"
|
||||
data-en="Autonomous Scheduling"
|
||||
data-es="Scheduling Autónomo"
|
||||
data-key="vapora-feature-10-title"
|
||||
>
|
||||
Autonomous Scheduling
|
||||
</h3>
|
||||
|
|
@ -799,6 +830,7 @@
|
|||
class="feature-text"
|
||||
data-en="Cron-triggered workflow execution with IANA timezone support via chrono-tz. Distributed fire-lock using SurrealDB conditional UPDATE prevents double-fires across multi-instance deployments — no external lock service required. 48 tests."
|
||||
data-es="Ejecución de workflows disparada por cron con soporte de timezone IANA via chrono-tz. Fire-lock distribuido usando UPDATE condicional de SurrealDB previene doble disparo en despliegues multi-instancia — sin servicio de lock externo. 48 tests."
|
||||
data-key="vapora-feature-10-desc"
|
||||
>
|
||||
Cron-triggered workflow execution with IANA timezone support via chrono-tz. Distributed fire-lock using SurrealDB conditional UPDATE prevents double-fires across multi-instance deployments — no external lock service required. 48 tests.
|
||||
</p>
|
||||
|
|
@ -810,6 +842,7 @@
|
|||
style="color: #d946ef"
|
||||
data-en="Webhook Notifications"
|
||||
data-es="Notificaciones Webhook"
|
||||
data-key="vapora-feature-11-title"
|
||||
>
|
||||
Webhook Notifications
|
||||
</h3>
|
||||
|
|
@ -817,6 +850,7 @@
|
|||
class="feature-text"
|
||||
data-en="Real-time alerts to Slack, Discord, and Telegram — no vendor SDKs. ${VAR} secret resolution is built into ChannelRegistry construction; tokens never reach the HTTP layer unresolved. Fire-and-forget hooks on task completion, proposal approval/rejection, and workflow lifecycle events."
|
||||
data-es="Alertas en tiempo real a Slack, Discord y Telegram — sin SDKs de vendor. Resolución de secretos ${VAR} integrada en la construcción de ChannelRegistry; los tokens nunca llegan sin resolver a la capa HTTP. Hooks fire-and-forget en completado de tareas, aprobación/rechazo de propuestas y eventos del ciclo de vida de workflows."
|
||||
data-key="vapora-feature-11-desc"
|
||||
>
|
||||
Real-time alerts to Slack, Discord, and Telegram — no vendor SDKs. ${VAR} secret resolution is built into ChannelRegistry construction; tokens never reach the HTTP layer unresolved. Fire-and-forget hooks on task completion, proposal approval/rejection, and workflow lifecycle events.
|
||||
</p>
|
||||
|
|
@ -828,6 +862,7 @@
|
|||
style="color: #6366f1"
|
||||
data-en="Capability Packages"
|
||||
data-es="Paquetes de Capacidades"
|
||||
data-key="vapora-feature-12-title"
|
||||
>
|
||||
Capability Packages
|
||||
</h3>
|
||||
|
|
@ -835,21 +870,63 @@
|
|||
class="feature-text"
|
||||
data-en="Domain-optimized agent bundles — system prompt, preferred LLM model, task types, and MCP tools pre-configured per role. Three built-ins (code-reviewer, doc-generator, pr-monitor) loaded at startup via CapabilityRegistry. TOML overrides let you swap model or prompt without code changes. In-process executor dispatch via DashMap channels — no NATS required for standalone mode. 22 tests."
|
||||
data-es="Bundles de agentes optimizados por dominio — system prompt, modelo LLM preferido, tipos de tarea y herramientas MCP preconfigurados por rol. Tres built-ins (code-reviewer, doc-generator, pr-monitor) cargados en startup via CapabilityRegistry. Overrides TOML permiten cambiar modelo o prompt sin cambios de código. Dispatch de executor en proceso via canales DashMap — sin NATS requerido en modo standalone. 22 tests."
|
||||
data-key="vapora-feature-12-desc"
|
||||
>
|
||||
Domain-optimized agent bundles — system prompt, preferred LLM model, task types, and MCP tools pre-configured per role. Three built-ins (code-reviewer, doc-generator, pr-monitor) loaded at startup via CapabilityRegistry. TOML overrides let you swap model or prompt without code changes. In-process executor dispatch via DashMap channels — no NATS required for standalone mode. 22 tests.
|
||||
</p>
|
||||
</div>
|
||||
<div class="feature-box" style="border-left-color: #84cc16">
|
||||
<div class="feature-icon">🌿</div>
|
||||
<h3
|
||||
class="feature-title"
|
||||
style="color: #84cc16"
|
||||
data-en="VCS-Agnostic Worktree"
|
||||
data-es="Worktree VCS-Agnóstico"
|
||||
data-key="vapora-feature-13-title"
|
||||
>
|
||||
VCS-Agnostic Worktree
|
||||
</h3>
|
||||
<p
|
||||
class="feature-text"
|
||||
data-en="vapora-worktree isolates agent workspaces from the main repository. WorkspaceBackend trait supports jj (Jujutsu — stable change IDs, native workspaces) and git (worktrees). Auto-detected at runtime via detect_vcs. jj's stable change IDs survive rebase and amendment — agents always reference the same change regardless of commit history rewrites. Integration tests require real jj/git binaries."
|
||||
data-es="vapora-worktree aísla los workspaces de agentes del repositorio principal. El trait WorkspaceBackend soporta jj (Jujutsu — change IDs estables, workspaces nativos) y git (worktrees). Auto-detección en runtime via detect_vcs. Los change IDs estables de jj sobreviven rebase y enmienda — los agentes siempre referencian el mismo cambio independientemente de reescrituras del historial. Los tests de integración requieren binarios jj/git reales."
|
||||
data-key="vapora-feature-13-desc"
|
||||
>
|
||||
vapora-worktree isolates agent workspaces from the main repository. WorkspaceBackend trait supports jj (Jujutsu — stable change IDs, native workspaces) and git (worktrees). Auto-detected at runtime via detect_vcs. jj's stable change IDs survive rebase and amendment — agents always reference the same change regardless of commit history rewrites. Integration tests require real jj/git binaries.
|
||||
</p>
|
||||
</div>
|
||||
<div class="feature-box" style="border-left-color: #f59e0b">
|
||||
<div class="feature-icon">🧭</div>
|
||||
<h3
|
||||
class="feature-title"
|
||||
style="color: #f59e0b"
|
||||
data-en="Ontology Protocol (ontoref)"
|
||||
data-es="Protocolo de Ontología (ontoref)"
|
||||
data-key="vapora-feature-14-title"
|
||||
>
|
||||
Ontology Protocol (ontoref)
|
||||
</h3>
|
||||
<p
|
||||
class="feature-text"
|
||||
data-en="vapora-ontology exposes VaporaOntology — loads .ontology/*.ncl (core, state, gate), queries invariant axioms, checks gate membranes, and returns open FSM dimensions. vapora-reflection runs NCL-defined execution modes (CreateAgentTask, DeployService) via stratum-reflection-core. Both support live reload without process restart. Architecture is machine-readable: agents can query architectural constraints before acting."
|
||||
data-es="vapora-ontology expone VaporaOntology — carga .ontology/*.ncl (core, state, gate), consulta axiomas invariantes, verifica membranas de gate y retorna dimensiones FSM abiertas. vapora-reflection ejecuta modos NCL (CreateAgentTask, DeployService) via stratum-reflection-core. Ambos soportan reload en vivo sin reiniciar el proceso. La arquitectura es machine-readable: los agentes pueden consultar restricciones arquitectónicas antes de actuar."
|
||||
data-key="vapora-feature-14-desc"
|
||||
>
|
||||
vapora-ontology exposes VaporaOntology — loads .ontology/*.ncl (core, state, gate), queries invariant axioms, checks gate membranes, and returns open FSM dimensions. vapora-reflection runs NCL-defined execution modes (CreateAgentTask, DeployService) via stratum-reflection-core. Both support live reload without process restart. Architecture is machine-readable: agents can query architectural constraints before acting.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="section">
|
||||
<h2 class="section-title">
|
||||
<span data-en="Technology Stack" data-es="Stack Tecnológico"
|
||||
data-key="vapora-tech-stack-title"
|
||||
>Technology Stack</span
|
||||
>
|
||||
</h2>
|
||||
<div class="tech-stack">
|
||||
<span class="tech-badge">Rust (21 crates)</span>
|
||||
<span class="tech-badge">Rust (23 crates)</span>
|
||||
<span class="tech-badge">Axum REST API</span>
|
||||
<span class="tech-badge">SurrealDB</span>
|
||||
<span class="tech-badge">NATS JetStream</span>
|
||||
|
|
@ -863,33 +940,39 @@
|
|||
<span class="tech-badge">chrono-tz (Cron)</span>
|
||||
<span class="tech-badge">Webhook Channels</span>
|
||||
<span class="tech-badge">Capability Packages</span>
|
||||
<span class="tech-badge">jj (Jujutsu VCS)</span>
|
||||
<span class="tech-badge">Radicle</span>
|
||||
<span class="tech-badge">ontoref</span>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="section">
|
||||
<h2 class="section-title">
|
||||
<span data-en="Available Agents" data-es="Agentes Disponibles"
|
||||
data-key="vapora-agents-title"
|
||||
>Available Agents</span
|
||||
>
|
||||
</h2>
|
||||
<div class="agents-grid">
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Architect" data-es="Architect"
|
||||
<span class="agent-name" data-en="Architect" data-es="Architect" data-key="vapora-agent-architect-name"
|
||||
>Architect</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="System design"
|
||||
data-es="Diseño de sistemas"
|
||||
data-key="vapora-agent-architect-role"
|
||||
>System design</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Developer" data-es="Developer"
|
||||
<span class="agent-name" data-en="Developer" data-es="Developer" data-key="vapora-agent-developer-name"
|
||||
>Developer</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Code implementation"
|
||||
data-es="Implementación de código"
|
||||
data-key="vapora-agent-developer-role"
|
||||
>Code implementation</span
|
||||
>
|
||||
</div>
|
||||
|
|
@ -898,81 +981,90 @@
|
|||
class="agent-name"
|
||||
data-en="CodeReviewer"
|
||||
data-es="CodeReviewer"
|
||||
data-key="vapora-agent-codereviewer-name"
|
||||
>CodeReviewer</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Quality assurance"
|
||||
data-es="Aseguramiento de calidad"
|
||||
data-key="vapora-agent-codereviewer-role"
|
||||
>Quality assurance</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Tester" data-es="Tester"
|
||||
<span class="agent-name" data-en="Tester" data-es="Tester" data-key="vapora-agent-tester-name"
|
||||
>Tester</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Tests & benchmarks"
|
||||
data-es="Tests y benchmarks"
|
||||
data-key="vapora-agent-tester-role"
|
||||
>Tests & benchmarks</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Documenter" data-es="Documenter"
|
||||
<span class="agent-name" data-en="Documenter" data-es="Documenter" data-key="vapora-agent-documenter-name"
|
||||
>Documenter</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Documentation"
|
||||
data-es="Documentación"
|
||||
data-key="vapora-agent-documenter-role"
|
||||
>Documentation</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Marketer" data-es="Marketer"
|
||||
<span class="agent-name" data-en="Marketer" data-es="Marketer" data-key="vapora-agent-marketer-name"
|
||||
>Marketer</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Marketing content"
|
||||
data-es="Contenido marketing"
|
||||
data-key="vapora-agent-marketer-role"
|
||||
>Marketing content</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Presenter" data-es="Presenter"
|
||||
<span class="agent-name" data-en="Presenter" data-es="Presenter" data-key="vapora-agent-presenter-name"
|
||||
>Presenter</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Presentations"
|
||||
data-es="Presentaciones"
|
||||
data-key="vapora-agent-presenter-role"
|
||||
>Presentations</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="DevOps" data-es="DevOps"
|
||||
<span class="agent-name" data-en="DevOps" data-es="DevOps" data-key="vapora-agent-devops-name"
|
||||
>DevOps</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="CI/CD deployment"
|
||||
data-es="Despliegue CI/CD"
|
||||
data-key="vapora-agent-devops-role"
|
||||
>CI/CD deployment</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Monitor" data-es="Monitor"
|
||||
<span class="agent-name" data-en="Monitor" data-es="Monitor" data-key="vapora-agent-monitor-name"
|
||||
>Monitor</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Health & alerting"
|
||||
data-es="Salud y alerting"
|
||||
data-key="vapora-agent-monitor-role"
|
||||
>Health & alerting</span
|
||||
>
|
||||
</div>
|
||||
<div class="agent-item">
|
||||
<span class="agent-name" data-en="Security" data-es="Security"
|
||||
<span class="agent-name" data-en="Security" data-es="Security" data-key="vapora-agent-security-name"
|
||||
>Security</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Audit & compliance"
|
||||
data-es="Auditoría y compliance"
|
||||
data-key="vapora-agent-security-role"
|
||||
>Audit & compliance</span
|
||||
>
|
||||
</div>
|
||||
|
|
@ -981,11 +1073,13 @@
|
|||
class="agent-name"
|
||||
data-en="ProjectManager"
|
||||
data-es="ProjectManager"
|
||||
data-key="vapora-agent-projectmanager-name"
|
||||
>ProjectManager</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Roadmap tracking"
|
||||
data-es="Tracking de roadmap"
|
||||
data-key="vapora-agent-projectmanager-role"
|
||||
>Roadmap tracking</span
|
||||
>
|
||||
</div>
|
||||
|
|
@ -994,11 +1088,13 @@
|
|||
class="agent-name"
|
||||
data-en="DecisionMaker"
|
||||
data-es="DecisionMaker"
|
||||
data-key="vapora-agent-decisionmaker-name"
|
||||
>DecisionMaker</span
|
||||
><span
|
||||
class="agent-role"
|
||||
data-en="Conflict resolution"
|
||||
data-es="Resolución de conflictos"
|
||||
data-key="vapora-agent-decisionmaker-role"
|
||||
>Conflict resolution</span
|
||||
>
|
||||
</div>
|
||||
|
|
@ -1010,6 +1106,7 @@
|
|||
class="cta-title"
|
||||
data-en="Ready for intelligent orchestration?"
|
||||
data-es="¿Listo para la orquestación inteligente?"
|
||||
data-key="vapora-cta-title"
|
||||
>
|
||||
Ready for intelligent orchestration?
|
||||
</h2>
|
||||
|
|
@ -1017,6 +1114,7 @@
|
|||
style="color: #94a3b8; margin-bottom: 2rem; font-size: 1.05rem"
|
||||
data-en="Built with Rust 🦀 | Open Source | Self-Hosted"
|
||||
data-es="Construido con Rust 🦀 | Open Source | Self-Hosted"
|
||||
data-key="vapora-cta-subtitle"
|
||||
>
|
||||
Built with Rust 🦀 | Open Source | Self-Hosted
|
||||
</p>
|
||||
|
|
@ -1025,15 +1123,17 @@
|
|||
class="cta-button"
|
||||
data-en="Explore on GitHub →"
|
||||
data-es="Explorar en GitHub →"
|
||||
data-key="vapora-cta-github"
|
||||
>Explore on GitHub →</a
|
||||
>
|
||||
</div>
|
||||
|
||||
<footer>
|
||||
<p data-en="Vapora v1.2.0" data-es="Vapora v1.2.0">Vapora v1.2.0</p>
|
||||
<p data-en="Vapora v1.2.0" data-es="Vapora v1.2.0" data-key="vapora-footer-version">Vapora v1.2.0</p>
|
||||
<p
|
||||
data-en="Made with Vapora dreams and Rust reality ✨"
|
||||
data-es="Hecho con sueños Vapora y realidad Rust ✨"
|
||||
data-key="vapora-footer-tagline"
|
||||
>
|
||||
Made with Vapora dreams and Rust reality ✨
|
||||
</p>
|
||||
|
|
@ -1041,6 +1141,7 @@
|
|||
style="margin-top: 1rem; font-size: 0.8rem"
|
||||
data-en="Intelligent Development Orchestration | Multi-Agent Multi-IA Platform"
|
||||
data-es="Orquestación Inteligente de Desarrollo | Plataforma Multi-Agente Multi-IA"
|
||||
data-key="vapora-footer-description"
|
||||
>
|
||||
Intelligent Development Orchestration | Multi-Agent Multi-IA Platform
|
||||
</p>
|
||||
|
|
|
|||
200
assets/web/src/w-vapora.svg
Normal file
200
assets/web/src/w-vapora.svg
Normal file
|
|
@ -0,0 +1,200 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="170 40 590 300" width="100%" height="100%" preserveAspectRatio="xMidYMid meet">
|
||||
<defs>
|
||||
<!-- Google Fonts import -->
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=JetBrains+Mono:wght@800&display=swap');
|
||||
</style>
|
||||
|
||||
<!-- Gradiente principal -->
|
||||
<linearGradient id="techGrad" x1="0%" y1="0%" x2="100%" y2="0%">
|
||||
<stop offset="0%" style="stop-color:#22d3ee;stop-opacity:1"/>
|
||||
<stop offset="50%" style="stop-color:#a855f7;stop-opacity:1"/>
|
||||
<stop offset="100%" style="stop-color:#ec4899;stop-opacity:1"/>
|
||||
</linearGradient>
|
||||
|
||||
<!-- Gradiente vertical -->
|
||||
<linearGradient id="vertGrad" x1="0%" y1="100%" x2="0%" y2="0%">
|
||||
<stop offset="0%" style="stop-color:#22d3ee;stop-opacity:1"/>
|
||||
<stop offset="50%" style="stop-color:#a855f7;stop-opacity:0.8"/>
|
||||
<stop offset="100%" style="stop-color:#ec4899;stop-opacity:0.4"/>
|
||||
</linearGradient>
|
||||
|
||||
<!-- Filtro glow tech -->
|
||||
<filter id="techGlow">
|
||||
<feGaussianBlur stdDeviation="2" result="coloredBlur"/>
|
||||
<feMerge>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
|
||||
<!-- Filtro glow fuerte -->
|
||||
<filter id="strongGlow">
|
||||
<feGaussianBlur stdDeviation="4" result="coloredBlur"/>
|
||||
<feMerge>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="coloredBlur"/>
|
||||
<feMergeNode in="SourceGraphic"/>
|
||||
</feMerge>
|
||||
</filter>
|
||||
|
||||
<!-- Filtro glass -->
|
||||
<filter id="glass">
|
||||
<feGaussianBlur in="SourceGraphic" stdDeviation="0.5" result="blur"/>
|
||||
<feColorMatrix in="blur" type="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7" result="goo"/>
|
||||
<feBlend in="SourceGraphic" in2="goo"/>
|
||||
</filter>
|
||||
</defs>
|
||||
|
||||
<!-- Fondo blanco -->
|
||||
<rect width="800" height="400" fill="#ffffff"/>
|
||||
|
||||
<!-- Grid de fondo tecnico sutil -->
|
||||
<g opacity="0.03" stroke="#94a3b8" stroke-width="0.5">
|
||||
<line x1="0" y1="133" x2="800" y2="133"/>
|
||||
<line x1="0" y1="200" x2="800" y2="200"/>
|
||||
<line x1="0" y1="267" x2="800" y2="267"/>
|
||||
<line x1="133" y1="0" x2="133" y2="400"/>
|
||||
<line x1="267" y1="0" x2="267" y2="400"/>
|
||||
<line x1="400" y1="0" x2="400" y2="400"/>
|
||||
<line x1="533" y1="0" x2="533" y2="400"/>
|
||||
<line x1="667" y1="0" x2="667" y2="400"/>
|
||||
</g>
|
||||
|
||||
<!-- Simbolo tecnico: flujo de datos ascendente -->
|
||||
<g transform="translate(267, 280)">
|
||||
<!-- Base: plataforma -->
|
||||
<rect x="-25" y="0" width="50" height="6.67" fill="url(#techGrad)" opacity="0.9" rx="3.33"/>
|
||||
|
||||
<!-- Stream principal -->
|
||||
<path d="M 0 0 L 0 -50 L 8.33 -58 L -8.33 -75 L 8.33 -92 L -8.33 -108 L 8.33 -125 L 0 -133 L 0 -200" stroke="url(#vertGrad)" stroke-width="5" fill="none" stroke-linecap="round" stroke-linejoin="round" filter="url(#techGlow)">
|
||||
<animate attributeName="stroke-dasharray" values="0,500;500,0;0,500" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0.8;1;0.8" dur="2s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream izquierdo 1 -->
|
||||
<path d="M -33 0 L -33 -42 L -30 -58 L -37 -75 L -30 -92 L -37 -108 L -33 -125 L -33 -167" stroke="#0891b2" stroke-width="3.33" fill="none" stroke-linecap="round" opacity="0.7" filter="url(#techGlow)">
|
||||
<animate attributeName="stroke-dasharray" values="0,417;417,0;0,417" dur="4.5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream izquierdo 2 -->
|
||||
<path d="M -58 0 L -58 -33 L -53 -50 L -63 -67 L -53 -83 L -63 -100 L -58 -117 L -58 -142" stroke="#7c3aed" stroke-width="2.5" fill="none" stroke-linecap="round" opacity="0.6">
|
||||
<animate attributeName="stroke-dasharray" values="0,333;333,0;0,333" dur="5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream derecho 1 -->
|
||||
<path d="M 33 0 L 33 -42 L 30 -58 L 37 -75 L 30 -92 L 37 -108 L 33 -125 L 33 -167" stroke="#db2777" stroke-width="3.33" fill="none" stroke-linecap="round" opacity="0.7" filter="url(#techGlow)">
|
||||
<animate attributeName="stroke-dasharray" values="0,417;417,0;0,417" dur="4.2s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Stream derecho 2 -->
|
||||
<path d="M 58 0 L 58 -33 L 53 -50 L 63 -67 L 53 -83 L 63 -100 L 58 -117 L 58 -142" stroke="#0891b2" stroke-width="2.5" fill="none" stroke-linecap="round" opacity="0.6">
|
||||
<animate attributeName="stroke-dasharray" values="0,333;333,0;0,333" dur="5.5s" repeatCount="indefinite"/>
|
||||
</path>
|
||||
|
||||
<!-- Nodos de datos -->
|
||||
<circle cx="0" cy="-67" r="5" fill="#0891b2" filter="url(#strongGlow)">
|
||||
<animate attributeName="cy" values="-67;-183;-67" dur="3s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="3s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="0" cy="-100" r="4.17" fill="#7c3aed" filter="url(#strongGlow)">
|
||||
<animate attributeName="cy" values="-100;-217;-100" dur="3.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="3.5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="0" cy="-133" r="3.33" fill="#db2777" filter="url(#strongGlow)">
|
||||
<animate attributeName="cy" values="-133;-233;-133" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;1;0" dur="4s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<!-- Particulas laterales -->
|
||||
<circle cx="-33" cy="-83" r="3.33" fill="#0891b2" opacity="0.8">
|
||||
<animate attributeName="cy" values="-83;-175;-83" dur="3.8s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.8;0" dur="3.8s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="33" cy="-92" r="3.33" fill="#db2777" opacity="0.8">
|
||||
<animate attributeName="cy" values="-92;-175;-92" dur="4.2s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.8;0" dur="4.2s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="-58" cy="-58" r="2.5" fill="#7c3aed" opacity="0.6">
|
||||
<animate attributeName="cy" values="-58;-142;-58" dur="4.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="4.5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<circle cx="58" cy="-67" r="2.5" fill="#0891b2" opacity="0.6">
|
||||
<animate attributeName="cy" values="-67;-142;-67" dur="5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="5s" repeatCount="indefinite"/>
|
||||
</circle>
|
||||
|
||||
<!-- Hexagonos tecnicos flotantes -->
|
||||
<polygon points="0,-158 5,-162 5,-167 0,-170 -5,-167 -5,-162" stroke="#0891b2" fill="none" stroke-width="1.67" opacity="0.7">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-50);translate(0,0)" dur="4s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.7;0" dur="4s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<polygon points="-42,-117 -37,-120 -37,-125 -42,-128 -47,-125 -47,-120" stroke="#7c3aed" fill="none" stroke-width="1.67" opacity="0.6">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-42);translate(0,0)" dur="4.5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="4.5s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<polygon points="42,-125 47,-128 47,-133 42,-137 37,-133 37,-128" stroke="#db2777" fill="none" stroke-width="1.67" opacity="0.6">
|
||||
<animate attributeName="transform" values="translate(0,0);translate(0,-33);translate(0,0)" dur="5s" repeatCount="indefinite"/>
|
||||
<animate attributeName="opacity" values="0;0.6;0" dur="5s" repeatCount="indefinite"/>
|
||||
</polygon>
|
||||
|
||||
<!-- Lineas de conexion horizontales -->
|
||||
<line x1="-33" y1="-100" x2="-8" y2="-100" stroke="#0891b2" stroke-width="0.83" opacity="0.5">
|
||||
<animate attributeName="opacity" values="0;0.5;0" dur="2s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="8" y1="-117" x2="33" y2="-117" stroke="#db2777" stroke-width="0.83" opacity="0.5">
|
||||
<animate attributeName="opacity" values="0;0.5;0" dur="2.5s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="-58" y1="-83" x2="-37" y2="-83" stroke="#7c3aed" stroke-width="0.83" opacity="0.4">
|
||||
<animate attributeName="opacity" values="0;0.4;0" dur="3s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
|
||||
<line x1="37" y1="-92" x2="58" y2="-92" stroke="#0891b2" stroke-width="0.83" opacity="0.4">
|
||||
<animate attributeName="opacity" values="0;0.4;0" dur="3.5s" repeatCount="indefinite"/>
|
||||
</line>
|
||||
</g>
|
||||
|
||||
<!-- Texto VAPORA -->
|
||||
<g filter="url(#glass)">
|
||||
<text x="550" y="207" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="90" font-weight="800" fill="url(#techGrad)" letter-spacing="5" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
|
||||
<text x="550" y="207" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="90" font-weight="800" fill="none" stroke="rgba(0,0,0,0.08)" stroke-width="0.83" letter-spacing="5" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
</g>
|
||||
|
||||
<!-- Glow en texto -->
|
||||
<text x="550" y="207" font-family="'JetBrains Mono', 'Fira Code', monospace" font-size="90" font-weight="800" fill="url(#techGrad)" letter-spacing="5" filter="url(#techGlow)" opacity="0.2" text-anchor="middle">
|
||||
VAPORA
|
||||
</text>
|
||||
|
||||
<!-- Tagline -->
|
||||
<text x="550" y="240" font-family="'Inter', sans-serif" font-size="20" fill="#7c3aed" opacity="0.9" letter-spacing="0.25em" text-anchor="middle">
|
||||
Evaporate complexity
|
||||
</text>
|
||||
|
||||
<!-- Indicador tecnico decorativo -->
|
||||
<g transform="translate(550, 280)">
|
||||
<rect x="0" y="0" width="2" height="13.33" fill="#0891b2" opacity="0.7">
|
||||
<animate attributeName="height" values="13.33;20;13.33" dur="1.5s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
<rect x="6.67" y="0" width="2" height="16.67" fill="#7c3aed" opacity="0.7">
|
||||
<animate attributeName="height" values="16.67;23.33;16.67" dur="1.8s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
<rect x="13.33" y="0" width="2" height="10" fill="#db2777" opacity="0.7">
|
||||
<animate attributeName="height" values="10;16.67;10" dur="1.3s" repeatCount="indefinite"/>
|
||||
</rect>
|
||||
</g>
|
||||
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 9.9 KiB |
26
card.ncl
Normal file
26
card.ncl
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
let d = import "schemas/project-card.ncl" in
|
||||
|
||||
d.ProjectCard & {
|
||||
id = "vapora",
|
||||
name = "Vapora",
|
||||
tagline = "Development Flows When Teams and AI Orchestrate",
|
||||
description = "Specialized agents orchestrate pipelines for design, implementation, testing, documentation and deployment. Agents learn from history and optimize costs automatically. 100% self-hosted.",
|
||||
version = "1.2.0",
|
||||
status = 'Active,
|
||||
source = 'Local,
|
||||
url = "https://vapora.jesusperez.pro",
|
||||
repo = "https://repo.jesusperez.pro/jesus/Vapora",
|
||||
started_at = "2023",
|
||||
tags = ["rust", "ai", "agents", "orchestration", "leptos", "self-hosted"],
|
||||
tools = ["Rust", "Leptos", "Axum"],
|
||||
features = [
|
||||
"620 tests — 100% pass rate",
|
||||
"Specialized agents with temporal knowledge graph",
|
||||
"Probabilistic multi-LLM routing — cost optimized automatically",
|
||||
"Agents learn from history across sessions",
|
||||
"100% self-hosted — no external AI service dependency",
|
||||
],
|
||||
featured = true,
|
||||
sort_order = 3,
|
||||
logo = "assets/logo.svg",
|
||||
}
|
||||
|
|
@ -17,6 +17,11 @@ name = "vapora-backend"
|
|||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
# Ontoref API catalog
|
||||
ontoref-ontology = { path = "../../../ontoref/crates/ontoref-ontology", features = ["derive"] }
|
||||
ontoref-derive = { path = "../../../ontoref/crates/ontoref-derive" }
|
||||
inventory = "0.3"
|
||||
|
||||
# Internal crates
|
||||
vapora-shared = { workspace = true }
|
||||
vapora-agents = { workspace = true }
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use axum::{
|
|||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::Deserialize;
|
||||
use vapora_channels::Message;
|
||||
use vapora_shared::models::{Agent, AgentStatus};
|
||||
|
|
@ -31,6 +32,14 @@ pub struct SkillPayload {
|
|||
/// List all agents
|
||||
///
|
||||
/// GET /api/v1/agents
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/agents",
|
||||
description = "Returns all registered agents in the swarm.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "agents, read"
|
||||
)]
|
||||
pub async fn list_agents(State(state): State<AppState>) -> ApiResult<impl IntoResponse> {
|
||||
let agents = state.agent_service.list_agents().await?;
|
||||
Ok(Json(agents))
|
||||
|
|
@ -39,6 +48,15 @@ pub async fn list_agents(State(state): State<AppState>) -> ApiResult<impl IntoRe
|
|||
/// Get a specific agent
|
||||
///
|
||||
/// GET /api/v1/agents/:id
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/agents/:id",
|
||||
description = "Returns a single agent by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Agent identifier",
|
||||
tags = "agents, read"
|
||||
)]
|
||||
pub async fn get_agent(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -50,6 +68,14 @@ pub async fn get_agent(
|
|||
/// Register a new agent
|
||||
///
|
||||
/// POST /api/v1/agents
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/agents",
|
||||
description = "Registers a new agent in the swarm with its initial capabilities.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
tags = "agents, write"
|
||||
)]
|
||||
pub async fn register_agent(
|
||||
State(state): State<AppState>,
|
||||
Json(agent): Json<Agent>,
|
||||
|
|
@ -61,6 +87,15 @@ pub async fn register_agent(
|
|||
/// Update an agent
|
||||
///
|
||||
/// PUT /api/v1/agents/:id
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/agents/:id",
|
||||
description = "Fully replaces an agent record with the provided payload.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
params = "id:string:required:Agent identifier",
|
||||
tags = "agents, write"
|
||||
)]
|
||||
pub async fn update_agent(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -73,6 +108,15 @@ pub async fn update_agent(
|
|||
/// Deregister an agent
|
||||
///
|
||||
/// DELETE /api/v1/agents/:id
|
||||
#[onto_api(
|
||||
method = "DELETE",
|
||||
path = "/api/v1/agents/:id",
|
||||
description = "Removes an agent from the swarm registry.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
params = "id:string:required:Agent identifier",
|
||||
tags = "agents, write"
|
||||
)]
|
||||
pub async fn deregister_agent(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -84,6 +128,16 @@ pub async fn deregister_agent(
|
|||
/// Update agent status
|
||||
///
|
||||
/// PUT /api/v1/agents/:id/status
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/agents/:id/status",
|
||||
description = "Transitions an agent to a new lifecycle status and fires inactivity \
|
||||
notifications.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
params = "id:string:required:Agent identifier",
|
||||
tags = "agents, write"
|
||||
)]
|
||||
pub async fn update_agent_status(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -108,6 +162,15 @@ pub async fn update_agent_status(
|
|||
/// Add capability to agent
|
||||
///
|
||||
/// POST /api/v1/agents/:id/capabilities
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/agents/:id/capabilities",
|
||||
description = "Adds a named capability to the specified agent's capability set.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
params = "id:string:required:Agent identifier",
|
||||
tags = "agents, write"
|
||||
)]
|
||||
pub async fn add_capability(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -123,6 +186,15 @@ pub async fn add_capability(
|
|||
/// Remove capability from agent
|
||||
///
|
||||
/// DELETE /api/v1/agents/:id/capabilities/:capability
|
||||
#[onto_api(
|
||||
method = "DELETE",
|
||||
path = "/api/v1/agents/:id/capabilities/:capability",
|
||||
description = "Removes a named capability from the specified agent.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
params = "id:string:required:Agent identifier; capability:string:required:Capability name",
|
||||
tags = "agents, write"
|
||||
)]
|
||||
pub async fn remove_capability(
|
||||
State(state): State<AppState>,
|
||||
Path((id, capability)): Path<(String, String)>,
|
||||
|
|
@ -137,6 +209,15 @@ pub async fn remove_capability(
|
|||
/// Add skill to agent
|
||||
///
|
||||
/// POST /api/v1/agents/:id/skills
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/agents/:id/skills",
|
||||
description = "Adds a named skill to the specified agent's skill set.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
params = "id:string:required:Agent identifier",
|
||||
tags = "agents, write"
|
||||
)]
|
||||
pub async fn add_skill(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -149,6 +230,14 @@ pub async fn add_skill(
|
|||
/// Get available agents
|
||||
///
|
||||
/// GET /api/v1/agents/available
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/agents/available",
|
||||
description = "Returns agents currently available to accept new task assignments.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "agents, read"
|
||||
)]
|
||||
pub async fn get_available_agents(State(state): State<AppState>) -> ApiResult<impl IntoResponse> {
|
||||
let agents = state.agent_service.get_available_agents().await?;
|
||||
Ok(Json(agents))
|
||||
|
|
@ -157,6 +246,15 @@ pub async fn get_available_agents(State(state): State<AppState>) -> ApiResult<im
|
|||
/// Check agent health
|
||||
///
|
||||
/// GET /api/v1/agents/:id/health
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/agents/:id/health",
|
||||
description = "Probes an agent's health and returns liveness status.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Agent identifier",
|
||||
tags = "agents, health, read"
|
||||
)]
|
||||
pub async fn check_agent_health(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ use axum::{
|
|||
response::{IntoResponse, Response},
|
||||
Json,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use vapora_knowledge_graph::{
|
||||
AgentPerformance, CostEfficiencyReport, DashboardMetrics, TaskTypeAnalytics,
|
||||
|
|
@ -70,6 +71,16 @@ impl IntoResponse for AnalyticsError {
|
|||
}
|
||||
|
||||
/// Get agent performance metrics
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/agent/:id",
|
||||
description = "Returns performance metrics for a specific agent over the requested period.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Agent identifier; period:string:optional:Time period \
|
||||
(hour/day/week/month/all)",
|
||||
tags = "analytics, agents, read"
|
||||
)]
|
||||
pub async fn get_agent_performance(
|
||||
State(_state): State<AppState>,
|
||||
Path(agent_id): Path<String>,
|
||||
|
|
@ -84,6 +95,16 @@ pub async fn get_agent_performance(
|
|||
}
|
||||
|
||||
/// Get task type analytics
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/task-types/:task_type",
|
||||
description = "Returns aggregate analytics for a specific task type over the requested period.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "task_type:string:required:Task type name; period:string:optional:Time period \
|
||||
(hour/day/week/month/all)",
|
||||
tags = "analytics, tasks, read"
|
||||
)]
|
||||
pub async fn get_task_type_analytics(
|
||||
State(_state): State<AppState>,
|
||||
Path(task_type): Path<String>,
|
||||
|
|
@ -96,6 +117,15 @@ pub async fn get_task_type_analytics(
|
|||
}
|
||||
|
||||
/// Get system dashboard metrics
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/dashboard",
|
||||
description = "Returns aggregated system-wide dashboard metrics for the requested period.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
params = "period:string:optional:Time period (hour/day/week/month/all)",
|
||||
tags = "analytics, dashboard, read"
|
||||
)]
|
||||
pub async fn get_dashboard_metrics(
|
||||
State(_state): State<AppState>,
|
||||
Query(_params): Query<AnalyticsQuery>,
|
||||
|
|
@ -106,6 +136,15 @@ pub async fn get_dashboard_metrics(
|
|||
}
|
||||
|
||||
/// Get cost efficiency report
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/cost-report",
|
||||
description = "Returns a cost efficiency report with per-provider and per-task-type breakdown.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
params = "period:string:optional:Time period (hour/day/week/month/all)",
|
||||
tags = "analytics, cost, read"
|
||||
)]
|
||||
pub async fn get_cost_report(
|
||||
State(_state): State<AppState>,
|
||||
Query(_params): Query<AnalyticsQuery>,
|
||||
|
|
@ -124,6 +163,16 @@ pub struct AnalyticsSummary {
|
|||
}
|
||||
|
||||
/// Get comprehensive analytics summary
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/summary",
|
||||
description = "Returns a combined analytics summary covering dashboard metrics and cost \
|
||||
report.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
params = "period:string:optional:Time period (hour/day/week/month/all)",
|
||||
tags = "analytics, read"
|
||||
)]
|
||||
pub async fn get_analytics_summary(
|
||||
State(_state): State<AppState>,
|
||||
Query(_params): Query<AnalyticsQuery>,
|
||||
|
|
|
|||
35
crates/vapora-backend/src/api/catalog.rs
Normal file
35
crates/vapora-backend/src/api/catalog.rs
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
use axum::{response::IntoResponse, Json};
|
||||
use ontoref_ontology::ApiRouteEntry;
|
||||
use serde_json::json;
|
||||
|
||||
/// GET /api/v1/catalog
|
||||
///
|
||||
/// Returns the annotated API catalog: all registered routes with their
|
||||
/// method, path, description, auth, actors, params, and tags.
|
||||
/// Populated at link time via `#[onto_api]` on each handler function.
|
||||
pub async fn api_catalog() -> impl IntoResponse {
|
||||
let mut routes: Vec<&'static ApiRouteEntry> = inventory::iter::<ApiRouteEntry>().collect();
|
||||
routes.sort_by(|a, b| a.path.cmp(b.path).then(a.method.cmp(b.method)));
|
||||
|
||||
let entries: Vec<_> = routes
|
||||
.into_iter()
|
||||
.map(|r| {
|
||||
json!({
|
||||
"method": r.method,
|
||||
"path": r.path,
|
||||
"description": r.description,
|
||||
"auth": r.auth,
|
||||
"actors": r.actors,
|
||||
"params": r.params.iter().map(|p| json!({
|
||||
"name": p.name,
|
||||
"kind": p.kind,
|
||||
"constraint": p.constraint,
|
||||
"description": p.description,
|
||||
})).collect::<Vec<_>>(),
|
||||
"tags": r.tags,
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
Json(json!({ "routes": entries }))
|
||||
}
|
||||
|
|
@ -4,6 +4,7 @@ use axum::{
|
|||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::Serialize;
|
||||
use vapora_channels::{ChannelError, Message};
|
||||
use vapora_shared::VaporaError;
|
||||
|
|
@ -19,6 +20,14 @@ struct ChannelListResponse {
|
|||
/// List all registered notification channels.
|
||||
///
|
||||
/// GET /api/v1/channels
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/channels",
|
||||
description = "Returns the names of all registered notification channels.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
tags = "channels, read"
|
||||
)]
|
||||
pub async fn list_channels(State(state): State<AppState>) -> impl IntoResponse {
|
||||
let names = match &state.channel_registry {
|
||||
Some(r) => {
|
||||
|
|
@ -37,6 +46,16 @@ pub async fn list_channels(State(state): State<AppState>) -> impl IntoResponse {
|
|||
///
|
||||
/// Returns 200 on successful delivery, 404 if the channel is unknown or not
|
||||
/// configured, 502 if delivery fails at the remote platform.
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/channels/:name/test",
|
||||
description = "Sends a test message through the named notification channel to verify \
|
||||
connectivity.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "name:string:required:Channel name",
|
||||
tags = "channels, write"
|
||||
)]
|
||||
pub async fn test_channel(
|
||||
State(state): State<AppState>,
|
||||
Path(name): Path<String>,
|
||||
|
|
|
|||
|
|
@ -1,11 +1,20 @@
|
|||
// Health check endpoint
|
||||
|
||||
use axum::{http::StatusCode, response::IntoResponse, Json};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde_json::json;
|
||||
|
||||
/// Health check endpoint
|
||||
///
|
||||
/// Returns current server status and version information
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/health",
|
||||
description = "Returns current server liveness status and build version.",
|
||||
auth = "none",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "health, system"
|
||||
)]
|
||||
pub async fn health() -> impl IntoResponse {
|
||||
(
|
||||
StatusCode::OK,
|
||||
|
|
|
|||
|
|
@ -5,9 +5,18 @@ use axum::{
|
|||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use prometheus::{Encoder, TextEncoder};
|
||||
|
||||
/// Get Prometheus metrics in text format
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/metrics",
|
||||
description = "Exposes Prometheus metrics for scraping in text/plain format.",
|
||||
auth = "none",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "metrics, observability"
|
||||
)]
|
||||
pub async fn metrics_handler() -> Result<impl IntoResponse, MetricsError> {
|
||||
let encoder = TextEncoder::new();
|
||||
let metric_families = prometheus::gather();
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@
|
|||
pub mod agents;
|
||||
pub mod analytics;
|
||||
pub mod analytics_metrics;
|
||||
pub mod catalog;
|
||||
pub mod channels;
|
||||
pub mod error;
|
||||
pub mod health;
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use axum::{
|
|||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use vapora_shared::models::Project;
|
||||
|
||||
use crate::api::state::AppState;
|
||||
|
|
@ -14,6 +15,14 @@ use crate::api::ApiResult;
|
|||
/// List all projects for a tenant
|
||||
///
|
||||
/// GET /api/v1/projects
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/projects",
|
||||
description = "Returns all projects visible to the authenticated tenant.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "projects, read"
|
||||
)]
|
||||
pub async fn list_projects(State(state): State<AppState>) -> ApiResult<impl IntoResponse> {
|
||||
// TODO: Extract tenant_id from JWT token
|
||||
let tenant_id = "default";
|
||||
|
|
@ -25,6 +34,15 @@ pub async fn list_projects(State(state): State<AppState>) -> ApiResult<impl Into
|
|||
/// Get a specific project
|
||||
///
|
||||
/// GET /api/v1/projects/:id
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/projects/:id",
|
||||
description = "Returns a single project by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Project identifier",
|
||||
tags = "projects, read"
|
||||
)]
|
||||
pub async fn get_project(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -39,6 +57,14 @@ pub async fn get_project(
|
|||
/// Create a new project
|
||||
///
|
||||
/// POST /api/v1/projects
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/projects",
|
||||
description = "Creates a new project under the authenticated tenant.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
tags = "projects, write"
|
||||
)]
|
||||
pub async fn create_project(
|
||||
State(state): State<AppState>,
|
||||
Json(mut project): Json<Project>,
|
||||
|
|
@ -53,6 +79,15 @@ pub async fn create_project(
|
|||
/// Update a project
|
||||
///
|
||||
/// PUT /api/v1/projects/:id
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/projects/:id",
|
||||
description = "Fully replaces a project record with the provided payload.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Project identifier",
|
||||
tags = "projects, write"
|
||||
)]
|
||||
pub async fn update_project(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -71,6 +106,15 @@ pub async fn update_project(
|
|||
/// Delete a project
|
||||
///
|
||||
/// DELETE /api/v1/projects/:id
|
||||
#[onto_api(
|
||||
method = "DELETE",
|
||||
path = "/api/v1/projects/:id",
|
||||
description = "Permanently removes a project and all its associated data.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Project identifier",
|
||||
tags = "projects, write"
|
||||
)]
|
||||
pub async fn delete_project(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -85,6 +129,15 @@ pub async fn delete_project(
|
|||
/// Add a feature to a project
|
||||
///
|
||||
/// POST /api/v1/projects/:id/features
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/projects/:id/features",
|
||||
description = "Adds a named feature flag to the specified project.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Project identifier",
|
||||
tags = "projects, features, write"
|
||||
)]
|
||||
pub async fn add_feature(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -110,6 +163,15 @@ pub async fn add_feature(
|
|||
/// Remove a feature from a project
|
||||
///
|
||||
/// DELETE /api/v1/projects/:id/features/:feature
|
||||
#[onto_api(
|
||||
method = "DELETE",
|
||||
path = "/api/v1/projects/:id/features/:feature",
|
||||
description = "Removes a named feature flag from the specified project.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Project identifier; feature:string:required:Feature name",
|
||||
tags = "projects, features, write"
|
||||
)]
|
||||
pub async fn remove_feature(
|
||||
State(state): State<AppState>,
|
||||
Path((id, feature)): Path<(String, String)>,
|
||||
|
|
@ -127,6 +189,15 @@ pub async fn remove_feature(
|
|||
/// Archive a project
|
||||
///
|
||||
/// POST /api/v1/projects/:id/archive
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/projects/:id/archive",
|
||||
description = "Marks the project as archived, making it read-only.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Project identifier",
|
||||
tags = "projects, write"
|
||||
)]
|
||||
pub async fn archive_project(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use axum::{
|
|||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::Deserialize;
|
||||
use vapora_channels::Message;
|
||||
use vapora_shared::models::{Proposal, ProposalReview, ProposalStatus, RiskLevel};
|
||||
|
|
@ -42,6 +43,16 @@ pub struct AddReviewPayload {
|
|||
/// List proposals with optional filters
|
||||
///
|
||||
/// GET /api/v1/proposals?project_id=xxx&status=proposed
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/proposals",
|
||||
description = "Returns proposals for a project, optionally filtered by status.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "project_id:string:required:Parent project identifier; status:string:optional:Filter \
|
||||
by proposal status",
|
||||
tags = "proposals, read"
|
||||
)]
|
||||
pub async fn list_proposals(
|
||||
State(state): State<AppState>,
|
||||
Query(params): Query<ProposalQueryParams>,
|
||||
|
|
@ -67,6 +78,15 @@ pub async fn list_proposals(
|
|||
/// Get a specific proposal
|
||||
///
|
||||
/// GET /api/v1/proposals/:id
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/proposals/:id",
|
||||
description = "Returns a single proposal by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, read"
|
||||
)]
|
||||
pub async fn get_proposal(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -81,6 +101,14 @@ pub async fn get_proposal(
|
|||
/// Create a new proposal
|
||||
///
|
||||
/// POST /api/v1/proposals
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/proposals",
|
||||
description = "Creates a new change proposal with risk level and execution plan.",
|
||||
auth = "bearer",
|
||||
actors = "agent, developer, admin",
|
||||
tags = "proposals, write"
|
||||
)]
|
||||
pub async fn create_proposal(
|
||||
State(state): State<AppState>,
|
||||
Json(payload): Json<CreateProposalPayload>,
|
||||
|
|
@ -124,6 +152,15 @@ pub async fn create_proposal(
|
|||
/// Update proposal
|
||||
///
|
||||
/// PUT /api/v1/proposals/:id
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/proposals/:id",
|
||||
description = "Fully replaces a proposal record with the provided payload.",
|
||||
auth = "bearer",
|
||||
actors = "agent, developer, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, write"
|
||||
)]
|
||||
pub async fn update_proposal(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -142,6 +179,15 @@ pub async fn update_proposal(
|
|||
/// Delete proposal
|
||||
///
|
||||
/// DELETE /api/v1/proposals/:id
|
||||
#[onto_api(
|
||||
method = "DELETE",
|
||||
path = "/api/v1/proposals/:id",
|
||||
description = "Permanently removes a proposal by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, write"
|
||||
)]
|
||||
pub async fn delete_proposal(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -159,6 +205,15 @@ pub async fn delete_proposal(
|
|||
/// Submit proposal for approval
|
||||
///
|
||||
/// PUT /api/v1/proposals/:id/submit
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/proposals/:id/submit",
|
||||
description = "Submits a draft proposal into the approval workflow.",
|
||||
auth = "bearer",
|
||||
actors = "agent, developer",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, write"
|
||||
)]
|
||||
pub async fn submit_proposal(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -176,6 +231,15 @@ pub async fn submit_proposal(
|
|||
/// Approve proposal
|
||||
///
|
||||
/// PUT /api/v1/proposals/:id/approve
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/proposals/:id/approve",
|
||||
description = "Approves a submitted proposal and fires approval notifications.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, write"
|
||||
)]
|
||||
pub async fn approve_proposal(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -199,6 +263,15 @@ pub async fn approve_proposal(
|
|||
/// Reject proposal
|
||||
///
|
||||
/// PUT /api/v1/proposals/:id/reject
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/proposals/:id/reject",
|
||||
description = "Rejects a submitted proposal and fires rejection notifications.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, write"
|
||||
)]
|
||||
pub async fn reject_proposal(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -222,6 +295,15 @@ pub async fn reject_proposal(
|
|||
/// Mark proposal as executed
|
||||
///
|
||||
/// PUT /api/v1/proposals/:id/executed
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/proposals/:id/executed",
|
||||
description = "Marks an approved proposal as executed after the change has been applied.",
|
||||
auth = "bearer",
|
||||
actors = "agent, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, write"
|
||||
)]
|
||||
pub async fn mark_executed(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -236,6 +318,15 @@ pub async fn mark_executed(
|
|||
/// List reviews for proposal
|
||||
///
|
||||
/// GET /api/v1/proposals/:id/reviews
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/proposals/:id/reviews",
|
||||
description = "Returns all reviews attached to the specified proposal.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, reviews, read"
|
||||
)]
|
||||
pub async fn list_reviews(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -248,6 +339,15 @@ pub async fn list_reviews(
|
|||
/// Add review to proposal
|
||||
///
|
||||
/// POST /api/v1/proposals/:id/reviews
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/proposals/:id/reviews",
|
||||
description = "Adds a reviewer decision (approve/reject with feedback) to a proposal.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Proposal identifier",
|
||||
tags = "proposals, reviews, write"
|
||||
)]
|
||||
pub async fn add_review(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ use axum::{
|
|||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::{debug, error};
|
||||
|
||||
|
|
@ -85,6 +86,14 @@ pub struct TaskTypeMetricsResponse {
|
|||
}
|
||||
|
||||
/// GET /api/v1/analytics/providers - Get cost breakdown by provider
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/providers",
|
||||
description = "Returns cost breakdown grouped by LLM provider with percentage share.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
tags = "analytics, providers, cost, read"
|
||||
)]
|
||||
pub async fn get_provider_cost_breakdown(State(state): State<AppState>) -> impl IntoResponse {
|
||||
debug!("GET /api/v1/analytics/providers - cost breakdown");
|
||||
|
||||
|
|
@ -139,6 +148,14 @@ pub async fn get_provider_cost_breakdown(State(state): State<AppState>) -> impl
|
|||
}
|
||||
|
||||
/// GET /api/v1/analytics/providers/efficiency - Get provider efficiency ranking
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/providers/efficiency",
|
||||
description = "Returns providers ranked by efficiency ratio combining quality and cost scores.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
tags = "analytics, providers, read"
|
||||
)]
|
||||
pub async fn get_provider_efficiency(State(state): State<AppState>) -> impl IntoResponse {
|
||||
debug!("GET /api/v1/analytics/providers/efficiency");
|
||||
|
||||
|
|
@ -186,6 +203,15 @@ pub async fn get_provider_efficiency(State(state): State<AppState>) -> impl Into
|
|||
|
||||
/// GET /api/v1/analytics/providers/:provider - Get detailed analytics for a
|
||||
/// provider
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/providers/:provider",
|
||||
description = "Returns detailed cost and success-rate analytics for a single LLM provider.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
params = "provider:string:required:Provider name (e.g. openai, anthropic)",
|
||||
tags = "analytics, providers, read"
|
||||
)]
|
||||
pub async fn get_provider_analytics(
|
||||
State(state): State<AppState>,
|
||||
Path(provider): Path<String>,
|
||||
|
|
@ -241,6 +267,16 @@ pub async fn get_provider_analytics(
|
|||
|
||||
/// GET /api/v1/analytics/providers/:provider/forecast - Get cost forecast for a
|
||||
/// provider
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/providers/:provider/forecast",
|
||||
description = "Returns weekly and monthly cost forecast for a provider based on recent usage \
|
||||
trend.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
params = "provider:string:required:Provider name (e.g. openai, anthropic)",
|
||||
tags = "analytics, providers, cost, read"
|
||||
)]
|
||||
pub async fn get_provider_forecast(
|
||||
State(state): State<AppState>,
|
||||
Path(provider): Path<String>,
|
||||
|
|
@ -288,6 +324,15 @@ pub async fn get_provider_forecast(
|
|||
|
||||
/// GET /api/v1/analytics/providers/:provider/tasks/:task_type - Provider
|
||||
/// performance by task type
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/analytics/providers/:provider/tasks/:task_type",
|
||||
description = "Returns a provider's performance metrics scoped to a specific task type.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
params = "provider:string:required:Provider name; task_type:string:required:Task type name",
|
||||
tags = "analytics, providers, tasks, read"
|
||||
)]
|
||||
pub async fn get_provider_task_type_metrics(
|
||||
State(state): State<AppState>,
|
||||
Path((provider, task_type)): Path<(String, String)>,
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@
|
|||
// Recursive Language Models integration for distributed reasoning
|
||||
|
||||
use axum::{extract::State, http::StatusCode, response::IntoResponse, Json};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use vapora_shared::VaporaError;
|
||||
|
||||
|
|
@ -120,6 +121,14 @@ pub struct AnalyzeResponse {
|
|||
/// Load and chunk a document
|
||||
///
|
||||
/// POST /api/v1/rlm/documents
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/rlm/documents",
|
||||
description = "Loads, scans, and chunks a document into the RLM index for subsequent queries.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
tags = "rlm, write"
|
||||
)]
|
||||
pub async fn load_document(
|
||||
State(state): State<AppState>,
|
||||
Json(request): Json<LoadDocumentRequest>,
|
||||
|
|
@ -156,6 +165,14 @@ pub async fn load_document(
|
|||
/// Query a document using hybrid search
|
||||
///
|
||||
/// POST /api/v1/rlm/query
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/rlm/query",
|
||||
description = "Retrieves relevant document chunks using BM25 and semantic hybrid search.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
tags = "rlm, read"
|
||||
)]
|
||||
pub async fn query_document(
|
||||
State(state): State<AppState>,
|
||||
Json(request): Json<QueryRequest>,
|
||||
|
|
@ -194,6 +211,15 @@ pub async fn query_document(
|
|||
/// Analyze a document with LLM dispatch
|
||||
///
|
||||
/// POST /api/v1/rlm/analyze
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/rlm/analyze",
|
||||
description = "Dispatches a query against retrieved chunks to an LLM and returns the \
|
||||
synthesized response.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
tags = "rlm, read"
|
||||
)]
|
||||
pub async fn analyze_document(
|
||||
State(state): State<AppState>,
|
||||
Json(request): Json<AnalyzeRequest>,
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use axum::{
|
|||
Json,
|
||||
};
|
||||
use chrono::Utc;
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::{error, info, warn};
|
||||
use vapora_shared::VaporaError;
|
||||
|
|
@ -164,6 +165,15 @@ fn validate_tz(tz: &str) -> Result<(), ApiError> {
|
|||
// ─────────────────────────────────────────────────────────────────
|
||||
|
||||
/// `GET /api/v1/schedules` — list all schedules.
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/schedules",
|
||||
description = "Returns all scheduled workflows with their cron expressions and next fire \
|
||||
times.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "schedules, read"
|
||||
)]
|
||||
pub async fn list_schedules(
|
||||
State(state): State<AppState>,
|
||||
) -> Result<Json<ScheduleListResponse>, ApiError> {
|
||||
|
|
@ -180,6 +190,15 @@ pub async fn list_schedules(
|
|||
}
|
||||
|
||||
/// `GET /api/v1/schedules/:id` — get a single schedule.
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/schedules/:id",
|
||||
description = "Returns a single scheduled workflow by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Schedule identifier",
|
||||
tags = "schedules, read"
|
||||
)]
|
||||
pub async fn get_schedule(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -200,6 +219,15 @@ pub async fn get_schedule(
|
|||
/// `PUT /api/v1/schedules/:id` — create or fully replace a schedule.
|
||||
///
|
||||
/// Preserves `last_fired_at` and `runs_count` from the existing record.
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/schedules/:id",
|
||||
description = "Creates or fully replaces a scheduled workflow, validating cron and timezone.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Schedule identifier",
|
||||
tags = "schedules, write"
|
||||
)]
|
||||
pub async fn put_schedule(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -256,6 +284,16 @@ pub async fn put_schedule(
|
|||
///
|
||||
/// Only the provided fields are changed. If `cron_expression` is updated,
|
||||
/// `next_fire_at` is recomputed automatically.
|
||||
#[onto_api(
|
||||
method = "PATCH",
|
||||
path = "/api/v1/schedules/:id",
|
||||
description = "Partially updates a schedule; recomputes next_fire_at if cron_expression \
|
||||
changes.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Schedule identifier",
|
||||
tags = "schedules, write"
|
||||
)]
|
||||
pub async fn patch_schedule(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -321,6 +359,15 @@ pub async fn patch_schedule(
|
|||
}
|
||||
|
||||
/// `DELETE /api/v1/schedules/:id` — permanently remove a schedule.
|
||||
#[onto_api(
|
||||
method = "DELETE",
|
||||
path = "/api/v1/schedules/:id",
|
||||
description = "Permanently removes a scheduled workflow and its run history.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Schedule identifier",
|
||||
tags = "schedules, write"
|
||||
)]
|
||||
pub async fn delete_schedule(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -350,6 +397,16 @@ pub async fn delete_schedule(
|
|||
}
|
||||
|
||||
/// `GET /api/v1/schedules/:id/runs` — execution history (last 100, desc).
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/schedules/:id/runs",
|
||||
description = "Returns the execution run history for a scheduled workflow (last 100, \
|
||||
descending).",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Schedule identifier",
|
||||
tags = "schedules, read"
|
||||
)]
|
||||
pub async fn list_schedule_runs(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -379,6 +436,15 @@ pub async fn list_schedule_runs(
|
|||
///
|
||||
/// Records an auditable `ScheduleRun` with `status = Fired` and advances
|
||||
/// `last_fired_at` / `next_fire_at` exactly like the background scheduler.
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/schedules/:id/fire",
|
||||
description = "Manually fires a scheduled workflow immediately, bypassing the cron timer.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Schedule identifier",
|
||||
tags = "schedules, write"
|
||||
)]
|
||||
pub async fn fire_schedule(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use std::sync::Arc;
|
|||
use axum::{
|
||||
extract::Extension, http::StatusCode, response::IntoResponse, routing::get, Json, Router,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::info;
|
||||
use vapora_swarm::coordinator::SwarmCoordinator;
|
||||
|
|
@ -33,6 +34,15 @@ pub fn swarm_routes() -> Router {
|
|||
}
|
||||
|
||||
/// Get swarm statistics
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/swarm/stats",
|
||||
description = "Returns live swarm statistics including agent counts, average load, and active \
|
||||
tasks.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "swarm, read"
|
||||
)]
|
||||
pub async fn swarm_statistics(
|
||||
Extension(swarm): Extension<Arc<SwarmCoordinator>>,
|
||||
) -> impl IntoResponse {
|
||||
|
|
@ -58,6 +68,14 @@ pub async fn swarm_statistics(
|
|||
}
|
||||
|
||||
/// Get swarm health status
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/swarm/health",
|
||||
description = "Returns the overall health status of the swarm based on agent availability.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "swarm, health, read"
|
||||
)]
|
||||
pub async fn swarm_health(Extension(swarm): Extension<Arc<SwarmCoordinator>>) -> impl IntoResponse {
|
||||
let stats = swarm.get_swarm_stats();
|
||||
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use axum::{
|
|||
response::IntoResponse,
|
||||
Json,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::Deserialize;
|
||||
use vapora_channels::Message;
|
||||
use vapora_shared::models::{Task, TaskPriority, TaskStatus};
|
||||
|
|
@ -56,6 +57,16 @@ pub struct UpdatePriorityPayload {
|
|||
/// List tasks with optional filters
|
||||
///
|
||||
/// GET /api/v1/tasks?project_id=xxx&status=todo&assignee=agent1
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/tasks",
|
||||
description = "Returns tasks for a project, optionally filtered by status or assignee.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "project_id:string:required:Parent project identifier; status:string:optional:Filter \
|
||||
by task status; assignee:string:optional:Filter by assignee",
|
||||
tags = "tasks, read"
|
||||
)]
|
||||
pub async fn list_tasks(
|
||||
State(state): State<AppState>,
|
||||
Query(params): Query<TaskQueryParams>,
|
||||
|
|
@ -88,6 +99,15 @@ pub async fn list_tasks(
|
|||
/// Get a specific task
|
||||
///
|
||||
/// GET /api/v1/tasks/:id
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/tasks/:id",
|
||||
description = "Returns a single task by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Task identifier",
|
||||
tags = "tasks, read"
|
||||
)]
|
||||
pub async fn get_task(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -102,6 +122,14 @@ pub async fn get_task(
|
|||
/// Create a new task
|
||||
///
|
||||
/// POST /api/v1/tasks
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/tasks",
|
||||
description = "Creates a new task, scanning input for prompt-injection before persisting.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
tags = "tasks, write"
|
||||
)]
|
||||
pub async fn create_task(
|
||||
State(state): State<AppState>,
|
||||
Json(mut task): Json<Task>,
|
||||
|
|
@ -116,6 +144,15 @@ pub async fn create_task(
|
|||
/// Update a task
|
||||
///
|
||||
/// PUT /api/v1/tasks/:id
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/tasks/:id",
|
||||
description = "Fully replaces a task record after prompt-injection scan.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
params = "id:string:required:Task identifier",
|
||||
tags = "tasks, write"
|
||||
)]
|
||||
pub async fn update_task(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -134,6 +171,15 @@ pub async fn update_task(
|
|||
/// Delete a task
|
||||
///
|
||||
/// DELETE /api/v1/tasks/:id
|
||||
#[onto_api(
|
||||
method = "DELETE",
|
||||
path = "/api/v1/tasks/:id",
|
||||
description = "Permanently removes a task by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Task identifier",
|
||||
tags = "tasks, write"
|
||||
)]
|
||||
pub async fn delete_task(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -148,6 +194,16 @@ pub async fn delete_task(
|
|||
/// Reorder a task (for Kanban drag & drop)
|
||||
///
|
||||
/// PUT /api/v1/tasks/:id/reorder
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/tasks/:id/reorder",
|
||||
description = "Updates a task's board position and optionally its status for Kanban \
|
||||
drag-and-drop.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
params = "id:string:required:Task identifier",
|
||||
tags = "tasks, write"
|
||||
)]
|
||||
pub async fn reorder_task(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -166,6 +222,15 @@ pub async fn reorder_task(
|
|||
/// Update task status
|
||||
///
|
||||
/// PUT /api/v1/tasks/:id/status
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/tasks/:id/status",
|
||||
description = "Transitions a task to a new status and triggers notifications on completion.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
params = "id:string:required:Task identifier",
|
||||
tags = "tasks, write"
|
||||
)]
|
||||
pub async fn update_task_status(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -195,6 +260,15 @@ pub async fn update_task_status(
|
|||
/// Assign a task to an agent/user
|
||||
///
|
||||
/// PUT /api/v1/tasks/:id/assign
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/tasks/:id/assign",
|
||||
description = "Assigns a task to the specified agent or user.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
params = "id:string:required:Task identifier",
|
||||
tags = "tasks, write"
|
||||
)]
|
||||
pub async fn assign_task(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -213,6 +287,15 @@ pub async fn assign_task(
|
|||
/// Update task priority
|
||||
///
|
||||
/// PUT /api/v1/tasks/:id/priority
|
||||
#[onto_api(
|
||||
method = "PUT",
|
||||
path = "/api/v1/tasks/:id/priority",
|
||||
description = "Updates the priority level of the specified task.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
params = "id:string:required:Task identifier",
|
||||
tags = "tasks, write"
|
||||
)]
|
||||
pub async fn update_priority(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
//! providing unified access to project tracking data.
|
||||
|
||||
use axum::{extract::Query, http::StatusCode, routing::get, Json, Router};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use tracing::info;
|
||||
|
|
@ -47,6 +48,17 @@ pub fn setup_tracking_routes() -> Router {
|
|||
/// # Examples
|
||||
///
|
||||
/// `GET /api/v1/tracking/entries?project=/myproject&limit=50`
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/tracking/entries",
|
||||
description = "Returns tracked change log entries with optional project, source, and limit \
|
||||
filters.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "project:string:optional:Filter by project path; source:string:optional:Filter by \
|
||||
source type; limit:string:optional:Maximum number of results",
|
||||
tags = "tracking, read"
|
||||
)]
|
||||
pub async fn list_tracking_entries(
|
||||
Query(filter): Query<TrackingFilter>,
|
||||
) -> Result<Json<serde_json::Value>, StatusCode> {
|
||||
|
|
@ -68,6 +80,15 @@ pub async fn list_tracking_entries(
|
|||
/// # Examples
|
||||
///
|
||||
/// `GET /api/v1/tracking/summary`
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/tracking/summary",
|
||||
description = "Returns aggregate statistics for tracked entries including change and TODO \
|
||||
counts.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "tracking, read"
|
||||
)]
|
||||
pub async fn get_tracking_summary() -> Result<Json<serde_json::Value>, StatusCode> {
|
||||
info!("Getting tracking summary");
|
||||
|
||||
|
|
@ -92,6 +113,14 @@ pub async fn get_tracking_summary() -> Result<Json<serde_json::Value>, StatusCod
|
|||
/// # Examples
|
||||
///
|
||||
/// `GET /api/v1/tracking/health`
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/tracking/health",
|
||||
description = "Returns liveness status of the tracking service.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "tracking, health, read"
|
||||
)]
|
||||
pub async fn tracking_health() -> Result<Json<serde_json::Value>, StatusCode> {
|
||||
info!("Tracking service health check");
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ use axum::{
|
|||
routing::{get, post},
|
||||
Json, Router,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::{error, info};
|
||||
use vapora_shared::VaporaError;
|
||||
|
|
@ -108,6 +109,14 @@ pub fn orchestrator_routes() -> Router<AppState> {
|
|||
.route("/templates", get(list_templates))
|
||||
}
|
||||
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/workflow_orchestrator",
|
||||
description = "Starts a new multi-stage orchestrated workflow from a named template.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
tags = "workflow_orchestrator, write"
|
||||
)]
|
||||
#[allow(dead_code)]
|
||||
async fn start_workflow(
|
||||
State(state): State<AppState>,
|
||||
|
|
@ -139,6 +148,15 @@ async fn start_workflow(
|
|||
))
|
||||
}
|
||||
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/workflow_orchestrator",
|
||||
description = "Returns all active and completed workflow instances managed by the \
|
||||
orchestrator.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "workflow_orchestrator, read"
|
||||
)]
|
||||
#[allow(dead_code)]
|
||||
async fn list_workflows(
|
||||
State(state): State<AppState>,
|
||||
|
|
@ -158,6 +176,16 @@ async fn list_workflows(
|
|||
Ok(Json(WorkflowListResponse { workflows }))
|
||||
}
|
||||
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/workflow_orchestrator/:id",
|
||||
description = "Returns the current state and stage of a specific orchestrated workflow \
|
||||
instance.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Workflow instance identifier",
|
||||
tags = "workflow_orchestrator, read"
|
||||
)]
|
||||
#[allow(dead_code)]
|
||||
async fn get_workflow(
|
||||
State(state): State<AppState>,
|
||||
|
|
@ -177,6 +205,16 @@ async fn get_workflow(
|
|||
Ok(Json(WorkflowInstanceResponse::from(workflow)))
|
||||
}
|
||||
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/workflow_orchestrator/:id/approve",
|
||||
description = "Approves the current pending stage of an orchestrated workflow, unblocking \
|
||||
execution.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Workflow instance identifier",
|
||||
tags = "workflow_orchestrator, write"
|
||||
)]
|
||||
#[allow(dead_code)]
|
||||
async fn approve_stage(
|
||||
State(state): State<AppState>,
|
||||
|
|
@ -209,6 +247,15 @@ async fn approve_stage(
|
|||
}))
|
||||
}
|
||||
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/workflow_orchestrator/:id/cancel",
|
||||
description = "Cancels a running or approval-pending orchestrated workflow with a reason.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Workflow instance identifier",
|
||||
tags = "workflow_orchestrator, write"
|
||||
)]
|
||||
#[allow(dead_code)]
|
||||
async fn cancel_workflow(
|
||||
State(state): State<AppState>,
|
||||
|
|
@ -241,6 +288,15 @@ async fn cancel_workflow(
|
|||
}))
|
||||
}
|
||||
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/workflow_orchestrator/templates",
|
||||
description = "Returns the names of all registered workflow templates available for \
|
||||
instantiation.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "workflow_orchestrator, read"
|
||||
)]
|
||||
#[allow(dead_code)]
|
||||
async fn list_templates(
|
||||
State(state): State<AppState>,
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ use axum::{
|
|||
routing::{get, post},
|
||||
Json, Router,
|
||||
};
|
||||
use ontoref_derive::onto_api;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::error;
|
||||
use vapora_shared::VaporaError;
|
||||
|
|
@ -53,6 +54,14 @@ pub fn workflow_routes() -> Router<AppState> {
|
|||
}
|
||||
|
||||
/// List all workflows
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/workflows",
|
||||
description = "Returns all workflow definitions stored in the workflow service.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
tags = "workflows, read"
|
||||
)]
|
||||
async fn list_workflows(
|
||||
State(state): State<AppState>,
|
||||
) -> Result<Json<WorkflowListResponse>, ApiError> {
|
||||
|
|
@ -68,6 +77,14 @@ async fn list_workflows(
|
|||
}
|
||||
|
||||
/// Create new workflow from YAML
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/workflows",
|
||||
description = "Parses a YAML workflow definition and persists it to the workflow service.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
tags = "workflows, write"
|
||||
)]
|
||||
async fn create_workflow(
|
||||
State(state): State<AppState>,
|
||||
Json(req): Json<CreateWorkflowRequest>,
|
||||
|
|
@ -102,6 +119,15 @@ async fn create_workflow(
|
|||
}
|
||||
|
||||
/// Get workflow by ID
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/workflows/:id",
|
||||
description = "Returns a single workflow definition by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, ci, admin",
|
||||
params = "id:string:required:Workflow identifier",
|
||||
tags = "workflows, read"
|
||||
)]
|
||||
async fn get_workflow(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -121,6 +147,15 @@ async fn get_workflow(
|
|||
}
|
||||
|
||||
/// Execute workflow
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/workflows/:id/execute",
|
||||
description = "Triggers execution of a stored workflow by its identifier.",
|
||||
auth = "bearer",
|
||||
actors = "developer, agent, admin",
|
||||
params = "id:string:required:Workflow identifier",
|
||||
tags = "workflows, write"
|
||||
)]
|
||||
async fn execute_workflow(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -140,6 +175,15 @@ async fn execute_workflow(
|
|||
}
|
||||
|
||||
/// Rollback failed workflow
|
||||
#[onto_api(
|
||||
method = "POST",
|
||||
path = "/api/v1/workflows/:id/rollback",
|
||||
description = "Rolls back a failed or partially executed workflow to its previous state.",
|
||||
auth = "bearer",
|
||||
actors = "developer, admin",
|
||||
params = "id:string:required:Workflow identifier",
|
||||
tags = "workflows, write"
|
||||
)]
|
||||
async fn rollback_workflow(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
@ -162,6 +206,15 @@ async fn rollback_workflow(
|
|||
}
|
||||
|
||||
/// Get audit trail for workflow
|
||||
#[onto_api(
|
||||
method = "GET",
|
||||
path = "/api/v1/workflows/:id/audit",
|
||||
description = "Returns the full audit trail of state transitions for a workflow instance.",
|
||||
auth = "bearer",
|
||||
actors = "developer, ci, admin",
|
||||
params = "id:string:required:Workflow identifier",
|
||||
tags = "workflows, audit, read"
|
||||
)]
|
||||
async fn get_workflow_audit(
|
||||
State(state): State<AppState>,
|
||||
Path(id): Path<String>,
|
||||
|
|
|
|||
|
|
@ -52,6 +52,12 @@ struct Args {
|
|||
env = "VAPORA_CONFIG"
|
||||
)]
|
||||
config: String,
|
||||
|
||||
/// Print all #[onto_api] registered routes as JSON and exit.
|
||||
/// Pipe to api-catalog.json so the ontoref UI can display this project's
|
||||
/// API surface: `just export-api-catalog`
|
||||
#[arg(long)]
|
||||
dump_api_catalog: bool,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
|
|
@ -59,6 +65,11 @@ async fn main() -> Result<()> {
|
|||
// Parse CLI arguments
|
||||
let args = Args::parse();
|
||||
|
||||
if args.dump_api_catalog {
|
||||
println!("{}", ontoref_ontology::api::dump_catalog_json());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Load environment variables from .env file if present
|
||||
dotenv::dotenv().ok();
|
||||
|
||||
|
|
@ -486,6 +497,8 @@ async fn main() -> Result<()> {
|
|||
)
|
||||
// Workflow management endpoints
|
||||
.nest("/api/v1/workflows", api::workflows::workflow_routes())
|
||||
// API catalog endpoint
|
||||
.route("/api/v1/catalog", get(api::catalog::api_catalog))
|
||||
// Apply CORS, state, and extensions
|
||||
.layer(Extension(swarm_coordinator))
|
||||
.layer(cors)
|
||||
|
|
|
|||
11
crates/vapora-ontology/Cargo.toml
Normal file
11
crates/vapora-ontology/Cargo.toml
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
[package]
|
||||
name = "vapora-ontology"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
stratum-ontology-core = { workspace = true }
|
||||
anyhow = "1"
|
||||
thiserror = "2"
|
||||
tracing = "0.1"
|
||||
133
crates/vapora-ontology/src/lib.rs
Normal file
133
crates/vapora-ontology/src/lib.rs
Normal file
|
|
@ -0,0 +1,133 @@
|
|||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::Result;
|
||||
use stratum_ontology_core::{Membrana, Node, Ontology, TipoSenal};
|
||||
use tracing::instrument;
|
||||
|
||||
/// Vapora-specific ontology accessor.
|
||||
///
|
||||
/// Wraps [`Ontology`] to provide query helpers specific to Vapora's axioms and
|
||||
/// gate structure. The three NCL files (`core.ncl`, `state.ncl`, `gate.ncl`)
|
||||
/// are expected at `{ontology_dir}/*.ncl`.
|
||||
pub struct VaporaOntology {
|
||||
inner: Ontology,
|
||||
dir: PathBuf,
|
||||
}
|
||||
|
||||
impl VaporaOntology {
|
||||
/// Load Vapora's ontology from the given directory.
|
||||
/// Expects `core.ncl`, `state.ncl`, and `gate.ncl` inside `dir`.
|
||||
pub fn load(dir: impl AsRef<Path>) -> Result<Self> {
|
||||
let dir = dir.as_ref().to_owned();
|
||||
let inner = Ontology::load(&dir)?;
|
||||
Ok(Self { inner, dir })
|
||||
}
|
||||
|
||||
/// Reload all three NCL files from disk without recreating the instance.
|
||||
pub fn reload(&mut self) -> Result<()> {
|
||||
self.inner.reload(&self.dir)
|
||||
}
|
||||
|
||||
/// Access the raw [`Ontology`] for advanced queries.
|
||||
pub fn raw(&self) -> &Ontology {
|
||||
&self.inner
|
||||
}
|
||||
|
||||
/// Returns all invariant axiom nodes — the architectural invariants that
|
||||
/// must never be violated in Vapora's design.
|
||||
pub fn invariant_axioms(&self) -> Vec<&Node> {
|
||||
self.inner.core.invariants().collect()
|
||||
}
|
||||
|
||||
/// Returns the node IDs of all invariant axioms.
|
||||
pub fn invariant_axiom_ids(&self) -> Vec<&str> {
|
||||
self.invariant_axioms()
|
||||
.into_iter()
|
||||
.map(|n| n.id.as_str())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Check whether a proposed signal passes through any active gate membrane.
|
||||
///
|
||||
/// Returns the list of active membranas that accept the signal.
|
||||
#[instrument(skip(self), fields(signal = ?signal))]
|
||||
pub fn check_signal(&self, signal: &TipoSenal) -> Vec<&Membrana> {
|
||||
self.inner
|
||||
.gate
|
||||
.active_membranas()
|
||||
.filter(|m| m.acepta.contains(signal))
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns `true` if the signal is accepted by at least one active membrane.
|
||||
pub fn signal_passes_gate(&self, signal: &TipoSenal) -> bool {
|
||||
!self.check_signal(signal).is_empty()
|
||||
}
|
||||
|
||||
/// Returns all active membranas protecting the given node ID.
|
||||
pub fn membranas_protecting(&self, node_id: &str) -> Vec<&Membrana> {
|
||||
self.inner.gate.protecting(node_id).collect()
|
||||
}
|
||||
|
||||
/// Returns dimension IDs where `estado_actual != estado_deseado` —
|
||||
/// dimensions still in transition toward their desired state.
|
||||
pub fn open_dimensions(&self) -> Vec<String> {
|
||||
self.inner
|
||||
.state
|
||||
.dimensions()
|
||||
.iter()
|
||||
.filter(|d| d.estado_actual != d.estado_deseado)
|
||||
.map(|d| d.id.clone())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;

    /// Resolve `<project root>/.ontology` from this crate's manifest directory.
    fn ontology_dir() -> PathBuf {
        PathBuf::from(env!("CARGO_MANIFEST_DIR"))
            .parent()
            .expect("CARGO_MANIFEST_DIR has a parent (crates/)")
            .parent()
            .expect("crates/ has a parent (project root)")
            .join(".ontology")
    }

    #[test]
    fn vapora_ontology_loads_invariants() {
        let dir = ontology_dir();
        if !dir.exists() {
            // No .ontology checkout present; nothing to verify here.
            return;
        }
        let ontology = VaporaOntology::load(&dir).expect("load vapora ontology");
        let axioms = ontology.invariant_axiom_ids();
        // Each required invariant axiom must appear in the loaded core.
        for (id, msg) in [
            ("async-first", "async-first axiom must be present"),
            ("budget-boundary", "budget-boundary axiom must be present"),
            ("provider-abstraction", "provider-abstraction axiom must be present"),
        ] {
            assert!(axioms.contains(&id), "{}", msg);
        }
    }

    #[test]
    fn cost_dimension_is_open() {
        let dir = ontology_dir();
        if !dir.exists() {
            // No .ontology checkout present; nothing to verify here.
            return;
        }
        let ontology = VaporaOntology::load(&dir).expect("load vapora ontology");
        let open = ontology.open_dimensions();
        assert!(open.contains(&"cost".to_string()), "cost dimension should be open");
    }
}
|
||||
11
crates/vapora-reflection/Cargo.toml
Normal file
11
crates/vapora-reflection/Cargo.toml
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
[package]
|
||||
name = "vapora-reflection"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
stratum-reflection-core = { workspace = true }
|
||||
stratum-state = { workspace = true }
|
||||
anyhow = "1"
|
||||
tracing = "0.1"
|
||||
108
crates/vapora-reflection/src/lib.rs
Normal file
108
crates/vapora-reflection/src/lib.rs
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
use std::{
|
||||
collections::HashMap,
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use stratum_reflection_core::{ModeRun, ReflectionMode, RunContext};
|
||||
use stratum_state::StateTracker;
|
||||
|
||||
/// Modes available for Vapora automation, resolved relative to `modes_dir`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VaporaMode {
    CreateAgentTask,
    DeployService,
}

impl VaporaMode {
    /// File name of the NCL definition backing this mode, relative to the
    /// modes directory.
    fn filename(self) -> &'static str {
        match self {
            VaporaMode::CreateAgentTask => "create_agent_task.ncl",
            VaporaMode::DeployService => "deploy_vapora_service.ncl",
        }
    }
}
|
||||
|
||||
/// Vapora reflection executor.
|
||||
///
|
||||
/// Resolves NCL mode files from a local `reflection/modes/` directory and
|
||||
/// delegates execution to `stratum-reflection-core`.
|
||||
pub struct VaporaReflection {
|
||||
modes_dir: PathBuf,
|
||||
}
|
||||
|
||||
impl VaporaReflection {
|
||||
/// Create an executor pointing at the given `modes_dir` (typically
|
||||
/// `vapora/reflection/modes/`).
|
||||
pub fn new(modes_dir: impl AsRef<Path>) -> Self {
|
||||
Self {
|
||||
modes_dir: modes_dir.as_ref().to_owned(),
|
||||
}
|
||||
}
|
||||
|
||||
fn mode_path(&self, mode: VaporaMode) -> PathBuf {
|
||||
self.modes_dir.join(mode.filename())
|
||||
}
|
||||
|
||||
/// Load and validate a mode NCL file without executing it.
|
||||
pub fn load_mode(&self, mode: VaporaMode) -> Result<ReflectionMode> {
|
||||
let path = self.mode_path(mode);
|
||||
ReflectionMode::load(&path)
|
||||
.with_context(|| format!("loading vapora mode {:?} from {}", mode, path.display()))
|
||||
}
|
||||
|
||||
/// Execute a Vapora reflection mode.
|
||||
///
|
||||
/// `params` is a map of `{key}` placeholder values referenced in step
|
||||
/// `cmd` fields. `state` receives `PipelineRun` and `StepRecord` writes.
|
||||
pub async fn execute(
|
||||
&self,
|
||||
mode: VaporaMode,
|
||||
params: HashMap<String, String>,
|
||||
project: impl Into<String>,
|
||||
state: Arc<dyn StateTracker>,
|
||||
) -> Result<ModeRun> {
|
||||
let reflection_mode = self.load_mode(mode)?;
|
||||
let ctx = RunContext {
|
||||
project: project.into(),
|
||||
params,
|
||||
state,
|
||||
};
|
||||
reflection_mode
|
||||
.execute(&ctx)
|
||||
.await
|
||||
.with_context(|| format!("executing vapora mode {:?}", mode))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mode_path_resolves_correctly() {
        let reflection = VaporaReflection::new("/tmp/modes");
        // Each mode must map to its NCL file inside the configured modes dir.
        let cases = [
            (VaporaMode::CreateAgentTask, "/tmp/modes/create_agent_task.ncl"),
            (VaporaMode::DeployService, "/tmp/modes/deploy_vapora_service.ncl"),
        ];
        for (mode, expected) in cases {
            assert_eq!(reflection.mode_path(mode), PathBuf::from(expected));
        }
    }

    #[test]
    fn modes_dir_resolves_from_manifest() {
        let project_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
            .parent()
            .expect("CARGO_MANIFEST_DIR has a parent (crates/)")
            .parent()
            .expect("crates/ has a parent (project root)")
            .to_path_buf();
        let dir = project_dir.join("reflection").join("modes");
        assert!(dir.ends_with("reflection/modes"));
    }
}
|
||||
|
|
@ -7,6 +7,12 @@ license.workspace = true
|
|||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["jj", "git"]
|
||||
jj = []
|
||||
git = []
|
||||
full = ["jj", "git"]
|
||||
|
||||
[dependencies]
|
||||
tokio = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
|
|
@ -15,4 +21,6 @@ tracing = { workspace = true }
|
|||
chrono = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = { workspace = true }
|
||||
|
|
|
|||
116
crates/vapora-worktree/src/backend.rs
Normal file
116
crates/vapora-worktree/src/backend.rs
Normal file
|
|
@ -0,0 +1,116 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
/// Capabilities reported by a VCS backend.
///
/// Protocol layers (`jjw`, agent coordinator) use these flags to decide what
/// operations are safe rather than switching on the backend name.
/// Each flag's doc notes the value each known backend reports.
#[derive(Clone, Debug)]
pub struct VcsCapabilities {
    /// jj: true — conflicts are first-class data, merges always produce a
    /// commit. git: false — merge conflict causes a hard error.
    pub conflicts_as_data: bool,
    /// jj: true — change_id survives rebase; safe to use as persistent
    /// identifier. git: false — SHA changes on every rebase.
    pub stable_change_ids: bool,
    /// jj: true — `jj workspace add/forget` are native operations.
    /// git: false — worktrees exist but are a different abstraction.
    pub native_workspaces: bool,
    /// jj: false — no hook system.
    /// git: true — pre-commit, post-merge, etc.
    pub hooks_supported: bool,
}
|
||||
|
||||
/// VCS-specific metadata stored per workspace.
///
/// One variant per supported backend; carried in [`WorkspaceInfo`] so later
/// operations can address the workspace without re-querying the VCS.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum VcsMetadata {
    /// Metadata recorded by the jj backend.
    Jj {
        /// Stable change ID — survives rebase, safe as persistent reference.
        change_id: String,
        /// jj workspace name used with `jj workspace forget`.
        workspace_name: String,
    },
    /// Metadata recorded by the git backend.
    Git {
        /// Branch name (`agent/<agent_id>/<workspace_id>`).
        branch: String,
        /// HEAD commit SHA at workspace creation time.
        commit_sha: String,
    },
}
|
||||
|
||||
/// Data transfer object returned by backend operations.
///
/// Has no mutation methods — use [`crate::handle::WorktreeHandle`] for
/// activation state.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct WorkspaceInfo {
    /// Workspace identifier — presumably the `workspace_id` passed at
    /// creation; confirm against backend implementations.
    pub id: String,
    /// Agent that owns this workspace.
    pub agent_id: String,
    /// Filesystem location of the workspace checkout.
    pub path: PathBuf,
    /// Backend-specific metadata; see [`VcsMetadata`].
    pub vcs: VcsMetadata,
    /// Workspace creation time (UTC).
    pub created_at: DateTime<Utc>,
}
|
||||
|
||||
/// A file-level conflict detected during merge preview.
#[derive(Clone, Debug, Default)]
pub struct ConflictInfo {
    /// Path of the conflicting file.
    pub path: PathBuf,
    /// Free-form description of the conflict, as reported by the backend.
    pub description: String,
}
|
||||
|
||||
/// Result of a merge preview.
///
/// Semantics differ by backend — see [`WorkspaceBackend::check_merge`].
#[derive(Debug)]
pub struct MergeStatus {
    /// git: false if dry-run detected conflicts. jj: always true (conflicts are
    /// data).
    pub can_merge: bool,
    /// Conflicts found during the preview. Per `check_merge`, jj does not
    /// detect conflicts upfront, so this is populated by the git dry-run path.
    pub conflicts: Vec<ConflictInfo>,
    /// Files under `.ontology/` modified in this workspace.
    /// The protocol layer decides if these are blocking.
    pub ontology_files_modified: Vec<PathBuf>,
}
|
||||
|
||||
/// VCS backend abstraction for workspace lifecycle operations.
///
/// Use `Box<dyn WorkspaceBackend>` — every operation spawns a subprocess so
/// vtable overhead is negligible, and a single concrete `WorktreeManager` type
/// avoids propagating type parameters to callers.
#[async_trait]
pub trait WorkspaceBackend: Send + Sync + 'static {
    /// Static name identifying this backend implementation.
    fn name(&self) -> &'static str;
    /// Feature flags for this backend; see [`VcsCapabilities`].
    fn capabilities(&self) -> VcsCapabilities;

    /// Create a new workspace for `agent_id`, addressed by `workspace_id`,
    /// and return its [`WorkspaceInfo`].
    async fn create_workspace(&self, agent_id: &str, workspace_id: &str) -> Result<WorkspaceInfo>;

    /// Preview merge readiness.
    ///
    /// **git**: destructive dry-run (`git merge --no-commit`) then rollback
    /// (`git merge --abort`). The working tree is temporarily mutated — not
    /// atomic if the process dies between the two steps.
    ///
    /// **jj**: non-destructive. Reports only which `.ontology/` files are
    /// modified. Conflicts are not detected upfront since jj stores them as
    /// data in the merge commit.
    async fn check_merge(&self, workspace_id: &str, target: &str) -> Result<MergeStatus>;

    /// Execute the merge. Does not remove the workspace — call
    /// `remove_workspace` after.
    async fn merge_workspace(&self, workspace_id: &str, target: &str) -> Result<()>;

    /// Remove the workspace. NOTE(review): presumably deletes its on-disk
    /// checkout as well — confirm in implementors.
    async fn remove_workspace(&self, workspace_id: &str) -> Result<()>;

    /// Workspaces visible to the VCS that live under the managed workspace
    /// base.
    async fn list_workspaces(&self) -> Result<Vec<WorkspaceInfo>>;

    /// Push the workspace's work to `remote` on behalf of `agent_id`.
    /// Exact ref/bookmark semantics are backend-specific — see implementors.
    async fn push_to_remote(&self, workspace_id: &str, agent_id: &str, remote: &str) -> Result<()>;

    /// Clean up workspaces the VCS still tracks but that are no longer
    /// managed. NOTE(review): orphan criteria live in implementors — confirm.
    async fn cleanup_orphaned(&self) -> Result<()>;
}
|
||||
62
crates/vapora-worktree/src/detect.rs
Normal file
62
crates/vapora-worktree/src/detect.rs
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
use std::path::Path;
|
||||
|
||||
/// Which VCS is active in a directory, detected by filesystem inspection.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VcsDetection {
    /// Both `.jj/` and `.git/` present — jj colocated mode (preferred for
    /// Radicle interop).
    JjColocated,
    /// Only `.jj/` present.
    Jj,
    /// Only `.git/` present.
    Git,
    /// Neither found.
    None,
}

/// Detect VCS by inspecting `path` for `.jj/` and `.git/` directories.
pub fn detect_vcs(path: &Path) -> VcsDetection {
    let jj_present = path.join(".jj").is_dir();
    let git_present = path.join(".git").is_dir();
    if jj_present && git_present {
        // Both markers: colocated jj-on-git repository.
        VcsDetection::JjColocated
    } else if jj_present {
        VcsDetection::Jj
    } else if git_present {
        VcsDetection::Git
    } else {
        VcsDetection::None
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use tempfile::TempDir;

    use super::*;

    // Each case builds an empty temp directory and creates only the VCS
    // marker directories (`.git/`, `.jj/`) relevant to the scenario.

    #[test]
    fn detect_none() {
        let dir = TempDir::new().unwrap();
        assert_eq!(detect_vcs(dir.path()), VcsDetection::None);
    }

    #[test]
    fn detect_git_only() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir(dir.path().join(".git")).unwrap();
        assert_eq!(detect_vcs(dir.path()), VcsDetection::Git);
    }

    #[test]
    fn detect_jj_only() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir(dir.path().join(".jj")).unwrap();
        assert_eq!(detect_vcs(dir.path()), VcsDetection::Jj);
    }

    #[test]
    fn detect_colocated() {
        let dir = TempDir::new().unwrap();
        // Order of creation is irrelevant; both markers present => colocated.
        std::fs::create_dir(dir.path().join(".jj")).unwrap();
        std::fs::create_dir(dir.path().join(".git")).unwrap();
        assert_eq!(detect_vcs(dir.path()), VcsDetection::JjColocated);
    }
}
|
||||
|
|
@ -2,25 +2,37 @@ use thiserror::Error;
|
|||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum WorktreeError {
|
||||
#[error("Failed to create worktree: {0}")]
|
||||
#[error("Failed to create workspace: {0}")]
|
||||
CreationFailed(String),
|
||||
|
||||
#[error("Failed to remove worktree: {0}")]
|
||||
#[error("Failed to remove workspace: {0}")]
|
||||
RemovalFailed(String),
|
||||
|
||||
#[error("Worktree not found: {0}")]
|
||||
#[error("Workspace not found: {0}")]
|
||||
NotFound(String),
|
||||
|
||||
#[error("Git operation failed: {0}")]
|
||||
GitError(String),
|
||||
|
||||
#[error("Merge conflict detected in: {0}")]
|
||||
#[error("jj operation failed: {0}")]
|
||||
JjError(String),
|
||||
|
||||
#[error("Merge conflict in: {0}")]
|
||||
MergeConflict(String),
|
||||
|
||||
#[error(".ontology/ write denied in degraded git mode — affected: {0:?}")]
|
||||
OntologyWriteDenied(Vec<String>),
|
||||
|
||||
#[error("Max concurrent workspaces exceeded (limit: {limit})")]
|
||||
MaxConcurrentExceeded { limit: usize },
|
||||
|
||||
#[error("No VCS detected at: {0}")]
|
||||
VcsDetectionFailed(String),
|
||||
|
||||
#[error("IO error: {0}")]
|
||||
IoError(#[from] std::io::Error),
|
||||
|
||||
#[error("Invalid worktree state: {0}")]
|
||||
#[error("Invalid workspace state: {0}")]
|
||||
InvalidState(String),
|
||||
|
||||
#[error("Timeout waiting for operation")]
|
||||
|
|
|
|||
599
crates/vapora-worktree/src/git.rs
Normal file
599
crates/vapora-worktree/src/git.rs
Normal file
|
|
@ -0,0 +1,599 @@
|
|||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use chrono::Utc;
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::backend::{
|
||||
ConflictInfo, MergeStatus, VcsCapabilities, VcsMetadata, WorkspaceBackend, WorkspaceInfo,
|
||||
};
|
||||
use crate::error::{Result, WorktreeError};
|
||||
|
||||
/// Constraints for the git backend.
|
||||
/// Constraints for the git backend.
pub struct GitConstraints {
    /// Maximum workspaces that may exist simultaneously under `workspace_base`.
    pub max_concurrent: usize,
    /// Upper bound for a merge operation.
    // NOTE(review): never read in the visible GitBackend implementation —
    // confirm it is enforced by a caller or wire it into merge_workspace.
    pub merge_timeout: Duration,
    /// How long a workspace may remain inactive before being considered stale.
    // NOTE(review): never read in the visible GitBackend implementation —
    // presumably intended for cleanup policy; verify.
    pub max_inactive: Duration,
    /// Whether `check_merge` is required before `merge_workspace`.
    // NOTE(review): never read in the visible GitBackend implementation —
    // confirm where this flag is enforced.
    pub require_pre_check: bool,
    /// Reject any merge that touches `.ontology/` files.
    ///
    /// Default `false` — set `true` only when using git as a **degraded
    /// fallback** in a project that normally runs jj, to prevent ontology
    /// state drift through a non-jj code path.
    pub deny_ontology_writes: bool,
}

impl Default for GitConstraints {
    // Defaults: 3 concurrent workspaces, 30s merge timeout, 1h inactivity,
    // pre-check required, ontology writes allowed.
    fn default() -> Self {
        Self {
            max_concurrent: 3,
            merge_timeout: Duration::from_secs(30),
            max_inactive: Duration::from_secs(3600),
            require_pre_check: true,
            deny_ontology_writes: false,
        }
    }
}
|
||||
|
||||
/// Git-worktree implementation of the workspace backend.
///
/// Every operation shells out to the `git` CLI (`std::process::Command`);
/// no in-process git library is used.
pub struct GitBackend {
    /// Root of the primary repository; must exist at construction time.
    repo_path: PathBuf,
    /// Directory under which all managed worktrees are created.
    workspace_base: PathBuf,
    /// Operational limits and guards (concurrency cap, ontology guard, …).
    constraints: GitConstraints,
}
|
||||
|
||||
impl GitBackend {
|
||||
pub fn new(
|
||||
repo_path: PathBuf,
|
||||
workspace_base: PathBuf,
|
||||
constraints: GitConstraints,
|
||||
) -> Result<Self> {
|
||||
if !repo_path.exists() {
|
||||
return Err(WorktreeError::InvalidState(format!(
|
||||
"Repository path does not exist: {}",
|
||||
repo_path.display()
|
||||
)));
|
||||
}
|
||||
if !workspace_base.exists() {
|
||||
std::fs::create_dir_all(&workspace_base)?;
|
||||
}
|
||||
Ok(Self {
|
||||
repo_path,
|
||||
workspace_base,
|
||||
constraints,
|
||||
})
|
||||
}
|
||||
|
||||
fn branch_name(agent_id: &str, workspace_id: &str) -> String {
|
||||
format!("agent/{agent_id}/{workspace_id}")
|
||||
}
|
||||
|
||||
fn workspace_path(&self, workspace_id: &str) -> PathBuf {
|
||||
self.workspace_base.join(workspace_id)
|
||||
}
|
||||
|
||||
/// Read the HEAD branch of a git worktree at `path`.
|
||||
fn head_branch(path: &Path) -> Result<String> {
|
||||
let out = Command::new("git")
|
||||
.current_dir(path)
|
||||
.args(["rev-parse", "--abbrev-ref", "HEAD"])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(e.to_string()))?;
|
||||
if !out.status.success() {
|
||||
return Err(WorktreeError::NotFound(path.display().to_string()));
|
||||
}
|
||||
Ok(String::from_utf8_lossy(&out.stdout).trim().to_string())
|
||||
}
|
||||
|
||||
/// Files under `.ontology/` that differ between `target` and `branch` in
|
||||
/// the repo.
|
||||
fn ontology_diff(&self, branch: &str, target: &str) -> Result<Vec<PathBuf>> {
|
||||
let out = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["diff", "--name-only", target, branch])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(format!("diff failed: {e}")))?;
|
||||
let stdout = String::from_utf8_lossy(&out.stdout);
|
||||
Ok(stdout
|
||||
.lines()
|
||||
.filter(|f| f.starts_with(".ontology/"))
|
||||
.map(PathBuf::from)
|
||||
.collect())
|
||||
}
|
||||
|
||||
/// Error if `deny_ontology_writes` is set and `.ontology/` is modified.
|
||||
fn enforce_ontology_guard(&self, branch: &str, target: &str) -> Result<()> {
|
||||
if !self.constraints.deny_ontology_writes {
|
||||
return Ok(());
|
||||
}
|
||||
let modified = self.ontology_diff(branch, target)?;
|
||||
if !modified.is_empty() {
|
||||
return Err(WorktreeError::OntologyWriteDenied(
|
||||
modified
|
||||
.iter()
|
||||
.map(|p| p.to_string_lossy().into_owned())
|
||||
.collect(),
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn count_workspace_dirs(&self) -> Result<usize> {
|
||||
Ok(std::fs::read_dir(&self.workspace_base)?
|
||||
.flatten()
|
||||
.filter(|e| e.file_type().map(|t| t.is_dir()).unwrap_or(false))
|
||||
.count())
|
||||
}
|
||||
|
||||
fn head_sha(&self) -> String {
|
||||
Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["rev-parse", "HEAD"])
|
||||
.output()
|
||||
.ok()
|
||||
.and_then(|o| {
|
||||
if o.status.success() {
|
||||
Some(String::from_utf8_lossy(&o.stdout).trim().to_string())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
impl WorkspaceBackend for GitBackend {
    fn name(&self) -> &'static str {
        "git"
    }

    // git has no first-class workspaces or stable change ids; it does
    // support hooks, unlike jj.
    fn capabilities(&self) -> VcsCapabilities {
        VcsCapabilities {
            conflicts_as_data: false,
            stable_change_ids: false,
            native_workspaces: false,
            hooks_supported: true,
        }
    }

    /// Create a worktree at `<workspace_base>/<workspace_id>` on a fresh
    /// branch `agent/<agent_id>/<workspace_id>`.
    ///
    /// Enforces the `max_concurrent` cap by counting directories under the
    /// workspace base.
    // NOTE(review): the count-then-create sequence is not atomic — two
    // concurrent calls can both pass the cap check. Confirm callers
    // serialize workspace creation.
    async fn create_workspace(&self, agent_id: &str, workspace_id: &str) -> Result<WorkspaceInfo> {
        let active = self.count_workspace_dirs()?;
        if active >= self.constraints.max_concurrent {
            return Err(WorktreeError::MaxConcurrentExceeded {
                limit: self.constraints.max_concurrent,
            });
        }

        let branch = Self::branch_name(agent_id, workspace_id);
        let path = self.workspace_path(workspace_id);
        let path_str = path
            .to_str()
            .ok_or_else(|| WorktreeError::InvalidState("Non-UTF8 workspace path".into()))?;

        let out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["worktree", "add", "-b", &branch, path_str])
            .output()
            .map_err(|e| WorktreeError::GitError(format!("worktree add: {e}")))?;

        if !out.status.success() {
            return Err(WorktreeError::CreationFailed(
                String::from_utf8_lossy(&out.stderr).into_owned(),
            ));
        }

        let commit_sha = self.head_sha();
        info!("Created git workspace {workspace_id} for agent {agent_id} on branch {branch}");

        Ok(WorkspaceInfo {
            id: workspace_id.to_string(),
            agent_id: agent_id.to_string(),
            path,
            vcs: VcsMetadata::Git { branch, commit_sha },
            created_at: Utc::now(),
        })
    }

    /// Destructive dry-run: `git merge --no-commit` then `git merge --abort`.
    ///
    /// The working tree is temporarily mutated. If the process dies between
    /// the two steps, the repo will be in merge state and require manual abort.
    // NOTE(review): the dry-run merges `branch` into whatever HEAD is
    // currently checked out in `repo_path`; the fetch below updates the
    // `target` ref but nothing switches to it. Confirm `repo_path` is
    // expected to sit on `target`.
    async fn check_merge(&self, workspace_id: &str, target: &str) -> Result<MergeStatus> {
        let ws_path = self.workspace_path(workspace_id);
        let branch = Self::head_branch(&ws_path)?;
        let ontology_files = self.ontology_diff(&branch, target)?;

        self.enforce_ontology_guard(&branch, target)?;

        // Best-effort fetch to ensure target is up to date
        let _ = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["fetch", "origin", &format!("{target}:{target}")])
            .output();

        let merge_out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["merge", "--no-commit", "--no-ff", "--no-stat", &branch])
            .output()
            .map_err(|e| WorktreeError::GitError(format!("merge dry-run: {e}")))?;

        // Any non-zero exit from the dry-run is treated as a conflict.
        let has_conflict = !merge_out.status.success();

        // Always abort — even on success (we only wanted the preview)
        let abort_out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["merge", "--abort"])
            .output()
            .map_err(|e| WorktreeError::GitError(format!("merge abort: {e}")))?;

        if !abort_out.status.success() {
            // Abort can legitimately fail when the dry-run never started a
            // merge; log and continue rather than fail the preview.
            warn!(
                "merge --abort returned non-zero for workspace {workspace_id}: {}",
                String::from_utf8_lossy(&abort_out.stderr)
            );
        }

        // On conflict we report a single entry keyed by the branch name with
        // git's stderr as the description — individual conflicted files are
        // not enumerated here.
        let conflicts = if has_conflict {
            vec![ConflictInfo {
                path: PathBuf::from(&branch),
                description: String::from_utf8_lossy(&merge_out.stderr).into_owned(),
            }]
        } else {
            vec![]
        };

        Ok(MergeStatus {
            can_merge: !has_conflict,
            conflicts,
            ontology_files_modified: ontology_files,
        })
    }

    /// Merge the workspace branch into the current checkout of `repo_path`.
    ///
    /// Applies the ontology guard first; a non-zero `git merge` exit is
    /// reported as [`WorktreeError::MergeConflict`].
    async fn merge_workspace(&self, workspace_id: &str, target: &str) -> Result<()> {
        let ws_path = self.workspace_path(workspace_id);
        let branch = Self::head_branch(&ws_path)?;

        self.enforce_ontology_guard(&branch, target)?;

        let out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["merge", "--no-edit", &branch])
            .output()
            .map_err(|e| WorktreeError::GitError(format!("merge: {e}")))?;

        if !out.status.success() {
            return Err(WorktreeError::MergeConflict(
                String::from_utf8_lossy(&out.stderr).into_owned(),
            ));
        }

        info!("Merged git workspace {workspace_id} into {target}");
        Ok(())
    }

    /// Force-remove the worktree, then best-effort delete its branch.
    /// Failures in either step are logged, not returned.
    async fn remove_workspace(&self, workspace_id: &str) -> Result<()> {
        let path = self.workspace_path(workspace_id);

        // Best-effort branch lookup before we remove the worktree
        let branch = Self::head_branch(&path).unwrap_or_default();

        let path_str = path.to_str().unwrap_or_default();
        let rm_out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["worktree", "remove", "-f", path_str])
            .output()
            .map_err(|e| WorktreeError::RemovalFailed(e.to_string()))?;

        if !rm_out.status.success() {
            warn!(
                "worktree remove -f had issues for {workspace_id}: {}",
                String::from_utf8_lossy(&rm_out.stderr)
            );
        }

        // Remove branch — best effort, workspace may be on a detached HEAD
        if !branch.is_empty() {
            let _ = Command::new("git")
                .current_dir(&self.repo_path)
                .args(["branch", "-D", &branch])
                .output();
        }

        info!("Removed git workspace {workspace_id}");
        Ok(())
    }

    /// Parse `git worktree list --porcelain` and keep only worktrees located
    /// under `workspace_base`.
    // NOTE(review): `created_at` is set to Utc::now() at list time — the
    // real creation timestamp is not persisted anywhere visible here.
    async fn list_workspaces(&self) -> Result<Vec<WorkspaceInfo>> {
        let out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["worktree", "list", "--porcelain"])
            .output()
            .map_err(|e| WorktreeError::GitError(e.to_string()))?;

        if !out.status.success() {
            return Err(WorktreeError::GitError(
                String::from_utf8_lossy(&out.stderr).into_owned(),
            ));
        }

        let stdout = String::from_utf8_lossy(&out.stdout);
        let mut workspaces = Vec::new();

        // Porcelain format: blocks separated by blank lines
        // Each block: "worktree <path>\nHEAD <sha>\nbranch refs/heads/<name>"
        for block in stdout.split("\n\n") {
            let mut wt_path: Option<String> = None;
            let mut branch: Option<String> = None;
            let mut sha: Option<String> = None;

            for line in block.lines() {
                if let Some(p) = line.strip_prefix("worktree ") {
                    wt_path = Some(p.to_string());
                } else if let Some(b) = line.strip_prefix("branch refs/heads/") {
                    branch = Some(b.to_string());
                } else if let Some(s) = line.strip_prefix("HEAD ") {
                    sha = Some(s.to_string());
                }
            }

            // Skip blocks missing any field (e.g. detached-HEAD worktrees
            // have no "branch" line).
            let (Some(p), Some(b), Some(s)) = (wt_path, branch, sha) else {
                continue;
            };

            let path = PathBuf::from(&p);
            if !path.starts_with(&self.workspace_base) {
                continue;
            }

            let workspace_id = path
                .file_name()
                .and_then(|n| n.to_str())
                .unwrap_or_default()
                .to_string();

            // agent_id is embedded in branch: agent/<agent_id>/<workspace_id>
            let agent_id = b
                .strip_prefix("agent/")
                .and_then(|rest| rest.split('/').next())
                .unwrap_or("unknown")
                .to_string();

            workspaces.push(WorkspaceInfo {
                id: workspace_id,
                agent_id,
                path,
                vcs: VcsMetadata::Git {
                    branch: b,
                    commit_sha: s,
                },
                created_at: Utc::now(),
            });
        }

        Ok(workspaces)
    }

    /// Push the workspace's branch to `remote`.
    /// `agent_id` is unused by the git backend (the id is already encoded in
    /// the branch name).
    async fn push_to_remote(
        &self,
        workspace_id: &str,
        _agent_id: &str,
        remote: &str,
    ) -> Result<()> {
        let ws_path = self.workspace_path(workspace_id);
        let branch = Self::head_branch(&ws_path)?;

        let out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["push", remote, &branch])
            .output()
            .map_err(|e| WorktreeError::GitError(e.to_string()))?;

        if !out.status.success() {
            return Err(WorktreeError::GitError(
                String::from_utf8_lossy(&out.stderr).into_owned(),
            ));
        }
        Ok(())
    }

    /// Force-remove every worktree located under `workspace_base`.
    // NOTE(review): no orphan/staleness check is performed — this removes
    // ALL managed worktrees, active or not. Confirm this is only invoked
    // when no agents are running.
    async fn cleanup_orphaned(&self) -> Result<()> {
        let out = Command::new("git")
            .current_dir(&self.repo_path)
            .args(["worktree", "list"])
            .output()
            .map_err(|e| WorktreeError::GitError(e.to_string()))?;

        let stdout = String::from_utf8_lossy(&out.stdout);
        for line in stdout.lines() {
            if line.is_empty() {
                continue;
            }
            // Non-porcelain format: the path is the first whitespace-separated
            // field of each line.
            if let Some(path_str) = line.split_whitespace().next() {
                let path = PathBuf::from(path_str);
                if path.starts_with(&self.workspace_base) {
                    warn!("Removing orphaned git worktree: {}", path.display());
                    let _ = Command::new("git")
                        .current_dir(&self.repo_path)
                        .args(["worktree", "remove", "-f", path_str])
                        .output();
                }
            }
        }
        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::process::Command;

    use tempfile::TempDir;

    use super::*;

    /// Run a git command in `dir`. Only spawn failures panic; the exit
    /// status is deliberately ignored, matching the original setup.
    fn git(dir: &Path, args: &[&str]) {
        Command::new("git")
            .current_dir(dir)
            .args(args)
            .output()
            .unwrap();
    }

    /// Repo with identity config and one root commit containing `README`.
    fn init_git_repo(path: &Path) {
        git(path, &["init"]);
        git(path, &["config", "user.email", "test@test.com"]);
        git(path, &["config", "user.name", "Test"]);
        std::fs::write(path.join("README"), "init").unwrap();
        git(path, &["add", "."]);
        git(path, &["commit", "-m", "init"]);
    }

    /// Backend over `repo` / `ws` with default constraints.
    fn backend(repo: &Path, ws: &Path) -> GitBackend {
        GitBackend::new(repo.to_path_buf(), ws.to_path_buf(), GitConstraints::default()).unwrap()
    }

    #[test]
    fn capabilities_correct() {
        let repo = TempDir::new().unwrap();
        let ws = TempDir::new().unwrap();
        init_git_repo(repo.path());

        let caps = backend(repo.path(), ws.path()).capabilities();
        assert!(caps.hooks_supported);
        assert!(!caps.conflicts_as_data);
        assert!(!caps.stable_change_ids);
        assert!(!caps.native_workspaces);
    }

    #[tokio::test]
    async fn create_and_remove() {
        let repo = TempDir::new().unwrap();
        let ws = TempDir::new().unwrap();
        init_git_repo(repo.path());

        let b = backend(repo.path(), ws.path());
        let created = b.create_workspace("agent-001", "ws-0001").await.unwrap();

        assert_eq!(created.id, "ws-0001");
        assert_eq!(created.agent_id, "agent-001");
        assert!(created.path.exists());
        assert!(matches!(created.vcs, VcsMetadata::Git { .. }));

        b.remove_workspace("ws-0001").await.unwrap();
        assert!(!created.path.exists());
    }

    #[tokio::test]
    async fn max_concurrent_enforced() {
        let repo = TempDir::new().unwrap();
        let ws = TempDir::new().unwrap();
        init_git_repo(repo.path());

        let constraints = GitConstraints {
            max_concurrent: 2,
            ..Default::default()
        };
        let b = GitBackend::new(repo.path().to_path_buf(), ws.path().to_path_buf(), constraints)
            .unwrap();

        b.create_workspace("agent-001", "ws-0001").await.unwrap();
        b.create_workspace("agent-001", "ws-0002").await.unwrap();

        // The third workspace must trip the concurrency cap.
        let err = b
            .create_workspace("agent-001", "ws-0003")
            .await
            .unwrap_err();
        assert!(matches!(
            err,
            WorktreeError::MaxConcurrentExceeded { limit: 2 }
        ));
    }

    #[tokio::test]
    async fn deny_ontology_writes_blocks_check_merge() {
        let repo = TempDir::new().unwrap();
        let ws = TempDir::new().unwrap();
        init_git_repo(repo.path());

        let constraints = GitConstraints {
            deny_ontology_writes: true,
            ..Default::default()
        };
        let b = GitBackend::new(repo.path().to_path_buf(), ws.path().to_path_buf(), constraints)
            .unwrap();

        let info = b.create_workspace("agent-001", "ws-0001").await.unwrap();

        // Commit a .ontology/ file in the workspace
        let onto_dir = info.path.join(".ontology");
        std::fs::create_dir_all(&onto_dir).unwrap();
        std::fs::write(onto_dir.join("core.ncl"), "let x = 1").unwrap();
        git(&info.path, &["add", "."]);
        git(&info.path, &["commit", "-m", "add ontology"]);

        let err = b.check_merge("ws-0001", "main").await.unwrap_err();
        assert!(matches!(err, WorktreeError::OntologyWriteDenied(_)));
    }

    #[tokio::test]
    async fn ontology_files_populated_when_not_denied() {
        let repo = TempDir::new().unwrap();
        let ws = TempDir::new().unwrap();
        init_git_repo(repo.path());

        let b = backend(repo.path(), ws.path()); // deny_ontology_writes: false
        let info = b.create_workspace("agent-001", "ws-0001").await.unwrap();

        let onto_dir = info.path.join(".ontology");
        std::fs::create_dir_all(&onto_dir).unwrap();
        std::fs::write(onto_dir.join("state.ncl"), "let s = true").unwrap();
        git(&info.path, &["add", "."]);
        git(&info.path, &["commit", "-m", "state"]);

        let status = b.check_merge("ws-0001", "main").await.unwrap();
        assert!(
            status
                .ontology_files_modified
                .iter()
                .any(|p| p.ends_with("state.ncl")),
            "expected state.ncl in {:?}",
            status.ontology_files_modified
        );
    }
}
|
||||
|
|
@ -1,52 +1,64 @@
|
|||
use std::path::PathBuf;
|
||||
use std::path::Path;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use uuid::Uuid;
|
||||
use crate::backend::{VcsMetadata, WorkspaceInfo};
|
||||
use crate::error::{Result, WorktreeError};
|
||||
|
||||
use crate::error::Result;
|
||||
|
||||
/// Handle to an active worktree managed by the system
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
/// Active workspace handle with lifecycle state.
|
||||
///
|
||||
/// Wraps [`WorkspaceInfo`] (the backend's DTO) with an `is_active` flag managed
|
||||
/// by [`crate::manager::WorktreeManager`]. Mutation methods live here, not on
|
||||
/// the trait, because the backend is stateless with respect to activation.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct WorktreeHandle {
|
||||
/// Unique worktree identifier
|
||||
pub id: String,
|
||||
/// Agent ID that owns this worktree
|
||||
pub agent_id: String,
|
||||
/// Branch name created for this worktree
|
||||
pub branch: String,
|
||||
/// Path to the worktree on disk
|
||||
pub path: PathBuf,
|
||||
/// When the worktree was created
|
||||
pub created_at: DateTime<Utc>,
|
||||
/// Whether changes can still be made
|
||||
pub info: WorkspaceInfo,
|
||||
pub is_active: bool,
|
||||
}
|
||||
|
||||
impl WorktreeHandle {
|
||||
/// Create a new worktree handle
|
||||
pub fn new(agent_id: String, branch: String, path: PathBuf) -> Self {
|
||||
pub fn new(info: WorkspaceInfo) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4().to_string(),
|
||||
agent_id,
|
||||
branch,
|
||||
path,
|
||||
created_at: Utc::now(),
|
||||
info,
|
||||
is_active: true,
|
||||
}
|
||||
}
|
||||
|
||||
/// Mark worktree as inactive (no more changes allowed)
|
||||
pub fn id(&self) -> &str {
|
||||
&self.info.id
|
||||
}
|
||||
|
||||
pub fn agent_id(&self) -> &str {
|
||||
&self.info.agent_id
|
||||
}
|
||||
|
||||
pub fn path(&self) -> &Path {
|
||||
&self.info.path
|
||||
}
|
||||
|
||||
/// Branch (git) or workspace name (jj) — the VCS ref for this workspace.
|
||||
pub fn ref_name(&self) -> &str {
|
||||
match &self.info.vcs {
|
||||
VcsMetadata::Git { branch, .. } => branch,
|
||||
VcsMetadata::Jj { workspace_name, .. } => workspace_name,
|
||||
}
|
||||
}
|
||||
|
||||
/// Stable identifier: change_id for jj, commit SHA for git.
|
||||
pub fn vcs_id(&self) -> &str {
|
||||
match &self.info.vcs {
|
||||
VcsMetadata::Jj { change_id, .. } => change_id,
|
||||
VcsMetadata::Git { commit_sha, .. } => commit_sha,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deactivate(&mut self) {
|
||||
self.is_active = false;
|
||||
}
|
||||
|
||||
/// Check if worktree is still active for modifications
|
||||
pub fn can_modify(&self) -> Result<()> {
|
||||
if !self.is_active {
|
||||
return Err(crate::error::WorktreeError::InvalidState(format!(
|
||||
"Worktree {} is no longer active",
|
||||
self.id
|
||||
return Err(WorktreeError::InvalidState(format!(
|
||||
"Workspace {} is no longer active",
|
||||
self.info.id
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
|
|
@ -55,31 +67,58 @@ impl WorktreeHandle {
|
|||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::path::PathBuf;
|
||||
|
||||
use chrono::Utc;
|
||||
|
||||
use super::*;
|
||||
use crate::backend::VcsMetadata;
|
||||
|
||||
#[test]
|
||||
fn test_handle_creation() {
|
||||
let handle = WorktreeHandle::new(
|
||||
"agent-001".to_string(),
|
||||
"feature/test".to_string(),
|
||||
PathBuf::from("/tmp/wt-001"),
|
||||
);
|
||||
|
||||
assert_eq!(handle.agent_id, "agent-001");
|
||||
assert_eq!(handle.branch, "feature/test");
|
||||
assert!(handle.is_active);
|
||||
fn make_handle(agent_id: &str) -> WorktreeHandle {
|
||||
WorktreeHandle::new(WorkspaceInfo {
|
||||
id: "test-ws-001".to_string(),
|
||||
agent_id: agent_id.to_string(),
|
||||
path: PathBuf::from("/tmp/ws-001"),
|
||||
vcs: VcsMetadata::Git {
|
||||
branch: "agent/agent-001/test-ws-001".to_string(),
|
||||
commit_sha: "abc123".to_string(),
|
||||
},
|
||||
created_at: Utc::now(),
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deactivate() {
|
||||
let mut handle = WorktreeHandle::new(
|
||||
"agent-001".to_string(),
|
||||
"feature/test".to_string(),
|
||||
PathBuf::from("/tmp/wt-001"),
|
||||
);
|
||||
fn handle_creation() {
|
||||
let h = make_handle("agent-001");
|
||||
assert_eq!(h.agent_id(), "agent-001");
|
||||
assert_eq!(h.id(), "test-ws-001");
|
||||
assert_eq!(h.ref_name(), "agent/agent-001/test-ws-001");
|
||||
assert!(h.is_active);
|
||||
assert!(h.can_modify().is_ok());
|
||||
}
|
||||
|
||||
assert!(handle.can_modify().is_ok());
|
||||
handle.deactivate();
|
||||
assert!(handle.can_modify().is_err());
|
||||
#[test]
|
||||
fn deactivation_blocks_modify() {
|
||||
let mut h = make_handle("agent-001");
|
||||
assert!(h.can_modify().is_ok());
|
||||
h.deactivate();
|
||||
assert!(!h.is_active);
|
||||
assert!(h.can_modify().is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn jj_handle_ref_name() {
|
||||
let h = WorktreeHandle::new(WorkspaceInfo {
|
||||
id: "ws-uuid".to_string(),
|
||||
agent_id: "agent-002".to_string(),
|
||||
path: PathBuf::from("/tmp/ws-uuid"),
|
||||
vcs: VcsMetadata::Jj {
|
||||
change_id: "yostqsxwmrkpkzqz".to_string(),
|
||||
workspace_name: "ws-uuid".to_string(),
|
||||
},
|
||||
created_at: Utc::now(),
|
||||
});
|
||||
assert_eq!(h.ref_name(), "ws-uuid");
|
||||
assert_eq!(h.vcs_id(), "yostqsxwmrkpkzqz");
|
||||
}
|
||||
}
|
||||
|
|
|
|||
478
crates/vapora-worktree/src/jj.rs
Normal file
478
crates/vapora-worktree/src/jj.rs
Normal file
|
|
@ -0,0 +1,478 @@
|
|||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use chrono::Utc;
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::backend::{MergeStatus, VcsCapabilities, VcsMetadata, WorkspaceBackend, WorkspaceInfo};
|
||||
use crate::error::{Result, WorktreeError};
|
||||
|
||||
/// jj log template: prints `CONFLICT` or `CLEAN` followed by newline.
const CONFLICT_CHECK_TEMPLATE: &str = r#"if(conflict, "CONFLICT", "CLEAN") ++ "\n""#;
/// jj log template: prints the full change_id followed by newline.
const CHANGE_ID_TEMPLATE: &str = r#"change_id ++ "\n""#;

/// Jujutsu (jj) implementation of the workspace backend.
///
/// Every operation shells out to the `jj` CLI; unlike the git backend there
/// is no constraints struct here.
pub struct JjBackend {
    /// Root of the primary jj repository; must exist at construction time.
    repo_path: PathBuf,
    /// Directory under which managed jj workspaces are created.
    workspace_base: PathBuf,
}
|
||||
|
||||
impl JjBackend {
|
||||
pub fn new(repo_path: PathBuf, workspace_base: PathBuf) -> Result<Self> {
|
||||
if !repo_path.exists() {
|
||||
return Err(WorktreeError::InvalidState(format!(
|
||||
"Repository path does not exist: {}",
|
||||
repo_path.display()
|
||||
)));
|
||||
}
|
||||
if !workspace_base.exists() {
|
||||
std::fs::create_dir_all(&workspace_base)?;
|
||||
}
|
||||
Ok(Self {
|
||||
repo_path,
|
||||
workspace_base,
|
||||
})
|
||||
}
|
||||
|
||||
/// Run a jj command in `repo_path` and return stdout.
|
||||
///
|
||||
/// `--no-pager` prevents jj from spawning a pager in non-TTY contexts.
|
||||
fn run_jj(&self, args: &[&str]) -> Result<String> {
|
||||
let out = Command::new("jj")
|
||||
.current_dir(&self.repo_path)
|
||||
.arg("--no-pager")
|
||||
.args(args)
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::JjError(format!("jj exec: {e}")))?;
|
||||
|
||||
if !out.status.success() {
|
||||
return Err(WorktreeError::JjError(
|
||||
String::from_utf8_lossy(&out.stderr).trim().to_string(),
|
||||
));
|
||||
}
|
||||
Ok(String::from_utf8_lossy(&out.stdout).into_owned())
|
||||
}
|
||||
|
||||
fn workspace_path(&self, workspace_id: &str) -> PathBuf {
|
||||
self.workspace_base.join(workspace_id)
|
||||
}
|
||||
|
||||
/// Retrieve the change_id of the working-copy commit in workspace
|
||||
/// `workspace_id`.
|
||||
///
|
||||
/// Revset: `<workspace_id>@` — jj's syntax for "working copy of named
|
||||
/// workspace".
|
||||
fn get_change_id(&self, workspace_id: &str) -> Result<String> {
|
||||
let revset = format!("{workspace_id}@");
|
||||
let out = self.run_jj(&["log", "--no-graph", "-r", &revset, "-T", CHANGE_ID_TEMPLATE])?;
|
||||
let id = out.trim().to_string();
|
||||
if id.is_empty() {
|
||||
return Err(WorktreeError::NotFound(format!(
|
||||
"jj workspace '{workspace_id}' has no working-copy commit"
|
||||
)));
|
||||
}
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
/// After `jj new --no-edit <target> <change_id>`, the merge commit is the
|
||||
/// only head in `change_id::` (the merge commit is a descendant of
|
||||
/// change_id and has no children yet).
|
||||
fn find_merge_commit_revset(change_id: &str) -> String {
|
||||
format!("heads({change_id}::)")
|
||||
}
|
||||
|
||||
/// Files modified in `workspace_id`'s working-copy commit relative to its
|
||||
/// parent.
|
||||
fn modified_files(&self, workspace_id: &str) -> Result<Vec<PathBuf>> {
|
||||
let revset = format!("{workspace_id}@");
|
||||
// `jj diff --name-only -r <rev>` shows files changed in that commit vs its
|
||||
// parent
|
||||
let out = self.run_jj(&["diff", "--name-only", "-r", &revset])?;
|
||||
Ok(out
|
||||
.lines()
|
||||
.filter(|l| !l.is_empty())
|
||||
.map(PathBuf::from)
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl WorkspaceBackend for JjBackend {
|
||||
fn name(&self) -> &'static str {
|
||||
"jj"
|
||||
}
|
||||
|
||||
fn capabilities(&self) -> VcsCapabilities {
|
||||
VcsCapabilities {
|
||||
conflicts_as_data: true,
|
||||
stable_change_ids: true,
|
||||
native_workspaces: true,
|
||||
hooks_supported: false,
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_workspace(&self, agent_id: &str, workspace_id: &str) -> Result<WorkspaceInfo> {
|
||||
let path = self.workspace_path(workspace_id);
|
||||
let path_str = path
|
||||
.to_str()
|
||||
.ok_or_else(|| WorktreeError::InvalidState("Non-UTF8 path".into()))?;
|
||||
|
||||
// `jj workspace add <path> --name <name>` — workspace_id doubles as the jj
|
||||
// workspace name
|
||||
self.run_jj(&["workspace", "add", path_str, "--name", workspace_id])?;
|
||||
|
||||
let change_id = self.get_change_id(workspace_id)?;
|
||||
|
||||
info!("Created jj workspace {workspace_id} for agent {agent_id} (change {change_id})");
|
||||
|
||||
Ok(WorkspaceInfo {
|
||||
id: workspace_id.to_string(),
|
||||
agent_id: agent_id.to_string(),
|
||||
path,
|
||||
vcs: VcsMetadata::Jj {
|
||||
change_id,
|
||||
workspace_name: workspace_id.to_string(),
|
||||
},
|
||||
created_at: Utc::now(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Non-destructive. Reports `.ontology/` files modified in the workspace.
|
||||
///
|
||||
/// Conflicts are not detected upfront — jj stores them as data in the merge
|
||||
/// commit (see `merge_workspace`). The protocol layer decides if conflict
|
||||
/// data is acceptable before calling merge.
|
||||
async fn check_merge(&self, workspace_id: &str, _target: &str) -> Result<MergeStatus> {
|
||||
let all_files = self.modified_files(workspace_id)?;
|
||||
let ontology_files: Vec<PathBuf> = all_files
|
||||
.into_iter()
|
||||
.filter(|p| p.starts_with(".ontology"))
|
||||
.collect();
|
||||
|
||||
Ok(MergeStatus {
|
||||
// jj merges always produce a commit — conflicts are data, not errors
|
||||
can_merge: true,
|
||||
conflicts: vec![],
|
||||
ontology_files_modified: ontology_files,
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a merge commit via `jj new --no-edit <target> <change_id>`.
|
||||
///
|
||||
/// If the merge commit has conflicts in `.ontology/` files, aborts and
|
||||
/// abandons the merge commit. For conflicts in other files, proceeds —
|
||||
/// they are stored as jj conflict markers and resolved later.
|
||||
async fn merge_workspace(&self, workspace_id: &str, target: &str) -> Result<()> {
|
||||
let change_id = self.get_change_id(workspace_id)?;
|
||||
let merge_revset = Self::find_merge_commit_revset(&change_id);
|
||||
|
||||
// Create merge commit without touching any workspace's working copy
|
||||
self.run_jj(&["new", "--no-edit", target, &change_id])?;
|
||||
|
||||
// Check for conflicts in the new merge commit
|
||||
let conflict_out = self.run_jj(&[
|
||||
"log",
|
||||
"--no-graph",
|
||||
"-r",
|
||||
&merge_revset,
|
||||
"-T",
|
||||
CONFLICT_CHECK_TEMPLATE,
|
||||
])?;
|
||||
|
||||
if conflict_out.trim() == "CONFLICT" {
|
||||
// Inspect which files are conflicted
|
||||
let files_out = self
|
||||
.run_jj(&["diff", "--name-only", "-r", &merge_revset])
|
||||
.unwrap_or_default();
|
||||
|
||||
let ontology_conflicts: Vec<&str> = files_out
|
||||
.lines()
|
||||
.filter(|l| l.starts_with(".ontology/"))
|
||||
.collect();
|
||||
|
||||
if !ontology_conflicts.is_empty() {
|
||||
// Abandon the merge commit — `.ontology/` conflicts require NCL-aware
|
||||
// resolution
|
||||
if let Err(e) = self.run_jj(&["abandon", &merge_revset]) {
|
||||
warn!("Failed to abandon conflicted merge commit: {e}");
|
||||
}
|
||||
return Err(WorktreeError::MergeConflict(format!(
|
||||
".ontology/ files have conflicts that require NCL merge: {}",
|
||||
ontology_conflicts.join(", ")
|
||||
)));
|
||||
}
|
||||
|
||||
// Non-ontology conflicts are stored as jj data — log advisory and continue
|
||||
warn!(
|
||||
"Merge commit for workspace {workspace_id} has conflicts (non-ontology). Stored \
|
||||
as jj conflict markers; use `jj resolve` to clean up."
|
||||
);
|
||||
}
|
||||
|
||||
// Advance the target bookmark to the merge commit
|
||||
self.run_jj(&["bookmark", "set", target, "-r", &merge_revset])?;
|
||||
|
||||
info!("Merged jj workspace {workspace_id} into {target}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn remove_workspace(&self, workspace_id: &str) -> Result<()> {
|
||||
let path = self.workspace_path(workspace_id);
|
||||
|
||||
// Forget the workspace from jj's registry
|
||||
if let Err(e) = self.run_jj(&["workspace", "forget", workspace_id]) {
|
||||
warn!("jj workspace forget failed for {workspace_id}: {e}");
|
||||
}
|
||||
|
||||
// Remove the directory
|
||||
if path.exists() {
|
||||
std::fs::remove_dir_all(&path).map_err(|e| {
|
||||
WorktreeError::RemovalFailed(format!(
|
||||
"Failed to remove workspace directory {}: {e}",
|
||||
path.display()
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
info!("Removed jj workspace {workspace_id}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_workspaces(&self) -> Result<Vec<WorkspaceInfo>> {
|
||||
let mut result = Vec::new();
|
||||
|
||||
let entries = match std::fs::read_dir(&self.workspace_base) {
|
||||
Ok(e) => e,
|
||||
Err(_) => return Ok(result),
|
||||
};
|
||||
|
||||
for entry in entries.flatten() {
|
||||
if !entry.file_type().map(|t| t.is_dir()).unwrap_or(false) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let workspace_id = entry.file_name().to_string_lossy().into_owned();
|
||||
let path = entry.path();
|
||||
|
||||
// Verify the workspace is known to jj by querying its change_id
|
||||
let change_id = match self.get_change_id(&workspace_id) {
|
||||
Ok(id) => id,
|
||||
Err(_) => continue, // Directory exists but not a jj workspace
|
||||
};
|
||||
|
||||
result.push(WorkspaceInfo {
|
||||
id: workspace_id.clone(),
|
||||
// agent_id is stored in .ontoref-run metadata, not in jj state
|
||||
agent_id: workspace_id.clone(),
|
||||
path,
|
||||
vcs: VcsMetadata::Jj {
|
||||
change_id,
|
||||
workspace_name: workspace_id,
|
||||
},
|
||||
created_at: Utc::now(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Creates or updates bookmark `agent/<agent_id>/<workspace_id>` pointing
|
||||
/// to the workspace's current change, then pushes it to `remote`.
|
||||
///
|
||||
/// Uses `bookmark set` (not `create`) for idempotency — safe on re-push
|
||||
/// after the workspace commit is amended.
|
||||
async fn push_to_remote(&self, workspace_id: &str, agent_id: &str, remote: &str) -> Result<()> {
|
||||
let change_id = self.get_change_id(workspace_id)?;
|
||||
let bookmark = format!("agent/{agent_id}/{workspace_id}");
|
||||
|
||||
self.run_jj(&["bookmark", "set", &bookmark, "-r", &change_id])?;
|
||||
self.run_jj(&["git", "push", "--remote", remote, "--bookmark", &bookmark])?;
|
||||
|
||||
info!("Pushed jj workspace {workspace_id} to {remote} as bookmark {bookmark}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn cleanup_orphaned(&self) -> Result<()> {
|
||||
// Get workspaces jj knows about
|
||||
let known = self.run_jj(&["workspace", "list"])?;
|
||||
let known_names: Vec<&str> = known
|
||||
.lines()
|
||||
.filter_map(|l| l.split(':').next().map(str::trim))
|
||||
.collect();
|
||||
|
||||
// For each directory in workspace_base, forget if jj doesn't know it
|
||||
let entries = match std::fs::read_dir(&self.workspace_base) {
|
||||
Ok(e) => e,
|
||||
Err(_) => return Ok(()),
|
||||
};
|
||||
|
||||
for entry in entries.flatten() {
|
||||
let name = entry.file_name().to_string_lossy().into_owned();
|
||||
if !known_names.contains(&name.as_str()) {
|
||||
warn!("Removing orphaned workspace directory: {name}");
|
||||
let _ = std::fs::remove_dir_all(entry.path());
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn capabilities_correct() {
|
||||
let dir = tempfile::TempDir::new().unwrap();
|
||||
let ws = tempfile::TempDir::new().unwrap();
|
||||
// JjBackend::new requires the repo path to exist
|
||||
let b = JjBackend::new(dir.path().to_path_buf(), ws.path().to_path_buf()).unwrap();
|
||||
let caps = b.capabilities();
|
||||
assert!(caps.conflicts_as_data);
|
||||
assert!(caps.stable_change_ids);
|
||||
assert!(caps.native_workspaces);
|
||||
assert!(!caps.hooks_supported);
|
||||
}
|
||||
|
||||
/// Integration tests that require a real jj repo are marked `#[ignore]`.
|
||||
/// Run with: cargo test -p vapora-worktree --features jj -- --ignored
|
||||
#[tokio::test]
|
||||
#[ignore]
|
||||
async fn create_and_remove_workspace_integration() {
|
||||
let repo = tempfile::TempDir::new().unwrap();
|
||||
let ws = tempfile::TempDir::new().unwrap();
|
||||
|
||||
// Init jj colocated repo
|
||||
Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args(["git", "init", "--colocate"])
|
||||
.output()
|
||||
.unwrap();
|
||||
|
||||
let b = JjBackend::new(repo.path().to_path_buf(), ws.path().to_path_buf()).unwrap();
|
||||
let info = b.create_workspace("agent-001", "ws-0001").await.unwrap();
|
||||
|
||||
assert_eq!(info.id, "ws-0001");
|
||||
assert!(info.path.exists());
|
||||
assert!(matches!(info.vcs, VcsMetadata::Jj { .. }));
|
||||
|
||||
let listed = b.list_workspaces().await.unwrap();
|
||||
assert!(listed.iter().any(|w| w.id == "ws-0001"));
|
||||
|
||||
b.remove_workspace("ws-0001").await.unwrap();
|
||||
assert!(!info.path.exists());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore]
|
||||
async fn change_id_stable_after_describe() {
|
||||
let repo = tempfile::TempDir::new().unwrap();
|
||||
let ws = tempfile::TempDir::new().unwrap();
|
||||
|
||||
Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args(["git", "init", "--colocate"])
|
||||
.output()
|
||||
.unwrap();
|
||||
|
||||
let b = JjBackend::new(repo.path().to_path_buf(), ws.path().to_path_buf()).unwrap();
|
||||
let info = b.create_workspace("agent-001", "ws-0001").await.unwrap();
|
||||
|
||||
let change_id_before = match &info.vcs {
|
||||
VcsMetadata::Jj { change_id, .. } => change_id.clone(),
|
||||
_ => panic!("expected jj metadata"),
|
||||
};
|
||||
|
||||
// Amend the workspace commit description — change_id must stay the same
|
||||
Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args([
|
||||
"--no-pager",
|
||||
"describe",
|
||||
"-r",
|
||||
&format!("{change_id_before}"),
|
||||
"-m",
|
||||
"amended",
|
||||
])
|
||||
.output()
|
||||
.unwrap();
|
||||
|
||||
let change_id_after = b.get_change_id("ws-0001").unwrap();
|
||||
assert_eq!(
|
||||
change_id_before, change_id_after,
|
||||
"change_id must be stable across describe"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore]
|
||||
async fn merge_workspace_advances_bookmark() {
|
||||
let repo = tempfile::TempDir::new().unwrap();
|
||||
let ws = tempfile::TempDir::new().unwrap();
|
||||
|
||||
Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args(["git", "init", "--colocate"])
|
||||
.output()
|
||||
.unwrap();
|
||||
|
||||
// Create a file in the repo and set main bookmark
|
||||
std::fs::write(repo.path().join("hello.txt"), "hello").unwrap();
|
||||
Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args(["--no-pager", "describe", "-m", "initial"])
|
||||
.output()
|
||||
.unwrap();
|
||||
Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args(["--no-pager", "bookmark", "create", "main", "-r", "@"])
|
||||
.output()
|
||||
.unwrap();
|
||||
|
||||
let b = JjBackend::new(repo.path().to_path_buf(), ws.path().to_path_buf()).unwrap();
|
||||
let info = b.create_workspace("agent-001", "ws-0001").await.unwrap();
|
||||
|
||||
// Make a change in the workspace
|
||||
std::fs::write(info.path.join("world.txt"), "world").unwrap();
|
||||
Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args([
|
||||
"--no-pager",
|
||||
"describe",
|
||||
"-r",
|
||||
&format!("{}-@", "ws-0001"),
|
||||
"-m",
|
||||
"add world",
|
||||
])
|
||||
.output()
|
||||
.unwrap();
|
||||
|
||||
// Merge — main bookmark must advance to the merge commit
|
||||
b.merge_workspace("ws-0001", "main").await.unwrap();
|
||||
|
||||
// Verify main points to a commit that descends from both initial and the
|
||||
// workspace change
|
||||
let out = Command::new("jj")
|
||||
.current_dir(repo.path())
|
||||
.args([
|
||||
"--no-pager",
|
||||
"log",
|
||||
"--no-graph",
|
||||
"-r",
|
||||
"main",
|
||||
"-T",
|
||||
"description ++ '\n'",
|
||||
])
|
||||
.output()
|
||||
.unwrap();
|
||||
let desc = String::from_utf8_lossy(&out.stdout);
|
||||
// Merge commit description is empty by default; main must have advanced
|
||||
assert!(
|
||||
out.status.success(),
|
||||
"main bookmark should be valid after merge"
|
||||
);
|
||||
drop(desc);
|
||||
}
|
||||
}
|
||||
|
|
@ -1,10 +1,29 @@
|
|||
// vapora-worktree: Git worktree isolation for code-modifying agents
|
||||
// Phase 3: Sandbox execution environment
|
||||
//! VCS-agnostic workspace isolation for code-modifying agents.
|
||||
//!
|
||||
//! Provides [`WorktreeManager`] backed by a [`WorkspaceBackend`] implementation
|
||||
//! selected at construction time. Supports jj (native workspaces, stable
|
||||
//! change IDs) and git (worktrees). Auto-detection via [`detect_vcs`].
|
||||
|
||||
pub mod backend;
|
||||
pub mod detect;
|
||||
pub mod error;
|
||||
pub mod handle;
|
||||
pub mod manager;
|
||||
|
||||
#[cfg(feature = "git")]
|
||||
pub mod git;
|
||||
|
||||
#[cfg(feature = "jj")]
|
||||
pub mod jj;
|
||||
|
||||
pub use backend::{
|
||||
ConflictInfo, MergeStatus, VcsCapabilities, VcsMetadata, WorkspaceBackend, WorkspaceInfo,
|
||||
};
|
||||
pub use detect::{detect_vcs, VcsDetection};
|
||||
pub use error::{Result, WorktreeError};
|
||||
#[cfg(feature = "git")]
|
||||
pub use git::{GitBackend, GitConstraints};
|
||||
pub use handle::WorktreeHandle;
|
||||
#[cfg(feature = "jj")]
|
||||
pub use jj::JjBackend;
|
||||
pub use manager::WorktreeManager;
|
||||
|
|
|
|||
|
|
@ -1,354 +1,278 @@
|
|||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::process::Command;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
use tracing::{debug, info};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::backend::{MergeStatus, VcsCapabilities, WorkspaceBackend};
|
||||
use crate::detect::{detect_vcs, VcsDetection};
|
||||
use crate::error::{Result, WorktreeError};
|
||||
#[cfg(feature = "git")]
|
||||
use crate::git::{GitBackend, GitConstraints};
|
||||
use crate::handle::WorktreeHandle;
|
||||
#[cfg(feature = "jj")]
|
||||
use crate::jj::JjBackend;
|
||||
|
||||
/// Manages git worktree lifecycle for code-modifying agents
|
||||
/// Manages workspace lifecycle for code-modifying agents.
|
||||
///
|
||||
/// Holds a `Box<dyn WorkspaceBackend>` for VCS operations and an in-memory
|
||||
/// `active` map for tracking handles across the process lifetime. VCS is
|
||||
/// selected at construction via `auto_detect`, `new_jj`, or `new_git`.
|
||||
///
|
||||
/// **No external callers exist at this time** — `vapora-worktree` is consumed
|
||||
/// only internally. External callers will appear during WS-5 (agent coordinator
|
||||
/// integration), at which point the public API here should be stabilised.
|
||||
pub struct WorktreeManager {
|
||||
/// Path to the root repository
|
||||
repo_path: PathBuf,
|
||||
/// Base directory for creating worktrees
|
||||
worktree_base: PathBuf,
|
||||
/// Active worktrees indexed by ID
|
||||
active_worktrees: Arc<RwLock<HashMap<String, WorktreeHandle>>>,
|
||||
backend: Box<dyn WorkspaceBackend>,
|
||||
active: Arc<RwLock<HashMap<String, WorktreeHandle>>>,
|
||||
}
|
||||
|
||||
impl WorktreeManager {
|
||||
/// Create a new worktree manager for a repository
|
||||
pub fn new(repo_path: PathBuf, worktree_base: PathBuf) -> Result<Self> {
|
||||
// Verify repository exists
|
||||
if !repo_path.exists() {
|
||||
return Err(WorktreeError::InvalidState(format!(
|
||||
"Repository path does not exist: {}",
|
||||
repo_path.display()
|
||||
)));
|
||||
pub fn new(backend: Box<dyn WorkspaceBackend>) -> Self {
|
||||
Self {
|
||||
backend,
|
||||
active: Arc::new(RwLock::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
// Create worktree base directory if needed
|
||||
if !worktree_base.exists() {
|
||||
std::fs::create_dir_all(&worktree_base).map_err(|e| {
|
||||
WorktreeError::InvalidState(format!(
|
||||
"Failed to create worktree base directory: {}",
|
||||
e
|
||||
))
|
||||
})?;
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
#[cfg(feature = "jj")]
|
||||
pub fn new_jj(repo_path: PathBuf, workspace_base: PathBuf) -> Result<Self> {
|
||||
Ok(Self::new(Box::new(JjBackend::new(
|
||||
repo_path,
|
||||
worktree_base,
|
||||
active_worktrees: Arc::new(RwLock::new(HashMap::new())),
|
||||
})
|
||||
workspace_base,
|
||||
)?)))
|
||||
}
|
||||
|
||||
/// Create a new worktree for an agent
|
||||
#[cfg(feature = "git")]
|
||||
pub fn new_git(
|
||||
repo_path: PathBuf,
|
||||
workspace_base: PathBuf,
|
||||
constraints: GitConstraints,
|
||||
) -> Result<Self> {
|
||||
Ok(Self::new(Box::new(GitBackend::new(
|
||||
repo_path,
|
||||
workspace_base,
|
||||
constraints,
|
||||
)?)))
|
||||
}
|
||||
|
||||
/// Detect VCS from filesystem and construct the appropriate backend.
|
||||
///
|
||||
/// git via `auto_detect` sets `deny_ontology_writes: false` because git is
|
||||
/// detected as the **primary** VCS — ontology writes through git are the
|
||||
/// normal path. Construct `new_git` with explicit constraints to override.
|
||||
pub fn auto_detect(repo_path: PathBuf, workspace_base: PathBuf) -> Result<Self> {
|
||||
match detect_vcs(&repo_path) {
|
||||
#[cfg(feature = "jj")]
|
||||
VcsDetection::JjColocated | VcsDetection::Jj => Self::new_jj(repo_path, workspace_base),
|
||||
#[cfg(feature = "git")]
|
||||
VcsDetection::Git => Self::new_git(
|
||||
repo_path,
|
||||
workspace_base,
|
||||
GitConstraints {
|
||||
deny_ontology_writes: false,
|
||||
..Default::default()
|
||||
},
|
||||
),
|
||||
_ => Err(WorktreeError::VcsDetectionFailed(
|
||||
repo_path.display().to_string(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn backend_name(&self) -> &'static str {
|
||||
self.backend.name()
|
||||
}
|
||||
|
||||
pub fn capabilities(&self) -> VcsCapabilities {
|
||||
self.backend.capabilities()
|
||||
}
|
||||
|
||||
/// Create a new workspace for `agent_id`. The workspace ID is a fresh UUID.
|
||||
pub async fn create_for_agent(&self, agent_id: &str) -> Result<WorktreeHandle> {
|
||||
let worktree_id = Uuid::new_v4().to_string();
|
||||
let branch_name = format!("agent/{}/{}", agent_id, worktree_id);
|
||||
let worktree_path = self.worktree_base.join(&worktree_id);
|
||||
let workspace_id = Uuid::new_v4().to_string();
|
||||
debug!("Creating workspace {workspace_id} for agent {agent_id}");
|
||||
|
||||
debug!(
|
||||
"Creating worktree for agent {}: {}",
|
||||
agent_id,
|
||||
worktree_path.display()
|
||||
);
|
||||
let info = self
|
||||
.backend
|
||||
.create_workspace(agent_id, &workspace_id)
|
||||
.await?;
|
||||
let handle = WorktreeHandle::new(info);
|
||||
let id = handle.id().to_string();
|
||||
|
||||
// Create worktree with new branch
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args([
|
||||
"worktree",
|
||||
"add",
|
||||
"-b",
|
||||
&branch_name,
|
||||
worktree_path.to_str().ok_or_else(|| {
|
||||
WorktreeError::InvalidState("Invalid path encoding".to_string())
|
||||
})?,
|
||||
])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(format!("Failed to create worktree: {}", e)))?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
return Err(WorktreeError::CreationFailed(stderr.to_string()));
|
||||
}
|
||||
|
||||
let handle = WorktreeHandle::new(agent_id.to_string(), branch_name, worktree_path);
|
||||
let handle_id = handle.id.clone();
|
||||
|
||||
// Track in active worktrees
|
||||
let mut worktrees = self.active_worktrees.write().await;
|
||||
worktrees.insert(handle_id.clone(), handle.clone());
|
||||
|
||||
info!(
|
||||
"Created worktree {} for agent {} on branch {}",
|
||||
handle_id, agent_id, handle.branch
|
||||
);
|
||||
self.active.write().await.insert(id.clone(), handle.clone());
|
||||
info!("Tracking workspace {id} for agent {agent_id}");
|
||||
|
||||
Ok(handle)
|
||||
}
|
||||
|
||||
/// Prepare worktree for merge: check for conflicts before attempting merge
|
||||
pub async fn prepare_merge(&self, worktree: &WorktreeHandle) -> Result<()> {
|
||||
worktree.can_modify()?;
|
||||
|
||||
debug!("Preparing merge for worktree {}", worktree.id);
|
||||
|
||||
// Fetch latest main branch
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["fetch", "origin", "main:main"])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(format!("Failed to fetch: {}", e)))?;
|
||||
|
||||
if !output.status.success() {
|
||||
warn!("Fetch returned non-zero status, continuing with merge check");
|
||||
/// Preview merge readiness. See [`WorkspaceBackend::check_merge`] for
|
||||
/// per-backend semantics (destructive dry-run for git, read-only for jj).
|
||||
pub async fn check_merge(&self, handle: &WorktreeHandle) -> Result<MergeStatus> {
|
||||
handle.can_modify()?;
|
||||
self.backend.check_merge(handle.id(), "main").await
|
||||
}
|
||||
|
||||
// Dry-run merge to detect conflicts
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args([
|
||||
"merge",
|
||||
"--no-commit",
|
||||
"--no-ff",
|
||||
"--no-stat",
|
||||
&worktree.branch,
|
||||
])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(format!("Failed to check merge: {}", e)))?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
// Abort the dry-run merge
|
||||
let _ = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["merge", "--abort"])
|
||||
.output();
|
||||
|
||||
return Err(WorktreeError::MergeConflict(format!(
|
||||
"Merge would conflict: {}",
|
||||
stderr
|
||||
)));
|
||||
}
|
||||
|
||||
// Abort dry-run merge
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["merge", "--abort"])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(format!("Failed to abort dry-run: {}", e)))?;
|
||||
|
||||
if !output.status.success() {
|
||||
warn!("Merge abort returned non-zero status");
|
||||
}
|
||||
|
||||
info!("Merge preparation successful for worktree {}", worktree.id);
|
||||
/// Merge the workspace into `main`, then remove the workspace.
|
||||
pub async fn merge_workspace(&self, handle: &WorktreeHandle) -> Result<()> {
|
||||
handle.can_modify()?;
|
||||
self.backend.merge_workspace(handle.id(), "main").await?;
|
||||
self.backend.remove_workspace(handle.id()).await?;
|
||||
self.retire(handle.id()).await;
|
||||
info!("Merged and removed workspace {}", handle.id());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Merge worktree changes back to main and remove worktree
|
||||
pub async fn merge_and_cleanup(&self, worktree: &WorktreeHandle) -> Result<()> {
|
||||
// Prepare merge (dry-run checks)
|
||||
self.prepare_merge(worktree).await?;
|
||||
|
||||
debug!("Merging and cleaning up worktree {}", worktree.id);
|
||||
|
||||
// Perform actual merge
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["merge", "--no-edit", &worktree.branch])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(format!("Failed to merge: {}", e)))?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
error!("Merge failed: {}", stderr);
|
||||
return Err(WorktreeError::GitError(format!("Merge failed: {}", stderr)));
|
||||
}
|
||||
|
||||
// Remove the worktree
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["worktree", "remove", worktree.path.to_str().unwrap()])
|
||||
.output()
|
||||
.map_err(|e| {
|
||||
WorktreeError::RemovalFailed(format!("Failed to remove worktree: {}", e))
|
||||
})?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
error!("Worktree removal failed: {}", stderr);
|
||||
// Continue cleanup even if removal fails
|
||||
}
|
||||
|
||||
// Remove branch
|
||||
let _ = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["branch", "-D", &worktree.branch])
|
||||
.output();
|
||||
|
||||
// Remove from tracking
|
||||
let mut worktrees = self.active_worktrees.write().await;
|
||||
worktrees.remove(&worktree.id);
|
||||
|
||||
info!(
|
||||
"Merged and cleaned up worktree {} from branch {}",
|
||||
worktree.id, worktree.branch
|
||||
);
|
||||
|
||||
/// Force-remove workspace without merging (failed task cleanup).
|
||||
pub async fn remove_workspace(&self, handle: &WorktreeHandle) -> Result<()> {
|
||||
self.backend.remove_workspace(handle.id()).await?;
|
||||
self.retire(handle.id()).await;
|
||||
info!("Force-removed workspace {}", handle.id());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Force cleanup of a worktree without merging (used for failed tasks)
|
||||
pub async fn cleanup_without_merge(&self, worktree: &WorktreeHandle) -> Result<()> {
|
||||
debug!("Cleaning up worktree {} without merge", worktree.id);
|
||||
|
||||
// Remove the worktree forcefully
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["worktree", "remove", "-f", worktree.path.to_str().unwrap()])
|
||||
.output()
|
||||
.map_err(|e| {
|
||||
WorktreeError::RemovalFailed(format!("Failed to remove worktree: {}", e))
|
||||
})?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
warn!("Worktree force removal had issues: {}", stderr);
|
||||
pub async fn push_to_remote(&self, handle: &WorktreeHandle, remote: &str) -> Result<()> {
|
||||
handle.can_modify()?;
|
||||
self.backend
|
||||
.push_to_remote(handle.id(), handle.agent_id(), remote)
|
||||
.await
|
||||
}
|
||||
|
||||
// Remove branch forcefully
|
||||
let _ = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["branch", "-D", "-f", &worktree.branch])
|
||||
.output();
|
||||
|
||||
// Remove from tracking
|
||||
let mut worktrees = self.active_worktrees.write().await;
|
||||
worktrees.remove(&worktree.id);
|
||||
|
||||
info!(
|
||||
"Force cleaned up worktree {} from branch {}",
|
||||
worktree.id, worktree.branch
|
||||
);
|
||||
|
||||
Ok(())
|
||||
pub async fn get_worktree(&self, id: &str) -> Option<WorktreeHandle> {
|
||||
self.active.read().await.get(id).cloned()
|
||||
}
|
||||
|
||||
/// Get an active worktree by ID
|
||||
pub async fn get_worktree(&self, id: &str) -> Result<Option<WorktreeHandle>> {
|
||||
let worktrees = self.active_worktrees.read().await;
|
||||
Ok(worktrees.get(id).cloned())
|
||||
pub async fn list_active(&self) -> Vec<WorktreeHandle> {
|
||||
self.active.read().await.values().cloned().collect()
|
||||
}
|
||||
|
||||
/// List all active worktrees
|
||||
pub async fn list_active(&self) -> Result<Vec<WorktreeHandle>> {
|
||||
let worktrees = self.active_worktrees.read().await;
|
||||
Ok(worktrees.values().cloned().collect())
|
||||
}
|
||||
|
||||
/// Get count of active worktrees for an agent
|
||||
pub async fn count_for_agent(&self, agent_id: &str) -> Result<usize> {
|
||||
let worktrees = self.active_worktrees.read().await;
|
||||
Ok(worktrees
|
||||
pub async fn count_for_agent(&self, agent_id: &str) -> usize {
|
||||
self.active
|
||||
.read()
|
||||
.await
|
||||
.values()
|
||||
.filter(|w| w.agent_id == agent_id)
|
||||
.count())
|
||||
.filter(|h| h.agent_id() == agent_id)
|
||||
.count()
|
||||
}
|
||||
|
||||
/// Cleanup all orphaned worktrees (for startup recovery)
|
||||
pub async fn cleanup_orphaned(&self) -> Result<()> {
|
||||
debug!("Cleaning up orphaned worktrees");
|
||||
|
||||
let output = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["worktree", "list"])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(format!("Failed to list worktrees: {}", e)))?;
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
|
||||
for line in stdout.lines() {
|
||||
// Skip main worktree and empty lines
|
||||
if line.contains("(bare)") || line.contains("(detached)") || line.is_empty() {
|
||||
continue;
|
||||
self.backend.cleanup_orphaned().await
|
||||
}
|
||||
|
||||
// Extract path from output like "/path/to/wt-abc detached"
|
||||
if let Some(path_str) = line.split_whitespace().next() {
|
||||
let path = PathBuf::from(path_str);
|
||||
if path.starts_with(&self.worktree_base) {
|
||||
warn!("Removing orphaned worktree: {}", path.display());
|
||||
|
||||
// Remove forcefully
|
||||
let _ = Command::new("git")
|
||||
.current_dir(&self.repo_path)
|
||||
.args(["worktree", "remove", "-f", path_str])
|
||||
.output();
|
||||
async fn retire(&self, id: &str) {
|
||||
let mut active = self.active.write().await;
|
||||
if let Some(h) = active.get_mut(id) {
|
||||
h.deactivate();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("Cleaned up orphaned worktrees");
|
||||
Ok(())
|
||||
active.remove(id);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::process::Command;
|
||||
|
||||
use tempfile::TempDir;
|
||||
|
||||
use super::*;
|
||||
use crate::error::WorktreeError;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_manager_creation() -> Result<()> {
|
||||
let repo_dir = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
let wt_dir = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
|
||||
// Initialize git repo
|
||||
#[cfg(feature = "git")]
|
||||
fn init_git_repo(path: &std::path::Path) {
|
||||
Command::new("git")
|
||||
.current_dir(repo_dir.path())
|
||||
.current_dir(path)
|
||||
.args(["init"])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(e.to_string()))?;
|
||||
.unwrap();
|
||||
Command::new("git")
|
||||
.current_dir(path)
|
||||
.args(["config", "user.email", "test@test.com"])
|
||||
.output()
|
||||
.unwrap();
|
||||
Command::new("git")
|
||||
.current_dir(path)
|
||||
.args(["config", "user.name", "Test"])
|
||||
.output()
|
||||
.unwrap();
|
||||
std::fs::write(path.join("README"), "init").unwrap();
|
||||
Command::new("git")
|
||||
.current_dir(path)
|
||||
.args(["add", "."])
|
||||
.output()
|
||||
.unwrap();
|
||||
Command::new("git")
|
||||
.current_dir(path)
|
||||
.args(["commit", "-m", "init"])
|
||||
.output()
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
let manager =
|
||||
WorktreeManager::new(repo_dir.path().to_path_buf(), wt_dir.path().to_path_buf())?;
|
||||
#[cfg(feature = "git")]
|
||||
#[tokio::test]
|
||||
async fn manager_creation_empty() -> Result<()> {
|
||||
let repo = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
let ws = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
init_git_repo(repo.path());
|
||||
|
||||
assert!(manager.list_active().await?.is_empty());
|
||||
let manager = WorktreeManager::new_git(
|
||||
repo.path().to_path_buf(),
|
||||
ws.path().to_path_buf(),
|
||||
GitConstraints::default(),
|
||||
)?;
|
||||
|
||||
assert!(manager.list_active().await.is_empty());
|
||||
assert_eq!(manager.backend_name(), "git");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(feature = "git")]
|
||||
#[tokio::test]
|
||||
async fn test_worktree_creation() -> Result<()> {
|
||||
let repo_dir = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
let wt_dir = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
async fn workspace_creation_tracked() -> Result<()> {
|
||||
let repo = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
let ws = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
init_git_repo(repo.path());
|
||||
|
||||
// Initialize git repo
|
||||
Command::new("git")
|
||||
.current_dir(repo_dir.path())
|
||||
.args(["init"])
|
||||
.output()
|
||||
.map_err(|e| WorktreeError::GitError(e.to_string()))?;
|
||||
|
||||
let manager =
|
||||
WorktreeManager::new(repo_dir.path().to_path_buf(), wt_dir.path().to_path_buf())?;
|
||||
let manager = WorktreeManager::new_git(
|
||||
repo.path().to_path_buf(),
|
||||
ws.path().to_path_buf(),
|
||||
GitConstraints::default(),
|
||||
)?;
|
||||
|
||||
let handle = manager.create_for_agent("agent-001").await?;
|
||||
|
||||
assert_eq!(handle.agent_id, "agent-001");
|
||||
assert_eq!(handle.agent_id(), "agent-001");
|
||||
assert!(handle.is_active);
|
||||
assert_eq!(manager.list_active().await?.len(), 1);
|
||||
assert_eq!(manager.count_for_agent("agent-001").await?, 1);
|
||||
assert_eq!(manager.list_active().await.len(), 1);
|
||||
assert_eq!(manager.count_for_agent("agent-001").await, 1);
|
||||
assert_eq!(manager.count_for_agent("agent-002").await, 0);
|
||||
assert!(manager.get_worktree(handle.id()).await.is_some());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(feature = "git")]
|
||||
#[tokio::test]
|
||||
async fn auto_detect_picks_git() -> Result<()> {
|
||||
let repo = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
let ws = TempDir::new().map_err(WorktreeError::IoError)?;
|
||||
init_git_repo(repo.path());
|
||||
|
||||
let manager =
|
||||
WorktreeManager::auto_detect(repo.path().to_path_buf(), ws.path().to_path_buf())?;
|
||||
|
||||
assert_eq!(manager.backend_name(), "git");
|
||||
assert!(!manager.capabilities().conflicts_as_data);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn auto_detect_fails_on_no_vcs() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ws = TempDir::new().unwrap();
|
||||
let result =
|
||||
WorktreeManager::auto_detect(dir.path().to_path_buf(), ws.path().to_path_buf());
|
||||
assert!(matches!(result, Err(WorktreeError::VcsDetectionFailed(_))));
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
<!-- REVIEW: Status claims "Specification (VAPORA v1.0)" — verify against vapora-agents/src/ which is production-ready at v1.2.0+; code snippets reference docker_image vapora/agents:developer-0.1.0 which is stale -->
|
||||
# 🤖 Agent Registry & Coordination
|
||||
## Multi-Agent Orchestration System
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
<!-- REVIEW: Status claims "Specification (VAPORA v1.0)" — verify against vapora-workflow-engine/src/ which is production-ready at v1.2.0+; workflow stage models may not match current WorkflowStage types -->
|
||||
# 🔄 Multi-Agent Workflows
|
||||
## End-to-End Parallel Task Orchestration
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
<!-- REVIEW: Status claims "Specification (VAPORA v1.0)" — verify against vapora-llm-router/src/ which is production-ready at v1.2.0+ with rig-core 0.30 (doc may reference 0.15) -->
|
||||
# 🧠 Multi-IA Router
|
||||
## Routing Inteligente entre Múltiples Proveedores de LLM
|
||||
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
<!-- REVIEW: (1) Status "Specification (VAPORA v1.0)" — Cedar is implemented in vapora-workflow-engine/src/auth.rs (not vapora-backend); (2) Code examples use cedar_policy_core:: which is an outdated API — actual crate is cedar_policy::; verify against crates/vapora-workflow-engine/src/auth.rs -->
|
||||
# 👥 Roles, Permissions & Profiles
|
||||
## Cedar-Based Access Control for Multi-Agent Teams
|
||||
|
||||
|
|
|
|||
7
justfile
7
justfile
|
|
@ -18,10 +18,17 @@
|
|||
set shell := ["nu", "-c"]
|
||||
set dotenv-load := true
|
||||
|
||||
project_root := justfile_directory()
|
||||
|
||||
# ============================================================================
|
||||
# Module Imports
|
||||
# ============================================================================
|
||||
mod distro "justfiles/distro.just"
|
||||
mod build "justfiles/build.just"
|
||||
mod test "justfiles/test.just"
|
||||
mod dev "justfiles/dev.just"
|
||||
mod ci "justfiles/ci.just"
|
||||
import 'justfiles/assets.just'
|
||||
|
||||
# ============================================================================
|
||||
# Default & Help
|
||||
|
|
|
|||
13
justfiles/assets.just
Normal file
13
justfiles/assets.just
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
# API catalog export
|
||||
#
|
||||
# Generates api-catalog.json from all #[onto_api] registered routes in vapora-backend.
|
||||
# Run after any handler annotation is added or changed.
|
||||
# Commit alongside the annotation changes — they are paired artifacts.
|
||||
|
||||
# Export #[onto_api] routes from vapora-backend to api-catalog.json
|
||||
[doc("Export #[onto_api] routes to api-catalog.json")]
|
||||
export-api-catalog:
|
||||
#!/usr/bin/env nu
|
||||
cargo run -p vapora-backend -- --dump-api-catalog out> api-catalog.json
|
||||
let count = open api-catalog.json | length
|
||||
print $"exported ($count) routes to api-catalog.json"
|
||||
17
justfiles/build.just
Normal file
17
justfiles/build.just
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
# Build recipes — compilation, linking, output generation
|
||||
|
||||
[doc("Show build recipes")]
|
||||
help:
|
||||
@just --list --list-prefix " build::"
|
||||
|
||||
[doc("Build workspace in debug mode")]
|
||||
debug:
|
||||
cargo build --workspace
|
||||
|
||||
[doc("Build workspace in release mode")]
|
||||
release:
|
||||
cargo build --release --workspace
|
||||
|
||||
[doc("Check workspace (syntax + deps, no output)")]
|
||||
check:
|
||||
cargo check --all-targets
|
||||
21
justfiles/dev.just
Normal file
21
justfiles/dev.just
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
# Development workflow — format, lint, watch
|
||||
|
||||
[doc("Show dev recipes")]
|
||||
help:
|
||||
@just --list --list-prefix " dev::"
|
||||
|
||||
[doc("Format all code")]
|
||||
fmt:
|
||||
cargo fmt --all
|
||||
|
||||
[doc("Check formatting without modifying files")]
|
||||
fmt-check:
|
||||
cargo fmt --all -- --check
|
||||
|
||||
[doc("Run clippy (strict: -D warnings)")]
|
||||
lint:
|
||||
cargo clippy --all-targets -- -D warnings
|
||||
|
||||
[doc("Clean build artifacts")]
|
||||
clean:
|
||||
cargo clean
|
||||
21
justfiles/test.just
Normal file
21
justfiles/test.just
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
# Test recipes — unit, integration, and doc tests
|
||||
|
||||
[doc("Show test recipes")]
|
||||
help:
|
||||
@just --list --list-prefix " test::"
|
||||
|
||||
[doc("Run all tests (lib + integration + doc)")]
|
||||
all:
|
||||
cargo test --workspace
|
||||
|
||||
[doc("Run library tests only")]
|
||||
lib:
|
||||
cargo test --lib --no-fail-fast
|
||||
|
||||
[doc("Run doc tests only")]
|
||||
doc:
|
||||
cargo test --doc
|
||||
|
||||
[doc("Run tests for a specific crate")]
|
||||
crate NAME:
|
||||
cargo test -p {{ NAME }}
|
||||
5
reflection/backlog.ncl
Normal file
5
reflection/backlog.ncl
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
let s = import "backlog" in
|
||||
|
||||
{
|
||||
items = [],
|
||||
} | s.BacklogStore
|
||||
68
reflection/modes/create_agent_task.ncl
Normal file
68
reflection/modes/create_agent_task.ncl
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
let s = import "reflection/schema.ncl" in
|
||||
|
||||
{
|
||||
id = "create_agent_task",
|
||||
trigger = "manual | NATS:ecosystem.reflection.request",
|
||||
|
||||
preconditions = [
|
||||
"vapora-agents service is running",
|
||||
"budget-boundary axiom enforced (BudgetEnforcer middleware active)",
|
||||
"provider-abstraction: LLMClient trait in use at all call sites",
|
||||
],
|
||||
|
||||
steps = [
|
||||
{
|
||||
id = "validate_task_params",
|
||||
action = "Validate task parameters against agent capability schema",
|
||||
cmd = "nickel export --format json {vapora_dir}/schemas/agent-task.ncl | jq --exit-status '.capabilities | contains([{capability}])'",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [],
|
||||
},
|
||||
|
||||
{
|
||||
id = "check_budget",
|
||||
action = "Verify current budget balance allows this task execution",
|
||||
cmd = "curl -sf http://localhost:8080/api/v1/budget/{tenant_id}/check?estimate=true | jq --exit-status '.allowed == true'",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [{ step = "validate_task_params", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "check_gate",
|
||||
action = "Verify the agent capability gate is open for this signal type",
|
||||
cmd = "nu {vapora_dir}/scripts/check-gate.nu --ontology {vapora_dir}/.ontology --signal {signal_type}",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [{ step = "validate_task_params", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "dispatch_task",
|
||||
action = "Dispatch validated task to the agent dispatcher",
|
||||
cmd = "curl -sf -X POST http://localhost:8080/api/v1/tasks -H 'Content-Type: application/json' -d '{\"capability\": \"{capability}\", \"tenant_id\": \"{tenant_id}\", \"payload\": {task_payload}}'",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [
|
||||
{ step = "check_budget", kind = 'OnSuccess },
|
||||
{ step = "check_gate", kind = 'OnSuccess },
|
||||
],
|
||||
},
|
||||
|
||||
{
|
||||
id = "capture_to_kogral",
|
||||
action = "Record task dispatch as an Execution node in the knowledge graph",
|
||||
cmd = "nu {stratumiops_dir}/scripts/kogral-bridge.nu --mode create_agent_task --project {project_name} --file /dev/stdin",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Continue },
|
||||
depends_on = [{ step = "dispatch_task", kind = 'OnSuccess }],
|
||||
},
|
||||
],
|
||||
|
||||
postconditions = [
|
||||
"Task is visible in syntaxis-core task tracker",
|
||||
"Budget ledger updated with estimated cost",
|
||||
"Execution node created in kogral shared graph",
|
||||
],
|
||||
} | (s.Mode String)
|
||||
92
reflection/modes/deploy_vapora_service.ncl
Normal file
92
reflection/modes/deploy_vapora_service.ncl
Normal file
|
|
@ -0,0 +1,92 @@
|
|||
let s = import "reflection/schema.ncl" in
|
||||
|
||||
{
|
||||
id = "deploy_vapora_service",
|
||||
trigger = "manual | NATS:ecosystem.provisioning.ready",
|
||||
|
||||
preconditions = [
|
||||
"Provisioning scaffold complete (provisioning.ready event received)",
|
||||
"async-first: no blocking calls in deployment scripts",
|
||||
"budget-boundary: deployment cost within approved limits",
|
||||
],
|
||||
|
||||
steps = [
|
||||
{
|
||||
id = "validate_ontology",
|
||||
action = "Validate vapora ontology coherence before deployment",
|
||||
cmd = "nu {stratumiops_dir}/scripts/ontology-validate.nu {vapora_dir}/.ontology",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [],
|
||||
},
|
||||
|
||||
{
|
||||
id = "build_release",
|
||||
action = "Build vapora-backend in release mode",
|
||||
cmd = "cargo build --release -p vapora-backend 2>&1",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [{ step = "validate_ontology", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "run_tests",
|
||||
action = "Run workspace tests before deploy",
|
||||
cmd = "cargo test --workspace --all-features 2>&1",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [{ step = "build_release", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "build_docker",
|
||||
action = "Build and tag the Docker image for this service version",
|
||||
cmd = "docker build -f {vapora_dir}/docker/{service_name}.Dockerfile -t vapora/{service_name}:{version} {vapora_dir}",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [{ step = "run_tests", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "push_image",
|
||||
action = "Push Docker image to registry",
|
||||
cmd = "docker push vapora/{service_name}:{version}",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [{ step = "build_docker", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "apply_kubernetes",
|
||||
action = "Apply Kubernetes manifests for the service",
|
||||
cmd = "kubectl apply -f {vapora_dir}/kubernetes/{service_name}/ --context {k8s_context}",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Stop },
|
||||
depends_on = [{ step = "push_image", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "verify_rollout",
|
||||
action = "Wait for rollout to complete and verify pod health",
|
||||
cmd = "kubectl rollout status deployment/{service_name} --context {k8s_context} --timeout=300s",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Retry },
|
||||
depends_on = [{ step = "apply_kubernetes", kind = 'OnSuccess }],
|
||||
},
|
||||
|
||||
{
|
||||
id = "notify_ecosystem",
|
||||
action = "Publish project-state-changed event to ecosystem NATS",
|
||||
cmd = "nu {stratumiops_dir}/scripts/nats-publish.nu project-created --project_name {service_name} --project_dir {vapora_dir} --stack rust-tokio --type service",
|
||||
actor = 'Agent,
|
||||
on_error = { strategy = 'Continue },
|
||||
depends_on = [{ step = "verify_rollout", kind = 'OnSuccess }],
|
||||
},
|
||||
],
|
||||
|
||||
postconditions = [
|
||||
"Service pods are Running in the target Kubernetes cluster",
|
||||
"Deployment visible in ecosystem NATS as project.created event",
|
||||
"Budget ledger updated with deployment compute cost",
|
||||
],
|
||||
} | (s.Mode String)
|
||||
5
reflection/qa.ncl
Normal file
5
reflection/qa.ncl
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
let s = import "qa" in
|
||||
|
||||
{
|
||||
entries = [],
|
||||
} | s.QaStore
|
||||
5
reflection/search_bookmarks.ncl
Normal file
5
reflection/search_bookmarks.ncl
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
let s = import "search_bookmarks" in
|
||||
|
||||
{
|
||||
entries = [],
|
||||
} | s.BookmarkStore
|
||||
23
schemas/agent-task.ncl
Normal file
23
schemas/agent-task.ncl
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
let capability_type = [|
|
||||
'CodeGeneration,
|
||||
'CodeReview,
|
||||
'DocumentSummarization,
|
||||
'SemanticSearch,
|
||||
'WorkflowOrchestration,
|
||||
'BudgetValidation,
|
||||
|] in
|
||||
|
||||
{
|
||||
capabilities = [
|
||||
"CodeGeneration",
|
||||
"CodeReview",
|
||||
"DocumentSummarization",
|
||||
"SemanticSearch",
|
||||
"WorkflowOrchestration",
|
||||
"BudgetValidation",
|
||||
],
|
||||
|
||||
max_budget_per_task = 50000,
|
||||
|
||||
allowed_tenants | Array String | default = [],
|
||||
}
|
||||
56
scripts/check-gate.nu
Normal file
56
scripts/check-gate.nu
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
#!/usr/bin/env nu
|
||||
# scripts/check-gate.nu
|
||||
# Query the vapora gate ontology for a given signal.
|
||||
# Exits 0 if at least one active membrana accepts the signal, 1 otherwise.
|
||||
#
|
||||
# Usage:
|
||||
# nu scripts/check-gate.nu --ontology .ontology --signal DepthDemonstrated
|
||||
# nu scripts/check-gate.nu --ontology .ontology --signal PreguntaQueRompeElMarco --verbose
|
||||
|
||||
def main [
|
||||
--ontology: path = ".ontology", # Path to the project .ontology directory
|
||||
--signal: string = "", # Signal to check (TipoSenal variant name)
|
||||
--verbose, # Print full membrana details
|
||||
] {
|
||||
if ($signal | is-empty) {
|
||||
error make { msg: "required flag --signal is missing" }
|
||||
}
|
||||
|
||||
let gate_file = $ontology | path join "gate.ncl"
|
||||
|
||||
if not ($gate_file | path exists) {
|
||||
error make { msg: $"gate.ncl not found at '($gate_file)'" }
|
||||
}
|
||||
|
||||
let result = do { ^nickel export --format json $gate_file } | complete
|
||||
if $result.exit_code != 0 {
|
||||
error make { msg: $"nickel export failed: ($result.stderr | str trim)" }
|
||||
}
|
||||
|
||||
let config = $result.stdout | from json
|
||||
let membranas = $config.membranas
|
||||
|
||||
let accepting = $membranas | where { |m|
|
||||
$m.activa and ($m.acepta | any { |s| $s == $signal })
|
||||
}
|
||||
|
||||
if $verbose {
|
||||
print $"Signal: ($signal)"
|
||||
print $"Active membranas checked: ($membranas | where activa | length)"
|
||||
if not ($accepting | is-empty) {
|
||||
print "Accepting membranas:"
|
||||
$accepting | each { |m|
|
||||
print $" ✓ ($m.id) [permeabilidad=($m.permeabilidad)] — ($m.descripcion)"
|
||||
} | ignore
|
||||
} else {
|
||||
print "No active membrana accepts this signal."
|
||||
}
|
||||
}
|
||||
|
||||
if ($accepting | is-empty) {
|
||||
print $"gate: BLOCKED — no active membrana accepts signal '($signal)'"
|
||||
exit 1
|
||||
}
|
||||
|
||||
print $"gate: PASS — ($accepting | length) membrana(s) accept signal '($signal)'"
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue