diff --git a/.ontology/core.ncl b/.ontology/core.ncl index 43e5e76..a7c5873 100644 --- a/.ontology/core.ncl +++ b/.ontology/core.ncl @@ -79,9 +79,11 @@ let d = import "../ontology/defaults/core.ncl" in "adrs/adr-004-ncl-pipe-bootstrap-pattern.ncl", "adrs/adr-005-unified-auth-session-model.ncl", "adrs/adr-006-nushell-0111-string-interpolation-compat.ncl", + "adrs/adr-007-api-surface-discoverability-onto-api-proc-macro.ncl", + "adrs/adr-008-ncl-first-config-validation-and-override-layer.ncl", "CHANGELOG.md", ], - adrs = ["adr-001", "adr-002", "adr-003", "adr-004", "adr-005", "adr-006"], + adrs = ["adr-001", "adr-002", "adr-003", "adr-004", "adr-005", "adr-006", "adr-007", "adr-008"], }, d.make_node { @@ -367,6 +369,26 @@ let d = import "../ontology/defaults/core.ncl" in ], }, + d.make_node { + id = "config-surface", + name = "Config Surface", + pole = 'Yang, + level = 'Practice, + description = "Per-project config introspection, coherence verification, and documented mutation. Rust structs annotated with #[derive(ConfigFields)] + #[config_section(id, ncl_file)] emit inventory::submit!(ConfigFieldsEntry{...}) at link time — the same inventory pattern as API catalog. The daemon queries inventory::iter::() at startup to build a zero-maintenance registry of which Rust fields each struct reads from each NCL section. Multi-consumer coherence compares this registry against the NCL export, Nu script accessors, and CI pipeline fields declared in manifest.ncl's config_surface — any NCL field claimed by no consumer is flagged unclaimed. API endpoints: GET /projects/{slug}/config (full export), /config/{section} (single section), /config/schema (sections with contracts and consumers), /config/coherence (multi-consumer diff), /config/quickref (generated documentation with rationales, override history, coherence status). 
PUT /projects/{slug}/config/{section} mutates via an override layer: writes {section}.overrides.ncl with audit metadata (actor, reason, timestamp, previous value), appends a single import to the entry point (idempotent), validates with nickel export, reverts on contract violation. NCL contracts (std.contract.from_validator) enforce field constraints (enums, positive numbers, port ranges) before any Rust struct is populated — Nickel is the single validation layer. Ontoref describes its own config via .ontoref/contracts.ncl applying LogConfig and DaemonConfig contracts.", + invariant = false, + artifact_paths = [ + "crates/ontoref-daemon/src/config.rs", + "crates/ontoref-daemon/src/config_coherence.rs", + "crates/ontoref-daemon/src/api.rs", + "crates/ontoref-derive/src/lib.rs", + "crates/ontoref-ontology/src/lib.rs", + "ontology/schemas/manifest.ncl", + ".ontoref/contracts.ncl", + ".ontoref/config.ncl", + ], + adrs = ["adr-007", "adr-008"], + }, + ], edges = [ @@ -452,5 +474,18 @@ let d = import "../ontology/defaults/core.ncl" in note = "--gen-keys bootstraps the first keys into project.ncl during setup." }, { from = "project-onboarding", to = "daemon-config-management", kind = 'DependsOn, weight = 'Medium }, + # Config Surface edges + { from = "config-surface", to = "ontoref-daemon", kind = 'ManifestsIn, weight = 'High }, + { from = "config-surface", to = "ontoref-ontology-crate", kind = 'DependsOn, weight = 'High, + note = "ConfigFieldsEntry struct and inventory::collect!(ConfigFieldsEntry) live in ontoref-ontology — the zero-dep adoption surface." }, + { from = "config-surface", to = "api-catalog-surface", kind = 'Complements, weight = 'High, + note = "#[derive(ConfigFields)] extends the same inventory::submit! pattern as #[onto_api]. Both emit link-time registration entries collected by the daemon at startup." 
}, + { from = "config-surface", to = "dag-formalized", kind = 'ManifestsIn, weight = 'High, + note = "Config sections, consumers, and coherence reports are typed NCL/Rust records — the config tree is a queryable subgraph." }, + { from = "config-surface", to = "self-describing", kind = 'Complements, weight = 'High, + note = "Ontoref applies its own LogConfig and DaemonConfig contracts in .ontoref/contracts.ncl — the config surface is self-demonstrated, not just specified." }, + { from = "config-surface", to = "adopt-ontoref-tooling", kind = 'Complements, weight = 'Medium, + note = "Consumer projects adopting ontoref can annotate their config structs with #[derive(ConfigFields)] to participate in the coherence registry." }, + ], } diff --git a/.ontology/manifest.ncl b/.ontology/manifest.ncl index 7a07d4d..172736f 100644 --- a/.ontology/manifest.ncl +++ b/.ontology/manifest.ncl @@ -75,6 +75,144 @@ m.make_manifest { }, ], + config_surface = m.make_config_surface { + config_root = ".ontoref", + entry_point = "config.ncl", + kind = 'SingleFile, + contracts_path = ".ontoref", + + sections = [ + m.make_config_section { + id = "nickel_import_paths", + file = "config.ncl", + description = "Ordered list of directories added to NICKEL_IMPORT_PATH when invoking nickel.", + rationale = "Ontoref resolves ontology schemas, ADRs, and reflection schemas through this path list. 
Order matters: earlier entries shadow later ones.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "env", + kind = 'NuScript, + ref = "reflection/modules/env.nu", + fields = ["nickel_import_paths"], + }, + m.make_config_consumer { + id = "daemon-main", + kind = 'RustStruct, + ref = "crates/ontoref-daemon/src/main.rs", + fields = ["nickel_import_paths"], + }, + ], + }, + m.make_config_section { + id = "ui", + file = "config.ncl", + description = "Daemon HTTP/UI settings: template directory, static assets, TLS certs, logo override.", + rationale = "Allows dev-mode templates to be served from the source tree instead of the installed path, and TLS to be toggled without recompiling.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "daemon-main", + kind = 'RustStruct, + ref = "crates/ontoref-daemon/src/main.rs", + fields = ["templates_dir", "public_dir", "tls_cert", "tls_key", "logo"], + }, + ], + }, + m.make_config_section { + id = "log", + file = "config.ncl", + contract = "contracts.ncl → LogConfig", + description = "Daemon structured logging: level, rotation policy, archive and retention.", + rationale = "Daily rotation with 7-file retention keeps log footprint bounded; separate archive path allows cold storage without disrupting active logs.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "daemon-main", + kind = 'RustStruct, + ref = "crates/ontoref-daemon/src/main.rs", + fields = ["level", "path", "rotation", "compress", "archive", "max_files"], + }, + ], + }, + m.make_config_section { + id = "mode_run", + file = "config.ncl", + description = "ACL rules for which actors may execute which reflection modes.", + rationale = "Agent and CI actors need unrestricted mode access; human actors are gated per mode to prevent accidental destructive operations.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "daemon-main", + kind = 'RustStruct, + ref = "crates/ontoref-daemon/src/main.rs", + 
fields = ["rules"], + }, + ], + }, + m.make_config_section { + id = "nats_events", + file = "config.ncl", + description = "NATS event bus integration: enabled flag, server URL, emit/subscribe topic lists, handlers directory.", + rationale = "Disabled by default to keep ontoref zero-dependency for projects without a NATS deployment. Feature-gated in the daemon crate.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "daemon-main", + kind = 'RustStruct, + ref = "crates/ontoref-daemon/src/main.rs", + fields = ["enabled", "url", "emit", "subscribe", "handlers_dir"], + }, + ], + }, + m.make_config_section { + id = "actor_init", + file = "config.ncl", + description = "Per-actor bootstrap: which reflection mode to auto-run on first invocation.", + rationale = "Agents always auto-run 'describe capabilities' so they orient themselves before acting; developers and CI start clean.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "env", + kind = 'NuScript, + ref = "reflection/modules/env.nu", + fields = ["actor", "mode", "auto_run"], + }, + ], + }, + m.make_config_section { + id = "quick_actions", + file = "config.ncl", + description = "Shortcut actions surfaced in the daemon UI dashboard: id, label, icon, category, mode, allowed actors.", + rationale = "Frequently used modes (generate-mdbook, sync-ontology, coder-workflow) promoted to one-click access without navigating the modes list.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "daemon-ui", + kind = 'RustStruct, + ref = "crates/ontoref-daemon/src/ui/handlers.rs", + fields = ["id", "label", "icon", "category", "mode", "actors"], + }, + ], + }, + m.make_config_section { + id = "daemon", + file = "config.ncl", + contract = "contracts.ncl → DaemonConfig", + description = "Runtime overrides for daemon CLI defaults: port, timeouts, sweep intervals, notification limits.", + rationale = "All fields are optional — absent fields use the daemon's built-in CLI defaults. 
Set only when the defaults need project-specific tuning without rebuilding the binary.", + mutable = true, + consumers = [ + m.make_config_consumer { + id = "daemon-config", + kind = 'RustStruct, + ref = "crates/ontoref-daemon/src/config.rs → DaemonRuntimeConfig", + fields = ["port", "idle_timeout", "invalidation_interval", "actor_sweep_interval", "actor_stale_timeout", "max_notifications", "notification_ack_required"], + }, + ], + }, + ], + }, + layers = [ m.make_layer { id = "protocol", diff --git a/.ontology/state.ncl b/.ontology/state.ncl index d55868d..6c46d3f 100644 --- a/.ontology/state.ncl +++ b/.ontology/state.ncl @@ -25,7 +25,7 @@ let d = import "../ontology/defaults/state.ncl" in to = "protocol-stable", condition = "ADR-001 accepted, ontoref.dev published, at least two external projects consuming the protocol.", catalyst = "First external adoption.", - blocker = "ontoref.dev not yet published; no external consumers yet. Auth model complete. Install pipeline complete. Personal/career schema layer present; content modes operational. Nu 0.111 compat fixed (ADR-006). Protocol v2 complete: manifest.ncl + connections.ncl templates, update_ontoref mode, API catalog via #[onto_api], describe diff, describe api, per-file versioning. Syntaxis syntaxis-ontology crate has pending ES→EN migration errors.", + blocker = "ontoref.dev not yet published; no external consumers yet. Auth model complete. Install pipeline complete. Personal/career schema layer present; content modes operational. Nu 0.111 compat fixed (ADR-006). Protocol v2 complete: manifest.ncl + connections.ncl templates, update_ontoref mode, API catalog via #[onto_api], describe diff, describe api, per-file versioning. Config surface complete (ADR-008): typed DaemonNclConfig, #[derive(ConfigFields)] inventory coherence registry, NCL contracts (LogConfig/DaemonConfig in .ontoref/contracts.ncl), override-layer mutation API, multi-consumer manifest schema. 
Syntaxis syntaxis-ontology crate has pending ES→EN migration errors.", horizon = 'Months, }, ], @@ -52,7 +52,7 @@ let d = import "../ontology/defaults/state.ncl" in from = "modes-and-web-present", to = "fully-self-described", condition = "At least 3 ADRs accepted, reflection/backlog.ncl present, describe project returns complete picture.", - catalyst = "ADR-001–ADR-006 authored (6 ADRs present). Auth model, project onboarding, and session management nodes added in 2026-03-13. Personal/career/project-card schemas, 5 content modes, search bookmarks, and ADR-006 (Nu 0.111 compat) added in session 2026-03-15. Session 2026-03-23: api-catalog-surface node added (#[onto_api] proc-macro + inventory catalog), describe-query-layer updated (diff + api subcommands), adopt-ontoref-tooling updated (update_ontoref mode + manifest/connections templates + enrichment prompt), ontoref-daemon updated (11 pages, 29 MCP tools, per-file versioning, API catalog endpoint).", + catalyst = "ADR-001–ADR-006 authored (6 ADRs present). Auth model, project onboarding, and session management nodes added in 2026-03-13. Personal/career/project-card schemas, 5 content modes, search bookmarks, and ADR-006 (Nu 0.111 compat) added in session 2026-03-15. Session 2026-03-23: api-catalog-surface node added (#[onto_api] proc-macro + inventory catalog), describe-query-layer updated (diff + api subcommands), adopt-ontoref-tooling updated (update_ontoref mode + manifest/connections templates + enrichment prompt), ontoref-daemon updated (11 pages, 29 MCP tools, per-file versioning, API catalog endpoint). Session 2026-03-26: config-surface node added — typed DaemonNclConfig (parse-at-boundary pattern), #[derive(ConfigFields)] coherence registry, override-layer mutation API (PUT /config/{section}), NCL contracts (.ontoref/contracts.ncl: LogConfig + DaemonConfig), manifest config_surface with multi-consumer sections. 
ADR-007 (inventory/onto_api) extended to ConfigFields; ADR-008 (NCL-first config validation + override-layer mutation).", blocker = "none", horizon = 'Weeks, }, diff --git a/.ontoref/config.ncl b/.ontoref/config.ncl index 5b4cafc..3189bf7 100644 --- a/.ontoref/config.ncl +++ b/.ontoref/config.ncl @@ -1,3 +1,5 @@ +let C = import "contracts.ncl" in + { nickel_import_paths = [".", ".ontology", "ontology/schemas", "adrs", "reflection/requirements", "reflection/schemas"], @@ -9,7 +11,7 @@ logo = "ontoref-logo.svg", }, - log = { + log | C.LogConfig = { level = "info", path = "logs", rotation = "daily", diff --git a/.ontoref/contracts.ncl b/.ontoref/contracts.ncl new file mode 100644 index 0000000..eb5755d --- /dev/null +++ b/.ontoref/contracts.ncl @@ -0,0 +1,62 @@ +# Contracts for .ontoref/config.ncl sections. +# +# Applied in config.ncl with `section | C.SectionContract = { ... }`. +# Consumed by the daemon coherence / quickref tooling via the +# config_surface.sections[].contract field in .ontology/manifest.ncl. 
+ +let contract = std.contract in + +# ── Primitive contracts ────────────────────────────────────────────────────── + +let LogLevel = contract.from_validator (fun value => + if std.array.elem value ["error", "warn", "info", "debug", "trace"] then + 'Ok + else + 'Error { message = "log.level must be one of: error, warn, info, debug, trace" } +) in + +let LogRotation = contract.from_validator (fun value => + if std.array.elem value ["daily", "hourly", "never"] then + 'Ok + else + 'Error { message = "log.rotation must be one of: daily, hourly, never" } +) in + +let PositiveInt = contract.from_validator (fun value => + if std.is_number value && value > 0 then + 'Ok + else + 'Error { message = "value must be a positive integer (> 0)" } +) in + +let Port = contract.from_validator (fun value => + if std.is_number value && value >= 1 && value <= 65535 then + 'Ok + else + 'Error { message = "port must be a number between 1 and 65535" } +) in + +# ── Section contracts ──────────────────────────────────────────────────────── + +{ + LogConfig = { + level | LogLevel | default = "info", + path | String | default = "logs", + rotation | LogRotation | default = "daily", + compress | Bool | default = false, + archive | String | default = "logs-archive", + max_files | PositiveInt | default = 7, + }, + + # All daemon fields are optional — they override CLI defaults only when set. + # Absent fields fall back to the daemon's built-in defaults (see Cli struct). 
+ DaemonConfig = { + port | Port | optional, + idle_timeout | PositiveInt | optional, + invalidation_interval | PositiveInt | optional, + actor_sweep_interval | PositiveInt | optional, + actor_stale_timeout | PositiveInt | optional, + max_notifications | PositiveInt | optional, + notification_ack_required | Array String | default = [], + }, +} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 90cf45c..5f85508 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: - id: rust-test name: Rust tests - entry: bash -c 'cargo test --workspace' + entry: bash -c 'cargo test --all-features --workspace' language: system types: [rust] pass_filenames: false diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b4d573..d237930 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,106 @@ ADRs referenced below live in `adrs/` as typed Nickel records. ## [Unreleased] +### Config Surface — typed config, NCL contracts, override-layer mutation + +Per-project config introspection, coherence verification, and audited mutation. NCL contracts are the single +validation gate; config mutation never modifies source NCL files. + +#### `crates/ontoref-daemon/src/config.rs` — typed `DaemonNclConfig` (parse-at-boundary) + +- `DaemonNclConfig` — top-level deserialize target for `nickel export .ontoref/config.ncl | daemon --config-stdin`; + fields: `nickel_import_paths`, `ui: UiConfig`, `log: LogConfig`, `mode_run: ModeRunConfig`, + `nats_events: NatsEventsConfig`, `actor_init: Vec`, `quick_actions: Vec`, + `daemon: DaemonRuntimeConfig`. `#[cfg(feature = "db")] db: DbConfig`. All `#[serde(default)]`. +- Each section struct derives `#[derive(Deserialize, ConfigFields)]` + `#[config_section(id, ncl_file)]` + — emits `inventory::submit!(ConfigFieldsEntry{...})` at link time. +- `DaemonRuntimeConfig` — optional port, timeouts, sweep intervals, `notification_ack_required: Vec`. 
+ +#### `crates/ontoref-daemon/src/main.rs` — 3-tuple bootstrap block + +- Bootstrap block changed to `(nickel_import_path, loaded_ncl_config, stdin_raw)` — `loaded_ncl_config: Option` + replaces raw `Option`. `stdin_raw: Option` retained only + for service-mode `projects` extraction. +- `apply_stdin_config` now deserializes JSON to `DaemonNclConfig` before applying CLI overrides; + `apply_ui_config` signature changed from `&serde_json::Value` to `&UiConfig`. +- `load_config_overrides` returns `(Option, Option)` — all `.get("daemon").and_then(...)` chains + replaced with typed field access (`ncl.daemon.port`, etc.). +- NATS call site updated to `loaded_ncl_config.as_ref().map(|c| &c.nats_events)`. +- `resolve_asset_dir` gated with `#[cfg(feature = "ui")]`; `#[allow(unused_variables)]` on bootstrap tuple for `--no-default-features`. + +#### `crates/ontoref-derive/src/lib.rs` — `#[derive(ConfigFields)]` macro + +- New `proc_macro_derive` `ConfigFields` with helper attribute `config_section(id, ncl_file)`. + Extracts serde-renamed field names; emits `inventory::submit!(ConfigFieldsEntry{section_id, ncl_file, struct_name, fields})`. +- Extracted `serde_rename_of(field)` helper to fix `clippy::excessive_nesting` (depth was 6). + Field names collected via `.iter().map(|f| serde_rename_of(f).unwrap_or_else(|| f.ident...)).filter(|s| !s.is_empty())`. + +#### `crates/ontoref-daemon/src/config_coherence.rs` — clippy fixes + +- `and_then(|_| full_export.as_ref())` → `.and(full_export.as_ref())` (unnecessary lazy evaluation). +- Extracted `merge_meta_into_section` helper to reduce nesting depth for `_meta_*` record merging. + +#### `crates/ontoref-daemon/src/api.rs` — `index_section_fields` helper + +- Extracted `index_section_fields` to fix `clippy::excessive_nesting` at the cross-project field indexing loop. + Skips `_meta_*` and `_overrides_meta` keys; indexes `(section_id, field) → Vec<(slug, value)>`. 
+ +#### `.ontoref/contracts.ncl` — new file + +NCL contracts for ontoref's own config sections using `std.contract.from_validator` (not the deprecated `fun label value =>` pattern): + +- `LogLevel` — enum validator: `error | warn | info | debug | trace` +- `LogRotation` — enum validator: `daily | hourly | never` +- `PositiveInt` — `value > 0 && is_number` +- `Port` — `value >= 1 && value <= 65535` +- `LogConfig` — applies per-field contracts + defaults (`level = "info"`, `rotation = "daily"`, `max_files = 7`) +- `DaemonConfig` — all fields optional (override-only); port, timeouts, intervals, `notification_ack_required` + +#### `.ontoref/config.ncl` — contracts applied + +- `let C = import "contracts.ncl"` added at top. +- `log | C.LogConfig = { ... }` — contract enforced before JSON reaches Rust. + +#### `.ontology/manifest.ncl` — config surface enriched + +- `contracts_path = ".ontoref"` added to `config_surface`. +- `log` section: `contract = "contracts.ncl → LogConfig"` added. +- New `daemon` section: `contract = "contracts.ncl → DaemonConfig"`, consumer `daemon-config` pointing to + `crates/ontoref-daemon/src/config.rs → DaemonRuntimeConfig` with 7 declared fields. + +### Protocol + +- ADR-007 extended: `#[derive(ConfigFields)]` is a second application of the `inventory::submit!` / `inventory::collect!` + linker registration pattern first established by `#[onto_api]`. Both are now referenced from the `config-surface` node. +- ADR-008 accepted: NCL-first config validation and override-layer mutation. NCL contracts are the single validation gate; + Rust structs are contract-trusted with `#[serde(default)]`. Config mutation writes `{section}.overrides.ncl` with + `_overrides_meta` audit record; original NCL source files are never modified. nickel export validates the merged + result before commit; contract violations revert the override file. 
+ ([adr-008](adrs/adr-008-ncl-first-config-validation-and-override-layer.ncl)) + +### Self-Description — on+re Update + +`.ontology/core.ncl` — new Practice node, updated nodes, 6 new edges: + +| Change | Detail | +| --- | --- | +| New node `config-surface` | Yang — typed DaemonNclConfig, ConfigFields inventory registry, override-layer mutation API, NCL contracts, multi-consumer manifest schema; `adrs = ["adr-007", "adr-008"]` | +| Updated `adr-lifecycle` | ADR-007 + ADR-008 added to `artifact_paths` and `adrs` list (now 8 ADRs) | + +New edges: `config-surface → ontoref-daemon` (ManifestsIn/High), +`config-surface → ontoref-ontology-crate` (DependsOn/High — ConfigFieldsEntry lives in zero-dep crate), +`config-surface → api-catalog-surface` (Complements/High — shared inventory pattern), +`config-surface → dag-formalized` (ManifestsIn/High), +`config-surface → self-describing` (Complements/High — ontoref validates its own config with its own contracts), +`config-surface → adopt-ontoref-tooling` (Complements/Medium). + +`.ontology/state.ncl` — `protocol-maturity` blocker updated to record config surface completion. +`self-description-coverage` catalyst updated with session 2026-03-26 additions. + +Previous: 4 axioms, 2 tensions, 27 practices. Current: 4 axioms, 2 tensions, 28 practices. + +--- + ### API Catalog Surface — `#[onto_api]` proc-macro Annotated HTTP surface discoverable at compile time via `inventory`. diff --git a/Cargo.lock b/Cargo.lock index 14f27e1..38438fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2891,6 +2891,7 @@ dependencies = [ "libc", "notify", "ontoref-derive", + "ontoref-ontology", "platform-nats", "reqwest", "rmcp", diff --git a/README.md b/README.md index 1145ce9..2a45b84 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ crates/ Rust implementation — typed struct loaders and mode executo | `ontoref-ontology` | `.ontology/` NCL → typed Rust structs: Node, Edge, Dimension, Gate, Membrane. 
`Node` carries `artifact_paths` and `adrs` (`Vec`, both `serde(default)`). Graph traversal, invariant queries. Zero deps. | | `ontoref-reflection` | NCL DAG contract executor: ADR lifecycle, step dep resolution, config seal. `stratum-graph` + `stratum-state` required. | | `ontoref-daemon` | HTTP UI (11 pages), actor registry, notification barrier, MCP (29 tools), search engine, search bookmarks, SurrealDB, NCL export cache, per-file ontology versioning, annotated API catalog. | -| `ontoref-derive` | Proc-macro crate. `#[onto_api(...)]` annotates HTTP handlers; `inventory::submit!` emits route entries at link time. `GET /api/catalog` aggregates them via `inventory::collect!`. | +| `ontoref-derive` | Proc-macro crate. `#[onto_api(...)]` annotates HTTP handlers; `#[derive(ConfigFields)]` + `#[config_section(id, ncl_file)]` registers config struct fields — both emit `inventory::submit!` at link time. `GET /api/catalog` and `GET /config/coherence` aggregate via `inventory::collect!`. | `ontoref-daemon` caches `nickel export` results (keyed by path + mtime), reducing full sync scans from ~2m42s to <30s. The daemon is always optional — every module falls back to direct @@ -110,6 +110,27 @@ MISSING/STALE/DRIFT/BROKEN items are found. Never applies changes — `apply` is `.ontoref/config.ncl`. Accessible from HTTP (`/actions`), CLI (`ontoref`), and MCP (`ontoref_action_list/add`). +**Config Surface** — per-project config introspection, coherence verification, and documented +mutation. Rust structs annotated with `#[derive(ConfigFields)]` + `#[config_section(id, ncl_file)]` +register their field names at link time via `inventory::submit!(ConfigFieldsEntry{...})`. The daemon +queries `inventory::iter::()` at startup to build a zero-maintenance registry of +which Rust fields each struct reads from each NCL section. 
Multi-consumer coherence +(`GET /projects/{slug}/config/coherence`) compares the inventory registry against NCL export keys, +Nu script accessor patterns, and CI fields declared in `manifest.ncl` — any NCL field claimed by no +consumer is flagged unclaimed. `GET /projects/{slug}/config/quickref` generates living documentation +(rationales, override history, coherence status) on demand. + +Config mutation never modifies source NCL files. `PUT /projects/{slug}/config/{section}` writes a +`{section}.overrides.ncl` file with only the changed fields plus a `_overrides_meta` audit record +(actor, reason, timestamp, previous value), then appends a single idempotent import line to the +entry-point NCL using the `&` merge operator. `nickel export` validates the merged result against the +section's declared contract before committing; contract violations revert the override file and return +the nickel error verbatim. NCL contracts (`std.contract.from_validator`) are the single validation +gate — Rust structs are contract-trusted readers with `#[serde(default)]`. + +Ontoref demonstrates the pattern on itself: `.ontoref/contracts.ncl` applies `LogConfig` and +`DaemonConfig` contracts to `.ontoref/config.ncl`. 
([ADR-008](adrs/adr-008-ncl-first-config-validation-and-override-layer.ncl)) + ## Install ```sh diff --git a/adrs/adr-004-ncl-pipe-bootstrap-pattern.ncl b/adrs/adr-004-ncl-pipe-bootstrap-pattern.ncl index ad1e854..373db69 100644 --- a/adrs/adr-004-ncl-pipe-bootstrap-pattern.ncl +++ b/adrs/adr-004-ncl-pipe-bootstrap-pattern.ncl @@ -74,11 +74,7 @@ d.make_adr { claim = "The bootstrap pipeline must not write an intermediate config file to disk at any stage", scope = "scripts/ontoref-daemon-start, reflection/nulib/bootstrap.nu", severity = 'Hard, - check = 'Grep { - pattern = "tee |tempfile|mktemp", - paths = ["scripts/ontoref-daemon-start"], - must_be_empty = true, - }, + check = { tag = 'Grep, pattern = "tee |tempfile|mktemp", paths = ["scripts/ontoref-daemon-start"], must_be_empty = true }, rationale = "An intermediate file defeats the purpose of the pipeline. If a file is needed for debugging, use --dry-run which prints to stdout only.", }, { @@ -86,7 +82,7 @@ d.make_adr { claim = "The bash wrapper must depend only on bash, nickel, and the target binary — no Nu, no jq unless SOPS/Vault stage is active", scope = "scripts/ontoref-daemon-start", severity = 'Hard, - check = 'FileExists { path = "scripts/ontoref-daemon-start", present = true }, + check = { tag = 'FileExists, path = "scripts/ontoref-daemon-start", present = true }, rationale = "System service managers may not have Nu on PATH. 
The wrapper must be portable across launchctl, systemd, Docker entrypoints.", }, { @@ -94,11 +90,7 @@ d.make_adr { claim = "The target process must redirect stdin to /dev/null after reading the config JSON", scope = "crates/ontoref-daemon/src/main.rs", severity = 'Hard, - check = 'Grep { - pattern = "/dev/null|stdin.*close|drop.*stdin", - paths = ["crates/ontoref-daemon/src/main.rs"], - must_be_empty = false, - }, + check = { tag = 'Grep, pattern = "/dev/null|stdin.*close|drop.*stdin", paths = ["crates/ontoref-daemon/src/main.rs"], must_be_empty = false }, rationale = "stdin left open blocks terminal interaction and causes confusion in interactive sessions. The daemon is a server — it must not hold stdin.", }, { @@ -106,10 +98,7 @@ d.make_adr { claim = "NCL config files used with ncl-bootstrap must not contain plaintext secret values — only SecretRef placeholders or empty fields", scope = ".ontoref/config.ncl, APP_SUPPORT/ontoref/config.ncl", severity = 'Hard, - check = 'NuCmd { - cmd = "nickel export .ontoref/config.ncl | from json | transpose key value | where { |row| $row.key =~ 'password|secret|key|token|hash' and ($row.value | describe) == 'string' and ($row.value | str length) > 0 } | length | into string", - expect_exit = 0, - }, + check = { tag = 'NuCmd, cmd = "nickel export .ontoref/config.ncl | from json | transpose key value | where { |row| $row.key =~ 'password|secret|key|token|hash' and ($row.value | describe) == 'string' and ($row.value | str length) > 0 } | length | into string", expect_exit = 0 }, rationale = "If secrets are in the NCL file, they are readable as plaintext by anyone with filesystem access. 
Secrets enter the pipeline only at the SOPS/Vault stage.", }, ], diff --git a/adrs/adr-005-unified-auth-session-model.ncl b/adrs/adr-005-unified-auth-session-model.ncl index 5482606..c63b978 100644 --- a/adrs/adr-005-unified-auth-session-model.ncl +++ b/adrs/adr-005-unified-auth-session-model.ncl @@ -81,11 +81,7 @@ d.make_adr { claim = "GET /sessions responses must never include the bearer token, only the public session id", scope = "crates/ontoref-daemon/src/session.rs, crates/ontoref-daemon/src/api.rs", severity = 'Hard, - check = 'Grep { - pattern = "pub token", - paths = ["crates/ontoref-daemon/src/session.rs"], - must_be_empty = true, - }, + check = { tag = 'Grep, pattern = "pub token", paths = ["crates/ontoref-daemon/src/session.rs"], must_be_empty = true }, rationale = "Exposing bearer tokens in list responses would allow admins to impersonate other sessions. The session.id field is a second UUID v4, safe to expose.", }, { @@ -93,11 +89,7 @@ d.make_adr { claim = "POST /sessions must not require authentication — it is the credential exchange endpoint", scope = "crates/ontoref-daemon/src/api.rs", severity = 'Hard, - check = 'Grep { - pattern = "require_session|check_primary_auth", - paths = ["crates/ontoref-daemon/src/api.rs"], - must_be_empty = false, - }, + check = { tag = 'Grep, pattern = "require_session|check_primary_auth", paths = ["crates/ontoref-daemon/src/api.rs"], must_be_empty = false }, rationale = "Requiring auth to obtain auth is a bootstrap deadlock. 
Rate-limiting on failure is the correct mitigation, not pre-authentication.", }, { @@ -105,11 +97,7 @@ d.make_adr { claim = "PUT /projects/{slug}/keys must call revoke_all_for_slug before persisting new keys", scope = "crates/ontoref-daemon/src/api.rs", severity = 'Hard, - check = 'Grep { - pattern = "revoke_all_for_slug", - paths = ["crates/ontoref-daemon/src/api.rs"], - must_be_empty = false, - }, + check = { tag = 'Grep, pattern = "revoke_all_for_slug", paths = ["crates/ontoref-daemon/src/api.rs"], must_be_empty = false }, rationale = "Sessions authenticated against the old key set become invalid after rotation. Failing to revoke them would leave stale sessions with elevated access.", }, { @@ -117,11 +105,7 @@ d.make_adr { claim = "All CLI HTTP calls to the daemon must use bearer-args from store.nu — no hardcoded curl without auth args", scope = "reflection/modules/store.nu, reflection/bin/ontoref.nu", severity = 'Soft, - check = 'Grep { - pattern = "bearer-args|http-get|http-post-json|http-delete", - paths = ["reflection/modules/store.nu"], - must_be_empty = false, - }, + check = { tag = 'Grep, pattern = "bearer-args|http-get|http-post-json|http-delete", paths = ["reflection/modules/store.nu"], must_be_empty = false }, rationale = "ONTOREF_TOKEN is the single credential source for CLI. 
Direct curl without bearer-args bypasses the auth model silently.", }, ], diff --git a/adrs/adr-006-nushell-0111-string-interpolation-compat.ncl b/adrs/adr-006-nushell-0111-string-interpolation-compat.ncl index f5d131e..9574843 100644 --- a/adrs/adr-006-nushell-0111-string-interpolation-compat.ncl +++ b/adrs/adr-006-nushell-0111-string-interpolation-compat.ncl @@ -53,11 +53,7 @@ d.make_adr { claim = "String interpolations in ontoref.nu must not use `(identifier: expr)` patterns — use bare `identifier: (expr)` instead", scope = "ontoref (reflection/bin/ontoref.nu, all .nu files)", severity = 'Hard, - check = 'Grep { - pattern = "\\([a-z_]+: \\(", - paths = ["reflection/bin/ontoref.nu"], - must_be_empty = true, - }, + check = { tag = 'Grep, pattern = "\\([a-z_]+: \\(", paths = ["reflection/bin/ontoref.nu"], must_be_empty = true }, rationale = "Nushell 0.111 parses (identifier: expr) inside $\"...\" as a command call. The fix pattern (bare label + variable interpolation) is equivalent visually and immune to this parser behaviour.", }, { @@ -65,11 +61,7 @@ d.make_adr { claim = "Print statements with no variable interpolation must use plain strings, not `$\"...\"`", scope = "ontoref (all .nu files)", severity = 'Soft, - check = 'Grep { - pattern = "\\$\"[^%(]*\"", - paths = ["reflection"], - must_be_empty = true, - }, + check = { tag = 'Grep, pattern = "\\$\"[^%(]*\"", paths = ["reflection"], must_be_empty = true }, rationale = "Zero-interpolation `$\"...\"` strings are fragile against future parser changes and mislead readers into expecting variable substitution.", }, ], diff --git a/adrs/adr-007-api-surface-discoverability-onto-api-proc-macro.ncl b/adrs/adr-007-api-surface-discoverability-onto-api-proc-macro.ncl index 2f8e566..50fc355 100644 --- a/adrs/adr-007-api-surface-discoverability-onto-api-proc-macro.ncl +++ b/adrs/adr-007-api-surface-discoverability-onto-api-proc-macro.ncl @@ -61,11 +61,7 @@ d.make_adr { claim = "Every public HTTP handler in ontoref-daemon 
must carry #[onto_api(...)]", scope = "ontoref-daemon (crates/ontoref-daemon/src/api.rs, crates/ontoref-daemon/src/sync.rs)", severity = 'Hard, - check = 'Grep { - pattern = "#\\[onto_api", - paths = ["crates/ontoref-daemon/src/api.rs", "crates/ontoref-daemon/src/sync.rs"], - must_be_empty = false, - }, + check = { tag = 'Grep, pattern = "#\\[onto_api", paths = ["crates/ontoref-daemon/src/api.rs", "crates/ontoref-daemon/src/sync.rs"], must_be_empty = false }, rationale = "catalog() is only as complete as the set of annotated handlers. Unannotated handlers are invisible to agents, CLI, and the web UI — equivalent to undocumented and unauditable routes.", }, { @@ -73,11 +69,7 @@ d.make_adr { claim = "inventory must remain a workspace dependency gated behind the 'catalog' feature of ontoref-derive; ontoref-ontology must not depend on inventory", scope = "ontoref-ontology (Cargo.toml), ontoref-derive (Cargo.toml)", severity = 'Hard, - check = 'Grep { - pattern = "inventory", - paths = ["crates/ontoref-ontology/Cargo.toml"], - must_be_empty = true, - }, + check = { tag = 'Grep, pattern = "inventory", paths = ["crates/ontoref-ontology/Cargo.toml"], must_be_empty = true }, rationale = "ontoref-ontology is the zero-dep adoption surface (ADR-001). 
Adding inventory — even as an optional dep — violates that contract and makes protocol adoption heavier for downstream crates that only need typed NCL loading.", }, ], diff --git a/adrs/adr-008-ncl-first-config-validation-and-override-layer.ncl b/adrs/adr-008-ncl-first-config-validation-and-override-layer.ncl new file mode 100644 index 0000000..11fb2b4 --- /dev/null +++ b/adrs/adr-008-ncl-first-config-validation-and-override-layer.ncl @@ -0,0 +1,93 @@ +let d = import "adr-defaults.ncl" in + +d.make_adr { + id = "adr-008", + title = "NCL-First Config Validation and Override-Layer Mutation", + status = 'Accepted, + date = "2026-03-26", + + context = "The config surface feature adds per-project config introspection and mutation to ontoref-daemon. Two design questions arise: (1) Where does config field validation live — in NCL contracts, in Rust struct validation, or both? (2) How does a PUT /config/{section} request mutate a project's config without corrupting the source NCL files? Direct mutation via nickel export → JSON → write-back destroys NCL comments, contract annotations, and section merge structure. Duplicating validation in both NCL and Rust creates two sources of truth with guaranteed divergence. The config surface spans ontoref's own .ontoref/config.ncl and all consumer-project configs, making the choice of validation ownership a protocol-level constraint.", + + decision = "NCL contracts (std.contract.from_validator) are the single validation layer for all config fields. Rust serde structs are contract-trusted readers: they carry #[serde(default)] and consume pre-validated JSON from nickel export — no validator(), no custom Deserialize, no duplicate field constraints. Config mutation via PUT /projects/{slug}/config/{section} never modifies the original NCL source files. 
Instead it writes a {section}.overrides.ncl file containing only the changed fields plus a _overrides_meta audit record (actor, reason, timestamp, previous values), then appends a single idempotent import line to the entry-point NCL (using NCL's & merge operator so the override wins). nickel export validates the merged result against the section's declared contract before the mutation is committed; validation failure reverts the override file and returns the nickel error verbatim. Ontoref demonstrates this pattern on itself: .ontoref/contracts.ncl declares LogConfig and DaemonConfig contracts applied in .ontoref/config.ncl.", + + rationale = [ + { + claim = "NCL contracts are the correct validation boundary", + detail = "nickel export runs the contract check before any JSON reaches Rust. A value that violates LogLevel (must be one of error/warn/info/debug/trace) is rejected by nickel with a precise error message pointing to the exact field and contract. If Rust also validates, the two validators must stay in sync forever — and will diverge. Concentrating validation in NCL means the contract file is the authoritative spec for both the schema documentation and the runtime constraint.", + }, + { + claim = "Override-layer mutation preserves NCL structure integrity", + detail = "A round-trip of nickel export → JSON → overwrite produces a file with no comments, no contract annotations, no merge structure, and no section rationale. The override layer avoids this entirely: the source file is immutable, the override file contains only changed fields, and NCL's & merge operator applies them at export time. The override file is git-versioned, human-readable, and revertable by deletion. 
nickel export on the merged entry point validates the result through the declared contract — the same validation that runs in production.", + }, + { + claim = "Audit metadata in _overrides_meta closes the mutation traceability gap", + detail = "Each override file carries a top-level _overrides_meta record with managed_by = 'ontoref', created_at, and an entries array (field, from, to, reason, actor, ts). This record is consumed by GET /config/quickref to render an override history timeline and by GET /config/coherence to flag fields whose current value differs from the contract default. The metadata is a first-class NCL record — not a comment — so it survives export and is queryable by agents.", + }, + ], + + consequences = { + positive = [ + "Config field constraints are documented once (NCL contract) and enforced at the nickel export boundary — no duplication", + "Original NCL source files are immutable under daemon operation — diffs are clean, history is readable", + "nickel export validates override correctness before committing — contract violations return verbatim nickel errors to the caller", + "Override files are deletable to revert — no migration needed", + "_overrides_meta enables GET /config/quickref to render full change history with reasons", + "Ontoref's own .ontoref/contracts.ncl serves as a working example for consumer projects adopting the pattern", + ], + negative = [ + "Override layer requires the entry-point NCL to use & merge operators; SingleFile configs without section-level imports need a one-time restructure before overrides work", + "nickel export is the validation gate — validation errors are nickel syntax, not structured JSON; callers must parse nickel error output to surface friendly messages", + "#[serde(default)] on all Rust config structs means a missing NCL field silently uses the Rust default instead of erroring; the NCL contract (with its own defaults) is the intended fallback, not Rust", + ], + }, + + alternatives_considered = [ + 
{ + option = "Duplicate validation in Rust (validator crate or custom Deserialize)", + why_rejected = "Two validators for the same field inevitably diverge. The NCL contract is already the authoritative schema for documentation, MCP export, and quickref generation — adding a Rust duplicate makes it decorative. validator crate adds 8+ transitive dependencies and requires annotation churn across every config struct.", + }, + { + option = "Direct NCL file mutation (read → merge JSON → overwrite)", + why_rejected = "nickel export → JSON write-back destroys comments, contract annotations (| C.LogConfig), section merge structure, and in-file rationale. The resulting file is syntactically valid but semantically impoverished. Once a file is overwritten this way, the original structure cannot be recovered from git history if the file was also changed manually between sessions.", + }, + { + option = "Separate config store (JSON or TOML side-file)", + why_rejected = "A side-file in a different format bypasses NCL type safety entirely — the merge operator and contract validation no longer apply. The daemon would need a custom merge algorithm to reconcile the side-file with the source NCL, and agents would need to understand two config representations. NCL's & merge operator is purpose-built for this use case.", + }, + ], + + constraints = [ + { + id = "override-layer-only", + claim = "The daemon must never write to original NCL config source files during a PUT /config/{section} mutation; only {section}.overrides.ncl may be created or modified", + scope = "crates/ontoref-daemon/src/api.rs (config mutation endpoints)", + severity = 'Hard, + check = { tag = 'Grep, pattern = "overrides\\.ncl", paths = ["crates/ontoref-daemon/src/api.rs"], must_be_empty = false }, + rationale = "Immutability of source files is the safety contract the override layer provides. 
Violating it removes the ability to revert by deletion and breaks git diff clarity.", + }, + { + id = "ncl-first-validation", + claim = "Rust config structs must not implement custom field validation; all field constraints live in NCL contracts applied before JSON reaches Rust", + scope = "crates/ontoref-daemon/src/config.rs", + severity = 'Hard, + check = { tag = 'Grep, pattern = "impl.*Validate|#\\[validate", paths = ["crates/ontoref-daemon/src/config.rs"], must_be_empty = true }, + rationale = "Duplicate validation creates two diverging sources of truth. NCL contracts with std.contract.from_validator are the specified validation layer; Rust structs are downstream consumers of pre-validated data.", + }, + { + id = "overrides-meta-required", + claim = "Every {section}.overrides.ncl file written by the daemon must contain a top-level _overrides_meta record with managed_by, created_at, and entries fields", + scope = "crates/ontoref-daemon/src/api.rs (override file generation)", + severity = 'Soft, + check = { tag = 'Grep, pattern = "_overrides_meta", paths = ["crates/ontoref-daemon/src/api.rs"], must_be_empty = false }, + rationale = "Without audit metadata the override file is opaque — no way to surface change history in quickref or trace who changed what and why. 
Soft because deletion-based revert remains available even without metadata.", + }, + ], + + related_adrs = ["adr-002", "adr-007"], + + ontology_check = { + decision_string = "NCL contracts (std.contract.from_validator) are the single validation gate; Rust structs are contract-trusted with #[serde(default)]; config mutation uses override-layer files, never modifying original NCL sources", + invariants_at_risk = ["dag-formalized", "protocol-not-runtime"], + verdict = 'Safe, + }, +} diff --git a/crates/ontoref-daemon/Cargo.toml b/crates/ontoref-daemon/Cargo.toml index 0369b24..f606afb 100644 --- a/crates/ontoref-daemon/Cargo.toml +++ b/crates/ontoref-daemon/Cargo.toml @@ -37,7 +37,8 @@ hostname = { workspace = true } reqwest = { workspace = true } tokio-stream = { version = "0.1", features = ["sync"] } inventory = { workspace = true } -ontoref-derive = { path = "../ontoref-derive" } +ontoref-derive = { path = "../ontoref-derive" } +ontoref-ontology = { path = "../ontoref-ontology", features = ["derive"] } [target.'cfg(unix)'.dependencies] libc = { workspace = true } diff --git a/crates/ontoref-daemon/src/api.rs b/crates/ontoref-daemon/src/api.rs index 80a497b..47cf519 100644 --- a/crates/ontoref-daemon/src/api.rs +++ b/crates/ontoref-daemon/src/api.rs @@ -340,7 +340,29 @@ pub fn router(state: AppState) -> axum::Router { ) // Project registry management. 
.route("/projects", get(projects_list).post(project_add)) - .route("/projects/{slug}", delete(project_delete)); + .route("/projects/{slug}", delete(project_delete)) + // Config surface — read + .route("/projects/{slug}/config", get(project_config)) + .route("/projects/{slug}/config/schema", get(project_config_schema)) + .route( + "/projects/{slug}/config/coherence", + get(project_config_coherence), + ) + .route( + "/projects/{slug}/config/quickref", + get(project_config_quickref), + ) + .route( + "/projects/{slug}/config/{section}", + get(project_config_section), + ) + // Config surface — cross-project comparison (no slug) + .route("/config/cross-project", get(config_cross_project)) + // Config surface — mutation via override layer (admin only) + .route( + "/projects/{slug}/config/{section}", + put(project_config_update), + ); // Session endpoints — gated on ui feature (requires SessionStore). #[cfg(feature = "ui")] @@ -2257,6 +2279,707 @@ async fn project_file_versions( .into_response() } +// ── Config surface endpoints +// ────────────────────────────────────────────────── + +/// Resolve a project context or return 404. +macro_rules! require_project { + ($state:expr, $slug:expr) => { + match $state.registry.get(&$slug) { + Some(ctx) => ctx, + None => { + return ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({"error": format!("project '{}' not registered", $slug)})), + ) + .into_response() + } + } + }; +} + +/// Resolve the config surface for a project or return 404. +macro_rules! 
require_config_surface { + ($ctx:expr, $slug:expr) => { + match &$ctx.config_surface { + Some(s) => s.clone(), + None => { + return ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({"error": format!("project '{}' has no config_surface in manifest.ncl", $slug)})), + ) + .into_response() + } + } + }; +} + +#[ontoref_derive::onto_api( + method = "GET", + path = "/projects/{slug}/config", + description = "Full config export for a registered project (merged with any active overrides)", + auth = "none", + actors = "agent, developer", + params = "slug:string:required:Project slug", + tags = "config" +)] +async fn project_config( + State(state): State, + Path(slug): Path, +) -> impl IntoResponse { + state.touch_activity(); + let ctx = require_project!(state, slug); + let surface = require_config_surface!(ctx, slug); + + let entry = surface.entry_point_path(&ctx.root); + match ctx.cache.export(&entry, ctx.import_path.as_deref()).await { + Ok((json, _)) => Json(json).into_response(), + Err(e) => ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error": e.to_string()})), + ) + .into_response(), + } +} + +#[ontoref_derive::onto_api( + method = "GET", + path = "/projects/{slug}/config/schema", + description = "Config surface schema: sections with descriptions, rationales, contracts, and \ + declared consumers", + auth = "none", + actors = "agent, developer", + params = "slug:string:required:Project slug", + tags = "config" +)] +async fn project_config_schema( + State(state): State, + Path(slug): Path, +) -> impl IntoResponse { + state.touch_activity(); + let ctx = require_project!(state, slug); + let surface = require_config_surface!(ctx, slug); + + let sections: Vec = surface + .sections + .iter() + .map(|s| { + serde_json::json!({ + "id": s.id, + "file": s.file, + "contract": s.contract, + "description": s.description, + "rationale": s.rationale, + "mutable": s.mutable, + "consumers": s.consumers.iter().map(|c| serde_json::json!({ + "id": c.id, + "kind": 
format!("{:?}", c.kind), + "ref": c.reference, + "fields": c.fields, + })).collect::>(), + }) + }) + .collect(); + + Json(serde_json::json!({ + "slug": slug, + "config_root": surface.config_root.display().to_string(), + "entry_point": surface.entry_point, + "kind": format!("{:?}", surface.kind), + "contracts_path": surface.contracts_path, + "sections": sections, + })) + .into_response() +} + +#[ontoref_derive::onto_api( + method = "GET", + path = "/projects/{slug}/config/{section}", + description = "Values for a single config section (from the merged NCL export)", + auth = "none", + actors = "agent, developer", + params = "slug:string:required:Project slug; section:string:required:Section id", + tags = "config" +)] +async fn project_config_section( + State(state): State, + Path((slug, section)): Path<(String, String)>, +) -> impl IntoResponse { + state.touch_activity(); + let ctx = require_project!(state, slug); + let surface = require_config_surface!(ctx, slug); + + if surface.section(§ion).is_none() { + return ( + StatusCode::NOT_FOUND, + Json(serde_json::json!({"error": format!("section '{section}' not declared in config_surface")})), + ) + .into_response(); + } + + let entry = surface.entry_point_path(&ctx.root); + match ctx.cache.export(&entry, ctx.import_path.as_deref()).await { + Ok((json, _)) => { + let val = json + .get(§ion) + .cloned() + .unwrap_or(serde_json::Value::Null); + Json(serde_json::json!({"slug": slug, "section": section, "values": val})) + .into_response() + } + Err(e) => ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(serde_json::json!({"error": e.to_string()})), + ) + .into_response(), + } +} + +#[ontoref_derive::onto_api( + method = "GET", + path = "/projects/{slug}/config/coherence", + description = "Multi-consumer coherence report: unclaimed NCL fields, consumer field \ + mismatches", + auth = "none", + actors = "agent, developer", + params = "slug:string:required:Project slug; section:string:optional:Filter to one section", + tags = 
"config" +)] +async fn project_config_coherence( + State(state): State, + Path(slug): Path, + Query(q): Query>, +) -> impl IntoResponse { + state.touch_activity(); + let ctx = require_project!(state, slug); + let surface = require_config_surface!(ctx, slug); + + let section_filter = q.get("section").map(String::as_str); + + let report = crate::config_coherence::check_project( + &slug, + &surface, + &ctx.root, + &ctx.cache, + ctx.import_path.as_deref(), + section_filter, + ) + .await; + + Json(serde_json::to_value(&report).unwrap_or(serde_json::Value::Null)).into_response() +} + +#[ontoref_derive::onto_api( + method = "GET", + path = "/projects/{slug}/config/quickref", + description = "Generated config documentation with rationales, override history, and \ + coherence status", + auth = "none", + actors = "agent, developer", + params = "slug:string:required:Project slug; section:string:optional:Filter to one section; \ + format:string:optional:Output format (json|markdown)", + tags = "config" +)] +async fn project_config_quickref( + State(state): State, + Path(slug): Path, + Query(q): Query>, +) -> impl IntoResponse { + state.touch_activity(); + let ctx = require_project!(state, slug); + let surface = require_config_surface!(ctx, slug); + + let section_filter = q.get("section").map(String::as_str); + + let quickref = crate::config_coherence::build_quickref( + &slug, + &surface, + &ctx.root, + &ctx.cache, + ctx.import_path.as_deref(), + section_filter, + ) + .await; + + Json(quickref).into_response() +} + +fn index_section_fields( + sec_val: &serde_json::Map, + section_id: &str, + slug: &str, + index: &mut std::collections::BTreeMap<(String, String), Vec<(String, serde_json::Value)>>, +) { + for (field, value) in sec_val { + if field.starts_with("_meta_") || field == "_overrides_meta" { + continue; + } + index + .entry((section_id.to_owned(), field.clone())) + .or_default() + .push((slug.to_owned(), value.clone())); + } +} + +#[ontoref_derive::onto_api( + method = 
"GET", + path = "/config/cross-project", + description = "Compare config surfaces across all registered projects: shared values, \ + conflicts, coverage gaps", + auth = "none", + actors = "agent, developer", + tags = "config" +)] +async fn config_cross_project(State(state): State) -> impl IntoResponse { + state.touch_activity(); + + // Collect all registered projects that have a config_surface. + let candidates: Vec> = state + .registry + .all() + .into_iter() + .filter(|ctx| ctx.config_surface.is_some()) + .collect(); + + // Export each project's full config. NclCache makes repeated calls cheap. + // (section_id, field_path) → Vec<(slug, serde_json::Value)> + let mut field_index: std::collections::BTreeMap< + (String, String), + Vec<(String, serde_json::Value)>, + > = std::collections::BTreeMap::new(); + + let mut project_summaries: Vec = Vec::new(); + + for ctx in &candidates { + let surface = ctx.config_surface.as_ref().unwrap(); + let entry = surface.entry_point_path(&ctx.root); + let export = ctx + .cache + .export(&entry, ctx.import_path.as_deref()) + .await + .ok() + .map(|(j, _)| j); + + let section_ids: Vec = surface.sections.iter().map(|s| s.id.clone()).collect(); + + if let Some(ref full) = export { + for section in &surface.sections { + let Some(sec_val) = full.get(§ion.id).and_then(|v| v.as_object()) else { + continue; + }; + index_section_fields(sec_val, §ion.id, &ctx.slug, &mut field_index); + } + } + + project_summaries.push(serde_json::json!({ + "slug": ctx.slug, + "config_root": surface.config_root.display().to_string(), + "kind": format!("{:?}", surface.kind), + "sections": section_ids, + "export_ok": export.is_some(), + })); + } + + // Shared values: same (section, field) present in ≥2 projects with identical + // value. + let mut shared: Vec = Vec::new(); + // Conflicts: same (section, field) with differing values across projects. + let mut conflicts: Vec = Vec::new(); + // Port collisions: numeric fields named `port` or ending in `_port`. 
+ let mut port_collisions: Vec = Vec::new(); + + for ((section_id, field), entries) in &field_index { + if entries.len() < 2 { + continue; + } + let first_val = &entries[0].1; + let all_same = entries.iter().all(|(_, v)| v == first_val); + + let is_port_field = field == "port" || field.ends_with("_port"); + if is_port_field { + if !all_same { + // Different ports — not necessarily a conflict, but flag them. + port_collisions.push(serde_json::json!({ + "section": section_id, + "field": field, + "values": entries.iter().map(|(slug, v)| serde_json::json!({ "slug": slug, "value": v })).collect::>(), + })); + } + } else if all_same { + shared.push(serde_json::json!({ + "section": section_id, + "field": field, + "value": first_val, + "projects": entries.iter().map(|(s, _)| s).collect::>(), + })); + } else { + conflicts.push(serde_json::json!({ + "section": section_id, + "field": field, + "values": entries.iter().map(|(slug, v)| serde_json::json!({ "slug": slug, "value": v })).collect::>(), + })); + } + } + + // Coverage gaps: section present in some projects but not others. 
+ let all_sections: std::collections::BTreeSet = + field_index.keys().map(|(s, _)| s.clone()).collect(); + let mut coverage_gaps: Vec = Vec::new(); + for section_id in &all_sections { + let present_in: Vec = candidates + .iter() + .filter(|ctx| { + ctx.config_surface + .as_ref() + .map(|s| s.sections.iter().any(|sec| &sec.id == section_id)) + .unwrap_or(false) + }) + .map(|ctx| ctx.slug.clone()) + .collect(); + if present_in.len() < candidates.len() { + let absent_in: Vec = candidates + .iter() + .filter(|ctx| !present_in.contains(&ctx.slug)) + .map(|ctx| ctx.slug.clone()) + .collect(); + coverage_gaps.push(serde_json::json!({ + "section": section_id, + "present_in": present_in, + "absent_in": absent_in, + })); + } + } + + Json(serde_json::json!({ + "projects": project_summaries, + "shared_values": shared, + "conflicts": conflicts, + "port_report": port_collisions, + "coverage_gaps": coverage_gaps, + "total_projects": candidates.len(), + })) + .into_response() +} + +#[derive(Deserialize)] +pub struct ConfigUpdateRequest { + /// JSON object with fields to set in this section. + pub values: serde_json::Value, + /// Reason for the change — written as a comment in the override file. + #[serde(default)] + pub reason: String, + /// When true (default for safety): return the proposed override NCL + /// without writing to disk. + #[serde(default = "default_dry_run")] + pub dry_run: bool, +} + +fn default_dry_run() -> bool { + true +} + +#[ontoref_derive::onto_api( + method = "PUT", + path = "/projects/{slug}/config/{section}", + description = "Mutate a config section via the override layer. 
dry_run=true (default) returns \ + the proposed change without writing.", + auth = "admin", + actors = "agent, developer", + params = "slug:string:required:Project slug; section:string:required:Section id", + tags = "config" +)] +async fn project_config_update( + State(state): State, + headers: axum::http::HeaderMap, + Path((slug, section)): Path<(String, String)>, + Json(req): Json, +) -> impl IntoResponse { + state.touch_activity(); + + let ctx = require_project!(state, slug); + + // Require admin auth if the project has keys. + if ctx.auth_enabled() { + let bearer = headers + .get(axum::http::header::AUTHORIZATION) + .and_then(|v| v.to_str().ok()) + .and_then(|v| v.strip_prefix("Bearer ")) + .map(str::trim) + .filter(|s| !s.is_empty()); + + match bearer { + None => { + return ( + StatusCode::UNAUTHORIZED, + Json(serde_json::json!({"error": "Authorization: Bearer required"})), + ) + .into_response(); + } + Some(password) => match ctx.verify_key(password).map(|m| m.role) { + Some(crate::registry::Role::Admin) => {} + Some(crate::registry::Role::Viewer) => { + return ( + StatusCode::FORBIDDEN, + Json(serde_json::json!({"error": "admin role required to mutate config"})), + ) + .into_response(); + } + None => { + return ( + StatusCode::UNAUTHORIZED, + Json(serde_json::json!({"error": "invalid credentials"})), + ) + .into_response(); + } + }, + } + } + + let surface = require_config_surface!(ctx, slug); + + // TypeDialog projects manage their own write pipeline (form.toml → + // validators → fragments). The NCL override layer is not applicable. + if matches!(surface.kind, crate::registry::ConfigKind::TypeDialog) { + return ( + StatusCode::METHOD_NOT_ALLOWED, + Json(serde_json::json!({ + "error": "TypeDialog config surfaces are not mutable via this endpoint", + "detail": "TypeDialog projects use form.toml + validators + fragments. 
\ + Mutate values through the TypeDialog pipeline directly.", + "kind": "TypeDialog", + })), + ) + .into_response(); + } + + let section_meta = match surface.section(§ion) { + Some(s) => s.clone(), + None => return ( + StatusCode::NOT_FOUND, + Json( + serde_json::json!({"error": format!("section '{section}' not in config_surface")}), + ), + ) + .into_response(), + }; + + if !section_meta.mutable { + return ( + StatusCode::METHOD_NOT_ALLOWED, + Json(serde_json::json!({ + "error": format!("section '{section}' is marked immutable in manifest.ncl"), + "detail": "Set mutable = true in the config_section declaration to enable writes.", + })), + ) + .into_response(); + } + + if ctx.push_only { + // push_only projects never have writable local files. + return generate_override_diff(&surface, §ion, &req, &ctx.root).into_response(); + } + + if req.dry_run { + return generate_override_diff(&surface, §ion, &req, &ctx.root).into_response(); + } + + // Apply the override: write {section}.overrides.ncl, patch entry point, + // validate with nickel export, revert on failure. + match apply_config_override( + &surface, + §ion, + &req, + &ctx.root, + &ctx.cache, + ctx.import_path.as_deref(), + ) + .await + { + Ok(result) => Json(result).into_response(), + Err(e) => ( + StatusCode::UNPROCESSABLE_ENTITY, + Json(serde_json::json!({"error": e.to_string()})), + ) + .into_response(), + } +} + +/// Generate a dry-run diff as a `serde_json::Value` (shared by HTTP and MCP). 
+pub fn generate_override_diff_value( + surface: &crate::registry::ConfigSurface, + section: &str, + req: &ConfigUpdateRequest, + project_root: &std::path::Path, +) -> serde_json::Value { + let overrides_dir = surface.resolved_overrides_dir(); + let override_path = project_root + .join(overrides_dir) + .join(format!("{section}.overrides.ncl")); + let ncl_content = render_override_ncl(section, &req.values, &req.reason, project_root); + serde_json::json!({ + "dry_run": true, + "section": section, + "override_file": override_path.display().to_string(), + "proposed_ncl": ncl_content, + "values": req.values, + }) +} + +/// Generate a dry-run HTTP response showing what the override file would look +/// like. +fn generate_override_diff( + surface: &crate::registry::ConfigSurface, + section: &str, + req: &ConfigUpdateRequest, + project_root: &std::path::Path, +) -> impl IntoResponse { + Json(generate_override_diff_value( + surface, + section, + req, + project_root, + )) +} + +/// Render the NCL content for an override file. +fn render_override_ncl( + section: &str, + values: &serde_json::Value, + reason: &str, + _project_root: &std::path::Path, +) -> String { + let ts = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0); + + let mut lines = vec![ + format!("# {section}.overrides.ncl — generated by ontoref"), + "# DO NOT edit manually — managed by ontoref config surface.".to_owned(), + "# To revert: delete this file and remove the import from the entry point.".to_owned(), + "{".to_owned(), + format!(" _overrides_meta = {{"), + format!(" managed_by = \"ontoref\","), + format!(" updated_at = {ts},"), + ]; + + if !reason.is_empty() { + lines.push(format!(" reason = {reason:?},")); + } + lines.push(" },".to_owned()); + + // Emit each key as a top-level field override. 
+ if let Some(obj) = values.as_object() { + for (key, val) in obj { + let ncl_val = json_to_ncl_literal(val); + lines.push(format!(" {section}.{key} = {ncl_val},")); + } + } + + lines.push("}".to_owned()); + lines.join("\n") +} + +/// Best-effort conversion of a JSON value to a NCL literal. +/// Supports primitives, arrays of primitives, and nested objects. +fn json_to_ncl_literal(val: &serde_json::Value) -> String { + match val { + serde_json::Value::Null => "null".to_owned(), + serde_json::Value::Bool(b) => b.to_string(), + serde_json::Value::Number(n) => n.to_string(), + serde_json::Value::String(s) => format!("{s:?}"), + serde_json::Value::Array(arr) => { + let items: Vec = arr.iter().map(json_to_ncl_literal).collect(); + format!("[{}]", items.join(", ")) + } + serde_json::Value::Object(obj) => { + let fields: Vec = obj + .iter() + .map(|(k, v)| format!(" {k} = {}", json_to_ncl_literal(v))) + .collect(); + format!("{{\n{}\n}}", fields.join(",\n")) + } + } +} + +/// Write the override file, patch the entry point, and validate. +/// Reverts on validation failure. +pub async fn apply_config_override( + surface: &crate::registry::ConfigSurface, + section: &str, + req: &ConfigUpdateRequest, + project_root: &std::path::Path, + cache: &crate::cache::NclCache, + import_path: Option<&str>, +) -> anyhow::Result { + use std::io::Write; + + let overrides_dir = project_root.join(surface.resolved_overrides_dir()); + let override_file = overrides_dir.join(format!("{section}.overrides.ncl")); + let entry_point = surface.entry_point_path(project_root); + + let ncl_content = render_override_ncl(section, &req.values, &req.reason, project_root); + + // Backup old override file if it exists. + let backup = override_file.with_extension("ncl.bak"); + if override_file.exists() { + std::fs::copy(&override_file, &backup)?; + } + + // Write the override file. 
+ { + let mut f = std::fs::File::create(&override_file)?; + f.write_all(ncl_content.as_bytes())?; + } + + // Patch entry point to import the override if not already present. + let override_import = format!("(import \"./{section}.overrides.ncl\")"); + let entry_original = std::fs::read_to_string(&entry_point)?; + let patched = if !entry_original.contains(&override_import) { + // Append import at the end — NCL merge chain, override wins. + Some(format!("{entry_original}\n& {override_import}\n")) + } else { + None + }; + + if let Some(ref new_content) = patched { + std::fs::write(&entry_point, new_content)?; + } + + // Validate by re-exporting the entry point. + cache.invalidate_file(&override_file); + cache.invalidate_file(&entry_point); + + match cache.export(&entry_point, import_path).await { + Ok(_) => { + // Clean up backup on success. + let _ = std::fs::remove_file(&backup); + Ok(serde_json::json!({ + "applied": true, + "section": section, + "override_file": override_file.display().to_string(), + "values": req.values, + })) + } + Err(e) => { + // Revert: restore backup or remove the override file. + if backup.exists() { + let _ = std::fs::copy(&backup, &override_file); + let _ = std::fs::remove_file(&backup); + } else { + let _ = std::fs::remove_file(&override_file); + } + // Restore entry point if we patched it. + if patched.is_some() { + let _ = std::fs::write(&entry_point, &entry_original); + } + cache.invalidate_file(&override_file); + cache.invalidate_file(&entry_point); + anyhow::bail!("nickel export validation failed after override: {e}") + } + } +} + /// Exchange a key for a session token. /// /// Accepts project keys (looked up by slug) or the daemon admin password. 
@@ -2615,6 +3338,7 @@ mod tests { stale_actor_timeout: 300, max_notifications: 100, ack_required: vec![], + config_surface: None, }); let ctx = Arc::new(ctx); let actors = Arc::clone(&ctx.actors); diff --git a/crates/ontoref-daemon/src/config.rs b/crates/ontoref-daemon/src/config.rs new file mode 100644 index 0000000..e9b502a --- /dev/null +++ b/crates/ontoref-daemon/src/config.rs @@ -0,0 +1,220 @@ +/// Typed representation of `.ontoref/config.ncl`. +/// +/// Every section that the daemon reads from its own config file is captured +/// here with `#[derive(ConfigFields)]` so that `inventory` registers the +/// consumed fields at link time. The coherence endpoint can then compare +/// these registrations against the live NCL export without needing a +/// hand-maintained `fields` list in `manifest.ncl`. +/// +/// All fields carry `#[serde(default)]` — a project may omit any section and +/// the daemon degrades gracefully rather than failing to start. +use ontoref_ontology::ConfigFields; +use serde::Deserialize; + +/// Full deserialized view of `.ontoref/config.ncl`. +#[derive(Debug, Deserialize, Default)] +pub struct DaemonNclConfig { + #[serde(default)] + pub nickel_import_paths: Vec, + #[serde(default)] + pub ui: UiConfig, + #[serde(default)] + pub log: LogConfig, + #[serde(default)] + pub mode_run: ModeRunConfig, + #[serde(default)] + pub nats_events: NatsEventsConfig, + #[serde(default)] + pub actor_init: Vec, + #[serde(default)] + pub quick_actions: Vec, + #[serde(default)] + pub daemon: DaemonRuntimeConfig, + #[cfg(feature = "db")] + #[serde(default)] + pub db: DbConfig, +} + +/// `ui` section — template and asset paths, optional TLS cert overrides. 
/// `ui` section fields consumed by the daemon.
///
/// All fields default to the empty string; an empty value means "not
/// configured" and the corresponding CLI default stays in effect (see
/// `apply_ui_config` in main.rs, which skips empty strings).
#[derive(Debug, Deserialize, Default, ConfigFields)]
#[config_section(id = "ui", ncl_file = ".ontoref/config.ncl")]
pub struct UiConfig {
    // Template directory; resolved via resolve_asset_dir relative to the
    // project root when not absolute.
    #[serde(default)]
    pub templates_dir: String,
    // Static asset directory, resolved the same way as templates_dir.
    #[serde(default)]
    pub public_dir: String,
    // TLS certificate path, joined onto the project root (tls feature only).
    #[serde(default)]
    pub tls_cert: String,
    // TLS private-key path, joined onto the project root (tls feature only).
    #[serde(default)]
    pub tls_key: String,
    // Logo asset paths — presumably light/dark theme variants; only passed
    // through to the UI layer (not read in the code visible here).
    #[serde(default)]
    pub logo: String,
    #[serde(default)]
    pub logo_dark: String,
}

/// `log` section — structured logging policy.
///
/// Unlike `UiConfig`, every field has a non-trivial default (see the
/// `default_*` functions below), so both the serde path and the
/// hand-written `Default` impl must agree.
#[derive(Debug, Deserialize, ConfigFields)]
#[config_section(id = "log", ncl_file = ".ontoref/config.ncl")]
pub struct LogConfig {
    // Log level name; defaults to "info".
    #[serde(default = "default_log_level")]
    pub level: String,
    // Directory for active log files; defaults to "logs".
    #[serde(default = "default_log_path")]
    pub path: String,
    // Rotation policy name; defaults to "daily".
    #[serde(default = "default_rotation")]
    pub rotation: String,
    // Whether rotated files are compressed; defaults to false.
    #[serde(default)]
    pub compress: bool,
    // Directory rotated files are archived into; defaults to "logs-archive".
    #[serde(default = "default_log_archive")]
    pub archive: String,
    // How many rotated files to keep; defaults to 7.
    #[serde(default = "default_max_files")]
    pub max_files: u32,
}

// Manual Default (rather than derive) so that a wholly missing `log`
// section yields the same values as an empty `log = {}` section — each
// field mirrors its `#[serde(default = ...)]` function.
impl Default for LogConfig {
    fn default() -> Self {
        Self {
            level: default_log_level(),
            path: default_log_path(),
            rotation: default_rotation(),
            compress: false,
            archive: default_log_archive(),
            max_files: default_max_files(),
        }
    }
}

// Serde default providers for LogConfig. Kept as free functions because
// `#[serde(default = "...")]` requires a path to a zero-arg function.
fn default_log_level() -> String {
    "info".into()
}
fn default_log_path() -> String {
    "logs".into()
}
fn default_rotation() -> String {
    "daily".into()
}
fn default_log_archive() -> String {
    "logs-archive".into()
}
fn default_max_files() -> u32 {
    7
}

/// `nats_events` section — NATS JetStream integration.
#[derive(Debug, Deserialize, Default, ConfigFields)]
#[config_section(id = "nats_events", ncl_file = ".ontoref/config.ncl")]
pub struct NatsEventsConfig {
    // Master switch; when false the NATS publisher is never connected.
    #[serde(default)]
    pub enabled: bool,
    // NATS server URL; defaults to the local default port (see
    // default_nats_url below).
    #[serde(default = "default_nats_url")]
    pub url: String,
    // Event names to publish / subscribe to — presumably NATS subjects;
    // TODO confirm semantics against the nats module.
    #[serde(default)]
    pub emit: Vec<String>,
    #[serde(default)]
    pub subscribe: Vec<String>,
    // Directory containing event handler scripts.
    #[serde(default)]
    pub handlers_dir: String,
    // Optional NKey seed for signing outgoing messages.
    #[serde(default)]
    pub nkey_seed: Option<String>,
    // When true, incoming messages must carry a valid signature.
    #[serde(default)]
    pub require_signed_messages: bool,
    // Public NKeys accepted as message signers.
    #[serde(default)]
    pub trusted_nkeys: Vec<String>,
    // Path to a JetStream streams configuration file.
    #[serde(default)]
    pub streams_config: String,
}

// Serde default for NatsEventsConfig::url — the standard local NATS port.
fn default_nats_url() -> String {
    "nats://localhost:4222".into()
}

/// `mode_run` section — per-actor mode execution ACL.
/// Rules are evaluated against (mode_id, actor) pairs; see ModeRunRule.
#[derive(Debug, Deserialize, Default, ConfigFields)]
#[config_section(id = "mode_run", ncl_file = ".ontoref/config.ncl")]
pub struct ModeRunConfig {
    #[serde(default)]
    pub rules: Vec<ModeRunRule>,
}

/// One ACL rule: when `when` matches, `allow` decides, `reason` explains.
#[derive(Debug, Deserialize, Default)]
pub struct ModeRunRule {
    #[serde(default)]
    pub when: ModeRunWhen,
    #[serde(default)]
    pub allow: bool,
    #[serde(default)]
    pub reason: String,
}

/// Match criteria for a ModeRunRule. A `None` field is a wildcard —
/// presumably "matches any"; TODO confirm against the rule evaluator.
#[derive(Debug, Deserialize, Default)]
pub struct ModeRunWhen {
    #[serde(default)]
    pub mode_id: Option<String>,
    #[serde(default)]
    pub actor: Option<String>,
}

/// One entry in the `actor_init` array — an actor/mode pair started at
/// daemon boot, optionally auto-run.
#[derive(Debug, Deserialize, Default)]
pub struct ActorInit {
    #[serde(default)]
    pub actor: String,
    #[serde(default)]
    pub mode: String,
    #[serde(default)]
    pub auto_run: bool,
}

/// One entry in the `quick_actions` array — a UI shortcut that launches
/// `mode` for the listed `actors`.
#[derive(Debug, Deserialize, Default)]
pub struct QuickAction {
    #[serde(default)]
    pub id: String,
    #[serde(default)]
    pub label: String,
    #[serde(default)]
    pub icon: String,
    #[serde(default)]
    pub category: String,
    #[serde(default)]
    pub mode: String,
    #[serde(default)]
    pub actors: Vec<String>,
}

/// `daemon` section — overrides for CLI defaults set at startup.
/// Every field is `Option`/empty-default: `None` (or empty vec) means
/// "keep the CLI value" — see `load_config_overrides` in main.rs, which
/// only assigns when `Some`/non-empty.
///
/// NOTE(review): the collapsed diff stripped the generic parameters;
/// integer widths below are reconstructed to match the corresponding
/// `Cli` fields (`port` is assigned straight to `cli.port`) — confirm
/// against main.rs.
#[derive(Debug, Deserialize, Default, ConfigFields)]
#[config_section(id = "daemon", ncl_file = ".ontoref/config.ncl")]
pub struct DaemonRuntimeConfig {
    #[serde(default)]
    pub port: Option<u16>,
    #[serde(default)]
    pub idle_timeout: Option<u64>,
    #[serde(default)]
    pub invalidation_interval: Option<u64>,
    #[serde(default)]
    pub actor_sweep_interval: Option<u64>,
    #[serde(default)]
    pub actor_stale_timeout: Option<u64>,
    #[serde(default)]
    pub max_notifications: Option<usize>,
    // Notification kinds requiring explicit acknowledgement; empty list
    // leaves the CLI's value untouched.
    #[serde(default)]
    pub notification_ack_required: Vec<String>,
}

/// `db` section — SurrealDB connection (feature-gated).
///
/// Credentials are only applied when `enabled` is true (both main.rs
/// consumers check it), so a populated-but-disabled section is inert.
#[cfg(feature = "db")]
#[derive(Debug, Deserialize, Default, ConfigFields)]
#[config_section(id = "db", ncl_file = ".ontoref/config.ncl")]
pub struct DbConfig {
    #[serde(default)]
    pub enabled: bool,
    #[serde(default)]
    pub url: String,
    #[serde(default)]
    pub namespace: String,
    #[serde(default)]
    pub username: String,
    #[serde(default)]
    pub password: String,
}
diff --git a/crates/ontoref-daemon/src/config_coherence.rs b/crates/ontoref-daemon/src/config_coherence.rs
new file mode 100644
index 0000000..0a5e682
--- /dev/null
+++ b/crates/ontoref-daemon/src/config_coherence.rs
@@ -0,0 +1,375 @@
//! Multi-consumer config coherence verification.
//!
//! For each section in a project's `config_surface`, this module compares the
//! fields present in the exported NCL JSON against the fields declared by each
//! consumer. A field absent from all consumers is "unclaimed" — it exists in
//! the config but nothing reads it.
//!
//! Coherence is checked from two directions:
//! - **NCL-only fields**: present in NCL, not claimed by any consumer.
//! - **Consumer-only fields**: a consumer declares a field that doesn't exist
//!   in the NCL export. The consumer either references a renamed/removed field
//!   or the NCL contract is incomplete.
use std::collections::{BTreeMap, BTreeSet};
use std::path::Path;

use serde::{Deserialize, Serialize};
use tracing::warn;

use crate::cache::NclCache;
use crate::registry::{ConfigSection, ConfigSurface};

/// Merge optional NCL `_meta_*` record fields into a quickref section object.
/// Only sets `rationale` when the section object currently has an empty value.
///
/// NOTE(review): `obj["rationale"]` uses `serde_json::Map`'s panicking
/// Index impl — safe for the one caller (`build_quickref` always inserts
/// a "rationale" key before calling this), but not for arbitrary maps;
/// confirm before reusing elsewhere.
fn merge_meta_into_section(
    obj: &mut serde_json::Map<String, serde_json::Value>,
    meta_val: &serde_json::Value,
) {
    // Manifest rationale wins over the meta record; only fill in when empty.
    if let Some(rationale) = meta_val.get("rationale").and_then(|v| v.as_str()) {
        if obj["rationale"].as_str().unwrap_or("").is_empty() {
            obj["rationale"] = serde_json::Value::String(rationale.to_owned());
        }
    }
    // The remaining meta fields are copied unconditionally when present.
    if let Some(alt) = meta_val.get("alternatives_rejected") {
        obj.insert("alternatives_rejected".to_owned(), alt.clone());
    }
    if let Some(constraints) = meta_val.get("constraints") {
        obj.insert("constraints".to_owned(), constraints.clone());
    }
    if let Some(see_also) = meta_val.get("see_also") {
        obj.insert("see_also".to_owned(), see_also.clone());
    }
}

/// Status of a section's coherence check.
/// Ordered by severity; `ProjectCoherenceReport::overall_status` reports
/// the worst status found across sections.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum CoherenceStatus {
    /// All fields are claimed by at least one consumer, and no consumer
    /// references a field absent from the NCL export.
    Ok,
    /// Some fields are unclaimed or a consumer references missing fields —
    /// worth reviewing but not necessarily a bug.
    Warning,
    /// The NCL export failed; coherence could not be checked.
    Error,
}

/// Per-consumer coherence result within a section.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsumerCoherenceReport {
    pub consumer_id: String,
    // Debug-formatted consumer kind (e.g. the enum variant name).
    pub kind: String,
    /// Fields the consumer declared but are absent in the NCL export.
    pub missing_in_ncl: Vec<String>,
    /// Fields in the NCL export that this consumer doesn't declare.
    /// Non-empty when the consumer has an explicit field list (not "reads
    /// all").
    pub extra_in_ncl: Vec<String>,
}

/// Full coherence report for one config section.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SectionCoherenceReport {
    pub section_id: String,
    /// All top-level keys in the NCL export for this section (excluding
    /// `_meta_*` and `_overrides_meta` keys).
    pub ncl_fields: Vec<String>,
    pub consumers: Vec<ConsumerCoherenceReport>,
    /// Fields present in NCL but claimed by no consumer.
    pub unclaimed_fields: Vec<String>,
    pub status: CoherenceStatus,
}

/// Coherence report for an entire project.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectCoherenceReport {
    pub project_slug: String,
    pub sections: Vec<SectionCoherenceReport>,
    // False would indicate the project declares no config_surface;
    // check_project always sets true (callers handle the no-surface case
    // before calling in).
    pub has_config_surface: bool,
}

impl ProjectCoherenceReport {
    /// Overall status: worst status across all sections
    /// (Error > Warning > Ok).
    pub fn overall_status(&self) -> CoherenceStatus {
        if self
            .sections
            .iter()
            .any(|s| s.status == CoherenceStatus::Error)
        {
            CoherenceStatus::Error
        } else if self
            .sections
            .iter()
            .any(|s| s.status == CoherenceStatus::Warning)
        {
            CoherenceStatus::Warning
        } else {
            CoherenceStatus::Ok
        }
    }
}

/// Run coherence check for all sections of a project's config surface.
///
/// `section_filter` — if `Some`, only check this section id.
pub async fn check_project(
    slug: &str,
    surface: &ConfigSurface,
    project_root: &Path,
    cache: &NclCache,
    import_path: Option<&str>,
    section_filter: Option<&str>,
) -> ProjectCoherenceReport {
    // No filter means "all sections"; a filter selects exactly one id.
    let sections_to_check: Vec<&ConfigSection> = surface
        .sections
        .iter()
        .filter(|s| section_filter.is_none_or(|f| s.id == f))
        .collect();

    let mut section_reports = Vec::with_capacity(sections_to_check.len());

    // Sections are checked sequentially; each check runs a nickel export
    // through the cache.
    for section in sections_to_check {
        let report = check_section(section, surface, project_root, cache, import_path).await;
        section_reports.push(report);
    }

    ProjectCoherenceReport {
        project_slug: slug.to_owned(),
        sections: section_reports,
        has_config_surface: true,
    }
}

/// Check one section: export its NCL file, then diff the exported field
/// set against every declared consumer's field list.
///
/// Returns `CoherenceStatus::Error` (with empty field lists) when the
/// export itself fails, `Warning` when anything is unclaimed or missing,
/// `Ok` otherwise.
async fn check_section(
    section: &ConfigSection,
    surface: &ConfigSurface,
    project_root: &Path,
    cache: &NclCache,
    import_path: Option<&str>,
) -> SectionCoherenceReport {
    let ncl_path = project_root.join(&surface.config_root).join(&section.file);

    let ncl_export = cache.export(&ncl_path, import_path).await;
    let (json, _) = match ncl_export {
        Ok(pair) => pair,
        Err(e) => {
            // Export failure is reported, not propagated — one broken
            // section must not hide the others' reports.
            warn!(
                section = %section.id,
                path = %ncl_path.display(),
                error = %e,
                "config coherence: nickel export failed"
            );
            return SectionCoherenceReport {
                section_id: section.id.clone(),
                ncl_fields: vec![],
                consumers: vec![],
                unclaimed_fields: vec![],
                status: CoherenceStatus::Error,
            };
        }
    };

    // Collect top-level keys for this section from the export.
    // A section NCL file may export { server = { ... }, _meta_server = {...} }
    // We want the section key matching section.id; if the whole file is the
    // section value, use all keys.
    let ncl_fields: BTreeSet<String> = extract_section_fields(&json, &section.id);

    // Build consumer reports, accumulating the union of claimed fields.
    let mut all_claimed: BTreeSet<String> = BTreeSet::new();
    let mut consumer_reports = Vec::with_capacity(section.consumers.len());

    for consumer in &section.consumers {
        let consumer_fields: BTreeSet<String> = if consumer.fields.is_empty() {
            // Empty field list means the consumer claims all NCL fields.
            all_claimed.extend(ncl_fields.iter().cloned());
            consumer_reports.push(ConsumerCoherenceReport {
                consumer_id: consumer.id.clone(),
                kind: format!("{:?}", consumer.kind),
                missing_in_ncl: vec![],
                extra_in_ncl: vec![],
            });
            continue;
        } else {
            consumer.fields.iter().cloned().collect()
        };

        all_claimed.extend(consumer_fields.iter().cloned());

        // Both directions of the diff: declared-but-absent, and
        // present-but-undeclared (for this one consumer).
        let missing_in_ncl: Vec<String> =
            consumer_fields.difference(&ncl_fields).cloned().collect();
        let extra_in_ncl: Vec<String> = ncl_fields.difference(&consumer_fields).cloned().collect();

        consumer_reports.push(ConsumerCoherenceReport {
            consumer_id: consumer.id.clone(),
            kind: format!("{:?}", consumer.kind),
            missing_in_ncl,
            extra_in_ncl,
        });
    }

    // Unclaimed = in NCL but in no consumer's (possibly implicit) claim set.
    let unclaimed_fields: Vec<String> = ncl_fields.difference(&all_claimed).cloned().collect();

    let has_missing = consumer_reports
        .iter()
        .any(|c| !c.missing_in_ncl.is_empty());
    let status = if !unclaimed_fields.is_empty() || has_missing {
        CoherenceStatus::Warning
    } else {
        CoherenceStatus::Ok
    };

    SectionCoherenceReport {
        section_id: section.id.clone(),
        ncl_fields: ncl_fields.into_iter().collect(),
        consumers: consumer_reports,
        unclaimed_fields,
        status,
    }
}

/// Extract the field names for a section from the NCL export JSON.
///
/// If the JSON has a top-level key matching `section_id`, returns the keys of
/// that sub-object. Otherwise treats the entire top-level object as the section
/// fields. Strips `_meta_*` and `_overrides_meta` keys from the result.
+fn extract_section_fields(json: &serde_json::Value, section_id: &str) -> BTreeSet { + let obj = if let Some(sub) = json.get(section_id).and_then(|v| v.as_object()) { + sub.keys().cloned().collect() + } else if let Some(top) = json.as_object() { + top.keys() + .filter(|k| !k.starts_with("_meta_") && *k != "_overrides_meta") + .cloned() + .collect() + } else { + BTreeSet::new() + }; + obj +} + +/// Generate a quickref document for a project's config surface. +/// +/// Combines: NCL export values + manifest metadata (descriptions, rationales, +/// consumers) + override history from `_overrides_meta` + coherence status. +pub async fn build_quickref( + slug: &str, + surface: &ConfigSurface, + project_root: &Path, + cache: &NclCache, + import_path: Option<&str>, + section_filter: Option<&str>, +) -> serde_json::Value { + let entry_point = surface.entry_point_path(project_root); + let full_export = cache.export(&entry_point, import_path).await.ok(); + + let coherence = check_project( + slug, + surface, + project_root, + cache, + import_path, + section_filter, + ) + .await; + + let coherence_by_id: BTreeMap = coherence + .sections + .iter() + .map(|s| (s.section_id.clone(), s)) + .collect(); + + let sections: Vec = surface + .sections + .iter() + .filter(|s| section_filter.is_none_or(|f| s.id == f)) + .map(|section| { + let current_values = full_export + .as_ref() + .and_then(|(json, _)| json.get(§ion.id)) + .cloned() + .unwrap_or(serde_json::Value::Null); + + // Extract _meta_{section} from the section's own NCL file. + let meta_key = format!("_meta_{}", section.id); + let section_ncl_path = + project_root.join(&surface.config_root).join(§ion.file); + let meta = tokio::task::block_in_place(|| { + // Use a sync export via the cached path — avoids async recursion. 
+ std::process::Command::new("nickel") + .args(["export", "--format", "json"]) + .arg(§ion_ncl_path) + .current_dir(project_root) + .output() + .ok() + .and_then(|o| serde_json::from_slice::(&o.stdout).ok()) + .and_then(|j| j.get(&meta_key).cloned()) + }); + + // Extract override history from _overrides_meta if present. + let overrides = current_values + .as_object() + .and(full_export.as_ref()) + .and_then(|(j, _)| { + j.get("_overrides_meta") + .and_then(|m| m.get("entries")) + .cloned() + }) + .unwrap_or(serde_json::Value::Array(vec![])); + + let coh = coherence_by_id.get(§ion.id); + let coherence_summary = serde_json::json!({ + "unclaimed_fields": coh.map(|c| c.unclaimed_fields.as_slice()).unwrap_or(&[]), + "status": coh.map(|c| format!("{:?}", c.status)).unwrap_or_else(|| "unknown".into()), + }); + + let consumers: Vec = section + .consumers + .iter() + .map(|c| { + serde_json::json!({ + "id": c.id, + "kind": format!("{:?}", c.kind), + "ref": c.reference, + "fields": c.fields, + }) + }) + .collect(); + + let mut s = serde_json::json!({ + "id": section.id, + "file": section.file, + "mutable": section.mutable, + "description": section.description, + "rationale": section.rationale, + "contract": section.contract, + "current_values": current_values, + "overrides": overrides, + "consumers": consumers, + "coherence": coherence_summary, + }); + + if let Some(meta_val) = meta { + if let Some(obj) = s.as_object_mut() { + merge_meta_into_section(obj, &meta_val); + } + } + + s + }) + .collect(); + + serde_json::json!({ + "project": slug, + "generated_at": std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .map(|d| d.as_secs()) + .unwrap_or(0), + "config_root": surface.config_root.display().to_string(), + "entry_point": surface.entry_point, + "kind": format!("{:?}", surface.kind), + "sections": sections, + "overall_coherence": format!("{:?}", coherence.overall_status()), + }) +} diff --git a/crates/ontoref-daemon/src/lib.rs 
b/crates/ontoref-daemon/src/lib.rs index 63ebf7c..062bcd0 100644 --- a/crates/ontoref-daemon/src/lib.rs +++ b/crates/ontoref-daemon/src/lib.rs @@ -2,6 +2,8 @@ pub mod actors; pub mod api; pub mod api_catalog; pub mod cache; +pub mod config; +pub mod config_coherence; pub mod error; pub mod federation; #[cfg(feature = "mcp")] diff --git a/crates/ontoref-daemon/src/main.rs b/crates/ontoref-daemon/src/main.rs index df034a6..1267c11 100644 --- a/crates/ontoref-daemon/src/main.rs +++ b/crates/ontoref-daemon/src/main.rs @@ -16,14 +16,11 @@ use tracing::{error, info, warn}; /// Read and apply bootstrap config from stdin (ADR-004: NCL pipe bootstrap). /// -/// Reads all of stdin as JSON, applies top-level values to CLI defaults, then -/// redirects stdin to /dev/null so the daemon's event loop does not block on -/// it. Returns the parsed JSON for downstream consumers (e.g. NATS init). -/// Aborts with exit(1) if stdin is not a pipe or JSON is invalid. -/// Read and apply bootstrap config from stdin (ADR-004: NCL pipe bootstrap). -/// -/// Returns the full parsed JSON. Callers extract feature-gated sections (e.g. -/// projects) after this returns. +/// Reads all of stdin as JSON, deserializes into `DaemonNclConfig` to apply +/// typed values to CLI defaults, then redirects stdin to /dev/null. Returns +/// the raw JSON so the caller can extract service-mode fields (`projects`) +/// that are not part of `DaemonNclConfig`. Aborts with exit(1) on parse +/// errors. 
fn apply_stdin_config(cli: &mut Cli) -> serde_json::Value { use std::io::{IsTerminal, Read}; @@ -49,48 +46,31 @@ fn apply_stdin_config(cli: &mut Cli) -> serde_json::Value { } }; - // daemon port - if let Some(port) = json - .get("daemon") - .and_then(|d| d.get("port")) - .and_then(|p| p.as_u64()) - { - cli.port = port as u16; + let ncl: ontoref_daemon::config::DaemonNclConfig = + serde_json::from_value(json.clone()).unwrap_or_default(); + + if let Some(port) = ncl.daemon.port { + cli.port = port; } - // db credentials — only applied when enabled = true #[cfg(feature = "db")] - if let Some(db) = json.get("db").and_then(|d| d.as_object()) { - let db_enabled = db.get("enabled").and_then(|e| e.as_bool()).unwrap_or(false); - if db_enabled && cli.db_url.is_none() { - if let Some(url) = db.get("url").and_then(|u| u.as_str()) { - if !url.is_empty() { - cli.db_url = Some(url.to_string()); - } - } + if ncl.db.enabled { + if cli.db_url.is_none() && !ncl.db.url.is_empty() { + cli.db_url = Some(ncl.db.url.clone()); } - if cli.db_namespace.is_none() { - if let Some(ns) = db.get("namespace").and_then(|n| n.as_str()) { - if !ns.is_empty() { - cli.db_namespace = Some(ns.to_string()); - } - } + if cli.db_namespace.is_none() && !ncl.db.namespace.is_empty() { + cli.db_namespace = Some(ncl.db.namespace.clone()); } - if let Some(user) = db.get("username").and_then(|u| u.as_str()) { - if !user.is_empty() { - cli.db_username = user.to_string(); - } + if !ncl.db.username.is_empty() { + cli.db_username = ncl.db.username.clone(); } - if let Some(pass) = db.get("password").and_then(|p| p.as_str()) { - if !pass.is_empty() { - cli.db_password = pass.to_string(); - } + if !ncl.db.password.is_empty() { + cli.db_password = ncl.db.password.clone(); } } - // ui paths #[cfg(feature = "ui")] - apply_ui_config(cli, &json); + apply_ui_config(cli, &ncl.ui); tracing::info!("config loaded from stdin (ADR-004 NCL pipe bootstrap)"); @@ -126,8 +106,13 @@ fn run_nickel_config( } /// Load daemon config from 
.ontoref/config.ncl and override CLI defaults. -/// Returns (NICKEL_IMPORT_PATH, parsed config JSON) — both optional. -fn load_config_overrides(cli: &mut Cli) -> (Option, Option) { +/// Returns (NICKEL_IMPORT_PATH, typed config) — both optional. +fn load_config_overrides( + cli: &mut Cli, +) -> ( + Option, + Option, +) { let config_path = cli.project_root.join(".ontoref").join("config.ncl"); if !config_path.exists() { return (None, None); @@ -154,66 +139,50 @@ fn load_config_overrides(cli: &mut Cli) -> (Option, Option v, + Err(e) => { + warn!(error = %e, "config.ncl deserialization failed — using defaults"); + return (None, None); + } + }; + + if let Some(port) = ncl.daemon.port { + cli.port = port; + } + if let Some(timeout) = ncl.daemon.idle_timeout { + cli.idle_timeout = timeout; + } + if let Some(interval) = ncl.daemon.invalidation_interval { + cli.invalidation_interval = interval; + } + if let Some(sweep) = ncl.daemon.actor_sweep_interval { + cli.actor_sweep_interval = sweep; + } + if let Some(stale) = ncl.daemon.actor_stale_timeout { + cli.actor_stale_timeout = stale; + } + if let Some(max) = ncl.daemon.max_notifications { + cli.max_notifications = max; + } + if !ncl.daemon.notification_ack_required.is_empty() { + cli.notification_ack_required = ncl.daemon.notification_ack_required.clone(); } - // Extract db config — only when enabled = true #[cfg(feature = "db")] - if let Some(db) = config_json.get("db").and_then(|d| d.as_object()) { - let db_enabled = db.get("enabled").and_then(|e| e.as_bool()).unwrap_or(false); - if db_enabled { - cli.db_url = db - .get("url") - .and_then(|u| u.as_str()) - .filter(|s| !s.is_empty()) - .map(str::to_string); - cli.db_namespace = db - .get("namespace") - .and_then(|n| n.as_str()) - .filter(|s| !s.is_empty()) - .map(str::to_string); - if let Some(user) = db - .get("username") - .and_then(|u| u.as_str()) - .filter(|s| !s.is_empty()) - { - cli.db_username = user.to_string(); - } - if let Some(pass) = db - .get("password") - 
.and_then(|p| p.as_str()) - .filter(|s| !s.is_empty()) - { - cli.db_password = pass.to_string(); - } + if ncl.db.enabled { + if cli.db_url.is_none() && !ncl.db.url.is_empty() { + cli.db_url = Some(ncl.db.url.clone()); + } + if cli.db_namespace.is_none() && !ncl.db.namespace.is_empty() { + cli.db_namespace = Some(ncl.db.namespace.clone()); + } + if !ncl.db.username.is_empty() { + cli.db_username = ncl.db.username.clone(); + } + if !ncl.db.password.is_empty() { + cli.db_password = ncl.db.password.clone(); } } @@ -232,35 +201,33 @@ fn load_config_overrides(cli: &mut Cli) -> (Option, Option>() - .join(":") - }) - .filter(|s| !s.is_empty()); + let import_path = { + let joined = ncl + .nickel_import_paths + .iter() + .map(|p| { + let candidate = std::path::Path::new(p.as_str()); + if candidate.is_absolute() { + p.clone() + } else { + abs_root.join(candidate).display().to_string() + } + }) + .collect::>() + .join(":"); + if joined.is_empty() { + None + } else { + Some(joined) + } + }; - (import_path, Some(config_json)) + (import_path, Some(ncl)) } #[derive(Parser)] @@ -498,16 +465,22 @@ async fn main() { // Bootstrap config from stdin pipe (ADR-004). // When --config-stdin is set the stdin JSON is the authoritative config; // the project .ontoref/config.ncl is not read. - let (nickel_import_path, loaded_config) = if cli.config_stdin { + // loaded_ncl_config is consumed by feature-gated blocks (nats, ui, db); + // the binding is intentionally unused when all three features are off. + #[allow(unused_variables)] + let (nickel_import_path, loaded_ncl_config, stdin_raw) = if cli.config_stdin { let json = apply_stdin_config(&mut cli); - (None, Some(json)) + let ncl = + serde_json::from_value::(json.clone()).ok(); + (None, ncl, Some(json)) } else { - load_config_overrides(&mut cli) + let (ip, ncl) = load_config_overrides(&mut cli); + (ip, ncl, None) }; // Extract registered projects from the stdin config (service mode). 
let stdin_projects: Vec = if cli.config_stdin { - loaded_config + stdin_raw .as_ref() .and_then(|j| j.get("projects")) .and_then(|p| serde_json::from_value(p.clone()).ok()) @@ -624,6 +597,8 @@ async fn main() { } else { cli.notification_ack_required.clone() }; + let primary_config_surface = + ontoref_daemon::registry::load_config_surface(&project_root, nickel_import_path.as_deref()); let primary_ctx = ontoref_daemon::registry::make_context(ontoref_daemon::registry::ContextSpec { slug: primary_slug.clone(), @@ -635,6 +610,7 @@ async fn main() { stale_actor_timeout: cli.actor_stale_timeout, max_notifications: cli.max_notifications, ack_required, + config_surface: primary_config_surface, }); // Alias the primary Arcs into local bindings for use before and after @@ -707,7 +683,7 @@ async fn main() { .unwrap_or("unknown") .to_string(); match ontoref_daemon::nats::NatsPublisher::connect( - loaded_config.as_ref(), + loaded_ncl_config.as_ref().map(|c| &c.nats_events), project_name, cli.port, ) @@ -1321,7 +1297,6 @@ async fn connect_db(cli: &Cli) -> Option> { Some(Arc::new(db)) } -#[cfg(feature = "ui")] fn resolve_nickel_import_path(p: &str, project_root: &std::path::Path) -> String { let c = std::path::Path::new(p); if c.is_absolute() { @@ -1331,6 +1306,7 @@ fn resolve_nickel_import_path(p: &str, project_root: &std::path::Path) -> String } } +#[cfg(feature = "ui")] fn resolve_asset_dir(project_root: &std::path::Path, config_dir: &str) -> std::path::PathBuf { let from_root = project_root.join(config_dir); if from_root.exists() { @@ -1353,38 +1329,20 @@ fn resolve_asset_dir(project_root: &std::path::Path, config_dir: &str) -> std::p } #[cfg(feature = "ui")] -fn apply_ui_config(cli: &mut Cli, config: &serde_json::Value) { - let Some(ui) = config.get("ui").and_then(|u| u.as_object()) else { - return; - }; - if cli.templates_dir.is_none() { - let dir = ui - .get("templates_dir") - .and_then(|d| d.as_str()) - .unwrap_or(""); - if !dir.is_empty() { - cli.templates_dir = 
Some(resolve_asset_dir(&cli.project_root, dir)); - } +fn apply_ui_config(cli: &mut Cli, ui: &ontoref_daemon::config::UiConfig) { + if cli.templates_dir.is_none() && !ui.templates_dir.is_empty() { + cli.templates_dir = Some(resolve_asset_dir(&cli.project_root, &ui.templates_dir)); } - if cli.public_dir.is_none() { - let dir = ui.get("public_dir").and_then(|d| d.as_str()).unwrap_or(""); - if !dir.is_empty() { - cli.public_dir = Some(resolve_asset_dir(&cli.project_root, dir)); - } + if cli.public_dir.is_none() && !ui.public_dir.is_empty() { + cli.public_dir = Some(resolve_asset_dir(&cli.project_root, &ui.public_dir)); } #[cfg(feature = "tls")] { - if cli.tls_cert.is_none() { - let p = ui.get("tls_cert").and_then(|d| d.as_str()).unwrap_or(""); - if !p.is_empty() { - cli.tls_cert = Some(cli.project_root.join(p)); - } + if cli.tls_cert.is_none() && !ui.tls_cert.is_empty() { + cli.tls_cert = Some(cli.project_root.join(&ui.tls_cert)); } - if cli.tls_key.is_none() { - let p = ui.get("tls_key").and_then(|d| d.as_str()).unwrap_or(""); - if !p.is_empty() { - cli.tls_key = Some(cli.project_root.join(p)); - } + if cli.tls_key.is_none() && !ui.tls_key.is_empty() { + cli.tls_key = Some(cli.project_root.join(&ui.tls_key)); } } } diff --git a/crates/ontoref-daemon/src/mcp/mod.rs b/crates/ontoref-daemon/src/mcp/mod.rs index a21f470..a28d701 100644 --- a/crates/ontoref-daemon/src/mcp/mod.rs +++ b/crates/ontoref-daemon/src/mcp/mod.rs @@ -229,6 +229,32 @@ struct ActionAddInput { project: Option, } +// ── Config surface input types +// ────────────────────────────────────────────── + +#[derive(Deserialize, JsonSchema, Default)] +struct ConfigReadInput { + /// Project slug. Omit to use the default project. + project: Option, + /// Config section id (e.g. `"server"`). Omit to return all sections. + section: Option, +} + +#[derive(Deserialize, JsonSchema, Default)] +struct ConfigUpdateInput { + /// Project slug. Omit to use the default project. 
+ project: Option, + /// Section id to mutate (e.g. `"server"`). + section: String, + /// Key/value pairs to write into the override layer. + values: serde_json::Value, + /// When true (default), return the proposed diff without writing anything. + dry_run: Option, + /// Human-readable reason for this change (stored in the override audit + /// trail). + reason: Option, +} + // ── Server ────────────────────────────────────────────────────────────────────── #[derive(Clone)] @@ -276,6 +302,10 @@ impl OntoreServer { .with_async_tool::() .with_async_tool::() .with_async_tool::() + .with_async_tool::() + .with_async_tool::() + .with_async_tool::() + .with_async_tool::() } fn project_ctx(&self, slug: Option<&str>) -> ProjectCtx { @@ -2555,6 +2585,309 @@ impl AsyncTool for BookmarkAddTool { } } +// ── Tool: project_config (read full export or single section) +// ───────────────── + +struct ProjectConfigTool; + +impl ToolBase for ProjectConfigTool { + type Parameter = ConfigReadInput; + type Output = serde_json::Value; + type Error = ToolError; + + fn name() -> Cow<'static, str> { + "ontoref_project_config".into() + } + + fn description() -> Option> { + Some( + "Read a project's config surface. Returns the full NCL export (merged with any active \ + overrides) for all sections, or a single section when `section` is given." 
+ .into(), + ) + } + + fn output_schema() -> Option> { + None + } +} + +impl AsyncTool for ProjectConfigTool { + async fn invoke( + service: &OntoreServer, + param: ConfigReadInput, + ) -> Result { + debug!(tool = "project_config", project = ?param.project, section = ?param.section); + let ctx = service.project_ctx(param.project.as_deref()); + + let surface = service + .state + .registry + .get( + param + .project + .as_deref() + .unwrap_or(ctx.root.file_name().and_then(|n| n.to_str()).unwrap_or("")), + ) + .and_then(|c| c.config_surface.clone()); + + let Some(surface) = surface else { + return Err(ToolError( + "project has no config_surface in manifest.ncl".into(), + )); + }; + + let entry = surface.entry_point_path(&ctx.root); + let (full, _) = ctx + .cache + .export(&entry, ctx.import_path.as_deref()) + .await + .map_err(|e| ToolError(e.to_string()))?; + + if let Some(section_id) = ¶m.section { + Ok(full + .get(section_id) + .cloned() + .unwrap_or(serde_json::Value::Null)) + } else { + Ok(full) + } + } +} + +// ── Tool: config_coherence +// ────────────────────────────────────────────────── + +struct ConfigCoherenceTool; + +impl ToolBase for ConfigCoherenceTool { + type Parameter = ConfigReadInput; + type Output = serde_json::Value; + type Error = ToolError; + + fn name() -> Cow<'static, str> { + "ontoref_config_coherence".into() + } + + fn description() -> Option> { + Some( + "Run the multi-consumer coherence check for a project's config surface. Reports \ + unclaimed NCL fields (present in export but claimed by no consumer) and consumer \ + fields missing from the NCL export. Supply `section` to check one section only." 
+ .into(), + ) + } + + fn output_schema() -> Option> { + None + } +} + +impl AsyncTool for ConfigCoherenceTool { + async fn invoke( + service: &OntoreServer, + param: ConfigReadInput, + ) -> Result { + debug!(tool = "config_coherence", project = ?param.project, section = ?param.section); + let ctx = service.project_ctx(param.project.as_deref()); + let slug = param + .project + .as_deref() + .or_else(|| ctx.root.file_name().and_then(|n| n.to_str())) + .unwrap_or("default"); + + let surface = service + .state + .registry + .get(slug) + .and_then(|c| c.config_surface.clone()); + + let Some(surface) = surface else { + return Err(ToolError( + "project has no config_surface in manifest.ncl".into(), + )); + }; + + let report = crate::config_coherence::check_project( + slug, + &surface, + &ctx.root, + &ctx.cache, + ctx.import_path.as_deref(), + param.section.as_deref(), + ) + .await; + + serde_json::to_value(&report).map_err(ToolError::from) + } +} + +// ── Tool: config_quickref +// ─────────────────────────────────────────────────── + +struct ConfigQuickrefTool; + +impl ToolBase for ConfigQuickrefTool { + type Parameter = ConfigReadInput; + type Output = serde_json::Value; + type Error = ToolError; + + fn name() -> Cow<'static, str> { + "ontoref_config_quickref".into() + } + + fn description() -> Option> { + Some( + "Generate living config documentation for a project. Combines current NCL values, \ + manifest rationales, per-section _meta_ records, override history, and coherence \ + status into a single JSON document. Use `section` to scope to one section." 
+ .into(), + ) + } + + fn output_schema() -> Option> { + None + } +} + +impl AsyncTool for ConfigQuickrefTool { + async fn invoke( + service: &OntoreServer, + param: ConfigReadInput, + ) -> Result { + debug!(tool = "config_quickref", project = ?param.project, section = ?param.section); + let ctx = service.project_ctx(param.project.as_deref()); + let slug = param + .project + .as_deref() + .or_else(|| ctx.root.file_name().and_then(|n| n.to_str())) + .unwrap_or("default"); + + let surface = service + .state + .registry + .get(slug) + .and_then(|c| c.config_surface.clone()); + + let Some(surface) = surface else { + return Err(ToolError( + "project has no config_surface in manifest.ncl".into(), + )); + }; + + Ok(crate::config_coherence::build_quickref( + slug, + &surface, + &ctx.root, + &ctx.cache, + ctx.import_path.as_deref(), + param.section.as_deref(), + ) + .await) + } +} + +// ── Tool: config_update (override layer mutation) +// ──────────────────────────── + +struct ConfigUpdateTool; + +impl ToolBase for ConfigUpdateTool { + type Parameter = ConfigUpdateInput; + type Output = serde_json::Value; + type Error = ToolError; + + fn name() -> Cow<'static, str> { + "ontoref_config_update".into() + } + + fn description() -> Option> { + Some( + "Mutate a config section via the override layer. Generates a \ + `{section}.overrides.ncl` file that is merged after the original NCL, preserving \ + comments and contracts. `dry_run` defaults to true — returns the proposed diff \ + without writing. Set `dry_run: false` to apply. Always include a `reason` for the \ + audit trail." 
+                .into(),
+        )
+    }
+
+    fn output_schema() -> Option> {
+        None
+    }
+}
+
+impl AsyncTool for ConfigUpdateTool {
+    async fn invoke(
+        service: &OntoreServer,
+        param: ConfigUpdateInput,
+    ) -> Result<serde_json::Value, ToolError> {
+        let dry_run = param.dry_run.unwrap_or(true);
+        debug!(
+            tool = "config_update",
+            project = ?param.project,
+            section = %param.section,
+            dry_run
+        );
+        let ctx = service.project_ctx(param.project.as_deref());
+        let slug = param
+            .project
+            .as_deref()
+            .or_else(|| ctx.root.file_name().and_then(|n| n.to_str()))
+            .unwrap_or("default");
+
+        let surface = service
+            .state
+            .registry
+            .get(slug)
+            .and_then(|c| c.config_surface.clone());
+
+        let Some(surface) = surface else {
+            return Err(ToolError(
+                "project has no config_surface in manifest.ncl".into(),
+            ));
+        };
+
+        let section_meta = surface
+            .section(&param.section)
+            .ok_or_else(|| ToolError(format!("section '{}' not in config_surface", param.section)))?
+            .clone();
+
+        if !section_meta.mutable {
+            return Err(ToolError(format!(
+                "section '{}' is marked immutable",
+                param.section
+            )));
+        }
+
+        let req = crate::api::ConfigUpdateRequest {
+            values: param.values,
+            dry_run,
+            reason: param.reason.unwrap_or_default(),
+        };
+
+        if dry_run {
+            Ok(crate::api::generate_override_diff_value(
+                &surface,
+                &param.section,
+                &req,
+                &ctx.root,
+            ))
+        } else {
+            crate::api::apply_config_override(
+                &surface,
+                &param.section,
+                &req,
+                &ctx.root,
+                &ctx.cache,
+                ctx.import_path.as_deref(),
+            )
+            .await
+            .map_err(|e| ToolError(e.to_string()))
+        }
+    }
+}
+
 /// Run the MCP server over stdin/stdout — for use as a `command`-mode MCP
 /// server in Claude Desktop, Cursor, or any stdio-compatible AI client.
pub async fn serve_stdio(state: AppState) -> anyhow::Result<()> { diff --git a/crates/ontoref-daemon/src/nats.rs b/crates/ontoref-daemon/src/nats.rs index 5f54f9d..b91d689 100644 --- a/crates/ontoref-daemon/src/nats.rs +++ b/crates/ontoref-daemon/src/nats.rs @@ -30,67 +30,31 @@ pub struct NatsPublisher { #[cfg(feature = "nats")] impl NatsPublisher { /// Connect to NATS JetStream, apply topology from config, bind consumer. - /// Reads `nats_events` section from `.ontoref/config.ncl`. - /// Returns `Ok(None)` if disabled or unavailable (graceful degradation). + /// Returns `Ok(None)` when `cfg` is `None`, disabled, or unavailable. pub async fn connect( - config: Option<&serde_json::Value>, + cfg: Option<&crate::config::NatsEventsConfig>, project: String, port: u16, ) -> Result> { - let config = match config { - Some(v) => v.clone(), - None => return Ok(None), + let Some(nats) = cfg else { + return Ok(None); }; - let nats_section = match config.get("nats_events") { - Some(section) => section, - None => return Ok(None), - }; - - let enabled = nats_section - .get("enabled") - .and_then(|e| e.as_bool()) - .unwrap_or(false); - - if !enabled { + if !nats.enabled { return Ok(None); } info!("connecting to NATS..."); - let url = nats_section - .get("url") - .and_then(|u| u.as_str()) - .unwrap_or("nats://localhost:4222") - .to_string(); - - let nkey_seed = nats_section - .get("nkey_seed") - .and_then(|s| s.as_str()) - .map(|s| s.to_string()); - - let require_signed = nats_section - .get("require_signed_messages") - .and_then(|r| r.as_bool()) - .unwrap_or(false); - - let trusted_nkeys = nats_section - .get("trusted_nkeys") - .and_then(|t| t.as_array()) - .map(|arr| { - arr.iter() - .filter_map(|v| v.as_str().map(String::from)) - .collect() - }) - .unwrap_or_default(); - let conn_cfg = NatsConnectionConfig { - url: url.clone(), - nkey_seed, - require_signed_messages: require_signed, - trusted_nkeys, + url: nats.url.clone(), + nkey_seed: nats.nkey_seed.clone(), + 
require_signed_messages: nats.require_signed_messages, + trusted_nkeys: nats.trusted_nkeys.clone(), }; + let url = nats.url.clone(); + let mut stream = match tokio::time::timeout( std::time::Duration::from_secs(3), EventStream::connect_client(&conn_cfg), @@ -110,15 +74,8 @@ impl NatsPublisher { info!(url = %url, "NATS connected"); - // Apply topology from streams_config file declared in project config. - // Empty string → None so TopologyConfig::load falls back to NATS_STREAMS_CONFIG - // env var (set by ontoref-daemon-boot to - // ~/.config/ontoref/streams.json). - let topology_path = nats_section - .get("streams_config") - .and_then(|s| s.as_str()) - .filter(|s| !s.is_empty()) - .map(std::path::PathBuf::from); + let topology_path = (!nats.streams_config.is_empty()) + .then(|| std::path::PathBuf::from(&nats.streams_config)); let topology = match TopologyConfig::load(topology_path.as_deref()) { Ok(Some(t)) => Some(t), diff --git a/crates/ontoref-daemon/src/registry.rs b/crates/ontoref-daemon/src/registry.rs index 199c1bf..39ce7ce 100644 --- a/crates/ontoref-daemon/src/registry.rs +++ b/crates/ontoref-daemon/src/registry.rs @@ -11,6 +11,103 @@ use crate::actors::ActorRegistry; use crate::cache::NclCache; use crate::notifications::NotificationStore; +// ── Config surface +// ──────────────────────────────────────────────────────────── + +/// Which process reads a config section. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum ConsumerKind { + RustStruct, + NuScript, + CiPipeline, + External, +} + +/// A single process that reads fields from a config section. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConfigConsumer { + /// Identifier for this consumer (e.g. "vapora-backend", "deploy-script"). + pub id: String, + pub kind: ConsumerKind, + /// Rust fully-qualified type or script path. + pub reference: String, + /// Fields this consumer reads. Empty = reads all fields. 
+    pub fields: Vec<String>,
+}
+
+/// Describes one NCL config file and all processes that read it.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConfigSection {
+    pub id: String,
+    /// Path to the NCL file, relative to `config_root`.
+    pub file: String,
+    /// Path to the NCL contract file. Relative to `contracts_path` or project
+    /// root.
+    pub contract: String,
+    pub description: String,
+    /// Why this section exists and why current values were chosen.
+    pub rationale: String,
+    /// When false, ontoref will only read this section, never write.
+    pub mutable: bool,
+    pub consumers: Vec<ConfigConsumer>,
+}
+
+/// How the project's config files are organised.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum ConfigKind {
+    /// Multiple .ncl files merged via & operator.
+    NclMerge,
+    /// .typedialog/ structure with form.toml + validators + fragments.
+    TypeDialog,
+    /// Single monolithic .ncl file.
+    SingleFile,
+}
+
+/// Project-level config surface metadata — loaded once from manifest.ncl at
+/// registration time and stored in `ProjectContext`.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ConfigSurface {
+    /// Directory containing config NCL files, relative to project root.
+    pub config_root: PathBuf,
+    /// Main NCL file (entry point for `nickel export`).
+    pub entry_point: String,
+    pub kind: ConfigKind,
+    /// Directory added to NICKEL_IMPORT_PATH when exporting config.
+    pub contracts_path: String,
+    /// Where ontoref writes `{section}.overrides.ncl` files.
+    /// Defaults to `config_root` when empty.
+    pub overrides_dir: PathBuf,
+    pub sections: Vec<ConfigSection>,
+}
+
+impl ConfigSurface {
+    /// Resolve the directory where override files are written.
+    /// Returns `overrides_dir` if set, otherwise `config_root`.
+ pub fn resolved_overrides_dir(&self) -> &Path { + if self.overrides_dir == PathBuf::new() { + &self.config_root + } else { + &self.overrides_dir + } + } + + /// Resolve the absolute path to the config entry point given the project + /// root. + pub fn entry_point_path(&self, project_root: &Path) -> PathBuf { + project_root.join(&self.config_root).join(&self.entry_point) + } + + /// Find a section by id. + pub fn section(&self, id: &str) -> Option<&ConfigSection> { + self.sections.iter().find(|s| s.id == id) + } +} + +// ── Auth / keys +// ─────────────────────────────────────────────────────────────── + #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] pub enum Role { @@ -107,6 +204,10 @@ pub struct ProjectContext { /// on every cache invalidation for that file. Consumers compare snapshots /// to detect which individual files changed between polls. pub file_versions: Arc>, + /// Config surface metadata loaded from the project's manifest.ncl at + /// registration time. `None` when the project's manifest has no + /// `config_surface` field or when the manifest can't be exported. 
+    pub config_surface: Option<ConfigSurface>,
 }
 
 impl ProjectContext {
@@ -181,6 +282,7 @@ impl ProjectRegistry {
                 stale_actor_timeout,
                 max_notifications,
                 ack_required: vec![],
+                config_surface: None,
             });
             registry.contexts.insert(entry.slug, Arc::new(ctx));
             continue;
@@ -200,6 +302,7 @@
         };
 
         let import_path = resolve_import_path(&entry.nickel_import_paths, &root);
+        let config_surface = load_config_surface(&root, import_path.as_deref());
         let ctx = make_context(ContextSpec {
             slug: entry.slug.clone(),
             root,
@@ -210,6 +313,7 @@
             stale_actor_timeout,
             max_notifications,
             ack_required: vec![],
+            config_surface,
         });
         registry.contexts.insert(entry.slug, Arc::new(ctx));
     }
@@ -246,6 +350,11 @@
             let ip = resolve_import_path(&entry.nickel_import_paths, &r);
             (r, ip)
         };
+        let config_surface = if entry.push_only {
+            None
+        } else {
+            load_config_surface(&root, import_path.as_deref())
+        };
         let ctx = make_context(ContextSpec {
             slug: entry.slug.clone(),
             root,
@@ -256,6 +365,7 @@
             stale_actor_timeout: self.stale_actor_timeout,
             max_notifications: self.max_notifications,
             ack_required: vec![],
+            config_surface,
         });
         self.contexts.insert(entry.slug, Arc::new(ctx));
         Ok(())
@@ -321,6 +431,164 @@ pub struct ContextSpec {
     pub max_notifications: usize,
     /// Directories that require notification acknowledgment.
     pub ack_required: Vec,
+    /// Pre-loaded config surface from the project's manifest.ncl.
+    /// Pass `None` for push_only projects or when the manifest has no
+    /// `config_surface` field.
+    pub config_surface: Option<ConfigSurface>,
+}
+
+/// Attempt to load `ConfigSurface` from a project's `manifest.ncl`
+/// synchronously.
+///
+/// Runs `nickel export` on `.ontology/manifest.ncl` and deserialises the
+/// `config_surface` key.
Returns `None` when:
+/// - the manifest file doesn't exist
+/// - the manifest has no `config_surface` field
+/// - nickel export or deserialisation fails (logged at warn level)
+pub fn load_config_surface(root: &Path, import_path: Option<&str>) -> Option<ConfigSurface> {
+    let manifest = root.join(".ontology").join("manifest.ncl");
+    if !manifest.exists() {
+        return None;
+    }
+
+    let mut cmd = std::process::Command::new("nickel");
+    cmd.args(["export", "--format", "json"])
+        .arg(&manifest)
+        .current_dir(root);
+    if let Some(ip) = import_path {
+        cmd.env("NICKEL_IMPORT_PATH", ip);
+    }
+    let output = cmd.output().ok()?;
+
+    if !output.status.success() {
+        warn!(
+            path = %manifest.display(),
+            stderr = %String::from_utf8_lossy(&output.stderr),
+            "nickel export of manifest.ncl failed — config_surface not loaded"
+        );
+        return None;
+    }
+
+    let json: serde_json::Value = serde_json::from_slice(&output.stdout).ok()?;
+    let surface_val = json.get("config_surface")?;
+
+    // Deserialise into an intermediate form that matches the NCL schema,
+    // then convert to the canonical ConfigSurface.
+    #[derive(Deserialize)]
+    struct NclConfigConsumer {
+        id: String,
+        kind: String,
+        #[serde(default, rename = "ref")]
+        reference: String,
+        #[serde(default)]
+        fields: Vec<String>,
+    }
+
+    #[derive(Deserialize)]
+    struct NclConfigSection {
+        id: String,
+        file: String,
+        #[serde(default)]
+        contract: String,
+        #[serde(default)]
+        description: String,
+        #[serde(default)]
+        rationale: String,
+        #[serde(default = "default_true")]
+        mutable: bool,
+        #[serde(default)]
+        consumers: Vec<NclConfigConsumer>,
+    }
+
+    fn default_true() -> bool {
+        true
+    }
+
+    #[derive(Deserialize)]
+    struct NclConfigSurface {
+        config_root: String,
+        #[serde(default = "default_config_ncl")]
+        entry_point: String,
+        #[serde(default = "default_ncl_merge")]
+        kind: String,
+        #[serde(default)]
+        contracts_path: String,
+        #[serde(default)]
+        overrides_dir: String,
+        #[serde(default)]
+        sections: Vec<NclConfigSection>,
+    }
+
+    fn default_config_ncl() -> String {
+        "config.ncl".to_string()
+    }
+    fn default_ncl_merge() -> String {
+        "NclMerge".to_string()
+    }
+
+    let ncl: NclConfigSurface = match serde_json::from_value(surface_val.clone()) {
+        Ok(v) => v,
+        Err(e) => {
+            warn!(error = %e, "failed to deserialise config_surface from manifest.ncl");
+            return None;
+        }
+    };
+
+    let kind = match ncl.kind.as_str() {
+        "TypeDialog" => ConfigKind::TypeDialog,
+        "SingleFile" => ConfigKind::SingleFile,
+        _ => ConfigKind::NclMerge,
+    };
+
+    let config_root = root.join(&ncl.config_root);
+    let overrides_dir = if ncl.overrides_dir.is_empty() {
+        PathBuf::new()
+    } else {
+        root.join(&ncl.overrides_dir)
+    };
+
+    let sections = ncl
+        .sections
+        .into_iter()
+        .map(|s| {
+            let consumers = s
+                .consumers
+                .into_iter()
+                .map(|c| {
+                    let consumer_kind = match c.kind.as_str() {
+                        "NuScript" => ConsumerKind::NuScript,
+                        "CiPipeline" => ConsumerKind::CiPipeline,
+                        "External" => ConsumerKind::External,
+                        _ => ConsumerKind::RustStruct,
+                    };
+                    ConfigConsumer {
+                        id: c.id,
+                        kind: consumer_kind,
+                        reference: c.reference,
+                        fields: c.fields,
+                    }
+                })
+                .collect();
+ ConfigSection { + id: s.id, + file: s.file, + contract: s.contract, + description: s.description, + rationale: s.rationale, + mutable: s.mutable, + consumers, + } + }) + .collect(); + + Some(ConfigSurface { + config_root, + entry_point: ncl.entry_point, + kind, + contracts_path: ncl.contracts_path, + overrides_dir, + sections, + }) } pub fn make_context(spec: ContextSpec) -> ProjectContext { @@ -348,6 +616,7 @@ pub fn make_context(spec: ContextSpec) -> ProjectContext { seed_lock: Arc::new(Semaphore::new(1)), ontology_version: Arc::new(AtomicU64::new(0)), file_versions: Arc::new(DashMap::new()), + config_surface: spec.config_surface, } } @@ -481,6 +750,7 @@ mod tests { stale_actor_timeout: 300, max_notifications: 64, ack_required: vec![], + config_surface: None, })) } diff --git a/crates/ontoref-daemon/src/ui/handlers.rs b/crates/ontoref-daemon/src/ui/handlers.rs index 1afcde7..0a0f1d7 100644 --- a/crates/ontoref-daemon/src/ui/handlers.rs +++ b/crates/ontoref-daemon/src/ui/handlers.rs @@ -36,10 +36,22 @@ impl IntoResponse for UiError { UiError::Forbidden(_) => StatusCode::FORBIDDEN, _ => StatusCode::INTERNAL_SERVER_ERROR, }; + let detail = if let UiError::Render(ref e) = self { + use std::error::Error; + let mut chain = format!("{e}"); + let mut src = e.source(); + while let Some(s) = src { + chain.push_str(&format!("\nCaused by: {s}")); + src = s.source(); + } + chain + } else { + format!("{self}") + }; let html = format!( r#"

UI Error

-
{self}
+
{detail}
"# ); (status, Html(html)).into_response() @@ -58,8 +70,17 @@ pub(crate) async fn render( ctx: &Context, ) -> Result, UiError> { let guard = tera.read().await; - let html = guard.render(template, ctx)?; - Ok(Html(html)) + guard.render(template, ctx).map(Html).map_err(|e| { + use std::error::Error; + let mut chain = format!("{e}"); + let mut src = e.source(); + while let Some(s) = src { + chain.push_str(&format!(" → {s}")); + src = s.source(); + } + tracing::error!(template, error = %chain, "tera render failed"); + UiError::Render(e) + }) } pub(crate) fn tera_ref(state: &AppState) -> Result<&Arc>, UiError> { @@ -915,39 +936,48 @@ pub async fn api_catalog_page_mp( let ctx_ref = state.registry.get(&slug).ok_or(UiError::NotConfigured)?; let base_url = format!("/ui/{slug}"); - let routes: Vec = crate::api_catalog::catalog() - .into_iter() - .map(|r| { - let params: Vec = r - .params - .iter() - .map(|p| { - serde_json::json!({ - "name": p.name, - "type": p.kind, - "constraint": p.constraint, - "description": p.description, + // The #[onto_api] catalog is the ontoref-daemon's own HTTP surface. + // Only expose it for the primary project (ontoref itself). Consumer + // projects have their own API surfaces not registered in this process. 
+ let is_primary = slug == state.registry.primary_slug(); + let routes: Vec = if is_primary { + crate::api_catalog::catalog() + .into_iter() + .map(|r| { + let params: Vec = r + .params + .iter() + .map(|p| { + serde_json::json!({ + "name": p.name, + "type": p.kind, + "constraint": p.constraint, + "description": p.description, + }) }) + .collect(); + serde_json::json!({ + "method": r.method, + "path": r.path, + "description": r.description, + "auth": r.auth, + "actors": r.actors, + "params": params, + "tags": r.tags, + "feature": r.feature, }) - .collect(); - serde_json::json!({ - "method": r.method, - "path": r.path, - "description": r.description, - "auth": r.auth, - "actors": r.actors, - "params": params, - "tags": r.tags, - "feature": r.feature, }) - }) - .collect(); + .collect() + } else { + vec![] + }; let catalog_json = serde_json::to_string(&routes).unwrap_or_else(|_| "[]".to_string()); let mut ctx = Context::new(); ctx.insert("catalog_json", &catalog_json); ctx.insert("route_count", &routes.len()); + ctx.insert("is_primary", &is_primary); ctx.insert("base_url", &base_url); ctx.insert("slug", &slug); ctx.insert("current_role", &auth_role_str(&auth)); @@ -2747,7 +2777,7 @@ async fn run_action_by_id( } match tokio::process::Command::new(&ontoref_bin) - .arg(&mode) + .args(["run", &mode]) .current_dir(root) .spawn() { @@ -2893,3 +2923,163 @@ fn resolve_bookmark_ctx( } (state.project_root.clone(), state.cache.clone()) } + +// ── Config surface page +// ────────────────────────────────────────────────────── + +pub async fn config_page_mp( + State(state): State, + Path(slug): Path, + auth: AuthUser, +) -> Result, UiError> { + let tera = tera_ref(&state)?; + let ctx_ref = state.registry.get(&slug).ok_or(UiError::NotConfigured)?; + let base_url = format!("/ui/{slug}"); + + let surface = ctx_ref.config_surface.clone(); + let has_config_surface = surface.is_some(); + + let mut ctx = Context::new(); + ctx.insert("slug", &slug); + ctx.insert("base_url", &base_url); + 
ctx.insert("current_role", &auth_role_str(&auth));
+    ctx.insert("has_config_surface", &has_config_surface);
+
+    if let Some(ref surface) = surface {
+        ctx.insert("config_root", &surface.config_root.display().to_string());
+        ctx.insert("entry_point", &surface.entry_point);
+        ctx.insert("kind", &format!("{:?}", surface.kind));
+
+        let quickref = crate::config_coherence::build_quickref(
+            &slug,
+            surface,
+            &ctx_ref.root,
+            &ctx_ref.cache,
+            ctx_ref.import_path.as_deref(),
+            None,
+        )
+        .await;
+
+        let overall_status = quickref
+            .get("overall_coherence")
+            .and_then(|v| v.as_str())
+            .unwrap_or("Unknown");
+        ctx.insert("overall_status", overall_status);
+
+        let sections = quickref
+            .get("sections")
+            .and_then(|v| v.as_array())
+            .cloned()
+            .unwrap_or_default();
+        ctx.insert("sections", &sections);
+    } else {
+        ctx.insert("config_root", "");
+        ctx.insert("entry_point", "");
+        ctx.insert("kind", "");
+        ctx.insert("overall_status", "Unknown");
+        ctx.insert("sections", &serde_json::Value::Array(vec![]));
+    }
+
+    insert_brand_ctx(
+        &mut ctx,
+        &ctx_ref.root,
+        &ctx_ref.cache,
+        ctx_ref.import_path.as_deref(),
+        &base_url,
+    )
+    .await;
+
+    render(tera, "pages/config.html", &ctx).await
+}
+
+pub async fn adrs_page_mp(
+    State(state): State<AppState>,
+    Path(slug): Path<String>,
+    auth: AuthUser,
+) -> Result<Html<String>, UiError> {
+    let tera = tera_ref(&state)?;
+    let ctx_ref = state.registry.get(&slug).ok_or(UiError::NotConfigured)?;
+    let base_url = format!("/ui/{slug}");
+
+    let adrs_dir = ctx_ref.root.join("adrs");
+    let import_path = ctx_ref.import_path.clone();
+    let cache = ctx_ref.cache.clone();
+
+    let mut adrs: Vec<serde_json::Value> = Vec::new();
+    if let Ok(entries) = std::fs::read_dir(&adrs_dir) {
+        for entry in entries.flatten() {
+            let path = entry.path();
+            if path.extension().and_then(|e| e.to_str()) != Some("ncl") {
+                continue;
+            }
+            let stem = path
+                .file_stem()
+                .and_then(|s| s.to_str())
+                .unwrap_or("")
+                .to_string();
+            // Only keep actual ADR files: adr-NNN-* where NNN starts with a digit
+            let
after_prefix = match stem.strip_prefix("adr-") { + Some(s) => s, + None => continue, + }; + if !after_prefix.starts_with(|c: char| c.is_ascii_digit()) { + continue; + } + match cache.export(&path, import_path.as_deref()).await { + Ok((v, _)) => { + let hard_count = v + .get("constraints") + .and_then(|c| c.get("hard")) + .and_then(|h| h.as_array()) + .map(|a| a.len()) + .unwrap_or(0); + let soft_count = v + .get("constraints") + .and_then(|c| c.get("soft")) + .and_then(|s| s.as_array()) + .map(|a| a.len()) + .unwrap_or(0); + adrs.push(serde_json::json!({ + "id": v.get("id").and_then(|i| i.as_str()).unwrap_or(&stem), + "title": v.get("title").and_then(|t| t.as_str()).unwrap_or(""), + "status": v.get("status").and_then(|s| s.as_str()).unwrap_or(""), + "date": v.get("date").and_then(|d| d.as_str()).unwrap_or(""), + "context": v.get("context").and_then(|c| c.as_str()).unwrap_or(""), + "decision": v.get("decision").and_then(|d| d.as_str()).unwrap_or(""), + "hard_constraints": hard_count, + "soft_constraints": soft_count, + "file": stem, + })); + } + Err(e) => { + tracing::warn!(path = %path.display(), error = %e, "adrs_page: export failed"); + adrs.push(serde_json::json!({ + "id": stem, "title": "", "status": "Error", + "date": "", "context": "", "decision": "", + "hard_constraints": 0, "soft_constraints": 0, "file": stem, + })); + } + } + } + } + adrs.sort_by_key(|v| v["id"].as_str().unwrap_or("").to_string()); + + let adrs_json = serde_json::to_string(&adrs).unwrap_or_else(|_| "[]".to_string()); + + let mut ctx = Context::new(); + ctx.insert("slug", &slug); + ctx.insert("base_url", &base_url); + ctx.insert("current_role", &auth_role_str(&auth)); + ctx.insert("adrs_json", &adrs_json); + ctx.insert("adr_count", &adrs.len()); + insert_brand_ctx( + &mut ctx, + &ctx_ref.root, + &ctx_ref.cache, + ctx_ref.import_path.as_deref(), + &base_url, + ) + .await; + + render(tera, "pages/adrs.html", &ctx).await +} diff --git a/crates/ontoref-daemon/src/ui/mod.rs 
b/crates/ontoref-daemon/src/ui/mod.rs index c4d1dd5..237c1d9 100644 --- a/crates/ontoref-daemon/src/ui/mod.rs +++ b/crates/ontoref-daemon/src/ui/mod.rs @@ -92,6 +92,8 @@ fn multi_router(state: AppState) -> axum::Router { ) .route("/{slug}/compose/send", post(handlers::compose_send_mp)) .route("/{slug}/api", get(handlers::api_catalog_page_mp)) + .route("/{slug}/config", get(handlers::config_page_mp)) + .route("/{slug}/adrs", get(handlers::adrs_page_mp)) .route("/{slug}/actions", get(handlers::actions_page_mp)) .route("/{slug}/actions/run", post(handlers::actions_run_mp)) .route("/{slug}/qa", get(handlers::qa_page_mp)) diff --git a/crates/ontoref-daemon/templates/base.html b/crates/ontoref-daemon/templates/base.html index 0de9376..91d729e 100644 --- a/crates/ontoref-daemon/templates/base.html +++ b/crates/ontoref-daemon/templates/base.html @@ -16,7 +16,10 @@ {% block head %}{% endblock head %} @@ -35,7 +38,7 @@