ontoref-derive: #[onto_mcp_tool] attribute macro registers MCP tool unit-structs in
the catalog at link time via inventory::submit!; annotated item is emitted unchanged,
ToolBase/AsyncTool impls stay on the struct. All 34 tools migrated from manual wiring
(net +5: ontoref_list_projects, ontoref_search, ontoref_describe,
ontoref_list_ontology_extensions, ontoref_get_ontology_extension).
validate modes (ADR-018): reads level_hierarchy from workflow.ncl and checks every
.ncl mode for level declared, strategy declared, delegate chain coherent, compose
extends valid. mode resolve <id> shows which hierarchy level handles a mode and why.
--self-test generates synthetic fixtures in a temp dir for CI smoke-testing.
validate run-cargo: two-step Cargo.toml resolution — workspace layout first
(crates/<check.crate>/Cargo.toml), single-crate fallback by package name or repo
basename. Lets the same ADR constraint shape apply to workspace and single-crate repos.
ontology/schemas/manifest.ncl: registry_topology_type contract — multi-registry
coordination, push targets, participant scopes, per-namespace capability.
reflection/requirements/base.ncl: oras ≥1.2.0, cosign ≥2.0.0, sops ≥3.9.0, age
≥1.1.0, restic declared as Hard/Soft requirements with version_min, check_cmd, and
install_hint (ADR-017 toolchain surface).
ADR-019: per-file recipient routing for tenant isolation without multi-vault. Schema
additions: sops.recipient_groups + sops.recipient_rules in ontoref-project.ncl.
secrets-bootstrap generates .sops.yaml from project.ncl in declarative mode. Three
new secrets-audit checks: recipient-routing-coherent, recipient-routing-coverage,
no-multi-vault. Adoption templates: single-team/, multi-tenant/, agent-first/.
Integration templates: domain-producer/, mode-producer/, mode-consumer/.
UI: project_picker surfaces registry badge (⟳ participant) and vault badge
(⛁ vault_id · N, green=declarative / amber=legacy) per project card. Expanded panel
adds collapsible Registry section with namespace, endpoint, and push/pull capability.
manage.html gains Runtime Services card — MCP and GraphQL toggleable without restart
via HTMX POST /ui/manage/services/{service}/toggle.
describe.nu: capabilities JSON includes registry_topology and vault_state per project.
sync.nu: drift check extended to detect //! absence on newly registered crates.
qa.ncl: six entries — credential-vault-best-practice (layered data-flow diagram),
credential-vault-templates (paths A/B/C), credential-vault-troubleshooting (15 named
errors), integration-what-and-why (ADR-042 OCI federation), integration-how-to-implement,
integration-troubleshooting.
ontoref: core.ncl + manifest.ncl updated to reflect OCI, MCP, and mode-hierarchy nodes.
Deleted stale presentation assets (2026-02 slides + voice notes).
#!/usr/bin/env nu
# store.nu — Nushell client for ontoref-daemon HTTP API.
#
# Provides cached nickel export via daemon (with subprocess fallback),
# plus query/sync/nodes/dimensions when daemon has DB enabled.
#
# All HTTP calls use ^curl (external command) because Nushell's internal
# http get/post cannot be captured with `| complete` on connection errors.
#
# Usage:
#   use ../modules/store.nu *
#
#   daemon-export ".ontology/core.ncl"                    # cached export
#   daemon-export ".ontology/core.ncl" --import-path $ip  # with NICKEL_IMPORT_PATH
#   store nodes --level Axiom                             # query ontology nodes
#   store dimensions                                      # query dimensions
#   daemon-health                                         # check daemon status

# ── Utilities (self-contained to avoid circular imports with shared.nu) ─────────
|
# Resolve the effective project root: ONTOREF_PROJECT_ROOT when it is set and
# differs from the install root, otherwise ONTOREF_ROOT itself.
def project-root []: nothing -> string {
    let override = ($env.ONTOREF_PROJECT_ROOT? | default "")
    let use_override = ($override | is-not-empty) and ($override != $env.ONTOREF_ROOT)
    if $use_override { $override } else { $env.ONTOREF_ROOT }
}
|
# Compose a NICKEL_IMPORT_PATH string. Project-local directories come first so
# they shadow the installed ontology/schema directories; any pre-existing
# NICKEL_IMPORT_PATH from the environment is appended at the end.
def nickel-import-path [root: string]: nothing -> string {
    let candidates = [
        $"($root)/.ontology"
        $"($root)/adrs"
        $"($root)/.ontoref/ontology/schemas"
        $"($root)/.ontoref/adrs"
        $"($root)/.onref"  # NOTE(review): looks like a typo for ".ontoref" — confirm before changing
        $root
        $"($env.ONTOREF_ROOT)/ontology"
        $"($env.ONTOREF_ROOT)/ontology/schemas"
        $"($env.ONTOREF_ROOT)/adrs"
        $env.ONTOREF_ROOT
    ]
    # Keep only directories that actually exist, dropping duplicates while
    # preserving the priority order above.
    let present = ($candidates | where {|dir| $dir | path exists } | uniq)
    let inherited = ($env.NICKEL_IMPORT_PATH? | default "")
    let parts = if ($inherited | is-not-empty) {
        $present | append $inherited
    } else {
        $present
    }
    $parts | str join ":"
}
|
# ── Configuration ────────────────────────────────────────────────────────────────

# Resolve the daemon HTTP port. Reads ~/.config/ontoref/config.ncl::daemon.port
# as the canonical source — same value the daemon binary reads at startup —
# so client and daemon never drift on the port. ONTOREF_DAEMON_PORT env override
# takes precedence for ad-hoc redirection. Final fallback is the schema default
# (matches install/resources/config.ncl).
export def daemon-port []: nothing -> int {
    const fallback = 7890  # schema default from install/resources/config.ncl
    let override = ($env.ONTOREF_DAEMON_PORT? | default "")
    if ($override | is-not-empty) { return ($override | into int) }
    let cfg_path = $"($env.HOME)/.config/ontoref/config.ncl"
    if not ($cfg_path | path exists) { return $fallback }
    let run = (do { ^nickel export $cfg_path } | complete)
    if $run.exit_code != 0 { return $fallback }
    let cfg = (try { $run.stdout | from json } catch { {} })
    $cfg.daemon?.port? | default $fallback
}
|
# Resolve the daemon URL. ONTOREF_DAEMON_URL env override takes precedence;
# otherwise composes from the canonical port (see daemon-port). Use this from
# every nu module that talks to the daemon — never hard-code 127.0.0.1:NNNN.
export def daemon-url []: nothing -> string {
    let override = ($env.ONTOREF_DAEMON_URL? | default "")
    if ($override | is-empty) {
        $"http://127.0.0.1:(daemon-port)"
    } else {
        $override
    }
}
|
# Read the project's .ontoref/config.ncl and report the db section.
# Returns a disabled sentinel when the config is missing, fails to export,
# or produces unparseable JSON — callers never see an error from here.
def project-config-db-status []: nothing -> record<enabled: bool, url: string, namespace: string> {
    let disabled = { enabled: false, url: "", namespace: "" }

    let root = (project-root)
    let config_path = $"($root)/.ontoref/config.ncl"
    if not ($config_path | path exists) { return $disabled }

    # Export config via nickel; any non-zero exit means "no db info".
    let exported = (do { ^nickel export $config_path } | complete)
    if $exported.exit_code != 0 { return $disabled }

    # Parse JSON defensively.
    let parsed = (do { $exported.stdout | from json } | complete)
    if $parsed.exit_code != 0 { return $disabled }

    let config = $parsed.stdout
    let db = ($config.db? | default {})
    {
        enabled: ($db.enabled? | default false),
        url: ($db.url? | default ""),
        namespace: ($db.namespace? | default "ontoref"),
    }
}
|
# ── HTTP helpers (external ^curl) ────────────────────────────────────────────────

# Build the Authorization header args for curl if ONTOREF_TOKEN is set.
# Returns [] when no token is configured so callers can splat unconditionally.
export def bearer-args []: nothing -> list<string> {
    let token = ($env.ONTOREF_TOKEN? | default "")
    if ($token | is-empty) { return [] }
    ["-H" $"Authorization: Bearer ($token)"]
}
|
# GET `url` via ^curl, with bearer auth when configured.
# Returns the `complete` record ({ exit_code, stdout, stderr }).
def http-get [url: string]: nothing -> record {
    do { ^curl -sf ...(bearer-args) $url } | complete
}
|
# POST a JSON `body` string to `url` via ^curl, with bearer auth when configured.
# Returns the `complete` record ({ exit_code, stdout, stderr }).
def http-post-json [url: string, body: string]: nothing -> record {
    do { ^curl -sf -X POST -H "Content-Type: application/json" ...(bearer-args) -d $body $url } | complete
}
|
# DELETE `url` via ^curl, with bearer auth when configured.
# Returns the `complete` record ({ exit_code, stdout, stderr }).
def http-delete [url: string]: nothing -> record {
    do { ^curl -sf -X DELETE ...(bearer-args) $url } | complete
}
|
# ── Availability check ───────────────────────────────────────────────────────────

# Check if daemon is reachable. Caches "true" in env var for the session.
# A "false" result is NOT cached — allows recovery when daemon starts mid-session.
export def --env daemon-available []: nothing -> bool {
    let cached = ($env.ONTOREF_DAEMON_AVAILABLE? | default "")
    if $cached == "true" { return true }

    let probe = (http-get $"(daemon-url)/health")
    if $probe.exit_code != 0 { return false }

    # Only a positive result is sticky for the session.
    $env.ONTOREF_DAEMON_AVAILABLE = "true"
    true
}
|
# ── Health ───────────────────────────────────────────────────────────────────────

# Show daemon health status with optional DB info from config.
# Returns record with { status, uptime_secs, cache_*, db_enabled?, db_config? }
# or null if daemon unreachable or response is not valid JSON.
export def daemon-health []: nothing -> any {
    let resp = (http-get $"(daemon-url)/health")
    if $resp.exit_code != 0 { return null }

    # Cheap sanity check before attempting a JSON parse.
    let body = ($resp.stdout | str trim)
    if not ($body | str starts-with "{") { return null }

    let parsed = (do { $body | from json } | complete)
    if $parsed.exit_code != 0 { return null }

    # Enrich the daemon's own report with the project-side DB config view.
    $parsed.stdout | insert db_config (project-config-db-status)
}
|
# ── NCL Export (core function) ───────────────────────────────────────────────────

# Export a Nickel file to JSON via daemon (cached) with subprocess fallback.
#
# When daemon is available: POST /nickel/export → cached result.
# When daemon is unreachable: falls back to ^nickel export subprocess.
# System works identically either way — just slower without daemon.
export def --env daemon-export [
    file: string,
    --import-path: string = "",
]: nothing -> any {
    let ip = if ($import_path | is-empty) {
        nickel-import-path (project-root)
    } else {
        $import_path
    }
    if (daemon-available) {
        let via_daemon = (daemon-export-http $file $ip)
        if $via_daemon != null { return $via_daemon }
        # HTTP call failed despite health check — clear cache to re-probe next call
        $env.ONTOREF_DAEMON_AVAILABLE = ""
    }
    daemon-export-subprocess $file $ip
}
|
# Safe version: returns null on failure instead of throwing.
# Use for call sites that handle missing data gracefully (return [] or {}).
#
# Failure modes covered: missing file, daemon HTTP failure, subprocess exit
# failure, AND unparseable subprocess output (previously the final `from json`
# was unguarded and could throw, breaking the null-on-failure contract).
export def --env daemon-export-safe [
    file: string,
    --import-path: string = "",
]: nothing -> any {
    if not ($file | path exists) { return null }
    let ip = if ($import_path | is-not-empty) {
        $import_path
    } else {
        nickel-import-path (project-root)
    }
    if (daemon-available) {
        let result = (daemon-export-http $file $ip)
        if $result != null { return $result }
        # HTTP call failed despite health check — clear cache to re-probe next call
        $env.ONTOREF_DAEMON_AVAILABLE = ""
    }
    # Subprocess fallback — unlike daemon-export, every failure maps to null.
    let result = do { with-env { NICKEL_IMPORT_PATH: $ip } { ^nickel export $file } } | complete
    if $result.exit_code != 0 { return null }
    # Fix: guard the JSON parse too — zero exit with non-JSON stdout must not throw.
    let parsed = (do { $result.stdout | from json } | complete)
    if $parsed.exit_code != 0 { return null }
    $parsed.stdout
}
|
# ── Daemon HTTP calls ────────────────────────────────────────────────────────────

# POST /nickel/export and return the response's `data` field, or null on any
# failure (curl error, non-JSON response, missing `data`). Callers treat null
# as "fall back to subprocess", so this helper must never throw.
def daemon-export-http [file: string, import_path: string]: nothing -> any {
    let url = $"(daemon-url)/nickel/export"
    # Only send import_path when the caller supplied one.
    let body = if ($import_path | is-not-empty) {
        { path: $file, import_path: $import_path } | to json
    } else {
        { path: $file } | to json
    }
    let result = (http-post-json $url $body)
    if $result.exit_code != 0 { return null }
    # Fix: parse defensively — a truncated or non-JSON daemon response
    # previously threw out of this null-on-failure helper.
    let parsed = (do { $result.stdout | from json } | complete)
    if $parsed.exit_code != 0 { return null }
    let response = $parsed.stdout
    $response.data? | default null
}
|
# ── Subprocess fallback ──────────────────────────────────────────────────────────

# Export `file` with a local ^nickel subprocess. Throws (error make) on a
# non-zero nickel exit; otherwise returns the parsed JSON value.
def daemon-export-subprocess [file: string, import_path: string]: nothing -> any {
    let ip = if ($import_path | is-empty) {
        nickel-import-path (project-root)
    } else {
        $import_path
    }
    let run = do { with-env { NICKEL_IMPORT_PATH: $ip } { ^nickel export $file } } | complete
    if $run.exit_code != 0 {
        error make { msg: $"nickel export failed for ($file): ($run.stderr)" }
    }
    $run.stdout | from json
}
|
# ── Store query commands ─────────────────────────────────────────────────────────

# Query ontology nodes. Returns empty list on failure.
export def --env "store nodes" [
    --level: string = "",
]: nothing -> list {
    let core_file = $"((project-root))/.ontology/core.ncl"
    if not ($core_file | path exists) { return [] }
    let exported = (daemon-export-safe $core_file)
    if $exported == null { return [] }
    let nodes = ($exported.nodes? | default [])
    # Optional filter by hierarchy level.
    if ($level | is-empty) { return $nodes }
    $nodes | where {|node| ($node.level? | default "") == $level }
}
|
# Query ontology dimensions. Returns empty list on failure.
export def --env "store dimensions" []: nothing -> list {
    let state_file = $"((project-root))/.ontology/state.ncl"
    if not ($state_file | path exists) { return [] }
    let exported = (daemon-export-safe $state_file)
    if $exported == null { return [] }
    $exported.dimensions? | default []
}
|
# Query membranes. Returns empty list on failure.
# By default only active membranes are returned; pass --all for every one.
export def --env "store membranes" [
    --all = false,
]: nothing -> list {
    let gate_file = $"((project-root))/.ontology/gate.ncl"
    if not ($gate_file | path exists) { return [] }
    let exported = (daemon-export-safe $gate_file)
    if $exported == null { return [] }
    let membranes = ($exported.membranes? | default [])
    if $all { return $membranes }
    $membranes | where {|m| ($m.active? | default false) == true }
}
|
# ── Cache management ─────────────────────────────────────────────────────────────

# Show daemon cache statistics. Returns null (with a notice) when the daemon
# is unreachable or the stats request fails.
export def --env "store cache-stats" []: nothing -> any {
    if not (daemon-available) {
        print " daemon not available"
        return null
    }
    let resp = (http-get $"(daemon-url)/cache/stats")
    if $resp.exit_code != 0 { return null }
    $resp.stdout | from json
}
|
# Invalidate daemon cache (all entries or by prefix/file).
#
# Exactly one selector is required: --all, --prefix, or --file
# (precedence in that order if several are given).
# Returns the daemon's JSON response, or null on HTTP failure.
export def --env "store cache-invalidate" [
    --prefix: string = "",
    --file: string = "",
    --all = false,
]: nothing -> any {
    # Fix: validate arguments BEFORE probing the daemon, so a usage error is
    # reported even when the daemon is down (previously the availability check
    # masked it with " daemon not available" / null).
    if (not $all) and ($prefix | is-empty) and ($file | is-empty) {
        error make { msg: "cache-invalidate requires --all, --prefix, or --file" }
    }
    if not (daemon-available) {
        print " daemon not available"
        return null
    }
    let url = $"(daemon-url)/cache/invalidate"
    # Build the request body from whichever selector was provided.
    let body = if $all {
        { all: true } | to json
    } else if ($prefix | is-not-empty) {
        { prefix: $prefix } | to json
    } else {
        { file: $file } | to json
    }
    let result = (http-post-json $url $body)
    if $result.exit_code != 0 { return null }
    $result.stdout | from json
}
|
# ── Push-based sync ───────────────────────────────────────────────────────────────

# Export local NCL ontology to JSON and push to daemon /sync endpoint.
#
# The daemon stores the result in SurrealDB as a rebuildable projection.
# The repo (.ontology/, adrs/) remains the source of truth at all times.
# Suitable for both local and remote daemon scenarios.
export def "store sync-push" []: nothing -> any {
    if not (daemon-available) {
        error make { msg: "daemon not available — check ONTOREF_DAEMON_URL" }
    }

    let root = (project-root)
    let ip = (nickel-import-path $root)

    # Export one NCL file to a parsed value; null on any failure (missing file,
    # nickel error, unparseable output) so a partial sync still goes through.
    # NOTE(review): this nested def reads $ip from the enclosing scope — confirm
    # the pinned Nushell version captures it as intended.
    def export-ncl [file: string]: nothing -> any {
        if not ($file | path exists) { return null }
        let r = (do { ^nickel export --format json --import-path $ip $file } | complete)
        if $r.exit_code != 0 { return null }
        do { $r.stdout | from json } | complete | get -o stdout
    }

    let payload = {
        core: (export-ncl $"($root)/.ontology/core.ncl"),
        state: (export-ncl $"($root)/.ontology/state.ncl"),
        gate: (export-ncl $"($root)/.ontology/gate.ncl"),
    } | to json

    let result = (http-post-json $"(daemon-url)/sync" $payload)
    if $result.exit_code != 0 {
        error make { msg: $"sync push failed: ($result.stderr | str trim)" }
    }
    $result.stdout | from json
}