use std::net::IpAddr; use std::path::PathBuf; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::Arc; use std::time::{Instant, SystemTime, UNIX_EPOCH}; use axum::extract::{Path, Query, State}; use axum::http::StatusCode; use axum::response::IntoResponse; use axum::routing::{delete, get, post, put}; use axum::Json; use dashmap::DashMap; use serde::{Deserialize, Serialize}; use tracing::{error, warn}; use crate::actors::{ActorRegistry, ActorSessionView, RegisterRequest}; use crate::cache::NclCache; use crate::notifications::{AckRequest, NotificationStore, NotificationView}; use crate::watcher::FileWatcher; /// Fixed-window per-IP rate limiter for failed Bearer authentication attempts. /// /// Disabled automatically when the daemon binds to a loopback address — rate /// limiting is only meaningful when external clients can reach the endpoint. /// When `enabled = false`, every method is a no-op. pub struct AuthRateLimiter { enabled: bool, window_secs: u64, max_failures: u32, counts: DashMap, } impl AuthRateLimiter { pub fn new(enabled: bool) -> Self { Self { enabled, window_secs: 60, max_failures: 10, counts: DashMap::new(), } } /// Returns `true` if this IP has exceeded the failure threshold within the /// window. pub fn is_limited(&self, ip: IpAddr) -> bool { if !self.enabled { return false; } self.counts .get(&ip) .is_some_and(|e| e.1.elapsed().as_secs() < self.window_secs && e.0 >= self.max_failures) } /// Record a failed attempt. Returns `true` if the IP is now rate-limited. pub fn record_failure(&self, ip: IpAddr) -> bool { if !self.enabled { return false; } let now = Instant::now(); let mut entry = self.counts.entry(ip).or_insert((0, now)); if entry.1.elapsed().as_secs() >= self.window_secs { entry.0 = 0; entry.1 = now; } entry.0 += 1; entry.0 >= self.max_failures } /// Clear failure count after a successful authentication. 
    pub fn record_success(&self, ip: IpAddr) {
        if self.enabled {
            self.counts.remove(&ip);
        }
    }
}

/// Returns true if `s` has the format of a UUID v4 (36 chars, hyphens at
/// positions 8/13/18/23). Used to distinguish session tokens from raw passwords
/// in `check_primary_auth` without needing to attempt argon2 on token strings.
fn is_uuid_v4(s: &str) -> bool {
    // Exactly 36 bytes: 32 hex digits + 4 hyphens.
    if s.len() != 36 {
        return false;
    }
    let b = s.as_bytes();
    // Hyphens at the fixed positions; every other byte must be a hex digit.
    b[8] == b'-'
        && b[13] == b'-'
        && b[18] == b'-'
        && b[23] == b'-'
        && b.iter()
            .enumerate()
            .all(|(i, &c)| matches!(i, 8 | 13 | 18 | 23) || c.is_ascii_hexdigit())
}

/// Shared application state injected into handlers.
// NOTE(review): many generic type arguments in this struct were lost in this
// copy of the file (`Arc,`, `Option>,` `Arc>>` …) — apparently an
// angle-bracket-stripping transport. The field types below are preserved
// byte-for-byte as received; restore the parameters from version control
// before building.
#[derive(Clone)]
pub struct AppState {
    // Primary-project NCL export cache — presumably `Arc<NclCache>`; confirm.
    pub cache: Arc,
    pub project_root: PathBuf,
    pub ontoref_root: Option,
    // Daemon start time; used for `uptime_secs` in /health.
    pub started_at: Instant,
    // Unix-seconds timestamp of the last request; written by `touch_activity`.
    pub last_activity: Arc,
    pub actors: Arc,
    pub notifications: Arc,
    /// Resolved NICKEL_IMPORT_PATH for UI-initiated NCL exports.
    pub nickel_import_path: Option,
    #[cfg(feature = "db")]
    pub db: Option>,
    #[cfg(feature = "nats")]
    pub nats: Option>,
    #[cfg(feature = "ui")]
    pub tera: Option>>,
    #[cfg(feature = "ui")]
    pub public_dir: Option,
    /// Project registry. Always present — primary project is always registered
    /// under `registry.primary_slug()`. Registry and primary project auth share
    /// the same `ProjectContext` code path; `primary_keys` is gone.
    pub registry: Arc,
    /// Channel to start a file watcher for a project added at runtime.
    pub new_project_tx: Option>>>,
    /// Path to `~/.config/ontoref/` — used to persist key overrides.
    pub config_dir: Option,
    #[cfg(feature = "ui")]
    pub sessions: Arc,
    /// Current project set by `set_project` MCP tool — shared across all
    /// connections.
    #[cfg(feature = "mcp")]
    pub mcp_current_project: Arc>>,
    /// Live watchers for runtime-added registry projects.
    /// Keyed by slug; removing an entry drops the `FileWatcher` and stops
    /// watching.
    pub watcher_map: Arc>>,
    /// Per-IP failed auth rate limiter. Disabled on loopback binds.
    pub auth_rate_limiter: Arc,
    /// Per-path mutex registry for NCL file mutations (backlog, QA).
    /// Serializes concurrent read-mutate-write sequences on the same file.
    #[cfg(feature = "ui")]
    pub ncl_write_lock: Arc,
    /// Argon2id hash of the daemon-level admin password.
    /// Set via ONTOREF_ADMIN_TOKEN (inline hash) or ONTOREF_ADMIN_TOKEN_FILE
    /// (path to a file containing the hash). When present, /ui/manage/login
    /// accepts this password and creates a "_daemon" session with Role::Admin.
    #[cfg(feature = "ui")]
    pub daemon_admin_hash: Option,
}

impl AppState {
    // Stamp "now" (Unix seconds) as the last-activity time. Relaxed ordering
    // is sufficient — this is an advisory timestamp, not a synchronizer.
    fn touch_activity(&self) {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        self.last_activity.store(now, Ordering::Relaxed);
    }

    /// Check `Authorization: Bearer` against the primary project's keys.
    ///
    /// Accepts two credential forms (tried in order):
    /// 1. Session token (UUID v4) — looked up in SessionStore; O(1), cheap.
    /// 2. Raw password — verified via argon2id against project keys; O(~100ms).
    ///
    /// Returns `None` when the request should proceed, `Some(response)` for
    /// 429 (rate-limited) or 401 (bad/missing credentials).
    // NOTE(review): return type argument lost in this copy — presumably
    // `Option<axum::response::Response>`; confirm.
    pub(crate) fn check_primary_auth(
        &self,
        headers: &axum::http::HeaderMap,
        client_ip: IpAddr,
    ) -> Option {
        // Rate-limit check comes first, even before deciding whether auth is
        // enabled at all, so a limited IP consistently receives 429.
        if self.auth_rate_limiter.is_limited(client_ip) {
            return Some(
                (
                    StatusCode::TOO_MANY_REQUESTS,
                    Json(serde_json::json!({
                        "error": "too many failed auth attempts",
                        "retry_after_secs": 60
                    })),
                )
                    .into_response(),
            );
        }
        let primary = self.registry.primary();
        // No keys configured for the primary project — everything is open.
        if !primary.auth_enabled() {
            return None;
        }
        let Some(bearer) = extract_bearer(headers) else {
            return Some(
                (
                    StatusCode::UNAUTHORIZED,
                    Json(serde_json::json!({ "error": "Authorization: Bearer required" })),
                )
                    .into_response(),
            );
        };
        // Try session token first (UUID v4 format).
        #[cfg(feature = "ui")]
        if is_uuid_v4(bearer) {
            if let Some(entry) = self.sessions.get(bearer) {
                // A session is accepted if it belongs to the primary project or
                // is the daemon-level admin session ("_daemon").
                if entry.slug == primary.slug || entry.slug == "_daemon" {
                    self.auth_rate_limiter.record_success(client_ip);
                    return None;
                }
            }
            // UUID format but not a valid session — fall through to password
            // check.
        }
        // Password verification (backward-compatible).
        if primary.verify_key(bearer).is_some() {
            self.auth_rate_limiter.record_success(client_ip);
            None
        } else {
            self.auth_rate_limiter.record_failure(client_ip);
            Some(
                (
                    StatusCode::UNAUTHORIZED,
                    Json(serde_json::json!({"error": "invalid credentials"})),
                )
                    .into_response(),
            )
        }
    }

    /// The slug of the primary (local) project.
    pub fn default_project_name(&self) -> String {
        self.registry.primary_slug().to_string()
    }
}

/// Validate a Bearer session token and return the `SessionEntry`.
///
/// Only accepts UUID v4 tokens (not raw passwords). Returns a 401 response on
/// any failure so callers can propagate directly with `?`.
// NOTE(review): return type arguments lost in this copy — presumably
// `Result<SessionEntry, Box<axum::response::Response>>` given the doc comment
// and the `Box::new((...).into_response())` error arms; confirm.
#[cfg(feature = "ui")]
fn require_session(
    state: &AppState,
    headers: &axum::http::HeaderMap,
) -> Result> {
    let Some(bearer) = extract_bearer(headers) else {
        return Err(Box::new(
            (
                StatusCode::UNAUTHORIZED,
                Json(
                    serde_json::json!({"error": "Authorization: Bearer required"}),
                ),
            )
                .into_response(),
        ));
    };
    // Raw passwords are rejected here by design — only session tokens.
    if !is_uuid_v4(bearer) {
        return Err(Box::new(
            (
                StatusCode::UNAUTHORIZED,
                Json(serde_json::json!({"error": "session token required; use POST /sessions to obtain one"})),
            )
                .into_response(),
        ));
    }
    state.sessions.get(bearer).ok_or_else(|| {
        Box::new(
            (
                StatusCode::UNAUTHORIZED,
                Json(serde_json::json!({"error": "invalid or expired session"})),
            )
                .into_response(),
        )
    })
}

/// Extract the raw password from `Authorization: Bearer `.
pub(crate) fn extract_bearer(headers: &axum::http::HeaderMap) -> Option<&str> { headers .get(axum::http::header::AUTHORIZATION) .and_then(|v| v.to_str().ok()) .and_then(|v| v.strip_prefix("Bearer ")) .map(str::trim) .filter(|s| !s.is_empty()) } pub fn router(state: AppState) -> axum::Router { let app = axum::Router::new() // Existing endpoints .route("/health", get(health)) .route("/nickel/export", post(nickel_export)) .route("/cache/stats", get(cache_stats)) .route("/cache/invalidate", post(cache_invalidate)) // Actor endpoints .route("/actors/register", post(actor_register)) .route("/actors/{token}", delete(actor_deregister)) .route("/actors/{token}/touch", post(actor_touch)) .route("/actors/{token}/profile", post(actor_update_profile)) .route("/actors", get(actors_list)) // Notification endpoints .route("/notifications/pending", get(notifications_pending)) .route("/notifications/ack", post(notifications_ack)) // SSE push stream — actor subscribes once instead of polling. .route("/notifications/stream", get(notifications_stream)) // Git hook endpoint — actor signs a file-change it caused. .route("/ontology/changed", post(ontology_changed)) // Search endpoint .route("/search", get(search)) // Describe endpoints .route("/describe/project", get(describe_project)) .route("/describe/capabilities", get(describe_capabilities)) .route("/describe/connections", get(describe_connections)) .route("/describe/actor-init", get(describe_actor_init)) // Backlog JSON endpoint .route("/backlog-json", get(backlog_json)) // Q&A read endpoint .route("/qa-json", get(qa_json)) // Push-based sync endpoint: projects export their NCL and POST here. // Body limited to 4 MiB — ontology payloads are small; larger bodies are rejected. // Auth: Bearer token verified against project keys (see sync.rs). .route( "/sync", post(crate::sync::sync_push) .layer(axum::extract::DefaultBodyLimit::max(4 * 1024 * 1024)), ) // Runtime key rotation for registered projects. 
// Requires Bearer token with admin role (or no auth if project has no keys yet). .route("/projects/{slug}/keys", put(project_update_keys)) // Project registry management. .route("/projects", get(projects_list).post(project_add)) .route("/projects/{slug}", delete(project_delete)); // Session endpoints — gated on ui feature (requires SessionStore). #[cfg(feature = "ui")] let app = app .route("/sessions", post(session_create).get(sessions_list)) .route("/sessions/{id}", delete(session_revoke)); // Gate the mutation endpoint behind the ui feature (requires crate::ui). #[cfg(feature = "ui")] let app = app .route("/qa/add", post(crate::ui::handlers::qa_add)) .route("/qa/delete", post(crate::ui::handlers::qa_delete)) .route("/qa/update", post(crate::ui::handlers::qa_update)); let app = app.with_state(state.clone()); #[cfg(feature = "ui")] let app = { use axum::response::Redirect; use tower_http::services::ServeDir; let app = app .route("/ui", get(|| async { Redirect::permanent("/ui/") })) .nest("/ui/", crate::ui::router(state.clone())); if let Some(ref public_dir) = state.public_dir { app.nest_service("/public", ServeDir::new(public_dir)) } else { app } }; // MCP streamable-HTTP endpoint — stateless per-request factory. 
#[cfg(feature = "mcp")] let app = { use rmcp::transport::streamable_http_server::{ session::local::LocalSessionManager, StreamableHttpServerConfig, StreamableHttpService, }; let mcp_state = state.clone(); let mcp_svc = StreamableHttpService::new( move || Ok(crate::mcp::OntoreServer::new(mcp_state.clone())), std::sync::Arc::new(LocalSessionManager::default()), StreamableHttpServerConfig::default(), ); app.nest_service("/mcp", mcp_svc) }; app } // ── Health ────────────────────────────────────────────────────────────── #[derive(Serialize)] struct HealthResponse { status: &'static str, uptime_secs: u64, cache_entries: usize, cache_hits: u64, cache_misses: u64, project_root: String, active_actors: usize, ontology_version: u64, #[serde(skip_serializing_if = "Option::is_none")] db_enabled: Option, } async fn health(State(state): State) -> Json { state.touch_activity(); let db_enabled = { #[cfg(feature = "db")] { Some(state.db.is_some()) } #[cfg(not(feature = "db"))] { None } }; Json(HealthResponse { status: "ok", uptime_secs: state.started_at.elapsed().as_secs(), cache_entries: state.cache.len(), cache_hits: state.cache.hit_count(), cache_misses: state.cache.miss_count(), project_root: state.project_root.display().to_string(), active_actors: state.actors.count(), ontology_version: state .registry .primary() .ontology_version .load(Ordering::Acquire), db_enabled, }) } // ── Nickel Export ─────────────────────────────────────────────────────── #[derive(Deserialize)] struct ExportRequest { path: String, import_path: Option, } #[derive(Serialize)] struct ExportResponse { data: serde_json::Value, cached: bool, elapsed_ms: u64, } async fn nickel_export( State(state): State, headers: axum::http::HeaderMap, axum::extract::ConnectInfo(addr): axum::extract::ConnectInfo, Json(req): Json, ) -> axum::response::Response { state.touch_activity(); if let Some(resp) = state.check_primary_auth(&headers, addr.ip()) { return resp; } let start = Instant::now(); // Accept absolute paths — 
daemon is loopback-only; OS permissions are the // boundary. Relative paths are still resolved against project_root for // backward compatibility. let file_path = match resolve_any_path(&state.project_root, &req.path) { Ok(p) => p, Err((status, msg)) => { return (status, Json(serde_json::json!({"error": msg}))).into_response() } }; let inherited_ip = std::env::var("NICKEL_IMPORT_PATH").unwrap_or_default(); let merged_ip: Option = match req.import_path.as_deref() { Some(caller_ip) => { if inherited_ip.is_empty() { Some(caller_ip.to_string()) } else { Some(format!("{caller_ip}:{inherited_ip}")) } } None => { if inherited_ip.is_empty() { None } else { Some(inherited_ip) } } }; let (data, was_hit) = match state.cache.export(&file_path, merged_ip.as_deref()).await { Ok(r) => r, Err(e) => { error!(path = %file_path.display(), error = %e, "nickel export failed"); return ( StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({"error": e.to_string()})), ) .into_response(); } }; Json(ExportResponse { data, cached: was_hit, elapsed_ms: start.elapsed().as_millis() as u64, }) .into_response() } // ── Cache Management ──────────────────────────────────────────────────── #[derive(Serialize)] struct CacheStatsResponse { entries: usize, hits: u64, misses: u64, hit_rate: f64, } async fn cache_stats(State(state): State) -> Json { state.touch_activity(); let hits = state.cache.hit_count(); let misses = state.cache.miss_count(); let total = hits + misses; Json(CacheStatsResponse { entries: state.cache.len(), hits, misses, hit_rate: if total > 0 { hits as f64 / total as f64 } else { 0.0 }, }) } #[derive(Deserialize)] struct InvalidateRequest { /// Optional project slug. When present, invalidates the registry project's /// cache instead of the primary project cache. 
slug: Option, prefix: Option, file: Option, all: Option, } #[derive(Serialize)] struct InvalidateResponse { invalidated: bool, entries_remaining: usize, } async fn cache_invalidate( State(state): State, headers: axum::http::HeaderMap, axum::extract::ConnectInfo(addr): axum::extract::ConnectInfo, Json(req): Json, ) -> std::result::Result, (StatusCode, String)> { state.touch_activity(); if let Some(resp) = state.check_primary_auth(&headers, addr.ip()) { let status = resp.status(); return Err((status, "authentication required".to_string())); } // Resolve which cache to operate on. let cache = match req.slug.as_deref() { Some(slug) => { let ctx = state.registry.get(slug).ok_or_else(|| { ( StatusCode::NOT_FOUND, format!("project '{slug}' not registered"), ) })?; ctx.cache.clone() } None => state.cache.clone(), }; let project_root = match req.slug.as_deref() { Some(slug) => state .registry .get(slug) .map(|ctx| ctx.root.clone()) .unwrap_or_else(|| state.project_root.clone()), None => state.project_root.clone(), }; if req.all.unwrap_or(false) { cache.invalidate_all(); } else if let Some(prefix) = &req.prefix { let path = resolve_path(&project_root, prefix)?; cache.invalidate_prefix(&path); } else if let Some(file) = &req.file { let path = resolve_path(&project_root, file)?; cache.invalidate_file(&path); } else { return Err(( StatusCode::BAD_REQUEST, "at least one of 'all', 'prefix', or 'file' must be specified".to_string(), )); } Ok(Json(InvalidateResponse { invalidated: true, entries_remaining: cache.len(), })) } // ── Actor Endpoints ───────────────────────────────────────────────────── #[derive(Serialize)] struct RegisterResponse { token: String, actors_connected: usize, } async fn actor_register( State(state): State, Json(req): Json, ) -> (StatusCode, Json) { state.touch_activity(); #[cfg(feature = "nats")] let actor_type = req.actor_type.clone(); #[cfg(feature = "nats")] let project = req.project.clone(); let token = state.actors.register(req); let count = 
state.actors.count(); #[cfg(feature = "nats")] { if let Some(ref nats) = state.nats { if let Err(e) = nats .publish_actor_registered(&token, &actor_type, &project) .await { tracing::warn!(error = %e, "failed to publish actor.registered event"); } } } ( StatusCode::CREATED, Json(RegisterResponse { token, actors_connected: count, }), ) } async fn actor_deregister(State(state): State, Path(token): Path) -> StatusCode { state.touch_activity(); if state.actors.deregister(&token) { #[cfg(feature = "nats")] { if let Some(ref nats) = state.nats { if let Err(e) = nats.publish_actor_deregistered(&token, "explicit").await { tracing::warn!(error = %e, "failed to publish actor.deregistered event"); } } } StatusCode::NO_CONTENT } else { StatusCode::NOT_FOUND } } async fn actor_touch(State(state): State, Path(token): Path) -> StatusCode { state.touch_activity(); if state.actors.touch(&token) { StatusCode::NO_CONTENT } else { StatusCode::NOT_FOUND } } #[derive(Deserialize)] struct ProfileRequest { #[serde(default)] role: Option, #[serde(default)] preferences: Option, } async fn actor_update_profile( State(state): State, Path(token): Path, Json(req): Json, ) -> StatusCode { state.touch_activity(); if state .actors .update_profile(&token, req.role, req.preferences) { StatusCode::NO_CONTENT } else { StatusCode::NOT_FOUND } } #[derive(Serialize)] struct ActorsListResponse { actors: Vec, total: usize, } #[derive(Serialize)] struct ActorEntry { token: String, #[serde(flatten)] session: ActorSessionView, } #[derive(Deserialize)] struct ActorsQuery { project: Option, } async fn actors_list( State(state): State, Query(query): Query, ) -> Json { state.touch_activity(); let entries = match query.project { Some(ref project) => state.actors.list_for_project(project), None => state.actors.list(), }; let total = entries.len(); let actors = entries .into_iter() .map(|(token, session)| ActorEntry { token, session }) .collect(); Json(ActorsListResponse { actors, total }) } // ── Notification 
Endpoints ────────────────────────────────────────────── #[derive(Deserialize)] struct PendingQuery { token: String, project: Option, #[serde(default)] check_only: bool, } #[derive(Serialize)] struct PendingResponse { pending: usize, #[serde(skip_serializing_if = "Option::is_none")] notifications: Option>, } async fn notifications_pending( State(state): State, Query(query): Query, ) -> impl IntoResponse { state.touch_activity(); if state.actors.get_and_touch(&query.token).is_none() { return ( StatusCode::UNAUTHORIZED, Json(serde_json::json!({"error": "unknown or expired actor token"})), ) .into_response(); } let project = query .project .unwrap_or_else(|| state.default_project_name()); if query.check_only { let count = state.notifications.pending_count(&project, &query.token); Json(PendingResponse { pending: count, notifications: None, }) .into_response() } else { let notifications = state.notifications.pending(&project, &query.token); let count = notifications.len(); Json(PendingResponse { pending: count, notifications: Some(notifications), }) .into_response() } } #[derive(Serialize)] struct AckResponse { acknowledged: usize, } async fn notifications_ack( State(state): State, Json(req): Json, ) -> std::result::Result, (StatusCode, String)> { state.touch_activity(); state.actors.touch(&req.token); let project = req.project.unwrap_or_else(|| state.default_project_name()); let count = if req.all { let acked = state.notifications.ack_all(&project, &req.token); state.actors.clear_pending(&req.token); acked } else if let Some(id) = req.notification_id { if state.notifications.ack_one(&project, &req.token, id) { 1 } else { 0 } } else { return Err(( StatusCode::BAD_REQUEST, "either 'all: true' or 'notification_id' must be specified".to_string(), )); }; Ok(Json(AckResponse { acknowledged: count, })) } // ── SSE Notification Stream ─────────────────────────────────────────────── /// Query params for `GET /notifications/stream`. 
#[derive(Deserialize)] struct StreamQuery { /// Actor token — used to scope the stream to this actor's project. token: String, /// Optional project slug override. Defaults to the actor's project. project: Option, } /// `GET /notifications/stream?token=[&project=]` /// /// Server-Sent Events stream. Each event is a JSON-serialized /// `NotificationView`. Clients receive push notifications without polling. /// Reconnects automatically pick up new events (no replay of missed events — /// use `/notifications/pending` for that). async fn notifications_stream( State(state): State, Query(params): Query, ) -> axum::response::Response { use axum::response::sse::{Event, KeepAlive}; use axum::response::Sse; use tokio_stream::wrappers::BroadcastStream; use tokio_stream::StreamExt; let session = match state.actors.get_and_touch(¶ms.token) { Some(s) => s, None => { return ( StatusCode::UNAUTHORIZED, Json(serde_json::json!({"error": "unknown or expired actor token"})), ) .into_response(); } }; // Resolve which notification store to subscribe to. let notifications = if let Some(slug) = ¶ms.project { state .registry .get(slug) .map(|ctx| Arc::clone(&ctx.notifications)) .unwrap_or_else(|| Arc::clone(&state.notifications)) } else { // Derive project from the actor session — reuse the snapshot from // get_and_touch. let actor_project = Some(session.project.clone()); actor_project .as_deref() .and_then(|p| state.registry.get(p)) .map(|ctx| Arc::clone(&ctx.notifications)) .unwrap_or_else(|| Arc::clone(&state.notifications)) }; let rx = notifications.subscribe(); let stream = BroadcastStream::new(rx).filter_map(|result| match result { Ok(view) => { let json = serde_json::to_string(&view).unwrap_or_default(); Some(Ok::<_, std::convert::Infallible>( Event::default().event("notification").data(json), )) } // Lagged — receiver missed events; client should call /notifications/pending to catch up. 
Err(tokio_stream::wrappers::errors::BroadcastStreamRecvError::Lagged(n)) => { let msg = format!("{{\"lagged\":{n}}}"); Some(Ok(Event::default().event("lagged").data(msg))) } }); Sse::new(stream) .keep_alive(KeepAlive::default()) .into_response() } // ── Git Hook — Ontology Changed ─────────────────────────────────────────── /// Body for `POST /ontology/changed` — sent by a git hook after NCL files /// change. #[derive(Deserialize)] struct OntologyChangedRequest { /// Actor token that caused the change (from `ONTOREF_TOKEN` env var). token: String, /// Relative file paths that changed (e.g. `[".ontology/core.ncl"]`). files: Vec, /// Project slug. Defaults to the actor's registered project. #[serde(default)] project: Option, } /// `POST /ontology/changed` — actor-attributed file-change notification. /// /// Called by git hooks (post-merge, post-commit) so the daemon knows *who* /// caused the change. Creates a notification with `source_actor` set, enabling /// multi-actor coordination UIs to display attribution. async fn ontology_changed( State(state): State, Json(req): Json, ) -> impl IntoResponse { // Reject unknown tokens — git hook attribution requires a registered actor. let actor_project = match state.actors.get(&req.token) { Some(s) => s.project.clone(), None => { return ( StatusCode::UNAUTHORIZED, Json(serde_json::json!({"error": "unknown or expired actor token"})), ) .into_response(); } }; // Resolve project and notification store from the actor token. let (project, notifications) = { let project = match &req.project { // If the caller explicitly names a project, it must match the // token's registered project — prevents cross-project injection. 
Some(req_project) if req_project != &actor_project => { return ( StatusCode::FORBIDDEN, Json(serde_json::json!({ "error": format!( "token is registered to project '{}', not '{req_project}'", actor_project ) })), ) .into_response(); } Some(req_project) => req_project.clone(), None => actor_project, }; let store = state .registry .get(&project) .map(|ctx| Arc::clone(&ctx.notifications)) .unwrap_or_else(|| Arc::clone(&state.notifications)); (project, store) }; if req.files.is_empty() { return ( StatusCode::BAD_REQUEST, Json(serde_json::json!({"error": "files must not be empty"})), ) .into_response(); } let ids = notifications.push(&project, req.files, Some(req.token.clone())); // Increment pending for all actors in the project except the source actor. if !ids.is_empty() { for token in state.actors.tokens_for_project(&project) { if token != req.token { state.actors.increment_pending(&token); } } } ( StatusCode::OK, Json(serde_json::json!({ "project": project, "notifications_created": ids.len(), "source_actor": req.token, })), ) .into_response() } // ── Search ─────────────────────────────────────────────────────────────── #[derive(Deserialize)] struct SearchQuery { q: Option, #[cfg(feature = "ui")] slug: Option, } #[derive(Serialize)] struct SearchResponse { query: String, results: Vec, } async fn search( State(state): State, Query(params): Query, ) -> impl IntoResponse { let q = params.q.as_deref().unwrap_or("").trim().to_string(); if q.is_empty() { return Json(SearchResponse { query: q, results: vec![], }); } // In multi-project mode, resolve context from slug; fall back to primary // project. 
#[cfg(feature = "ui")] let results = { if let Some(slug) = params.slug.as_deref() { if let Some(ctx) = state.registry.get(slug) { crate::search::search_project(&ctx.root, &ctx.cache, ctx.import_path.as_deref(), &q) .await } else { vec![] } } else { crate::search::search_project( &state.project_root, &state.cache, state.nickel_import_path.as_deref(), &q, ) .await } }; #[cfg(not(feature = "ui"))] let results = crate::search::search_project( &state.project_root, &state.cache, state.nickel_import_path.as_deref(), &q, ) .await; Json(SearchResponse { query: q, results }) } // ── Describe Endpoints ─────────────────────────────────────────────────── #[derive(Deserialize)] struct DescribeQuery { slug: Option, } #[derive(Deserialize)] struct ActorInitQuery { actor: Option, slug: Option, } /// Resolve project context from an optional slug. /// Falls back to the primary project when slug is absent or not found in /// registry. #[cfg(feature = "ui")] fn resolve_project_ctx( state: &AppState, slug: Option<&str>, ) -> (PathBuf, Arc, Option) { if let Some(s) = slug { if let Some(ctx) = state.registry.get(s) { return (ctx.root.clone(), ctx.cache.clone(), ctx.import_path.clone()); } } ( state.project_root.clone(), state.cache.clone(), state.nickel_import_path.clone(), ) } #[cfg(not(feature = "ui"))] fn resolve_project_ctx( state: &AppState, _slug: Option<&str>, ) -> (PathBuf, Arc, Option) { ( state.project_root.clone(), state.cache.clone(), state.nickel_import_path.clone(), ) } async fn describe_project( State(state): State, Query(q): Query, ) -> impl IntoResponse { state.touch_activity(); let (root, cache, import_path) = resolve_project_ctx(&state, q.slug.as_deref()); let path = root.join(".ontology").join("core.ncl"); if !path.exists() { return ( StatusCode::NOT_FOUND, Json(serde_json::json!({ "error": "core.ncl not found" })), ); } match cache.export(&path, import_path.as_deref()).await { Ok((data, _)) => (StatusCode::OK, Json(data)), Err(e) => { error!(path = %path.display(), 
error = %e, "describe_project export failed"); ( StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({ "error": e.to_string() })), ) } } } async fn describe_connections( State(state): State, Query(q): Query, ) -> impl IntoResponse { state.touch_activity(); let (root, cache, import_path) = resolve_project_ctx(&state, q.slug.as_deref()); let path = root.join(".ontology").join("connections.ncl"); if !path.exists() { return ( StatusCode::NOT_FOUND, Json(serde_json::json!({ "error": "connections.ncl not found" })), ); } match cache.export(&path, import_path.as_deref()).await { Ok((data, _)) => (StatusCode::OK, Json(data)), Err(e) => { error!(path = %path.display(), error = %e, "describe_connections export failed"); ( StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({ "error": e.to_string() })), ) } } } async fn describe_capabilities( State(state): State, Query(q): Query, ) -> impl IntoResponse { state.touch_activity(); let (root, cache, import_path) = resolve_project_ctx(&state, q.slug.as_deref()); let modes_dir = root.join("reflection").join("modes"); let adrs_dir = root.join("adrs"); let forms_dir = root.join("reflection").join("forms"); // Modes: export each NCL for id + trigger let mut modes: Vec = Vec::new(); if let Ok(entries) = std::fs::read_dir(&modes_dir) { for entry in entries.flatten() { let path = entry.path(); if path.extension().and_then(|e| e.to_str()) != Some("ncl") { continue; } let stem = path .file_stem() .and_then(|s| s.to_str()) .unwrap_or("") .to_string(); match cache.export(&path, import_path.as_deref()).await { Ok((json, _)) => { let id = json .get("id") .and_then(|v| v.as_str()) .unwrap_or(&stem) .to_string(); let trigger = json .get("trigger") .and_then(|v| v.as_str()) .unwrap_or("") .to_string(); modes.push(serde_json::json!({ "id": id, "trigger": trigger })); } Err(e) => { warn!(path = %path.display(), error = %e, "capabilities: mode export failed"); modes.push(serde_json::json!({ "id": stem, "trigger": "" })); } } } } 
modes.sort_by_key(|v| v["id"].as_str().unwrap_or("").to_string()); // ADRs: count only let adr_count = std::fs::read_dir(&adrs_dir) .map(|rd| { rd.flatten() .filter(|e| e.path().extension().and_then(|x| x.to_str()) == Some("ncl")) .count() }) .unwrap_or(0); // Forms: list file stems let mut forms: Vec = std::fs::read_dir(&forms_dir) .map(|rd| { rd.flatten() .filter_map(|e| { let p = e.path(); if p.extension().and_then(|x| x.to_str()) != Some("ncl") { return None; } p.file_stem().and_then(|s| s.to_str()).map(str::to_string) }) .collect() }) .unwrap_or_default(); forms.sort(); let mode_count = modes.len(); let form_count = forms.len(); ( StatusCode::OK, Json(serde_json::json!({ "modes": modes, "adrs": adr_count, "forms": forms, "mode_count": mode_count, "adr_count": adr_count, "form_count": form_count, })), ) } async fn describe_actor_init( State(state): State, Query(q): Query, ) -> impl IntoResponse { state.touch_activity(); let (root, cache, import_path) = resolve_project_ctx(&state, q.slug.as_deref()); let actor = q.actor.as_deref().unwrap_or("agent"); let config_path = root.join(".ontoref").join("config.ncl"); if !config_path.exists() { return ( StatusCode::OK, Json(serde_json::json!({ "mode": "", "auto_run": false })), ); } match cache.export(&config_path, import_path.as_deref()).await { Ok((json, _)) => { let entry = json .get("actor_init") .and_then(|v| v.as_array()) .and_then(|arr| { arr.iter() .find(|e| e.get("actor").and_then(|v| v.as_str()) == Some(actor)) }) .cloned(); let result = match entry { Some(e) => { let mode = e .get("mode") .and_then(|v| v.as_str()) .unwrap_or("") .to_string(); let auto_run = e.get("auto_run").and_then(|v| v.as_bool()).unwrap_or(false); serde_json::json!({ "mode": mode, "auto_run": auto_run }) } None => serde_json::json!({ "mode": "", "auto_run": false }), }; (StatusCode::OK, Json(result)) } Err(e) => { warn!(path = %config_path.display(), error = %e, "describe_actor_init export failed"); ( StatusCode::OK, 
Json(serde_json::json!({ "mode": "", "auto_run": false })), ) } } } async fn backlog_json( State(state): State, Query(q): Query, ) -> impl IntoResponse { state.touch_activity(); let (root, cache, import_path) = resolve_project_ctx(&state, q.slug.as_deref()); let backlog_path = root.join("reflection").join("backlog.ncl"); if !backlog_path.exists() { return (StatusCode::OK, Json(serde_json::json!([]))); } match cache.export(&backlog_path, import_path.as_deref()).await { Ok((json, _)) => { let items = json .get("items") .and_then(|v| v.as_array()) .cloned() .unwrap_or_default(); (StatusCode::OK, Json(serde_json::Value::Array(items))) } Err(e) => { error!(path = %backlog_path.display(), error = %e, "backlog_json export failed"); ( StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({ "error": e.to_string() })), ) } } } // ── Q&A endpoints ─────────────────────────────────────────────────────── async fn qa_json( State(state): State, Query(q): Query, ) -> impl IntoResponse { state.touch_activity(); let (root, cache, import_path) = resolve_project_ctx(&state, q.slug.as_deref()); let qa_path = root.join("reflection").join("qa.ncl"); if !qa_path.exists() { return (StatusCode::OK, Json(serde_json::json!([]))); } match cache.export(&qa_path, import_path.as_deref()).await { Ok((json, _)) => { let entries = json .get("entries") .and_then(|v| v.as_array()) .cloned() .unwrap_or_default(); (StatusCode::OK, Json(serde_json::Value::Array(entries))) } Err(e) => { error!(path = %qa_path.display(), error = %e, "qa_json export failed"); ( StatusCode::INTERNAL_SERVER_ERROR, Json(serde_json::json!({ "error": e.to_string() })), ) } } } // ── Project Registry Endpoints ─────────────────────────────────────────────── #[derive(Serialize)] struct ProjectView { slug: String, root: String, push_only: bool, remote_url: String, auth_enabled: bool, /// Monotonically increasing counter — incremented after each successful /// seed. Clients compare this value to detect stale local state. 
// ── Review notes (doc-only pass; code tokens below are unchanged) ────────────
// Closes `ProjectView`, then: `projects_list`, `validate_slug`, `project_add`,
// `project_delete`, the key-rotation request/response structs, and the head of
// `project_update_keys`.
    ontology_version: u64,
}

/// `GET /projects` — snapshot every registered project into a `ProjectView`.
async fn projects_list(State(state): State) -> impl IntoResponse {
    // Redundant with the file-level `Ordering` import, but harmless.
    use std::sync::atomic::Ordering;
    state.touch_activity();
    let projects: Vec = state
        .registry
        .all()
        .into_iter()
        .map(|ctx| ProjectView {
            slug: ctx.slug.clone(),
            root: ctx.root.display().to_string(),
            push_only: ctx.push_only,
            remote_url: ctx.remote_url.clone(),
            auth_enabled: ctx.auth_enabled(),
            // Acquire pairs with the Release store done by the seeder —
            // presumably; TODO confirm the writer's ordering.
            ontology_version: ctx.ontology_version.load(Ordering::Acquire),
        })
        .collect();
    Json(serde_json::json!({ "projects": projects, "total": projects.len() }))
}

/// Validate that a project slug contains only alphanumeric characters, `-`, or
/// `_`. Rejects slugs with `/`, `.`, null bytes, or other path-significant
/// characters.
fn validate_slug(slug: &str) -> std::result::Result<(), (StatusCode, String)> {
    if slug.is_empty() {
        return Err((StatusCode::BAD_REQUEST, "slug must not be empty".into()));
    }
    if slug.len() > 64 {
        return Err((
            StatusCode::BAD_REQUEST,
            "slug must not exceed 64 characters".into(),
        ));
    }
    let valid = slug
        .chars()
        .all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_');
    if !valid {
        return Err((
            StatusCode::BAD_REQUEST,
            format!("slug '{slug}' contains invalid characters — only [a-zA-Z0-9_-] are allowed"),
        ));
    }
    Ok(())
}

/// `POST /projects` — register a project at runtime.
/// 400 on invalid slug, 409 when the registry rejects (duplicate), 201 on
/// success; also kicks the runtime watcher task (below).
async fn project_add(
    State(state): State,
    Json(entry): Json,
) -> impl IntoResponse {
    state.touch_activity();
    if let Err(e) = validate_slug(&entry.slug) {
        return e.into_response();
    }
    let registry = &state.registry;
    let slug = entry.slug.clone();
    if let Err(e) = registry.add_project(entry) {
        return (
            StatusCode::CONFLICT,
            Json(serde_json::json!({"error": e.to_string()})),
        )
            .into_response();
    }
    // Notify the runtime watcher task to start watching the new project.
    // Best-effort: the send result is ignored because the watcher task may not
    // be running (new_project_tx is Optional on AppState).
    if let Some(ctx) = registry.get(&slug) {
        if let Some(ref tx) = state.new_project_tx {
            let _ = tx.send(ctx);
        }
    }
    (
        StatusCode::CREATED,
        Json(serde_json::json!({"slug": slug, "registered": true})),
    )
        .into_response()
}

/// `DELETE /projects/{slug}` — deregister a project. 404 if unknown; on
/// success also tears down the project's file watcher; 204.
async fn project_delete(
    State(state): State,
    Path(slug): Path,
) -> impl IntoResponse {
    state.touch_activity();
    let registry = &state.registry;
    if registry.get(&slug).is_none() {
        return (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({"error": format!("project '{slug}' not registered")})),
        )
            .into_response();
    }
    registry.remove_project(&slug);
    // Abort and drop the file watcher for this project so the OS watch
    // handle is released and the debounce task doesn't keep running.
    if let Some(watcher) = state.watcher_map.lock().await.remove(&slug) {
        watcher.abort();
    }
    StatusCode::NO_CONTENT.into_response()
}

// ── Project Key Rotation ─────────────────────────────────────────────────────

#[derive(Deserialize)]
struct UpdateKeysRequest {
    // Full replacement key set; NOTE(review): element type parameter was
    // stripped from this text (`Vec`), presumably `KeyEntry` — confirm.
    keys: Vec,
}

#[derive(Serialize)]
struct UpdateKeysResponse {
    slug: String,
    keys_active: usize,
}

/// `PUT /projects/{slug}/keys` — replace the key set for a registered project.
///
/// Auth:
/// - If the project has existing keys, requires `Authorization: Bearer
/// ` with **admin** role. This prevents a viewer or an attacker
/// from rotating keys.
/// - If the project has no keys yet (bootstrap case), the request is accepted
/// without credentials — the daemon is loopback-only, so OS-level access
/// controls apply.
async fn project_update_keys(
    State(state): State,
    headers: axum::http::HeaderMap,
    Path(slug): Path,
    Json(req): Json,
) -> impl IntoResponse {
    let registry = &state.registry;
    let ctx = match registry.get(&slug) {
        Some(c) => c,
        None => {
            return (
                StatusCode::NOT_FOUND,
                Json(serde_json::json!({"error": format!("project '{slug}' not registered")})),
            )
                .into_response();
        }
    };
    // If the project already has keys, require admin credentials before allowing
    // rotation.
// ── Review notes (doc-only pass; code tokens below are unchanged) ────────────
// Body of `project_update_keys`: Bearer/role check, key replacement, session
// invalidation, overlay persistence. Then the session-creation request types
// and the head of `session_create`.
    if ctx.auth_enabled() {
        // Extract a non-empty Bearer credential; missing → 401, Viewer → 403,
        // unverifiable → 401. Only Admin falls through to rotation.
        let bearer = headers
            .get(axum::http::header::AUTHORIZATION)
            .and_then(|v| v.to_str().ok())
            .and_then(|v| v.strip_prefix("Bearer "))
            .map(str::trim)
            .filter(|s| !s.is_empty());
        match bearer {
            None => {
                return (
                    StatusCode::UNAUTHORIZED,
                    Json(serde_json::json!({"error": "Authorization: Bearer required"})),
                )
                    .into_response();
            }
            Some(password) => match ctx.verify_key(password).map(|m| m.role) {
                Some(crate::registry::Role::Admin) => {}
                Some(crate::registry::Role::Viewer) => {
                    return (
                        StatusCode::FORBIDDEN,
                        Json(serde_json::json!({"error": "admin role required to rotate keys"})),
                    )
                        .into_response();
                }
                None => {
                    return (
                        StatusCode::UNAUTHORIZED,
                        Json(serde_json::json!({"error": "invalid credentials"})),
                    )
                        .into_response();
                }
            },
        }
    }
    let keys_active = ctx.set_keys(req.keys);
    // Invalidate all active actor sessions.
    let evicted_actors = ctx.actors.deregister_all_for_project(&slug);
    if evicted_actors > 0 {
        tracing::info!(slug = %slug, evicted = evicted_actors, "actor sessions invalidated after key rotation");
    }
    // Invalidate all UI/API sessions for this project — they authenticated
    // against the old key set and must re-authenticate after rotation.
    #[cfg(feature = "ui")]
    {
        let evicted_sessions = state.sessions.revoke_all_for_slug(&slug);
        if evicted_sessions > 0 {
            tracing::info!(slug = %slug, evicted = evicted_sessions, "UI sessions invalidated after key rotation");
        }
    }
    // Persist keys to keys-overlay.json so they survive daemon restart.
    if let Some(ref config_dir) = state.config_dir {
        persist_keys_overlay(config_dir, &state.registry);
    }
    (
        StatusCode::OK,
        Json(UpdateKeysResponse { slug, keys_active }),
    )
        .into_response()
}

/// Exchange a key for a session token.
///
/// Accepts project keys (looked up by slug) or the daemon admin password.
/// Returns a short-lived token suitable for use as `Authorization: Bearer
/// ` or as a browser session cookie (set by the UI login flow).
///
/// Rate-limited: repeated failures from the same IP exhaust the same budget as
/// failed Bearer attempts on all other endpoints.
#[cfg(feature = "ui")]
#[derive(Deserialize)]
struct SessionRequest {
    /// Raw (unhashed) key to verify.
    key: String,
    /// Project slug to authenticate against. `"_daemon"` or absent → daemon
    /// admin.
    project: Option,
}

#[cfg(feature = "ui")]
#[derive(Serialize)]
struct SessionResponse {
    token: String,
    role: String,
    key_label: String,
    project: String,
    // Unix seconds. NOTE(review): computed in the handler as now + 30 days —
    // confirm this matches the SessionStore's actual TTL.
    expires_at: u64,
}

#[cfg(feature = "ui")]
async fn session_create(
    State(state): State,
    axum::extract::ConnectInfo(addr): axum::extract::ConnectInfo,
    Json(req): Json,
) -> impl IntoResponse {
    use std::time::{SystemTime, UNIX_EPOCH};
    let client_ip = addr.ip();
    // Rate limit is checked before any credential work.
    if state.auth_rate_limiter.is_limited(client_ip) {
        return (
            StatusCode::TOO_MANY_REQUESTS,
            Json(serde_json::json!({ "error": "too many failed auth attempts", "retry_after_secs": 60 })),
        )
            .into_response();
    }
    let target = req.project.as_deref().unwrap_or("_daemon");
    // Daemon admin path.
// ── Review notes (doc-only pass; code tokens below are unchanged) ────────────
// `session_create` branches: daemon-admin first, then project keys; every
// failure path feeds the per-IP rate limiter. Then `SessionsQuery` and the
// head of `sessions_list`.
    if target == "_daemon" {
        // No admin hash configured also counts as a failed attempt, so an
        // attacker cannot cheaply probe whether admin auth is set up.
        let Some(ref hash) = state.daemon_admin_hash else {
            state.auth_rate_limiter.record_failure(client_ip);
            return (
                StatusCode::UNAUTHORIZED,
                Json(serde_json::json!({"error": "daemon admin auth not configured"})),
            )
                .into_response();
        };
        // Wrap the daemon hash in a synthetic Admin KeyEntry so it flows
        // through the same verification code path as project keys.
        let key_entry = crate::registry::KeyEntry {
            role: crate::registry::Role::Admin,
            hash: hash.clone(),
            label: "daemon-admin".to_string(),
        };
        if crate::registry::verify_keys_list(std::slice::from_ref(&key_entry), &req.key).is_some() {
            state.auth_rate_limiter.record_success(client_ip);
            let token = state.sessions.create(
                "_daemon".to_string(),
                crate::registry::Role::Admin,
                "daemon-admin".to_string(),
            );
            let expires_at = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
                + 30 * 24 * 3600;
            return Json(SessionResponse {
                token,
                role: "admin".to_string(),
                key_label: "daemon-admin".to_string(),
                project: "_daemon".to_string(),
                expires_at,
            })
            .into_response();
        }
        state.auth_rate_limiter.record_failure(client_ip);
        return (
            StatusCode::UNAUTHORIZED,
            Json(serde_json::json!({"error": "invalid daemon admin credentials"})),
        )
            .into_response();
    }
    // Project key path.
    // An unknown slug also records a failure — lookups are attacker-visible.
    let Some(ctx) = state.registry.get(target) else {
        state.auth_rate_limiter.record_failure(client_ip);
        return (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({"error": format!("project '{target}' not registered")})),
        )
            .into_response();
    };
    match ctx.verify_key(&req.key) {
        Some(km) => {
            state.auth_rate_limiter.record_success(client_ip);
            let role_str = match km.role {
                crate::registry::Role::Admin => "admin",
                crate::registry::Role::Viewer => "viewer",
            };
            // The minted session carries the key's role and label.
            let token = state
                .sessions
                .create(target.to_string(), km.role, km.label.clone());
            let expires_at = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
                + 30 * 24 * 3600;
            Json(SessionResponse {
                token,
                role: role_str.to_string(),
                key_label: km.label,
                project: target.to_string(),
                expires_at,
            })
            .into_response()
        }
        None => {
            state.auth_rate_limiter.record_failure(client_ip);
            (
                StatusCode::UNAUTHORIZED,
                Json(serde_json::json!({"error": "invalid credentials"})),
            )
                .into_response()
        }
    }
}

/// `GET /sessions?project=` — list active sessions for a project.
///
/// Auth:
/// - Any valid session for the requested project (viewer or admin).
/// - Daemon admin session (slug == `_daemon`) can query any project.
/// - Without `?project=`, requires daemon admin and returns all sessions.
#[cfg(feature = "ui")]
#[derive(Deserialize)]
struct SessionsQuery {
    project: Option,
}

#[cfg(feature = "ui")]
async fn sessions_list(
    State(state): State,
    headers: axum::http::HeaderMap,
    Query(q): Query,
) -> axum::response::Response {
    let acting = match require_session(&state, &headers) {
        Ok(e) => e,
        Err(r) => return *r,
    };
    let views = match q.project.as_deref() {
        Some(slug) => {
            // Must be a session for that project or daemon admin.
// ── Review notes (doc-only pass; code tokens below are unchanged) ────────────
// Tail of `sessions_list` (authorization + listing), then `session_revoke`,
// then the doc comment for `persist_keys_overlay`.
            // Scope check runs before existence check, so a foreign session
            // gets 403 rather than learning whether the slug exists.
            let authorized = acting.slug == "_daemon" || acting.slug == slug;
            if !authorized {
                return (
                    StatusCode::FORBIDDEN,
                    Json(serde_json::json!({"error": "session is not scoped to this project"})),
                )
                    .into_response();
            }
            if state.registry.get(slug).is_none() {
                return (
                    StatusCode::NOT_FOUND,
                    Json(serde_json::json!({"error": format!("project '{slug}' not registered")})),
                )
                    .into_response();
            }
            state.sessions.list_for_slug(slug)
        }
        None => {
            // No project filter — daemon admin only.
            if acting.slug != "_daemon" {
                return (
                    StatusCode::FORBIDDEN,
                    Json(serde_json::json!({"error": "daemon admin session required to list all sessions"})),
                )
                    .into_response();
            }
            state.sessions.list_all()
        }
    };
    Json(serde_json::json!({"sessions": views})).into_response()
}

/// `DELETE /sessions/{id}` — revoke a session by its public id.
///
/// Auth:
/// - Project admin: may revoke sessions scoped to their own project.
/// - Daemon admin: may revoke any session.
#[cfg(feature = "ui")]
async fn session_revoke(
    State(state): State,
    headers: axum::http::HeaderMap,
    Path(id): Path,
) -> axum::response::Response {
    let acting = match require_session(&state, &headers) {
        Ok(e) => e,
        Err(r) => return *r,
    };
    use crate::session::RevokeResult;
    // Ownership/role checks live inside `revoke_by_id`; this handler only
    // maps the result onto HTTP statuses.
    match state.sessions.revoke_by_id(&id, &acting.slug, &acting.role) {
        RevokeResult::Revoked => StatusCode::NO_CONTENT.into_response(),
        RevokeResult::NotFound => (
            StatusCode::NOT_FOUND,
            Json(serde_json::json!({"error": "session not found or already expired"})),
        )
            .into_response(),
        RevokeResult::Forbidden => (
            StatusCode::FORBIDDEN,
            Json(serde_json::json!({"error": "not authorized to revoke this session"})),
        )
            .into_response(),
    }
}

/// Write a snapshot of all project keys to `{config_dir}/keys-overlay.json`.
/// Format: `{ "slug": [KeyEntry, ...], ... }` — loaded at next boot by main.rs.
fn persist_keys_overlay(config_dir: &std::path::Path, registry: &crate::registry::ProjectRegistry) { let map: std::collections::HashMap> = registry .all() .into_iter() .filter_map(|ctx| { let keys = ctx.keys.read().unwrap_or_else(|e| e.into_inner()).clone(); if keys.is_empty() { None } else { Some((ctx.slug.clone(), keys)) } }) .collect(); let path = config_dir.join("keys-overlay.json"); match serde_json::to_string_pretty(&map) { Ok(json) => { if let Err(e) = std::fs::write(&path, json) { tracing::warn!(error = %e, path = %path.display(), "failed to persist keys overlay"); } } Err(e) => tracing::warn!(error = %e, "failed to serialize keys overlay"), } } // ── Helpers ───────────────────────────────────────────────────────────── /// Resolve a path that may be absolute or relative to project_root. /// Absolute paths are accepted as-is (daemon is loopback-only; OS enforces /// access). Relative paths are resolved against project_root and must stay /// within it. fn resolve_any_path( project_root: &std::path::Path, path: &str, ) -> std::result::Result { let p = PathBuf::from(path); if p.is_absolute() { return p.canonicalize().map_err(|e| { ( StatusCode::BAD_REQUEST, format!("path does not exist or is inaccessible: {e}"), ) }); } let joined = project_root.join(p); let canonical = joined.canonicalize().map_err(|e| { ( StatusCode::BAD_REQUEST, format!("path does not exist or is inaccessible: {e}"), ) })?; if !canonical.starts_with(project_root) { return Err(( StatusCode::BAD_REQUEST, format!("path escapes project root: {path}"), )); } Ok(canonical) } /// Resolve a path relative to the project root and verify it stays within. /// Rejects absolute paths — used for cache management operations scoped to a /// project. 
/// # Errors
/// `(400, message)` for absolute input, a nonexistent/inaccessible path, or a
/// path that escapes the project root.
fn resolve_path(
    project_root: &std::path::Path,
    path: &str,
) -> std::result::Result<PathBuf, (StatusCode, String)> {
    let p = PathBuf::from(path);
    if p.is_absolute() {
        return Err((
            StatusCode::BAD_REQUEST,
            "absolute paths are not accepted".to_string(),
        ));
    }
    let joined = project_root.join(p);
    let canonical = joined.canonicalize().map_err(|e| {
        (
            StatusCode::BAD_REQUEST,
            format!("path does not exist or is inaccessible: {e}"),
        )
    })?;
    // Fix: compare against the canonicalized root — `canonicalize` resolves
    // symlinks, so a symlinked root component (e.g. /tmp → /private/tmp)
    // previously made every valid in-root path fail the containment check.
    // Also restored the stripped return-type generics (`Result<PathBuf, _>`).
    let canonical_root = project_root
        .canonicalize()
        .unwrap_or_else(|_| project_root.to_path_buf());
    if !canonical.starts_with(&canonical_root) {
        return Err((
            StatusCode::BAD_REQUEST,
            format!("path escapes project root: {path}"),
        ));
    }
    Ok(canonical)
}

/// Map internal daemon errors onto a uniform 500 JSON body.
impl IntoResponse for crate::error::DaemonError {
    fn into_response(self) -> axum::response::Response {
        let body = serde_json::json!({"error": self.to_string()});
        (StatusCode::INTERNAL_SERVER_ERROR, Json(body)).into_response()
    }
}

#[cfg(test)]
mod tests {
    use std::sync::atomic::AtomicU64;
    use std::sync::Arc;
    use std::time::Instant;

    use axum::body::to_bytes;
    use axum::http::{Request, StatusCode};
    use tower::ServiceExt as _;

    use super::*;
    use crate::registry::{make_context, ContextSpec, ProjectRegistry};

    /// Build a minimal `AppState` suitable for handler tests.
    ///
    /// Auth is disabled (no keys). Registry primary slug is "test".
    /// All optional feature-gated fields are `None`.
// ── Review notes (doc-only pass; code tokens below are unchanged) ────────────
// Interior of `mod tests` (opened on the previous span): the `test_state`
// fixture, `body_json` helper, router-level handler tests driven through
// tower's `oneshot`, and two `check_primary_auth` unit tests.
    fn test_state(project_root: std::path::PathBuf) -> AppState {
        let ctx = make_context(ContextSpec {
            slug: "test".into(),
            root: project_root.clone(),
            import_path: None,
            keys: vec![],
            remote_url: String::new(),
            push_only: false,
            stale_actor_timeout: 300,
            max_notifications: 100,
            ack_required: vec![],
        });
        let ctx = Arc::new(ctx);
        // The state's actor/notification/cache handles alias the primary
        // context's — handlers and registry see the same stores.
        let actors = Arc::clone(&ctx.actors);
        let notifications = Arc::clone(&ctx.notifications);
        let cache = Arc::clone(&ctx.cache);
        let registry = Arc::new(ProjectRegistry::with_primary(
            "test".into(),
            ctx,
            vec![],
            300,
            100,
        ));
        AppState {
            cache,
            project_root,
            ontoref_root: None,
            started_at: Instant::now(),
            last_activity: Arc::new(AtomicU64::new(0)),
            actors,
            notifications,
            nickel_import_path: None,
            #[cfg(feature = "db")]
            db: None,
            #[cfg(feature = "nats")]
            nats: None,
            #[cfg(feature = "ui")]
            tera: None,
            #[cfg(feature = "ui")]
            public_dir: None,
            registry,
            new_project_tx: None,
            config_dir: None,
            #[cfg(feature = "ui")]
            sessions: Arc::new(crate::session::SessionStore::new()),
            #[cfg(feature = "mcp")]
            mcp_current_project: Arc::new(std::sync::RwLock::new(None)),
            watcher_map: Arc::new(tokio::sync::Mutex::new(std::collections::HashMap::new())),
            // Rate limiter disabled, matching loopback binds.
            auth_rate_limiter: Arc::new(AuthRateLimiter::new(false)),
            #[cfg(feature = "ui")]
            ncl_write_lock: Arc::new(crate::ui::ncl_write::NclWriteLock::new()),
            #[cfg(feature = "ui")]
            daemon_admin_hash: None,
        }
    }

    // Slurp a response body and parse it as JSON; panics on malformed bodies,
    // which is the desired failure mode in tests.
    async fn body_json(resp: axum::response::Response) -> serde_json::Value {
        let bytes = to_bytes(resp.into_body(), usize::MAX).await.unwrap();
        serde_json::from_slice(&bytes).unwrap()
    }

    #[tokio::test]
    async fn health_returns_ok_status() {
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        let app = router(state);
        let resp = app
            .oneshot(
                Request::builder()
                    .uri("/health")
                    .body(axum::body::Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(resp.status(), StatusCode::OK);
        let json = body_json(resp).await;
        assert_eq!(json["status"], "ok");
        assert_eq!(json["project_root"], "/tmp/test-project");
        assert_eq!(json["active_actors"], 0);
        assert_eq!(json["cache_entries"], 0);
    }

    #[tokio::test]
    async fn cache_stats_empty_on_fresh_state() {
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        let app = router(state);
        let resp = app
            .oneshot(
                Request::builder()
                    .uri("/cache/stats")
                    .body(axum::body::Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(resp.status(), StatusCode::OK);
        let json = body_json(resp).await;
        assert_eq!(json["entries"], 0);
        assert_eq!(json["hits"], 0);
        assert_eq!(json["misses"], 0);
        assert_eq!(json["hit_rate"], 0.0);
    }

    #[tokio::test]
    async fn actor_register_returns_201_with_token() {
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        let app = router(state);
        let body = serde_json::json!({ "actor_type": "developer", "project": "test", "hostname": "host-a", "pid": 1001 });
        let resp = app
            .oneshot(
                Request::builder()
                    .method("POST")
                    .uri("/actors/register")
                    .header("content-type", "application/json")
                    .body(axum::body::Body::from(body.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(resp.status(), StatusCode::CREATED);
        let json = body_json(resp).await;
        assert!(json["token"].is_string(), "token must be present");
        assert_eq!(json["actors_connected"], 1);
    }

    // Lifecycle: deregister succeeds once (204), then the token is gone (404).
    #[tokio::test]
    async fn actor_deregister_204_then_404() {
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        let register_body = serde_json::json!({ "actor_type": "agent", "project": "test", "hostname": "host-b", "pid": 1002 });
        let register_resp = router(state.clone())
            .oneshot(
                Request::builder()
                    .method("POST")
                    .uri("/actors/register")
                    .header("content-type", "application/json")
                    .body(axum::body::Body::from(register_body.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        let token = body_json(register_resp).await["token"]
            .as_str()
            .unwrap()
            .to_string();
        let del_resp = router(state.clone())
            .oneshot(
                Request::builder()
                    .method("DELETE")
                    .uri(format!("/actors/{token}"))
                    .body(axum::body::Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(del_resp.status(), StatusCode::NO_CONTENT);
        let del_again = router(state)
            .oneshot(
                Request::builder()
                    .method("DELETE")
                    .uri(format!("/actors/{token}"))
                    .body(axum::body::Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(del_again.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn actor_touch_204_for_known_404_for_unknown() {
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        let reg_body = serde_json::json!({ "actor_type": "ci", "project": "test", "hostname": "host-c", "pid": 1003 });
        let reg_resp = router(state.clone())
            .oneshot(
                Request::builder()
                    .method("POST")
                    .uri("/actors/register")
                    .header("content-type", "application/json")
                    .body(axum::body::Body::from(reg_body.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        let token = body_json(reg_resp).await["token"]
            .as_str()
            .unwrap()
            .to_string();
        let touch_ok = router(state.clone())
            .oneshot(
                Request::builder()
                    .method("POST")
                    .uri(format!("/actors/{token}/touch"))
                    .body(axum::body::Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(touch_ok.status(), StatusCode::NO_CONTENT);
        let touch_miss = router(state)
            .oneshot(
                Request::builder()
                    .method("POST")
                    .uri("/actors/nonexistent-token/touch")
                    .body(axum::body::Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(touch_miss.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn actors_list_reflects_registered_actors() {
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        for i in 0..3 {
            let body = serde_json::json!({ "actor_type": "developer", "project": "test", "hostname": format!("host-{i}"), "pid": 2000u32 + i as u32 });
            router(state.clone())
                .oneshot(
                    Request::builder()
                        .method("POST")
                        .uri("/actors/register")
                        .header("content-type", "application/json")
                        .body(axum::body::Body::from(body.to_string()))
                        .unwrap(),
                )
                .await
                .unwrap();
        }
        let list_resp = router(state)
            .oneshot(
                Request::builder()
                    .uri("/actors")
                    .body(axum::body::Body::empty())
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(list_resp.status(), StatusCode::OK);
        let json = body_json(list_resp).await;
        assert_eq!(json["total"], 3);
        assert_eq!(json["actors"].as_array().unwrap().len(), 3);
    }

    /// `check_primary_auth` is called per-handler, not via middleware, so we
    /// test it as a unit rather than through the full HTTP stack.
    #[test]
    fn check_primary_auth_returns_401_without_bearer() {
        use argon2::password_hash::{rand_core::OsRng, SaltString};
        use argon2::{Argon2, PasswordHasher};

        use crate::registry::{KeyEntry, Role};
        let salt = SaltString::generate(&mut OsRng);
        let hash = Argon2::default()
            .hash_password(b"s3cr3t", &salt)
            .unwrap()
            .to_string();
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        state.registry.primary().set_keys(vec![KeyEntry {
            role: Role::Admin,
            hash,
            label: String::new(),
        }]);
        let client_ip = "10.0.0.1".parse::().unwrap();
        // No Authorization header at all → Some(401 response).
        let headers = axum::http::HeaderMap::new();
        let result = state.check_primary_auth(&headers, client_ip);
        assert!(result.is_some(), "must reject missing Bearer");
        let resp = result.unwrap();
        assert_eq!(resp.status(), StatusCode::UNAUTHORIZED);
    }

    #[test]
    fn check_primary_auth_accepts_correct_bearer() {
        use argon2::password_hash::{rand_core::OsRng, SaltString};
        use argon2::{Argon2, PasswordHasher};

        use crate::registry::{KeyEntry, Role};
        let salt = SaltString::generate(&mut OsRng);
        let hash = Argon2::default()
            .hash_password(b"s3cr3t", &salt)
            .unwrap()
            .to_string();
        let state = test_state(std::path::PathBuf::from("/tmp/test-project"));
        state.registry.primary().set_keys(vec![KeyEntry {
            role: Role::Admin,
            hash,
            label: String::new(),
        }]);
        let client_ip = "10.0.0.1".parse::().unwrap();
        let mut headers = axum::http::HeaderMap::new();
        headers.insert(
            axum::http::header::AUTHORIZATION,
            "Bearer s3cr3t".parse().unwrap(),
        );
        // Correct credential → None means "pass through to the handler".
        let result = state.check_primary_auth(&headers, client_ip);
        assert!(result.is_none(), "valid credentials must pass through");
    }
}