272 lines
11 KiB
Rust
Raw Normal View History

2026-03-13 00:18:14 +00:00
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use notify::{Config, Event, RecommendedWatcher, RecursiveMode, Watcher};
use tokio::sync::mpsc;
use tracing::{debug, info, warn};
use crate::actors::ActorRegistry;
use crate::cache::NclCache;
use crate::notifications::NotificationStore;
/// Directories to watch for NCL changes relative to a project root.
/// Covers both the hidden `.ontology` and the plain `ontology` layout;
/// directories that do not exist are simply skipped at startup.
const WATCH_DIRS: &[&str] = &[".ontology", "adrs", "reflection", "ontology"];
/// File watcher that invalidates the NCL cache on filesystem changes
/// and pushes notifications to the notification store.
///
/// Both fields are held purely to keep the machinery alive: dropping the
/// watcher stops filesystem monitoring and drops the last event sender,
/// which closes the channel and lets the debounce task exit on its own.
pub struct FileWatcher {
    // Underscore-prefixed: never read after construction; kept so the OS
    // watch is released on drop.
    _watcher: RecommendedWatcher,
    // Handle to the debounce/invalidation task spawned in `start`.
    // NOTE(review): dropping a JoinHandle detaches (does not abort) the
    // task; shutdown relies on the channel closing as described above.
    _debounce_task: tokio::task::JoinHandle<()>,
}
/// Optional dependencies injected into the file watcher.
pub struct WatcherDeps {
    /// Database handle used to re-seed ontology tables when files under a
    /// `.ontology` path change (only with the `db` feature).
    #[cfg(feature = "db")]
    pub db: Option<Arc<stratum_db::StratumDb>>,
    /// Extra import path forwarded to the ontology seeding step.
    pub import_path: Option<String>,
    /// Store that receives per-project change notifications.
    pub notifications: Arc<NotificationStore>,
    /// Registry used to look up actor tokens for a project and bump each
    /// actor's pending-notification count.
    pub actors: Arc<ActorRegistry>,
    /// Publisher for `file.changed` and notification events (only with the
    /// `nats` feature).
    #[cfg(feature = "nats")]
    pub nats: Option<Arc<crate::nats::NatsPublisher>>,
}
impl FileWatcher {
/// Start watching NCL-relevant directories under `project_root`.
///
/// Changes are debounced (200ms) before invalidating the cache.
/// A periodic full invalidation runs every `full_invalidation_secs` as
/// safety net.
pub fn start(
project_root: &Path,
cache: Arc<NclCache>,
full_invalidation_secs: u64,
deps: WatcherDeps,
) -> std::result::Result<Self, crate::error::DaemonError> {
let (tx, rx) = mpsc::channel::<Vec<PathBuf>>(256);
let project_root_owned = project_root
.canonicalize()
.unwrap_or_else(|_| project_root.to_path_buf());
let tx_notify = tx.clone();
let mut watcher = RecommendedWatcher::new(
move |res: std::result::Result<Event, notify::Error>| match res {
Ok(event) => {
let ncl_paths: Vec<PathBuf> = event
.paths
.into_iter()
.filter(|p| {
p.extension()
.is_some_and(|ext| ext == "ncl" || ext == "jsonl")
})
.collect();
if !ncl_paths.is_empty() {
let _ = tx_notify.try_send(ncl_paths);
}
}
Err(e) => warn!(error = %e, "file watcher error"),
},
Config::default(),
)
.map_err(|e| crate::error::DaemonError::Watcher(e.to_string()))?;
let mut watched_count = 0;
for dir_name in WATCH_DIRS {
let dir = project_root.join(dir_name);
if dir.is_dir() {
if let Err(e) = watcher.watch(&dir, RecursiveMode::Recursive) {
warn!(dir = %dir.display(), error = %e, "failed to watch directory");
} else {
info!(dir = %dir.display(), "watching directory");
watched_count += 1;
}
}
}
info!(watched_count, "file watcher started");
let debounce_task = tokio::spawn(debounce_loop(
rx,
cache,
project_root_owned,
full_invalidation_secs,
deps,
));
Ok(Self {
_watcher: watcher,
_debounce_task: debounce_task,
})
}
}
/// Debounce filesystem events: collect paths over 200ms windows, then
/// invalidate once. Also runs periodic full invalidation as safety net.
/// Pushes notifications to the store and optionally publishes via NATS.
///
/// Runs until the sending side of `rx` (owned by the watcher callback) is
/// dropped, at which point the loop exits cleanly.
async fn debounce_loop(
    mut rx: mpsc::Receiver<Vec<PathBuf>>,
    cache: Arc<NclCache>,
    project_root: PathBuf,
    full_invalidation_secs: u64,
    deps: WatcherDeps,
) {
    let debounce = Duration::from_millis(200);
    // Treat 0 as "use the 60s default" so the tick interval is never zero.
    let effective_secs = if full_invalidation_secs == 0 {
        60
    } else {
        full_invalidation_secs
    };
    let mut full_tick = tokio::time::interval(Duration::from_secs(effective_secs));
    full_tick.tick().await; // consume immediate first tick
    // Project name (the root directory's last component) scopes
    // notifications and NATS subjects.
    let project_name = project_root
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("unknown")
        .to_string();
    loop {
        tokio::select! {
            recv = rx.recv() => match recv {
                None => {
                    debug!("watcher channel closed — debounce task exiting");
                    return;
                }
                Some(paths) => {
                    // Collect all events within debounce window
                    let mut all_paths = paths;
                    tokio::time::sleep(debounce).await;
                    while let Ok(more) = rx.try_recv() {
                        all_paths.extend(more);
                    }
                    // Canonicalize and deduplicate. Canonicalization fails
                    // for files that no longer exist (deletes/renames), so
                    // fall back to the raw event path in that case — the
                    // previous `filter_map(.. .ok())` silently dropped
                    // deleted files and their cache entries were never
                    // invalidated until the periodic full sweep.
                    let mut canonical: Vec<PathBuf> = all_paths
                        .into_iter()
                        .map(|p| p.canonicalize().unwrap_or(p))
                        .collect();
                    canonical.sort();
                    canonical.dedup();
                    // Bare file names, for log readability only.
                    let file_names: Vec<String> = canonical
                        .iter()
                        .filter_map(|p| p.file_name())
                        .map(|n| n.to_string_lossy().to_string())
                        .collect();
                    for path in &canonical {
                        cache.invalidate_file(path);
                    }
                    info!(
                        files = canonical.len(),
                        names = %file_names.join(", "),
                        "cache invalidated — files changed"
                    );
                    // Convert to relative paths for notification matching;
                    // paths outside the project root are skipped here.
                    let relative_paths: Vec<String> = canonical
                        .iter()
                        .filter_map(|p| {
                            p.strip_prefix(&project_root)
                                .ok()
                                .map(|rel| rel.to_string_lossy().to_string())
                        })
                        .collect();
                    // Publish general file.changed event via NATS (all files, not just ack-required)
                    #[cfg(feature = "nats")]
                    {
                        if !relative_paths.is_empty() {
                            if let Some(ref nats) = deps.nats {
                                if let Err(e) = nats.publish_file_changed(&project_name, &relative_paths).await {
                                    warn!(error = %e, "NATS file.changed publish failed");
                                }
                            }
                        }
                    }
                    // Push notifications — one per event type, actors need to ack
                    if !relative_paths.is_empty() {
                        let notification_ids = deps.notifications.push(
                            &project_name,
                            relative_paths.clone(),
                            None, // source_actor unknown from fs event
                        );
                        if !notification_ids.is_empty() {
                            let actor_tokens = deps.actors.tokens_for_project(&project_name);
                            // Increment pending count on each actor for each notification
                            for token in &actor_tokens {
                                for _ in &notification_ids {
                                    deps.actors.increment_pending(token);
                                }
                            }
                            info!(
                                notifications = notification_ids.len(),
                                project = %project_name,
                                actors = actor_tokens.len(),
                                "notifications pushed"
                            );
                            // Publish via NATS — derive events from the file paths directly,
                            // emitting each distinct event type at most once, with the
                            // subset of files that map to it.
                            #[cfg(feature = "nats")]
                            {
                                if let Some(ref nats) = deps.nats {
                                    let mut published_events = std::collections::HashSet::new();
                                    for file in &relative_paths {
                                        if let Some(event) = crate::notifications::NotificationEvent::from_path(file) {
                                            // insert() returns false for an already-published event type.
                                            if published_events.insert(event) {
                                                let event_files: Vec<String> = relative_paths
                                                    .iter()
                                                    .filter(|f| crate::notifications::NotificationEvent::from_path(f) == Some(event))
                                                    .cloned()
                                                    .collect();
                                                if let Err(e) = nats.publish_notification(
                                                    &project_name,
                                                    &event,
                                                    &event_files,
                                                ).await {
                                                    warn!(error = %e, "NATS notification publish failed");
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                    // Re-seed DB if ontology files changed
                    #[cfg(feature = "db")]
                    {
                        let ontology_changed = canonical.iter().any(|p| {
                            p.to_string_lossy().contains(".ontology")
                        });
                        if ontology_changed {
                            if let Some(ref db) = deps.db {
                                info!("re-seeding ontology tables from changed files");
                                crate::seed::seed_ontology(
                                    db,
                                    &project_root,
                                    &cache,
                                    deps.import_path.as_deref(),
                                ).await;
                            }
                        }
                    }
                }
            },
            _ = full_tick.tick() => {
                // Periodic full invalidation as safety net against missed events.
                let before = cache.len();
                cache.invalidate_all();
                if before > 0 {
                    info!(evicted = before, "periodic full cache invalidation");
                }
            }
        }
    }
}