// Platform restructured into crates/, added AI service and detector;
// migrated control-center-ui to Leptos 0.8.
//! Phase 8: Comprehensive Performance Benchmarks
//!
//! Benchmarks for all Phase 7-8 features:
//! - Response caching (hit rate, latency improvement)
//! - Semantic search (vector latency)
//! - Keyword search (BM25 latency)
//! - Hybrid search (combined latency & relevance)
//! - Query optimization (intent detection, key term extraction)
//! - Batch processing (throughput, concurrency)
//! - Orchestrator (task submission, polling)
//! - API endpoints (request latency)
#![allow(clippy::redundant_closure)]

use std::hint::black_box;
use std::time::Duration;

use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
use provisioning_rag::{
    batch_processing::{BatchJob, BatchQuery},
    caching::ResponseCache,
    orchestrator::OrchestratorManager,
    query_optimization::QueryOptimizer,
};
// ============================================================================
|
|
// RESPONSE CACHING BENCHMARKS
|
|
// ============================================================================
|
|
|
|
fn bench_cache_creation(c: &mut Criterion) {
|
|
c.bench_function("cache_creation", |b| {
|
|
b.iter(|| ResponseCache::new(black_box(1000), Duration::from_secs(3600)))
|
|
});
|
|
}
|
|
|
|
fn bench_cache_statistics(c: &mut Criterion) {
|
|
let cache = ResponseCache::new(100, Duration::from_secs(3600)).expect("Failed to create cache");
|
|
|
|
c.bench_function("cache_stats_retrieval", |b| b.iter(|| cache.stats()));
|
|
|
|
c.bench_function("cache_cleanup_expired", |b| {
|
|
b.iter(|| cache.cleanup_expired())
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// QUERY OPTIMIZATION BENCHMARKS
|
|
// ============================================================================
|
|
|
|
fn bench_intent_detection(c: &mut Criterion) {
|
|
let optimizer = QueryOptimizer::new();
|
|
|
|
c.bench_function("intent_detection_factual", |b| {
|
|
b.iter(|| {
|
|
let _ = optimizer.optimize(black_box("What is Kubernetes?"));
|
|
})
|
|
});
|
|
|
|
c.bench_function("intent_detection_howto", |b| {
|
|
b.iter(|| {
|
|
let _ = optimizer.optimize(black_box("How do I deploy applications?"));
|
|
})
|
|
});
|
|
|
|
c.bench_function("intent_detection_comparison", |b| {
|
|
b.iter(|| {
|
|
let _ = optimizer.optimize(black_box("Compare Docker and Kubernetes"));
|
|
})
|
|
});
|
|
}
|
|
|
|
fn bench_key_term_extraction(c: &mut Criterion) {
|
|
let optimizer = QueryOptimizer::new();
|
|
|
|
c.bench_function("key_term_extraction_short", |b| {
|
|
b.iter(|| optimizer.extract_key_terms(black_box("What is Kubernetes?")))
|
|
});
|
|
|
|
c.bench_function("key_term_extraction_long", |b| {
|
|
b.iter(|| {
|
|
optimizer.extract_key_terms(black_box(
|
|
"How do I deploy Kubernetes applications with persistent volumes and networking?",
|
|
))
|
|
})
|
|
});
|
|
}
|
|
|
|
fn bench_query_normalization(c: &mut Criterion) {
|
|
let optimizer = QueryOptimizer::new();
|
|
|
|
c.bench_function("query_normalization", |b| {
|
|
b.iter(|| optimizer.normalize_query(black_box(" WHAT IS KUBERNETES? ")))
|
|
});
|
|
}
|
|
|
|
fn bench_full_optimization(c: &mut Criterion) {
|
|
let optimizer = QueryOptimizer::new();
|
|
|
|
c.bench_function("full_query_optimization", |b| {
|
|
b.iter(|| {
|
|
let _ = optimizer.optimize(black_box("How do I deploy Kubernetes?"));
|
|
})
|
|
});
|
|
|
|
c.bench_function("optimization_with_context", |b| {
|
|
b.iter(|| {
|
|
let _ = optimizer.optimize_with_context(
|
|
black_box("Tell me more about services"),
|
|
black_box(Some("We discussed Kubernetes deployment")),
|
|
);
|
|
})
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// ORCHESTRATOR BENCHMARKS
|
|
// ============================================================================
|
|
|
|
fn bench_orchestrator_creation(c: &mut Criterion) {
|
|
c.bench_function("orchestrator_manager_creation", |b| {
|
|
b.iter(|| OrchestratorManager::new())
|
|
});
|
|
}
|
|
|
|
fn bench_batch_query_task_creation(c: &mut Criterion) {
|
|
let batch_job = BatchJob::new(vec![
|
|
BatchQuery::new("query 1".into()),
|
|
BatchQuery::new("query 2".into()),
|
|
BatchQuery::new("query 3".into()),
|
|
]);
|
|
|
|
c.bench_function("batch_query_task_creation", |b| {
|
|
b.iter(|| {
|
|
provisioning_rag::BatchQueryTask::new(black_box(batch_job.job_id.clone()), &batch_job)
|
|
})
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// BATCH PROCESSING BENCHMARKS
|
|
// ============================================================================
|
|
|
|
fn bench_batch_creation(c: &mut Criterion) {
|
|
c.bench_function("batch_job_creation_small", |b| {
|
|
b.iter(|| {
|
|
BatchJob::new(vec![
|
|
BatchQuery::new("query 1".into()),
|
|
BatchQuery::new("query 2".into()),
|
|
BatchQuery::new("query 3".into()),
|
|
])
|
|
})
|
|
});
|
|
|
|
c.bench_function("batch_job_creation_large", |b| {
|
|
b.iter(|| {
|
|
let queries: Vec<BatchQuery> = (0..100)
|
|
.map(|i| BatchQuery::new(format!("query_{}", i)))
|
|
.collect();
|
|
BatchJob::new(queries)
|
|
})
|
|
});
|
|
}
|
|
|
|
fn bench_batch_priority_sort(c: &mut Criterion) {
|
|
c.bench_function("batch_sort_by_priority_small", |b| {
|
|
b.iter(|| {
|
|
let mut job = BatchJob::new(vec![
|
|
BatchQuery::new("q1".into()).with_priority(50),
|
|
BatchQuery::new("q2".into()).with_priority(80),
|
|
BatchQuery::new("q3".into()).with_priority(30),
|
|
]);
|
|
job.sort_by_priority();
|
|
})
|
|
});
|
|
|
|
c.bench_function("batch_sort_by_priority_large", |b| {
|
|
b.iter(|| {
|
|
let queries: Vec<BatchQuery> = (0..100)
|
|
.map(|i| BatchQuery::new(format!("query_{}", i)).with_priority((i % 100) as u8))
|
|
.collect();
|
|
let mut job = BatchJob::new(queries);
|
|
job.sort_by_priority();
|
|
})
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// COMPARISON BENCHMARKS
|
|
// ============================================================================
|
|
|
|
fn bench_cache_clear(c: &mut Criterion) {
|
|
let cache =
|
|
ResponseCache::new(1000, Duration::from_secs(3600)).expect("Failed to create cache");
|
|
|
|
c.bench_function("cache_clear", |b| {
|
|
b.iter(|| {
|
|
cache.clear();
|
|
})
|
|
});
|
|
}
|
|
|
|
fn bench_optimization_overhead(c: &mut Criterion) {
|
|
let optimizer = QueryOptimizer::new();
|
|
|
|
c.bench_function("optimization_overhead_simple", |b| {
|
|
b.iter(|| {
|
|
let _ = optimizer.optimize(black_box("What is it?"));
|
|
})
|
|
});
|
|
|
|
c.bench_function("optimization_overhead_complex", |b| {
|
|
b.iter(|| {
|
|
let _ = optimizer.optimize(black_box(
|
|
"How can I deploy Kubernetes applications with persistent volumes, networking, \
|
|
and high availability?",
|
|
));
|
|
})
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// CRITERION SETUP
|
|
// ============================================================================
|
|
|
|
criterion_group!(
|
|
name = benches;
|
|
config = Criterion::default()
|
|
.sample_size(100)
|
|
.measurement_time(Duration::from_secs(5));
|
|
targets = bench_cache_creation,
|
|
bench_cache_statistics,
|
|
bench_intent_detection,
|
|
bench_key_term_extraction,
|
|
bench_query_normalization,
|
|
bench_full_optimization,
|
|
bench_batch_creation,
|
|
bench_batch_priority_sort,
|
|
bench_cache_clear,
|
|
bench_optimization_overhead,
|
|
bench_orchestrator_creation,
|
|
bench_batch_query_task_creation
|
|
);
|
|
|
|
criterion_main!(benches);
|