//! Phase 8d: End-to-End Integration Tests
//!
//! Comprehensive integration tests covering complete workflows across all
//! Phase 7-8 features:
//! - Multi-turn conversations with hybrid search
//! - Batch processing with caching
//! - Tool execution with orchestration
//! - Failure scenarios and recovery
//! - API + Orchestrator integration
//! - Performance under load
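//!
//! Each numbered test section below exercises these workflows through the
//! public `provisioning_rag` API; the suite runs under the standard test
//! harness via `cargo test`.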

use std::sync::Arc;
use std::time::Duration;

use provisioning_rag::{
    batch_processing::{BatchJob, BatchQuery},
    caching::ResponseCache,
    conversations::{ConversationContext, ConversationTurn},
    orchestrator::OrchestratorManager,
    query_optimization::QueryOptimizer,
    tools::ToolInput,
};

// ============================================================================
// TEST 1: CACHE + SEARCH INTEGRATION
// ============================================================================

#[tokio::test]
async fn test_cache_with_hybrid_search() {
    // Setup
    let cache = ResponseCache::new(100, Duration::from_secs(3600)).expect("Failed to create cache");

    // Initial stats should be empty
    let initial_stats = cache.stats();
    assert_eq!(initial_stats.hits, 0);
    assert_eq!(initial_stats.misses, 0);
    assert_eq!(initial_stats.total_queries, 0);

    // Hit rate should still be a finite number (not NaN) before any queries
    assert!(initial_stats.hit_rate.is_finite());
}

// ============================================================================
// TEST 2: MULTI-TURN CONVERSATION INTEGRATION
// ============================================================================

#[test]
fn test_multiturn_conversation_with_context() {
    // Create conversation context
    let mut conversation = ConversationContext::new("conv-123".to_string());

    // Add first turn
    let turn1 = ConversationTurn::new(
        0,
        "What is Kubernetes?".to_string(),
        "Kubernetes is an orchestration platform".to_string(),
        vec!["docs.k8s.io".to_string()],
        0.95,
    );
    conversation.add_turn(turn1);

    let turns = conversation.get_turns();
    assert_eq!(turns.len(), 1);

    // Add follow-up turn
    let turn2 = ConversationTurn::new(
        1,
        "How to deploy?".to_string(),
        "Use kubectl or Helm charts...".to_string(),
        vec!["helm.sh".to_string()],
        0.92,
    );
    conversation.add_turn(turn2);

    let turns = conversation.get_turns();
    assert_eq!(turns.len(), 2);

    // Verify conversation context string building
    let context_str = conversation.build_context_string();
    assert!(context_str.contains("What is Kubernetes?"));
    assert!(context_str.contains("Kubernetes is an orchestration platform"));
}

// ============================================================================
// TEST 3: BATCH PROCESSING INTEGRATION
// ============================================================================

#[test]
fn test_batch_processing_workflow() {
    // Create batch with 5 queries
    let queries = vec![
        BatchQuery::new("What is Kubernetes?".into()),
        BatchQuery::new("How to deploy?".into()),
        BatchQuery::new("What about networking?".into()),
        BatchQuery::new("Explain storage?".into()),
        BatchQuery::new("Security best practices?".into()),
    ];

    let batch = BatchJob::new(queries);

    // Verify batch properties
    assert_eq!(batch.queries.len(), 5);
    assert!(!batch.job_id.is_empty());
    assert!(batch.max_concurrent > 0);
    assert!(batch.timeout_secs > 0);

    // Test priority sorting
    let mut batch_with_priorities = BatchJob::new(vec![
        BatchQuery::new("q1".into()).with_priority(50),
        BatchQuery::new("q2".into()).with_priority(80),
        BatchQuery::new("q3".into()).with_priority(30),
    ]);

    batch_with_priorities.sort_by_priority();

    // After sort, highest priority first
    assert_eq!(batch_with_priorities.queries[0].priority, 80);
    assert_eq!(batch_with_priorities.queries[1].priority, 50);
    assert_eq!(batch_with_priorities.queries[2].priority, 30);
}

// ============================================================================
// TEST 4: BATCH PROCESSING WITH RETRIES
// ============================================================================

#[test]
fn test_batch_with_retry_strategy() {
    let query = BatchQuery::new("Deploy Kubernetes".into()).with_retries(3);

    assert_eq!(query.retry_count, 3);

    // Simulate exhausting the configured retry budget (no real work is retried here)
    let max_retries = query.retry_count;
    let mut retry_count = 0;
    while retry_count < max_retries {
        retry_count += 1;
    }
    assert_eq!(retry_count, 3);
}

// ============================================================================
// TEST 5: QUERY OPTIMIZATION INTEGRATION
// ============================================================================

#[test]
fn test_query_optimization_workflow() {
    let optimizer = QueryOptimizer::new();

    // Test intent detection
    let simple_query = "What is Kubernetes?";
    let optimized = optimizer.optimize(simple_query);
    assert!(optimized.is_ok());

    let optimized_query = optimized.unwrap();
    assert!(!optimized_query.optimized.is_empty());
    assert!(!optimized_query.key_terms.is_empty());

    // Test with context
    let context_query = "Tell me more about networking";
    let previous_context = Some("We discussed Kubernetes deployment");
    let with_context = optimizer.optimize_with_context(context_query, previous_context);
    assert!(with_context.is_ok());
}

// ============================================================================
// TEST 6: ORCHESTRATOR + BATCH INTEGRATION
// ============================================================================

#[tokio::test]
async fn test_orchestrator_with_batch_processing() {
    let orchestrator = OrchestratorManager::new();

    // Create batch job
    let batch = BatchJob::new(vec![
        BatchQuery::new("Query 1".into()),
        BatchQuery::new("Query 2".into()),
        BatchQuery::new("Query 3".into()),
    ]);

    // Submit to orchestrator
    let task_id = orchestrator
        .submit_batch_task(&batch)
        .await
        .expect("Failed to submit batch");

    assert!(!task_id.is_empty());

    // Get task status
    let status = orchestrator
        .get_task_status(&task_id)
        .await
        .expect("Failed to get status");

    // Should be pending initially
    assert_eq!(status, provisioning_rag::orchestrator::TaskStatus::Pending);

    // Update progress
    orchestrator
        .update_batch_progress(&task_id, 1, 0)
        .await
        .expect("Failed to update progress");

    // Get task details
    let task = orchestrator
        .get_task(&task_id)
        .await
        .expect("Failed to get task");

    assert!(matches!(
        task,
        provisioning_rag::orchestrator::OrchestratorTask::BatchQuery(_)
    ));
}

// ============================================================================
// TEST 7: CONVERSATION + ORCHESTRATOR INTEGRATION
// ============================================================================

#[tokio::test]
async fn test_conversation_with_orchestrator() {
    let orchestrator = OrchestratorManager::new();

    // Submit conversation task
    let task_id = orchestrator
        .submit_conversation_task("conv-123".into(), "What is Kubernetes?".into())
        .await
        .expect("Failed to submit conversation");

    assert!(!task_id.is_empty());

    // Complete conversation with response
    orchestrator
        .complete_conversation_task(
            &task_id,
            "Kubernetes is a container orchestration platform...".into(),
        )
        .await
        .expect("Failed to complete conversation");

    // Verify status changed to completed
    let status = orchestrator
        .get_task_status(&task_id)
        .await
        .expect("Failed to get status");

    assert_eq!(
        status,
        provisioning_rag::orchestrator::TaskStatus::Completed
    );

    // Get task and verify response
    let task = orchestrator
        .get_task(&task_id)
        .await
        .expect("Failed to get task");

    assert!(matches!(
        task,
        provisioning_rag::orchestrator::OrchestratorTask::Conversation(_)
    ));
}

// ============================================================================
// TEST 8: TOOL EXECUTION + ORCHESTRATOR
// ============================================================================

#[tokio::test]
async fn test_tool_execution_with_orchestrator() {
    let orchestrator = OrchestratorManager::new();

    // Create tool input
    let tool_input = ToolInput {
        params: serde_json::json!({
            "server_name": "web-01",
            "cores": 4
        }),
    };

    // Submit tool task
    let task_id = orchestrator
        .submit_tool_task("create_server".into(), tool_input)
        .await
        .expect("Failed to submit tool task");

    assert!(!task_id.is_empty());

    // Verify task was created
    let task = orchestrator
        .get_task(&task_id)
        .await
        .expect("Failed to get task");

    assert!(matches!(
        task,
        provisioning_rag::orchestrator::OrchestratorTask::ToolExecution(_)
    ));
}

// ============================================================================
// TEST 9: MULTIPLE CONCURRENT TASKS
// ============================================================================

#[tokio::test]
async fn test_concurrent_task_submissions() {
    let orchestrator = Arc::new(OrchestratorManager::new());

    // Submit 10 batch tasks concurrently
    let mut handles = vec![];

    for i in 0..10 {
        let orch = orchestrator.clone();
        let handle = tokio::spawn(async move {
            let batch = BatchJob::new(vec![
                BatchQuery::new(format!("query_{}_1", i)),
                BatchQuery::new(format!("query_{}_2", i)),
            ]);

            orch.submit_batch_task(&batch).await
        });
        handles.push(handle);
    }

    // Wait for all submissions
    let mut task_ids = vec![];
    for handle in handles {
        let result = handle
            .await
            .expect("Failed to await")
            .expect("Failed to submit");
        task_ids.push(result);
    }

    assert_eq!(task_ids.len(), 10);

    // Verify all tasks exist
    for task_id in &task_ids {
        let status = orchestrator
            .get_task_status(task_id)
            .await
            .expect("Failed to get status");
        assert_eq!(status, provisioning_rag::orchestrator::TaskStatus::Pending);
    }

    // Get stats
    let stats = orchestrator.get_stats().await.expect("Failed to get stats");

    assert_eq!(stats.total_tasks, 10);
    assert_eq!(stats.pending, 10);
}

// ============================================================================
// TEST 10: TASK FILTERING AND LISTING
// ============================================================================

#[tokio::test]
async fn test_task_listing_and_filtering() {
    let orchestrator = OrchestratorManager::new();

    // Submit mixed task types
    let batch = BatchJob::new(vec![BatchQuery::new("test".into())]);
    let _batch_task_id = orchestrator
        .submit_batch_task(&batch)
        .await
        .expect("Failed to submit batch");

    let conv_task_id = orchestrator
        .submit_conversation_task("conv-1".into(), "Hello".into())
        .await
        .expect("Failed to submit conversation");

    // List all tasks
    let all_tasks = orchestrator
        .list_tasks()
        .await
        .expect("Failed to list tasks");

    assert_eq!(all_tasks.len(), 2);

    // List pending tasks
    let pending = orchestrator
        .list_tasks_by_status(provisioning_rag::orchestrator::TaskStatus::Pending)
        .await
        .expect("Failed to list pending");

    assert_eq!(pending.len(), 2);

    // Complete one task
    orchestrator
        .complete_conversation_task(&conv_task_id, "Hi!".into())
        .await
        .expect("Failed to complete");

    // List completed tasks
    let completed = orchestrator
        .list_tasks_by_status(provisioning_rag::orchestrator::TaskStatus::Completed)
        .await
        .expect("Failed to list completed");

    assert_eq!(completed.len(), 1);

    // Verify stats updated
    let stats = orchestrator.get_stats().await.expect("Failed to get stats");

    assert_eq!(stats.total_tasks, 2);
    assert_eq!(stats.pending, 1);
    assert_eq!(stats.completed, 1);
    // Success rate only counts completed vs (completed + failed):
    // 1 completed, 0 failed = 100% success rate
    assert_eq!(stats.success_rate, 100.0);
}

// ============================================================================
// TEST 11: FAILURE HANDLING AND ERROR RECOVERY
// ============================================================================

#[tokio::test]
async fn test_error_handling_with_nonexistent_task() {
    let orchestrator = OrchestratorManager::new();

    // Try to get status of a non-existent task
    let result = orchestrator.get_task_status("nonexistent").await;
    assert!(result.is_err());

    // Verify error message contains the task ID
    if let Err(e) = result {
        let error_msg = format!("{}", e);
        assert!(error_msg.contains("nonexistent"));
    }
}

// ============================================================================
// TEST 12: BATCH PROGRESS TRACKING
// ============================================================================

#[tokio::test]
async fn test_batch_progress_tracking() {
    let orchestrator = OrchestratorManager::new();

    let batch = BatchJob::new(vec![
        BatchQuery::new("q1".into()),
        BatchQuery::new("q2".into()),
        BatchQuery::new("q3".into()),
    ]);

    let task_id = orchestrator
        .submit_batch_task(&batch)
        .await
        .expect("Failed to submit");

    // Update progress - 1 completed
    orchestrator
        .update_batch_progress(&task_id, 1, 0)
        .await
        .expect("Failed to update");

    let task = orchestrator
        .get_task(&task_id)
        .await
        .expect("Failed to get task");

    if let provisioning_rag::orchestrator::OrchestratorTask::BatchQuery(batch_task) = task {
        assert_eq!(batch_task.completed, 1);
        assert_eq!(batch_task.failed, 0);
        assert_eq!(batch_task.progress_percent(), 33); // 1 of 3 completed, truncated to 33%
    } else {
        panic!("Expected BatchQueryTask");
    }

    // Update progress - 2 completed, 1 failed
    orchestrator
        .update_batch_progress(&task_id, 2, 1)
        .await
        .expect("Failed to update");

    let task = orchestrator
        .get_task(&task_id)
        .await
        .expect("Failed to get task");

    if let provisioning_rag::orchestrator::OrchestratorTask::BatchQuery(batch_task) = task {
        assert_eq!(batch_task.completed, 2);
        assert_eq!(batch_task.failed, 1);
        assert_eq!(batch_task.progress_percent(), 66); // 2 of 3 completed; failed queries do not count as progress
    } else {
        panic!("Expected BatchQueryTask");
    }
}

// ============================================================================
// TEST 13: CACHE + OPTIMIZER + SEARCH WORKFLOW
// ============================================================================

#[test]
fn test_complete_query_processing_workflow() {
    let optimizer = QueryOptimizer::new();

    // Test complete workflow
    let raw_query = "What is Kubernetes?";

    // Step 1: Extract key terms
    let key_terms = optimizer.extract_key_terms(raw_query);
    assert!(!key_terms.is_empty());

    // Step 2: Full optimization
    let optimized = optimizer.optimize(raw_query);
    assert!(optimized.is_ok());

    let opt_query = optimized.unwrap();
    assert!(!opt_query.key_terms.is_empty());
    assert!(!opt_query.optimized.is_empty());
}

// ============================================================================
// TEST 14: STRESS TEST - MANY CONCURRENT OPERATIONS
// ============================================================================

#[tokio::test]
async fn test_stress_many_concurrent_batch_tasks() {
    let orchestrator = Arc::new(OrchestratorManager::new());

    // Create 50 concurrent batch submissions
    let mut handles = vec![];

    for i in 0..50 {
        let orch = orchestrator.clone();
        let handle = tokio::spawn(async move {
            let batch = BatchJob::new(vec![
                BatchQuery::new(format!("stress_test_{}_q1", i)),
                BatchQuery::new(format!("stress_test_{}_q2", i)),
                BatchQuery::new(format!("stress_test_{}_q3", i)),
            ]);

            orch.submit_batch_task(&batch).await
        });
        handles.push(handle);
    }

    // Collect all results
    let mut count = 0;
    for handle in handles {
        if let Ok(Ok(_)) = handle.await {
            count += 1;
        }
    }

    assert_eq!(count, 50);

    // Verify orchestrator stats
    let stats = orchestrator.get_stats().await.expect("Failed to get stats");

    assert_eq!(stats.total_tasks, 50);
    assert_eq!(stats.pending, 50);
}

// ============================================================================
// TEST 15: ORCHESTRATOR STATISTICS AND METRICS
// ============================================================================

#[tokio::test]
async fn test_orchestrator_statistics_computation() {
    let orchestrator = OrchestratorManager::new();

    // Submit 5 tasks
    let mut task_ids = vec![];
    for i in 0..5 {
        let batch = BatchJob::new(vec![BatchQuery::new(format!("q{}", i))]);
        let task_id = orchestrator
            .submit_batch_task(&batch)
            .await
            .expect("Failed");
        task_ids.push(task_id);
    }

    // Complete 3 tasks
    for task_id in task_ids.iter().take(3) {
        let _ = orchestrator.complete_batch_task(task_id).await;
    }

    // Get stats
    let stats = orchestrator.get_stats().await.expect("Failed to get stats");

    assert_eq!(stats.total_tasks, 5);
    assert_eq!(stats.completed, 3);
    assert_eq!(stats.pending, 2);
}

// ============================================================================
// TEST 16: CONTEXT-AWARE OPTIMIZATION
// ============================================================================

#[test]
fn test_context_aware_query_optimization() {
    let optimizer = QueryOptimizer::new();

    let follow_up_query = "Tell me more";
    let previous_context = Some("We were discussing Kubernetes networking");

    let result = optimizer.optimize_with_context(follow_up_query, previous_context);
    assert!(result.is_ok());

    let optimized = result.unwrap();
    // Context should be integrated into optimization
    assert!(!optimized.optimized.is_empty());
}

// ============================================================================
// TEST 17: BATCH SIZE VARIATIONS
// ============================================================================

#[test]
fn test_batch_processing_various_sizes() {
    // Small batch
    let small_batch = BatchJob::new(vec![BatchQuery::new("q1".into())]);
    assert_eq!(small_batch.queries.len(), 1);

    // Medium batch
    let medium_batch = BatchJob::new(
        (0..50)
            .map(|i| BatchQuery::new(format!("q{}", i)))
            .collect(),
    );
    assert_eq!(medium_batch.queries.len(), 50);

    // Large batch
    let large_batch = BatchJob::new(
        (0..1000)
            .map(|i| BatchQuery::new(format!("q{}", i)))
            .collect(),
    );
    assert_eq!(large_batch.queries.len(), 1000);

    // Verify properties scale correctly
    assert!(small_batch.max_concurrent > 0);
    assert_eq!(medium_batch.queries.len(), 50);
    assert_eq!(large_batch.queries.len(), 1000);
}

// ============================================================================
// TEST 18: CONVERSATION HISTORY MANAGEMENT
// ============================================================================

#[test]
fn test_conversation_history_with_multiple_turns() {
    let mut conversation = ConversationContext::new("multi-turn-conv".to_string());

    // Add 5 conversation turns
    for i in 0..5 {
        let question = format!("Question {}", i + 1);
        let answer = format!("Answer {}", i + 1);
        let turn = ConversationTurn::new(i, question, answer, vec!["source".to_string()], 0.9);
        conversation.add_turn(turn);
    }

    let turns = conversation.get_turns();
    assert_eq!(turns.len(), 5);

    // Get recent turns
    let recent = conversation.get_recent_turns(3);
    assert_eq!(recent.len(), 3);

    // Get specific turn
    let turn_2 = conversation.get_turn(2);
    assert!(turn_2.is_some());
}

// ============================================================================
// TEST 19: TASK TYPE ENUMERATION
// ============================================================================

#[tokio::test]
async fn test_all_orchestrator_task_types() {
    let orchestrator = OrchestratorManager::new();

    // Submit all task types
    let batch = BatchJob::new(vec![BatchQuery::new("test".into())]);
    let batch_id = orchestrator
        .submit_batch_task(&batch)
        .await
        .expect("Failed");

    let conv_id = orchestrator
        .submit_conversation_task("conv".into(), "msg".into())
        .await
        .expect("Failed");

    let tool_id = orchestrator
        .submit_tool_task(
            "tool".into(),
            ToolInput {
                params: serde_json::json!({}),
            },
        )
        .await
        .expect("Failed");

    // Verify all types exist
    let batch_task = orchestrator.get_task(&batch_id).await.expect("Failed");
    assert!(matches!(
        batch_task,
        provisioning_rag::orchestrator::OrchestratorTask::BatchQuery(_)
    ));

    let conv_task = orchestrator.get_task(&conv_id).await.expect("Failed");
    assert!(matches!(
        conv_task,
        provisioning_rag::orchestrator::OrchestratorTask::Conversation(_)
    ));

    let tool_task = orchestrator.get_task(&tool_id).await.expect("Failed");
    assert!(matches!(
        tool_task,
        provisioning_rag::orchestrator::OrchestratorTask::ToolExecution(_)
    ));
}

// ============================================================================
// TEST 20: COMPLETE END-TO-END WORKFLOW
// ============================================================================

#[tokio::test]
async fn test_complete_end_to_end_workflow() {
    // Initialize all components
    let optimizer = QueryOptimizer::new();
    let cache = ResponseCache::new(100, Duration::from_secs(3600)).expect("Failed");
    let orchestrator = Arc::new(OrchestratorManager::new());

    // Simulate complete workflow:
    // 1. User submits multi-query batch
    let batch = BatchJob::new(vec![
        BatchQuery::new("What is Kubernetes?".into()),
        BatchQuery::new("How to deploy?".into()),
        BatchQuery::new("Networking best practices?".into()),
    ]);

    let task_id = orchestrator
        .submit_batch_task(&batch)
        .await
        .expect("Failed to submit");

    // 2. Verify task submitted
    let status = orchestrator
        .get_task_status(&task_id)
        .await
        .expect("Failed");

    assert_eq!(status, provisioning_rag::orchestrator::TaskStatus::Pending);

    // 3. Optimize first query
    let optimized = optimizer.optimize("What is Kubernetes?");
    assert!(optimized.is_ok());

    // 4. Simulate processing progress
    orchestrator
        .update_batch_progress(&task_id, 1, 0)
        .await
        .expect("Failed");

    // 5. Cache has not been exercised in this simulated flow, so stats stay at zero
    let cache_stats = cache.stats();
    assert_eq!(cache_stats.total_queries, 0);

    // 6. Verify final state
    let task = orchestrator.get_task(&task_id).await.expect("Failed");

    assert!(matches!(
        task,
        provisioning_rag::orchestrator::OrchestratorTask::BatchQuery(_)
    ));

    // 7. Get final statistics
    let stats = orchestrator.get_stats().await.expect("Failed");

    assert_eq!(stats.total_tasks, 1);
    assert_eq!(stats.pending, 1);
}