<!-- Vapora/docs/architecture/multi-ia-router.html (generated by mdBook) -->

<!DOCTYPE HTML>
<html lang="en" class="light sidebar-visible" dir="ltr">
<head>
<!-- Book generated using mdBook -->
<meta charset="UTF-8">
<title>Multi-IA Router - VAPORA Platform Documentation</title>
<!-- Custom HTML head -->
<meta name="description" content="Comprehensive documentation for VAPORA, an intelligent development orchestration platform built entirely in Rust.">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#ffffff">
<link rel="icon" href="../favicon.svg">
<link rel="shortcut icon" href="../favicon.png">
<link rel="stylesheet" href="../css/variables.css">
<link rel="stylesheet" href="../css/general.css">
<link rel="stylesheet" href="../css/chrome.css">
<link rel="stylesheet" href="../css/print.css" media="print">
<!-- Fonts -->
<link rel="stylesheet" href="../FontAwesome/css/font-awesome.css">
<link rel="stylesheet" href="../fonts/fonts.css">
<!-- Highlight.js Stylesheets -->
<link rel="stylesheet" id="highlight-css" href="../highlight.css">
<link rel="stylesheet" id="tomorrow-night-css" href="../tomorrow-night.css">
<link rel="stylesheet" id="ayu-highlight-css" href="../ayu-highlight.css">
<!-- Custom theme stylesheets -->
<!-- Provide site root and default themes to javascript -->
<script>
// Path from this page back to the book root; consumed by toc.js and book.js
// to resolve asset and chapter URLs.
const path_to_root = "../";
// Theme ids used when the visitor has no stored preference (matched against
// the OS color-scheme in the theme-selection script below).
const default_light_theme = "light";
const default_dark_theme = "dark";
</script>
<!-- Start loading toc.js asap -->
<script src="../toc.js"></script>
</head>
<body>
<div id="mdbook-help-container">
<div id="mdbook-help-popup">
<h2 class="mdbook-help-title">Keyboard shortcuts</h2>
<div>
<p>Press <kbd>←</kbd> or <kbd>→</kbd> to navigate between chapters</p>
<p>Press <kbd>S</kbd> or <kbd>/</kbd> to search in the book</p>
<p>Press <kbd>?</kbd> to show this help</p>
<p>Press <kbd>Esc</kbd> to hide this help</p>
</div>
</div>
</div>
<div id="body-container">
<!-- Work around some values being stored in localStorage wrapped in quotes -->
<script>
// Work around older versions storing these values JSON-stringified (wrapped
// in literal quotes): strip the quotes so later string comparisons match.
try {
let theme = localStorage.getItem('mdbook-theme');
let sidebar = localStorage.getItem('mdbook-sidebar');
// Null-guard each key: previously a missing 'mdbook-theme' threw a
// TypeError, and the catch aborted before the sidebar value was unwrapped.
if (theme && theme.startsWith('"') && theme.endsWith('"')) {
localStorage.setItem('mdbook-theme', theme.slice(1, theme.length - 1));
}
if (sidebar && sidebar.startsWith('"') && sidebar.endsWith('"')) {
localStorage.setItem('mdbook-sidebar', sidebar.slice(1, sidebar.length - 1));
}
} catch (e) { }
</script>
<!-- Set the theme before any content is loaded, prevents flash -->
<script>
// Resolve the effective theme before any content renders, preventing a flash
// of the wrong theme: stored preference wins, otherwise follow the OS setting.
const default_theme = window.matchMedia("(prefers-color-scheme: dark)").matches ? default_dark_theme : default_light_theme;
let theme;
// localStorage access can throw (e.g. storage disabled); fall through to default.
try { theme = localStorage.getItem('mdbook-theme'); } catch(e) { }
if (theme === null || theme === undefined) { theme = default_theme; }
// NOTE: `html` is deliberately a top-level const — the sidebar script further
// down reuses this binding.
const html = document.documentElement;
html.classList.remove('light')
html.classList.add(theme);
html.classList.add("js");
</script>
<input type="checkbox" id="sidebar-toggle-anchor" class="hidden">
<!-- Hide / unhide sidebar before it is displayed -->
<script>
// Decide sidebar visibility before first paint: on wide viewports honor the
// stored preference (default visible), on narrow ones always start hidden.
// NOTE: `sidebar` is deliberately top-level — the ARIA script below reads it.
let sidebar = null;
const sidebar_toggle = document.getElementById("sidebar-toggle-anchor");
if (document.body.clientWidth >= 1080) {
try { sidebar = localStorage.getItem('mdbook-sidebar'); } catch(e) { }
sidebar = sidebar || 'visible';
} else {
sidebar = 'hidden';
}
// Keep the CSS checkbox-toggle and the <html> state class in sync.
sidebar_toggle.checked = sidebar === 'visible';
html.classList.remove('sidebar-visible');
html.classList.add("sidebar-" + sidebar);
</script>
<nav id="sidebar" class="sidebar" aria-label="Table of contents">
<!-- populated by js -->
<mdbook-sidebar-scrollbox class="sidebar-scrollbox"></mdbook-sidebar-scrollbox>
<noscript>
<iframe class="sidebar-iframe-outer" src="../toc.html"></iframe>
</noscript>
<div id="sidebar-resize-handle" class="sidebar-resize-handle">
<div class="sidebar-resize-indicator"></div>
</div>
</nav>
<div id="page-wrapper" class="page-wrapper">
<div class="page">
<div id="menu-bar-hover-placeholder"></div>
<div id="menu-bar" class="menu-bar sticky">
<div class="left-buttons">
<label id="sidebar-toggle" class="icon-button" for="sidebar-toggle-anchor" title="Toggle Table of Contents" aria-label="Toggle Table of Contents" aria-controls="sidebar">
<i class="fa fa-bars"></i>
</label>
<button id="theme-toggle" class="icon-button" type="button" title="Change theme" aria-label="Change theme" aria-haspopup="true" aria-expanded="false" aria-controls="theme-list">
<i class="fa fa-paint-brush"></i>
</button>
<ul id="theme-list" class="theme-popup" aria-label="Themes" role="menu">
<li role="none"><button role="menuitem" class="theme" id="default_theme">Auto</button></li>
<li role="none"><button role="menuitem" class="theme" id="light">Light</button></li>
<li role="none"><button role="menuitem" class="theme" id="rust">Rust</button></li>
<li role="none"><button role="menuitem" class="theme" id="coal">Coal</button></li>
<li role="none"><button role="menuitem" class="theme" id="navy">Navy</button></li>
<li role="none"><button role="menuitem" class="theme" id="ayu">Ayu</button></li>
</ul>
<button id="search-toggle" class="icon-button" type="button" title="Search (`/`)" aria-label="Toggle Searchbar" aria-expanded="false" aria-keyshortcuts="/ s" aria-controls="searchbar">
<i class="fa fa-search"></i>
</button>
</div>
<h1 class="menu-title">VAPORA Platform Documentation</h1>
<div class="right-buttons">
<a href="../print.html" title="Print this book" aria-label="Print this book">
<i id="print-button" class="fa fa-print"></i>
</a>
<a href="https://github.com/vapora-platform/vapora" title="Git repository" aria-label="Git repository">
<i id="git-repository-button" class="fa fa-github"></i>
</a>
<a href="https://github.com/vapora-platform/vapora/edit/main/docs/src/../architecture/multi-ia-router.md" title="Suggest an edit" aria-label="Suggest an edit">
<i id="git-edit-button" class="fa fa-edit"></i>
</a>
</div>
</div>
<div id="search-wrapper" class="hidden">
<form id="searchbar-outer" class="searchbar-outer">
<input type="search" id="searchbar" name="searchbar" placeholder="Search this book ..." aria-controls="searchresults-outer" aria-describedby="searchresults-header">
</form>
<div id="searchresults-outer" class="searchresults-outer hidden">
<div id="searchresults-header" class="searchresults-header"></div>
<ul id="searchresults">
</ul>
</div>
</div>
<!-- Apply ARIA attributes after the sidebar and the sidebar toggle button are added to the DOM -->
<script>
// Reflect the initial sidebar state in ARIA, and take sidebar links out of
// the tab order while the sidebar is hidden.
const sidebarShown = sidebar === 'visible';
document.getElementById('sidebar-toggle').setAttribute('aria-expanded', sidebarShown);
document.getElementById('sidebar').setAttribute('aria-hidden', !sidebarShown);
for (const link of document.querySelectorAll('#sidebar a')) {
link.setAttribute('tabIndex', sidebarShown ? 0 : -1);
}
</script>
<div id="content" class="content">
<main>
<h1 id="-multi-ia-router"><a class="header" href="#-multi-ia-router">🧠 Multi-IA Router</a></h1>
<h2 id="routing-inteligente-entre-múltiples-proveedores-de-llm"><a class="header" href="#routing-inteligente-entre-múltiples-proveedores-de-llm">Routing Inteligente entre Múltiples Proveedores de LLM</a></h2>
<p><strong>Version</strong>: 0.1.0
<strong>Status</strong>: Specification (VAPORA v1.0 - Multi-Agent Multi-IA)
<strong>Purpose</strong>: Sistema de routing dinámico que selecciona el LLM óptimo por contexto</p>
<hr />
<h2 id="-objetivo"><a class="header" href="#-objetivo">🎯 Objetivo</a></h2>
<p><strong>Problema</strong>:</p>
<ul>
<li>Cada tarea necesita un LLM diferente (code ≠ embeddings ≠ review)</li>
<li>Costos varían enormemente (Ollama gratis vs Claude Opus $$$)</li>
<li>Disponibilidad varía (rate limits, latencia)</li>
<li>Necesidad de fallback automático</li>
</ul>
<p><strong>Solución</strong>: Sistema inteligente de routing que decide qué LLM usar según:</p>
<ol>
<li><strong>Contexto de la tarea</strong> (type, domain, complexity)</li>
<li><strong>Reglas predefinidas</strong> (mappings estáticos)</li>
<li><strong>Decisión dinámica</strong> (disponibilidad, costo, carga)</li>
<li><strong>Override manual</strong> (usuario especifica LLM requerido)</li>
</ol>
<hr />
<h2 id="-arquitectura"><a class="header" href="#-arquitectura">🏗️ Arquitectura</a></h2>
<h3 id="layer-1-llm-providers-trait-pattern"><a class="header" href="#layer-1-llm-providers-trait-pattern">Layer 1: LLM Providers (Trait Pattern)</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>pub enum LLMProvider {
Claude {
api_key: String,
model: String, // "opus-4", "sonnet-4", "haiku-3"
max_tokens: usize,
},
OpenAI {
api_key: String,
model: String, // "gpt-4", "gpt-4-turbo", "gpt-3.5-turbo"
max_tokens: usize,
},
Gemini {
api_key: String,
model: String, // "gemini-2.0-pro", "gemini-pro", "gemini-flash"
max_tokens: usize,
},
Ollama {
endpoint: String, // "http://localhost:11434"
model: String, // "llama3.2", "mistral", "neural-chat"
max_tokens: usize,
},
}
pub trait LLMClient: Send + Sync {
async fn complete(
&amp;self,
prompt: String,
context: Option&lt;String&gt;,
) -&gt; anyhow::Result&lt;String&gt;;
async fn stream(
&amp;self,
prompt: String,
) -&gt; anyhow::Result&lt;tokio::sync::mpsc::Receiver&lt;String&gt;&gt;;
fn cost_per_1k_tokens(&amp;self) -&gt; f64;
fn latency_ms(&amp;self) -&gt; u32;
fn available(&amp;self) -&gt; bool;
}
<span class="boring">}</span></code></pre></pre>
<h3 id="layer-2-task-context-classifier"><a class="header" href="#layer-2-task-context-classifier">Layer 2: Task Context Classifier</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>#[derive(Debug, Clone, PartialEq)]
pub enum TaskType {
// Code tasks
CodeGeneration,
CodeReview,
CodeRefactor,
UnitTest,
IntegrationTest,
// Analysis tasks
ArchitectureDesign,
SecurityAnalysis,
PerformanceAnalysis,
// Documentation
DocumentGeneration,
CodeDocumentation,
APIDocumentation,
// Search/RAG
Embeddings,
SemanticSearch,
ContextRetrieval,
// General
GeneralQuery,
Summarization,
Translation,
}
#[derive(Debug, Clone)]
pub struct TaskContext {
pub task_type: TaskType,
pub domain: String, // "backend", "frontend", "infra"
pub complexity: Complexity, // Low, Medium, High, Critical
pub quality_requirement: Quality, // Low, Medium, High, Critical
pub latency_required_ms: u32, // 500 = &lt;500ms required
pub budget_cents: Option&lt;u32&gt;, // Cost limit in cents for 1k tokens
}
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub enum Complexity {
Low,
Medium,
High,
Critical,
}
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub enum Quality {
Low, // Quick &amp; cheap
Medium, // Balanced
High, // Good quality
Critical, // Best possible
}
<span class="boring">}</span></code></pre></pre>
<h3 id="layer-3-mapping-engine-reglas-predefinidas"><a class="header" href="#layer-3-mapping-engine-reglas-predefinidas">Layer 3: Mapping Engine (Reglas Predefinidas)</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>pub struct IAMapping {
pub task_type: TaskType,
pub primary: LLMProvider,
pub fallback_order: Vec&lt;LLMProvider&gt;,
pub reasoning: String,
pub cost_estimate_per_task: f64,
}
pub static DEFAULT_MAPPINGS: &amp;[IAMapping] = &amp;[
// Embeddings → Ollama (local, free)
IAMapping {
task_type: TaskType::Embeddings,
primary: LLMProvider::Ollama {
endpoint: "http://localhost:11434".to_string(),
model: "nomic-embed-text".to_string(),
max_tokens: 8192,
},
fallback_order: vec![
LLMProvider::OpenAI {
api_key: "".to_string(),
model: "text-embedding-3-small".to_string(),
max_tokens: 8192,
},
],
reasoning: "Ollama local es gratis y rápido para embeddings. Fallback a OpenAI si Ollama no disponible".to_string(),
cost_estimate_per_task: 0.0, // Gratis localmente
},
// Code Generation → Claude Opus (máxima calidad)
IAMapping {
task_type: TaskType::CodeGeneration,
primary: LLMProvider::Claude {
api_key: "".to_string(),
model: "opus-4".to_string(),
max_tokens: 8000,
},
fallback_order: vec![
LLMProvider::OpenAI {
api_key: "".to_string(),
model: "gpt-4".to_string(),
max_tokens: 8000,
},
],
reasoning: "Claude Opus mejor para código complejo. GPT-4 como fallback".to_string(),
cost_estimate_per_task: 0.06, // ~6 cents per 1k tokens
},
// Code Review → Claude Sonnet (balance calidad/costo)
IAMapping {
task_type: TaskType::CodeReview,
primary: LLMProvider::Claude {
api_key: "".to_string(),
model: "sonnet-4".to_string(),
max_tokens: 4000,
},
fallback_order: vec![
LLMProvider::Gemini {
api_key: "".to_string(),
model: "gemini-pro".to_string(),
max_tokens: 4000,
},
],
reasoning: "Sonnet balance perfecto. Gemini como fallback".to_string(),
cost_estimate_per_task: 0.015,
},
// Documentation → GPT-4 (mejor formato)
IAMapping {
task_type: TaskType::DocumentGeneration,
primary: LLMProvider::OpenAI {
api_key: "".to_string(),
model: "gpt-4".to_string(),
max_tokens: 4000,
},
fallback_order: vec![
LLMProvider::Claude {
api_key: "".to_string(),
model: "sonnet-4".to_string(),
max_tokens: 4000,
},
],
reasoning: "GPT-4 mejor formato para docs. Claude como fallback".to_string(),
cost_estimate_per_task: 0.03,
},
// Quick Queries → Gemini Flash (velocidad)
IAMapping {
task_type: TaskType::GeneralQuery,
primary: LLMProvider::Gemini {
api_key: "".to_string(),
model: "gemini-flash-2.0".to_string(),
max_tokens: 1000,
},
fallback_order: vec![
LLMProvider::Ollama {
endpoint: "http://localhost:11434".to_string(),
model: "llama3.2".to_string(),
max_tokens: 1000,
},
],
reasoning: "Gemini Flash muy rápido. Ollama como fallback".to_string(),
cost_estimate_per_task: 0.002,
},
];
<span class="boring">}</span></code></pre></pre>
<h3 id="layer-4-routing-engine-decisiones-dinámicas"><a class="header" href="#layer-4-routing-engine-decisiones-dinámicas">Layer 4: Routing Engine (Decisiones Dinámicas)</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>pub struct LLMRouter {
pub mappings: HashMap&lt;TaskType, Vec&lt;LLMProvider&gt;&gt;,
pub providers: HashMap&lt;String, Box&lt;dyn LLMClient&gt;&gt;,
pub cost_tracker: CostTracker,
pub rate_limiter: RateLimiter,
}
impl LLMRouter {
/// Routing decision: hybrid (rules + dynamic + override)
pub async fn route(
&amp;mut self,
context: TaskContext,
override_llm: Option&lt;LLMProvider&gt;,
) -&gt; anyhow::Result&lt;LLMProvider&gt; {
// 1. Si hay override manual, usar ese
if let Some(llm) = override_llm {
self.cost_tracker.log_usage(&amp;llm, &amp;context);
return Ok(llm);
}
// 2. Obtener mappings predefinidos
let mut candidates = self.get_mapping(&amp;context.task_type)?;
// 3. Filtrar por disponibilidad (rate limits, latencia)
candidates = self.filter_by_availability(candidates).await?;
// 4. Filtrar por presupuesto si existe
if let Some(budget) = context.budget_cents {
candidates = candidates.into_iter()
.filter(|llm| llm.cost_per_1k_tokens() * 10.0 &lt; budget as f64)
.collect();
}
// 5. Seleccionar por balance calidad/costo/latencia
let selected = self.select_optimal(candidates, &amp;context)?;
self.cost_tracker.log_usage(&amp;selected, &amp;context);
Ok(selected)
}
async fn filter_by_availability(
&amp;self,
candidates: Vec&lt;LLMProvider&gt;,
) -&gt; anyhow::Result&lt;Vec&lt;LLMProvider&gt;&gt; {
let mut available = Vec::new();
for llm in candidates {
if self.rate_limiter.can_use(&amp;llm).await? {
available.push(llm);
}
}
Ok(if available.is_empty() { candidates } else { available })
}
fn select_optimal(
&amp;self,
candidates: Vec&lt;LLMProvider&gt;,
context: &amp;TaskContext,
) -&gt; anyhow::Result&lt;LLMProvider&gt; {
// Scoring: quality * 0.4 + cost * 0.3 + latency * 0.3
let best = candidates.iter().max_by(|a, b| {
let score_a = self.score_llm(a, context);
let score_b = self.score_llm(b, context);
score_a.partial_cmp(&amp;score_b).unwrap()
});
Ok(best.ok_or(anyhow::anyhow!("No LLM available"))?.clone())
}
fn score_llm(&amp;self, llm: &amp;LLMProvider, context: &amp;TaskContext) -&gt; f64 {
let quality_score = match context.quality_requirement {
Quality::Critical =&gt; 1.0,
Quality::High =&gt; 0.9,
Quality::Medium =&gt; 0.7,
Quality::Low =&gt; 0.5,
};
let cost = llm.cost_per_1k_tokens();
let cost_score = 1.0 / (1.0 + cost); // Inverse: lower cost = higher score
let latency = llm.latency_ms();
let latency_score = 1.0 / (1.0 + latency as f64);
quality_score * 0.4 + cost_score * 0.3 + latency_score * 0.3
}
}
<span class="boring">}</span></code></pre></pre>
<h3 id="layer-5-cost-tracking--monitoring"><a class="header" href="#layer-5-cost-tracking--monitoring">Layer 5: Cost Tracking &amp; Monitoring</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>pub struct CostTracker {
pub tasks_completed: HashMap&lt;TaskType, u32&gt;,
pub total_tokens_used: u64,
pub total_cost_cents: u32,
pub cost_by_provider: HashMap&lt;String, u32&gt;,
pub cost_by_task_type: HashMap&lt;TaskType, u32&gt;,
}
impl CostTracker {
pub fn log_usage(&amp;mut self, llm: &amp;LLMProvider, context: &amp;TaskContext) {
let provider_name = llm.provider_name();
let cost = (llm.cost_per_1k_tokens() * 10.0) as u32; // Estimate per task
*self.cost_by_provider.entry(provider_name).or_insert(0) += cost;
*self.cost_by_task_type.entry(context.task_type.clone()).or_insert(0) += cost;
self.total_cost_cents += cost;
*self.tasks_completed.entry(context.task_type.clone()).or_insert(0) += 1;
}
pub fn monthly_cost_estimate(&amp;self) -&gt; f64 {
self.total_cost_cents as f64 / 100.0 // Convert to dollars
}
pub fn generate_report(&amp;self) -&gt; String {
format!(
"Cost Report:\n Total: ${:.2}\n By Provider: {:?}\n By Task: {:?}",
self.monthly_cost_estimate(),
self.cost_by_provider,
self.cost_by_task_type
)
}
}
<span class="boring">}</span></code></pre></pre>
<hr />
<h2 id="-routing-tres-modos"><a class="header" href="#-routing-tres-modos">🔧 Routing: Tres Modos</a></h2>
<h3 id="modo-1-reglas-estáticas-default"><a class="header" href="#modo-1-reglas-estáticas-default">Modo 1: Reglas Estáticas (Default)</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>// Automático, usa DEFAULT_MAPPINGS
let router = LLMRouter::new();
let llm = router.route(
TaskContext {
task_type: TaskType::CodeGeneration,
domain: "backend".to_string(),
complexity: Complexity::High,
quality_requirement: Quality::High,
latency_required_ms: 5000,
budget_cents: None,
},
None, // Sin override
).await?;
// Resultado: Claude Opus (regla predefinida)
<span class="boring">}</span></code></pre></pre>
<h3 id="modo-2-decisión-dinámica-smart"><a class="header" href="#modo-2-decisión-dinámica-smart">Modo 2: Decisión Dinámica (Smart)</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>// Router evalúa disponibilidad, latencia, costo
let router = LLMRouter::with_tracking();
let llm = router.route(
TaskContext {
task_type: TaskType::CodeReview,
domain: "frontend".to_string(),
complexity: Complexity::Medium,
quality_requirement: Quality::Medium,
latency_required_ms: 2000,
budget_cents: Some(20), // Max 2 cents por task
},
None,
).await?;
// Router elige entre Sonnet vs Gemini según disponibilidad y presupuesto
<span class="boring">}</span></code></pre></pre>
<h3 id="modo-3-override-manual-control-total"><a class="header" href="#modo-3-override-manual-control-total">Modo 3: Override Manual (Control Total)</a></h3>
<pre><pre class="playground"><code class="language-rust"><span class="boring">#![allow(unused)]
</span><span class="boring">fn main() {
</span>// Usuario especifica exactamente qué LLM usar
let llm = router.route(
context,
Some(LLMProvider::Claude {
api_key: "sk-...".to_string(),
model: "opus-4".to_string(),
max_tokens: 8000,
}),
).await?;
// Usa exactamente lo especificado, registra en cost tracker
<span class="boring">}</span></code></pre></pre>
<hr />
<h2 id="-configuración-vaporatoml"><a class="header" href="#-configuración-vaporatoml">📊 Configuración (vapora.toml)</a></h2>
<pre><code class="language-toml">[llm_router]
# Mapeos personalizados (override DEFAULT_MAPPINGS)
[[llm_router.custom_mapping]]
task_type = "CodeGeneration"
primary_provider = "claude"
primary_model = "opus-4"
fallback_providers = ["openai:gpt-4"]
# Proveedores disponibles
[[llm_router.providers]]
name = "claude"
api_key = "${ANTHROPIC_API_KEY}"
model_variants = ["opus-4", "sonnet-4", "haiku-3"]
rate_limit = { tokens_per_minute = 1000000 }
[[llm_router.providers]]
name = "openai"
api_key = "${OPENAI_API_KEY}"
model_variants = ["gpt-4", "gpt-4-turbo"]
rate_limit = { tokens_per_minute = 500000 }
[[llm_router.providers]]
name = "gemini"
api_key = "${GEMINI_API_KEY}"
model_variants = ["gemini-pro", "gemini-flash-2.0"]
[[llm_router.providers]]
name = "ollama"
endpoint = "http://localhost:11434"
model_variants = ["llama3.2", "mistral", "neural-chat"]
rate_limit = { tokens_per_minute = 10000000 } # Local, sin límites reales
# Cost tracking
[llm_router.cost_tracking]
enabled = true
warn_when_exceeds_cents = 1000 # Warn if daily cost &gt; $10
</code></pre>
<hr />
<h2 id="-implementation-checklist"><a class="header" href="#-implementation-checklist">🎯 Implementation Checklist</a></h2>
<ul>
<li><input disabled="" type="checkbox"/>
Trait <code>LLMClient</code> + implementaciones (Claude, OpenAI, Gemini, Ollama)</li>
<li><input disabled="" type="checkbox"/>
<code>TaskContext</code> y clasificación de tareas</li>
<li><input disabled="" type="checkbox"/>
<code>IAMapping</code> y DEFAULT_MAPPINGS</li>
<li><input disabled="" type="checkbox"/>
<code>LLMRouter</code> con routing híbrido</li>
<li><input disabled="" type="checkbox"/>
Fallback automático + error handling</li>
<li><input disabled="" type="checkbox"/>
<code>CostTracker</code> para monitoreo</li>
<li><input disabled="" type="checkbox"/>
Config loading desde vapora.toml</li>
<li><input disabled="" type="checkbox"/>
CLI: <code>vapora llm-router status</code> (ver providers, costos)</li>
<li><input disabled="" type="checkbox"/>
Tests unitarios (routing logic)</li>
<li><input disabled="" type="checkbox"/>
Integration tests (real providers)</li>
</ul>
<hr />
<h2 id="-success-metrics"><a class="header" href="#-success-metrics">📈 Success Metrics</a></h2>
<p>✅ Routing decision &lt; 100ms
✅ Fallback automático funciona
✅ Cost tracking preciso
✅ Documentación de costos por tarea
✅ Override manual siempre funciona
✅ Rate limiting respetado</p>
<hr />
<p><strong>Version</strong>: 0.1.0
<strong>Status</strong>: ✅ Specification Complete (VAPORA v1.0)
<strong>Purpose</strong>: Multi-IA routing system para orquestación de agentes</p>
</main>
<nav class="nav-wrapper" aria-label="Page navigation">
<!-- Mobile navigation buttons -->
<a rel="prev" href="../architecture/agent-registry-coordination.html" class="mobile-nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../architecture/multi-agent-workflows.html" class="mobile-nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
<div style="clear: both"></div>
</nav>
</div>
</div>
<nav class="nav-wide-wrapper" aria-label="Page navigation">
<a rel="prev" href="../architecture/agent-registry-coordination.html" class="nav-chapters previous" title="Previous chapter" aria-label="Previous chapter" aria-keyshortcuts="Left">
<i class="fa fa-angle-left"></i>
</a>
<a rel="next prefetch" href="../architecture/multi-agent-workflows.html" class="nav-chapters next" title="Next chapter" aria-label="Next chapter" aria-keyshortcuts="Right">
<i class="fa fa-angle-right"></i>
</a>
</nav>
</div>
<script>
window.playground_copyable = true;
</script>
<script src="../elasticlunr.min.js"></script>
<script src="../mark.min.js"></script>
<script src="../searcher.js"></script>
<script src="../clipboard.min.js"></script>
<script src="../highlight.js"></script>
<script src="../book.js"></script>
<!-- Custom JS scripts -->
</div>
</body>
</html>