Platform restructured into crates/, added AI service and detector,
migrated control-center-ui to Leptos 0.8
[rag.embeddings]
batch_size = 32
dimension = 384
model = "all-MiniLM-L6-v2"
provider = "local"

[rag.ingestion]
auto_ingest = true
chunk_size = 512
doc_types = [
    "md",
    "txt",
    "toml",
]
overlap = 50

[rag.llm]
api_url = "http://localhost:11434"
max_tokens = 2048
model = "llama3.2"
provider = "ollama"
temperature = 0.7

[rag.rag]
enabled = true

[rag.retrieval]
hybrid = false
reranking = false
similarity_threshold = 0.7
top_k = 5

[rag.vector_db]
db_type = "memory"
namespace = "provisioning-solo"
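
On the Rust side, a file like this maps naturally onto serde structs. The sketch below is only an illustration of how the AI service might load it: the struct names, the config/rag.toml path, and the serde + toml dependencies are assumptions, not the platform's actual code.

// Hypothetical sketch: deserialize the [rag.*] tables with serde + the toml crate.
// Requires serde (with the "derive" feature) and toml in Cargo.toml.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Config {
    rag: RagConfig,
}

#[derive(Debug, Deserialize)]
struct RagConfig {
    embeddings: Embeddings,
    ingestion: Ingestion,
    llm: Llm,
    rag: RagToggle,
    retrieval: Retrieval,
    vector_db: VectorDb,
}

#[derive(Debug, Deserialize)]
struct Embeddings {
    batch_size: usize,
    dimension: usize,
    model: String,
    provider: String,
}

#[derive(Debug, Deserialize)]
struct Ingestion {
    auto_ingest: bool,
    chunk_size: usize,
    doc_types: Vec<String>,
    overlap: usize,
}

#[derive(Debug, Deserialize)]
struct Llm {
    api_url: String,
    max_tokens: u32,
    model: String,
    provider: String,
    temperature: f64,
}

#[derive(Debug, Deserialize)]
struct RagToggle {
    enabled: bool,
}

#[derive(Debug, Deserialize)]
struct Retrieval {
    hybrid: bool,
    reranking: bool,
    similarity_threshold: f64,
    top_k: usize,
}

#[derive(Debug, Deserialize)]
struct VectorDb {
    db_type: String,
    namespace: String,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Path is illustrative; the service would read its real config location.
    let raw = std::fs::read_to_string("config/rag.toml")?;
    let cfg: Config = toml::from_str(&raw)?;
    println!("embedding model: {}", cfg.rag.embeddings.model);
    println!("RAG enabled: {}", cfg.rag.rag.enabled);
    Ok(())
}

Typed deserialization of this kind rejects misspelled keys and wrong value types at startup rather than at query time, which is why it is a common choice for service configuration in Rust.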