# AI Backend - Development Configuration
# Inherits defaults from default.toml, override values here for local development

[llm]
# Use ollama for local development (requires local Ollama instance)
provider = "ollama"
model = "llama2" # Or whatever model you have installed locally
api_endpoint = "http://localhost:11434/api"
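# Assumes a stock local Ollama install: 11434 is Ollama's default port; adjust if yours listens elsewhere.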

[llm.generation]
# Faster responses for iteration
temperature = 0.5
max_tokens = 1024
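# max_tokens only caps output length; raise it if dev responses come back truncated.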

[rag]
# Enable RAG for development
enabled = true
index_path = "~/.config/typedialog/ai/rag-index-dev"
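# Assumes the backend expands "~"; if it does not, use an absolute path here.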
embedding_dims = 384
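# 384 matches small sentence-transformer embeddings (e.g. all-MiniLM-L6-v2); keep this in sync with the embedding model actually in use.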
cache_size = 500

[microservice]
host = "127.0.0.1"
port = 3001
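# 3001 keeps the dev backend clear of port 3000, which frontend dev servers typically occupy.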
enable_cors = true # Allow localhost:3000, localhost:5173, etc.
enable_websocket = true

[appearance]
interaction_mode = "interactive"
show_suggestions = true
suggestion_confidence_threshold = 0.3 # Lower threshold for dev feedback
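# Presumably suggestions scoring below this are hidden, so a lower value surfaces more of them during development.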