# AI Backend - Development Configuration
# Inherits defaults from default.toml; override values here for local development.

[llm]
# Use Ollama for local development (requires a local Ollama instance)
api_endpoint = "http://localhost:11434/api"
model = "llama2" # Or whatever model you have installed locally
provider = "ollama"

[llm.generation]
# Faster responses for iteration
max_tokens = 1024
temperature = 0.5

[rag]
# Enable RAG for development
cache_size = 500
embedding_dims = 384
enabled = true
index_path = "~/.config/typedialog/ai/rag-index-dev"

[microservice]
enable_cors = true # Allow localhost:3000, localhost:5173, etc.
enable_websocket = true
host = "127.0.0.1"
port = 3001

[appearance]
interaction_mode = "interactive"
show_suggestions = true
suggestion_confidence_threshold = 0.3 # Lower threshold for dev feedback
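
# Example override (illustrative, commented out): a sketch of pointing the
# backend at a remote Ollama instance instead of localhost. The host address
# and model name below are hypothetical; only [llm] keys already defined
# above are reused, so uncommenting these lines would shadow those values.
#
# [llm]
# provider = "ollama"
# api_endpoint = "http://192.168.1.50:11434/api"
# model = "mistral"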