{
  # Language-model backend configuration.
  llm = {
    provider | default = "openai",
    model | default = "gpt-3.5-turbo",
    # NOTE(review): empty string presumably means "use the provider's
    # built-in endpoint" — confirm against the consuming code.
    api_endpoint | default = "",
    # Sampling parameters passed to the generation call.
    generation = {
      temperature | default = 0.7,
      max_tokens | default = 2048,
      top_p | default = 0.9,
    },
  },

  # Retrieval-augmented generation settings.
  rag = {
    enabled | default = true,
    index_path | default = "~/.config/typedialog/ai/rag-index",
    embedding_dims | default = 384,
    cache_size | default = 1000,
  },

  # Local service endpoint settings.
  microservice = {
    host | default = "127.0.0.1",
    port | default = 3001,
    enable_cors | default = false,
    enable_websocket | default = true,
  },

  # UI / interaction behavior.
  appearance = {
    interaction_mode | default = "interactive",
    show_suggestions | default = true,
    suggestion_confidence_threshold | default = 0.5,
  },
}