# TypeDialog/config/ai/default.toml

# AI Backend - Default Configuration
# Provides intelligent form assistance using an LLM plus a RAG system

[llm]
# LLM Provider: openai, anthropic, ollama
provider = "openai"
# Model to use for the selected provider
# OpenAI: gpt-4, gpt-3.5-turbo
# Anthropic: claude-3-opus, claude-3-sonnet, claude-3-haiku
# Ollama: depends on locally installed models
model = "gpt-3.5-turbo"
# API endpoint (optional; leave empty to use the provider's default)
# OpenAI: https://api.openai.com/v1
# Anthropic: https://api.anthropic.com/v1
# Ollama: http://localhost:11434/api
api_endpoint = ""
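
# Illustrative alternative (commented out): a local Ollama setup using the
# endpoint noted above. The model name is a placeholder; use whichever model
# is installed locally.
# provider = "ollama"
# model = "llama3"
# api_endpoint = "http://localhost:11434/api"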

[llm.generation]
# Temperature: 0.0-2.0, higher = more creative, lower = more focused
temperature = 0.7
# Maximum tokens in response
max_tokens = 2048
# Top-p (nucleus) sampling: 0.0-1.0
top_p = 0.9
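
# Illustrative alternative (commented out): near-deterministic sampling, for
# cases where reproducible suggestions matter more than variety.
# temperature = 0.0
# top_p = 1.0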

[rag]
# Enable RAG (Retrieval-Augmented Generation) system
enabled = true
# Index directory for cached embeddings and the vector store
# A relative path is resolved from ~/.config/typedialog/ai/
index_path = "~/.config/typedialog/ai/rag-index"
# Embedding dimensions: 384, 768, 1024
embedding_dims = 384
# Approximate cache size for the vector store, measured in embeddings
cache_size = 1000
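
# Illustrative alternative (commented out): a relative index path, which per
# the note above resolves to ~/.config/typedialog/ai/rag-index.
# index_path = "rag-index"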

[microservice]
# HTTP server settings
host = "127.0.0.1"
port = 3001
# Enable CORS for web clients
enable_cors = false
# Enable WebSocket support for streaming responses
enable_websocket = true
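
# Illustrative alternative (commented out): exposing the service to browser
# clients on other machines. Binding 0.0.0.0 listens on all interfaces, so
# only do this on a trusted network.
# host = "0.0.0.0"
# enable_cors = true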

[appearance]
# Interaction mode: interactive, autocomplete, validate_only
# - interactive: LLM suggests, user can override
# - autocomplete: LLM generates all values
# - validate_only: User provides, LLM validates
interaction_mode = "interactive"
# Show LLM suggestions in user-facing prompts
show_suggestions = true
# Confidence threshold for suggestions (0.0-1.0)
suggestion_confidence_threshold = 0.5
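
# Illustrative alternative (commented out): fully automatic form filling with
# a stricter confidence bar, using the modes documented above.
# interaction_mode = "autocomplete"
# suggestion_confidence_threshold = 0.8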