chore: update deps and config

Jesús Pérez 2025-12-24 03:16:53 +00:00
parent 64ea463b69
commit 5218bf8867
Signed by: jesus
GPG Key ID: 9F243E355E0BC939
14 changed files with 5802 additions and 128 deletions

Cargo.lock (generated)

File diff suppressed because it is too large

Cargo.toml

@@ -5,6 +5,10 @@ members = [
"crates/typedialog",
"crates/typedialog-tui",
"crates/typedialog-web",
"crates/typedialog-ai",
"crates/typedialog-prov-gen",
"crates/typedialog-agent/typedialog-ag-core",
"crates/typedialog-agent/typedialog-ag",
]
resolver = "2"
@@ -12,10 +16,17 @@ resolver = "2"
version = "0.1.0"
authors = ["Jesús Pérez <jpl@jesusperez.com>"]
edition = "2021"
rust-version = "1.75"
repository = "https://github.com/jesusperezlorenzo/typedialog"
license = "MIT"
keywords = ["forms", "cli", "tui", "web", "ai"]
categories = ["command-line-utilities", "web-programming"]
[workspace.dependencies]
# Internal crates
typedialog-ag-core = { path = "crates/typedialog-agent/typedialog-ag-core" }
typedialog-core = { path = "crates/typedialog-core" }
# Core serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
@@ -50,6 +61,7 @@ nu-plugin = "0.109.1"
# CLI Backend (inquire)
inquire = { version = "0.9", features = ["editor", "date"] }
dialoguer = "0.12"
colored = "3"
rpassword = "7.4"
# TUI Backend (ratatui)
@@ -58,15 +70,50 @@ crossterm = "0.29"
atty = "0.2"
# Web Backend (axum)
axum = { version = "0.8.8", features = ["multipart"] }
axum = { version = "0.8.8", features = ["multipart", "ws"] }
tower = "0.5.2"
tower-http = { version = "0.6.8", features = ["fs", "cors"] }
tower-http = { version = "0.6.8", features = ["fs", "cors", "trace"] }
tracing = "0.1"
tracing-subscriber = "0.3"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
# AI/ML Backend
instant-distance = "0.6"
tantivy = "0.25"
# bincode cannot be updated yet: wait for tantivy/syntect to support 2.0,
# then migrate the 8 affected code locations to the new API with explicit config
bincode = "=1.3"
serde_bytes = "0.11"
rand = "0.9"
petgraph = "0.8"
surrealdb = { version = "2.4", features = ["kv-mem"] }
# Misc
tempfile = "3.23"
# Testing & Benchmarking
criterion = { version = "0.5", features = ["html_reports"] }
proptest = "1.4"
# TypeAgent dependencies
nickel-lang-core = "0.16"
nom = "8"
lru = "0.16"
reqwest = { version = "0.12", features = ["json", "rustls-tls", "stream"] }
globset = "0.4"
ignore = "0.4"
notify = "8"
sha2 = "0.10"
hex = "0.4"
uuid = { version = "1.19", features = ["v4", "serde"] }
console = "0.16"
indicatif = "0.18"
cargo_toml = "0.22"
regex = "1.12"
strum = { version = "0.27", features = ["derive"] }
strum_macros = "0.27"
[workspace.lints.rust]
unsafe_code = "forbid"
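
For reference, member crates pick up these shared versions and package metadata through Cargo's workspace inheritance instead of repeating them; a minimal sketch of what a member manifest might look like (illustrative only, not part of this commit):

```toml
# e.g. crates/typedialog-core/Cargo.toml (illustrative)
[package]
name = "typedialog-core"
version.workspace = true      # inherits 0.1.0 from [workspace.package]
edition.workspace = true
license.workspace = true

[dependencies]
# versions and features come from [workspace.dependencies]
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
```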

config/README.md

@@ -4,7 +4,7 @@ Pre-configured settings for each typedialog backend and environment.
## Overview
Configuration files are organized by **backend** (CLI, TUI, Web) and **environment** (default, dev, production).
Configuration files are organized by **backend** (CLI, TUI, Web, AI, Agent, Prov-gen) and **environment** (default, dev, production).
```
config/
@@ -16,10 +16,22 @@ config/
│ ├── default.toml # Standard TUI settings
│ ├── dev.toml # Development features enabled
│ └── production.toml # Optimized for deployment
└── web/
├── default.toml # Standard web server settings
├── dev.toml # Development (hot reload)
└── production.toml # Hardened for production
├── web/
│ ├── default.toml # Standard web server settings
│ ├── dev.toml # Development (hot reload)
│ └── production.toml # Hardened for production
├── ai/
│ ├── default.toml # Standard AI/RAG settings
│ ├── dev.toml # Development RAG pipeline
│ └── production.toml # Optimized AI inference
├── ag/
│ ├── default.toml # Standard Agent settings
│ ├── dev.toml # Development (local Ollama)
│ └── production.toml # Production (Claude Sonnet)
└── prov-gen/
├── default.toml # Standard provisioning settings
├── dev.toml # Development (Hetzner/LXD)
└── production.toml # Production (AWS/GCP)
```
## Backend Configurations
@@ -88,6 +100,69 @@ typedialog-web --config config/web/production.toml
- Response caching
- Gzip compression
### AI Backend
**RAG and embeddings** - AI-powered form assistance and knowledge retrieval.
| Config | Purpose | Provider | Embeddings | Cache |
|--------|---------|----------|-----------|-------|
| default | Standard | OpenAI | text-embedding-3-small | Memory |
| dev | Development | Ollama | nomic-embed-text | Memory |
| production | Production | OpenAI | text-embedding-3-large | Redis |
**Usage:**
```bash
typedialog-ai --config config/ai/production.toml --query "user question"
```
**Features:**
- RAG pipeline for context retrieval
- Vector embeddings
- Knowledge graph generation
- Multi-provider support (OpenAI, Ollama)
### Agent Backend (typedialog-ag)
**LLM agent execution** - Run AI agents from markdown files.
| Config | Purpose | Provider | Model | Streaming |
|--------|---------|----------|-------|-----------|
| default | Standard | Claude | haiku | Yes |
| dev | Development | Ollama | llama2 | Yes |
| production | Production | Claude | sonnet | Yes |
**Usage:**
```bash
typedialog-ag --config config/ag/production.toml task.agent.mdx
```
**Features:**
- Multi-provider (Claude, OpenAI, Gemini, Ollama)
- Tera template engine (Jinja2-compatible)
- Streaming responses
- Variable substitution
### Provisioning Generator (typedialog-prov-gen)
**Infrastructure as Code** - Generate provisioning configurations.
| Config | Purpose | Providers | AI | Validation |
|--------|---------|-----------|----|-----------|
| default | Standard | AWS, Hetzner | Disabled | Enabled |
| dev | Development | Hetzner, LXD | Ollama | Strict |
| production | Production | AWS, GCP | Claude | Strict + Security |
**Usage:**
```bash
typedialog-prov-gen --config config/prov-gen/production.toml --name myproject
```
**Features:**
- Multi-cloud support (AWS, GCP, Hetzner, UpCloud, LXD)
- Nickel-based validation
- AI-assisted generation
- Template fragments
## Configuration by Environment
### Development Configuration
@@ -108,6 +183,21 @@ trace_rendering = false
hot_reload = true
debug = true
logs = "/tmp/typedialog-web.log"
# AI Dev
provider = "ollama"
embedding_model = "nomic-embed-text"
log_level = "debug"
# Agent Dev
default_provider = "ollama"
log_level = "debug"
temperature = 0.7
# Prov-gen Dev
default_providers = ["hetzner", "lxd"]
verbose = true
log_level = "debug"
```
**Usage:**
@@ -115,6 +205,9 @@ logs = "/tmp/typedialog-web.log"
typedialog --config config/cli/dev.toml form.toml
typedialog-tui --config config/tui/dev.toml form.toml
typedialog-web --config config/web/dev.toml
typedialog-ai --config config/ai/dev.toml --query "question"
typedialog-ag --config config/ag/dev.toml task.agent.mdx
typedialog-prov-gen --config config/prov-gen/dev.toml --name project
```
### Production Configuration
@@ -138,6 +231,24 @@ require_https = true
csrf_enabled = true
rate_limit = 100
cache_ttl = 3600
# AI Production
provider = "openai"
embedding_model = "text-embedding-3-large"
cache_type = "redis"
log_level = "warn"
# Agent Production
default_provider = "claude"
model = "claude-3-5-sonnet-20241022"
temperature = 0.3
rate_limit_enabled = true
# Prov-gen Production
default_providers = ["aws", "gcp"]
strict_validation = true
require_encryption = true
require_tests = true
```
**Usage:**
@@ -145,6 +256,9 @@ cache_ttl = 3600
typedialog --config config/cli/production.toml form.toml
typedialog-tui --config config/tui/production.toml form.toml
typedialog-web --config config/web/production.toml
typedialog-ai --config config/ai/production.toml --query "question"
typedialog-ag --config config/ag/production.toml task.agent.mdx
typedialog-prov-gen --config config/prov-gen/production.toml --name project
```
## Common Settings
@@ -301,6 +415,110 @@ enable_compression = true
compression_threshold = 1024
```
## AI Backend Configuration Details
```toml
[ai]
provider = "openai" # openai, ollama
embedding_model = "text-embedding-3-small"
[ai.rag]
enabled = true
chunk_size = 512
chunk_overlap = 50
top_k = 5 # Top results to return
[ai.vector_store]
type = "memory" # memory, redis
dimensions = 1536
[ai.cache]
enabled = true
ttl = 3600 # seconds
[ai.knowledge_graph]
enabled = false
max_depth = 3
```
## Agent Backend Configuration Details
```toml
[agent]
default_provider = "claude" # claude, openai, gemini, ollama
[agent.models]
claude = "claude-3-5-haiku-20241022"
openai = "gpt-4o-mini"
gemini = "gemini-2.0-flash-exp"
ollama = "llama2"
[agent.defaults]
max_tokens = 4096
temperature = 0.7
streaming = true
[agent.template]
engine = "tera" # Jinja2-compatible
strict_variables = true # Error on undefined vars
[agent.validation]
enabled = true
strict = true
[agent.output]
format = "markdown"
color = true
timestamp = true
[agent.logging]
level = "info" # debug, info, warn, error
file = true
```
## Provisioning Generator Configuration Details
```toml
[provisioning]
output_dir = "./provisioning"
default_providers = ["aws", "hetzner"]
[provisioning.generation]
overwrite = false # Require explicit --force
dry_run = false
verbose = false
[provisioning.templates]
base_path = "templates"
# custom_path = "path/to/custom/templates"  # Uncomment to override
[provisioning.infrastructure]
environment = "development" # development, staging, production
region = "us-east-1"
[provisioning.nickel]
validate_schemas = true
generate_defaults = true
use_constraints = true
[provisioning.ai]
enabled = false # AI-assisted generation
provider = "claude"
model = "claude-3-5-sonnet-20241022"
[provisioning.logging]
level = "info"
file = false
[provisioning.validation]
strict = false
require_tests = false
[provisioning.security]
require_encryption = false
scan_templates = false
```
## Distribution Configurations
When creating release distributions, all configurations are included:
@@ -315,7 +533,10 @@ distribution/typedialog-0.1.0/
├── config/
│ ├── cli/
│ ├── tui/
│ └── web/
│ ├── web/
│ ├── ai/
│ ├── ag/
│ └── prov-gen/
└── ...
```

config/ag/README.md

@@ -0,0 +1,63 @@
# Agent Configuration
Configuration for the `typedialog-ag` binary - a single unified binary with CLI and HTTP server modes.
The binary searches for configuration files in the following locations, in order (an installation example follows the list):
- `~/.config/typedialog/ag/{TYPEDIALOG_ENV}.toml` (environment-specific)
- `~/.config/typedialog/ag/config.toml` (fallback)
- Hardcoded defaults if no file is found
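
For example, the shipped files can be copied into that lookup path, and `TYPEDIALOG_ENV` selects the environment-specific file (a sketch; adjust source paths to where the repository configs live):

```bash
# Install the shipped configs into the lookup path described above
mkdir -p ~/.config/typedialog/ag
cp config/ag/default.toml ~/.config/typedialog/ag/config.toml
cp config/ag/dev.toml ~/.config/typedialog/ag/dev.toml

# dev.toml is used when TYPEDIALOG_ENV=dev; config.toml is the fallback
TYPEDIALOG_ENV=dev typedialog-ag run agent.mdx
```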
## Files
- **default.toml** - Default configuration for agent CLI execution (LLM settings, validation, output)
- **dev.toml** - Development configuration (increased logging, streaming output)
- **production.toml** - Production configuration (optimized settings, file logging)
- **server-default.toml** - HTTP server configuration (host, port)
## Usage
### Agent CLI Mode
Execute agents from the command line:
```bash
typedialog-ag agent.mdx # Execute agent file
typedialog-ag run agent.mdx # Explicit run command
typedialog-ag transpile agent.mdx -o out.ncl # Transpile to Nickel
typedialog-ag validate agent.mdx # Validate without executing
typedialog-ag cache clear|stats # Manage cache
```
With custom configuration:
```bash
typedialog-ag -c ~/.config/typedialog/ag/config.toml run agent.mdx
TYPEDIALOG_ENV=dev typedialog-ag run agent.mdx
```
### Agent HTTP Server Mode
Start HTTP API server for remote agent execution:
```bash
typedialog-ag serve # Start server (127.0.0.1:8765)
typedialog-ag serve --port 9000 # Custom port
typedialog-ag serve --host 0.0.0.0 # Public interface
typedialog-ag -c ~/.config/typedialog/ag/server-default.toml serve
```
Server endpoints (example requests follow the list):
- `GET /health` - Health check
- `POST /execute` - Execute agent from file
- `POST /agents/{name}/execute` - Execute agent by name
- `POST /transpile` - Transpile MDX to Nickel
- `POST /validate` - Validate agent file
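
A quick smoke test against a locally running server (the `/execute` request body below is an assumption for illustration; the actual field names are not documented in this commit):

```bash
# Health check
curl http://127.0.0.1:8765/health

# Execute an agent file (the JSON field name "file" is assumed, not confirmed here)
curl -X POST http://127.0.0.1:8765/execute \
  -H "Content-Type: application/json" \
  -d '{"file": "agent.mdx"}'
```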
## Configuration Structure
Agent CLI settings:
- `[agent]` - LLM provider and model selection
- `[agent.models]` - Available models per provider
- `[agent.defaults]` - Execution defaults (max_tokens, temperature, streaming)
- `[agent.validation]` - Validation behavior
- `[agent.output]` - Output formatting
- `[agent.logging]` - Log level and destination
Server settings:
- `host` - Server bind address (default: 127.0.0.1)
- `port` - Server port (default: 8765)

config/ag/default.toml

@@ -0,0 +1,39 @@
# TypeDialog Agent - Default Configuration
[agent]
# Default LLM provider (claude, openai, gemini, ollama)
default_provider = "claude"
# Default model per provider
[agent.models]
claude = "claude-3-5-haiku-20241022"
openai = "gpt-4o-mini"
gemini = "gemini-2.0-flash-exp"
ollama = "llama2"
# Default settings
[agent.defaults]
max_tokens = 4096
temperature = 0.7
streaming = true
# Template settings
[agent.template]
engine = "tera" # Jinja2-compatible
strict_variables = false
# Validation settings
[agent.validation]
enabled = true
strict = false
# Output settings
[agent.output]
format = "markdown"
color = true
timestamp = false
# Logging
[agent.logging]
level = "info"
file = false

config/ag/dev.toml

@@ -0,0 +1,32 @@
# TypeDialog Agent - Development Configuration
[agent]
default_provider = "ollama" # Use local for dev
[agent.models]
claude = "claude-3-5-haiku-20241022"
openai = "gpt-4o-mini"
gemini = "gemini-2.0-flash-exp"
ollama = "llama2"
[agent.defaults]
max_tokens = 2048 # Lower for dev
temperature = 0.7
streaming = true
[agent.template]
engine = "tera"
strict_variables = true # Catch template errors in dev
[agent.validation]
enabled = true
strict = true # Strict validation in dev
[agent.output]
format = "markdown"
color = true
timestamp = true
[agent.logging]
level = "debug" # Verbose in dev
file = true

config/ag/production.toml

@@ -0,0 +1,37 @@
# TypeDialog Agent - Production Configuration
[agent]
default_provider = "claude"
[agent.models]
claude = "claude-3-5-sonnet-20241022" # Higher quality for production
openai = "gpt-4o"
gemini = "gemini-1.5-pro"
ollama = "llama2"
[agent.defaults]
max_tokens = 8192
temperature = 0.3 # More consistent
streaming = true
[agent.template]
engine = "tera"
strict_variables = true
[agent.validation]
enabled = true
strict = true
[agent.output]
format = "markdown"
color = false # No color in production logs
timestamp = true
[agent.logging]
level = "warn" # Less verbose in production
file = true
# Rate limiting (production)
[agent.rate_limit]
enabled = true
max_requests_per_minute = 60

config/ag/server-default.toml

@@ -0,0 +1,8 @@
# TypeDialog Agent Server Configuration
# Copy to ~/.config/typedialog/agent-server/config.toml
# Server host (default: 127.0.0.1)
host = "127.0.0.1"
# Server port (default: 8765)
port = 8765

config/ai/default.toml

@@ -0,0 +1,66 @@
# AI Backend - Default Configuration
# Provides intelligent form assistance using LLM + RAG system
[llm]
# LLM Provider: openai, anthropic, ollama
provider = "openai"
# Model to use for the selected provider
# OpenAI: gpt-4, gpt-3.5-turbo
# Anthropic: claude-3-opus, claude-3-sonnet, claude-3-haiku
# Ollama: depends on locally installed models
model = "gpt-3.5-turbo"
# API endpoint (optional, uses provider defaults if not set)
# OpenAI: https://api.openai.com/v1
# Anthropic: https://api.anthropic.com/v1
# Ollama: http://localhost:11434/api
api_endpoint = ""
[llm.generation]
# Temperature: 0.0-2.0, higher = more creative, lower = more focused
temperature = 0.7
# Maximum tokens in response
max_tokens = 2048
# Top-p (nucleus) sampling: 0.0-1.0
top_p = 0.9
[rag]
# Enable RAG (Retrieval-Augmented Generation) system
enabled = true
# Index directory for cached embeddings and vector store
# If relative path, resolved from ~/.config/typedialog/ai/
index_path = "~/.config/typedialog/ai/rag-index"
# Embedding dimensions: 384, 768, 1024
embedding_dims = 384
# Cache size for vector store (approximate, in embeddings)
cache_size = 1000
[microservice]
# HTTP server settings
host = "127.0.0.1"
port = 3001
# Enable CORS for web clients
enable_cors = false
# WebSocket support for streaming responses
enable_websocket = true
[appearance]
# Interaction mode: interactive, autocomplete, validate_only
# - interactive: LLM suggests, user can override
# - autocomplete: LLM generates all values
# - validate_only: User provides, LLM validates
interaction_mode = "interactive"
# Show LLM suggestions in user-facing prompts
show_suggestions = true
# Confidence threshold for suggestions (0.0-1.0)
suggestion_confidence_threshold = 0.5

config/ai/dev.toml

@@ -0,0 +1,31 @@
# AI Backend - Development Configuration
# Inherits defaults from default.toml; override values here for local development
[llm]
# Use ollama for local development (requires local Ollama instance)
provider = "ollama"
model = "llama2" # Or whatever model you have installed locally
api_endpoint = "http://localhost:11434/api"
[llm.generation]
# Faster responses for iteration
temperature = 0.5
max_tokens = 1024
[rag]
# Enable RAG for development
enabled = true
index_path = "~/.config/typedialog/ai/rag-index-dev"
embedding_dims = 384
cache_size = 500
[microservice]
host = "127.0.0.1"
port = 3001
enable_cors = true # Allow localhost:3000, localhost:5173, etc.
enable_websocket = true
[appearance]
interaction_mode = "interactive"
show_suggestions = true
suggestion_confidence_threshold = 0.3 # Lower threshold for dev feedback

config/ai/production.toml

@@ -0,0 +1,34 @@
# AI Backend - Production Configuration
# Optimized for reliability, cost, and performance at scale
[llm]
# Production uses high-quality, stable models
provider = "anthropic"
model = "claude-3-sonnet-20240229"
api_endpoint = "" # Uses provider defaults (api.anthropic.com)
[llm.generation]
# Conservative settings for production
temperature = 0.3 # More focused, less random
max_tokens = 1024 # Reasonable limit for cost control
top_p = 0.95
[rag]
# Production RAG system with larger cache
enabled = true
index_path = "/var/lib/typedialog/ai/rag-index" # System-wide index path
embedding_dims = 768 # Higher quality embeddings
cache_size = 10000 # Larger cache for frequently accessed data
[microservice]
# Listen on all interfaces for container deployments
host = "0.0.0.0"
port = 3001
enable_cors = false # Restrict CORS for security
enable_websocket = true
[appearance]
# Production uses validation mode
interaction_mode = "validate_only"
show_suggestions = false # Don't show raw LLM output to users
suggestion_confidence_threshold = 0.8 # Only very confident suggestions

config/prov-gen/default.toml

@@ -0,0 +1,42 @@
# TypeDialog Provisioning Generator - Default Configuration
[provisioning]
# Default output directory
output_dir = "./provisioning"
# Default providers to include
default_providers = ["aws", "hetzner"]
# Generation settings
[provisioning.generation]
overwrite = false
dry_run = false
verbose = false
# Template settings
[provisioning.templates]
# Use local path in development; installed binaries use ~/.config/typedialog/prov-gen/templates
base_path = "crates/typedialog-prov-gen/templates"
# custom_path = "path/to/custom/templates" # Uncomment to override
# Infrastructure defaults
[provisioning.infrastructure]
environment = "development"
region = "us-east-1"
# Nickel integration
[provisioning.nickel]
validate_schemas = true
generate_defaults = true
use_constraints = true
# AI assistance
[provisioning.ai]
enabled = false
provider = "claude"
model = "claude-3-5-sonnet-20241022"
# Logging
[provisioning.logging]
level = "info"
file = false

config/prov-gen/dev.toml

@@ -0,0 +1,32 @@
# TypeDialog Provisioning Generator - Development Configuration
[provisioning]
output_dir = "./provisioning"
default_providers = ["hetzner", "lxd"] # Cheaper for dev
[provisioning.generation]
overwrite = true # Allow overwrite in dev
dry_run = false
verbose = true # Verbose in dev
[provisioning.templates]
base_path = "templates"
custom_path = "./custom-templates"
[provisioning.infrastructure]
environment = "development"
region = "eu-central-1"
[provisioning.nickel]
validate_schemas = true
generate_defaults = true
use_constraints = true
[provisioning.ai]
enabled = true # Enable AI in dev
provider = "ollama" # Use local for dev
model = "llama2"
[provisioning.logging]
level = "debug"
file = true

config/prov-gen/production.toml

@@ -0,0 +1,41 @@
# TypeDialog Provisioning Generator - Production Configuration
[provisioning]
output_dir = "./provisioning"
default_providers = ["aws", "gcp"]
[provisioning.generation]
overwrite = false # Require explicit --force
dry_run = false
verbose = false
[provisioning.templates]
base_path = "templates"
# custom_path = "path/to/custom/templates"  # Uncomment to override
[provisioning.infrastructure]
environment = "production"
region = "us-east-1"
[provisioning.nickel]
validate_schemas = true
generate_defaults = true
use_constraints = true
[provisioning.ai]
enabled = true
provider = "claude"
model = "claude-3-5-sonnet-20241022"
[provisioning.logging]
level = "warn"
file = true
# Production-specific settings
[provisioning.validation]
strict = true
require_tests = true
[provisioning.security]
require_encryption = true
scan_templates = true