chore: add configs

This commit is contained in:
Jesús Pérez 2026-01-12 05:19:06 +00:00
parent 1508d9a96b
commit 52904472de
Signed by: jesus
GPG Key ID: 9F243E355E0BC939
7 changed files with 755 additions and 755 deletions

View File

@@ -33,280 +33,280 @@ timeout = 60
# AI Features - Fine-Grained Control # AI Features - Fine-Grained Control
# ============================================================================ # ============================================================================
[ai.features] [ai.features]
# AI-assisted form filling (typdialog-ai) # AI-assisted form filling (typdialog-ai)
# Real-time suggestions and field value predictions # Real-time suggestions and field value predictions
form_assistance = true form_assistance = true
# Natural language configuration generation (typdialog-prov-gen) # Natural language configuration generation (typdialog-prov-gen)
# Convert plain English to Nickel configs # Convert plain English to Nickel configs
config_generation = true config_generation = true
# Autonomous AI agents (typdialog-ag) # Autonomous AI agents (typdialog-ag)
# WARNING: Agents can execute multi-step workflows # WARNING: Agents can execute multi-step workflows
# Recommended: false for production (enable per-use-case) # Recommended: false for production (enable per-use-case)
autonomous_agents = false autonomous_agents = false
# AI-powered troubleshooting # AI-powered troubleshooting
# Analyze logs and suggest fixes for failed deployments # Analyze logs and suggest fixes for failed deployments
troubleshooting = true troubleshooting = true
# Configuration optimization # Configuration optimization
# AI reviews configs and suggests improvements # AI reviews configs and suggests improvements
optimization = true optimization = true
# Validation error explanations # Validation error explanations
# AI explains Nickel validation errors in plain language # AI explains Nickel validation errors in plain language
error_explanations = true error_explanations = true
# ============================================================================ # ============================================================================
# LLM Provider Configuration # LLM Provider Configuration
# ============================================================================ # ============================================================================
[ai.anthropic] [ai.anthropic]
# Anthropic Claude API configuration # Anthropic Claude API configuration
api_key = "env:ANTHROPIC_API_KEY" # Load from environment variable api_key = "env:ANTHROPIC_API_KEY" # Load from environment variable
api_url = "https://api.anthropic.com/v1" api_url = "https://api.anthropic.com/v1"
max_retries = 3 max_retries = 3
retry_delay_ms = 1000 retry_delay_ms = 1000
# Rate limits (per minute) # Rate limits (per minute)
max_requests_per_minute = 50 max_requests_per_minute = 50
max_tokens_per_minute = 100000 max_tokens_per_minute = 100000
[ai.openai] [ai.openai]
# OpenAI GPT-4 API configuration # OpenAI GPT-4 API configuration
api_key = "env:OPENAI_API_KEY" api_key = "env:OPENAI_API_KEY"
api_url = "https://api.openai.com/v1" api_url = "https://api.openai.com/v1"
organization_id = "" # Optional max_retries = 3
max_retries = 3 organization_id = "" # Optional
retry_delay_ms = 1000 retry_delay_ms = 1000
# Rate limits (per minute) # Rate limits (per minute)
max_requests_per_minute = 60 max_requests_per_minute = 60
max_tokens_per_minute = 150000 max_tokens_per_minute = 150000
[ai.local] [ai.local]
# Local LLM configuration (Ollama, LlamaCpp, vLLM) # Local LLM configuration (Ollama, LlamaCpp, vLLM)
# Use for air-gapped deployments or privacy-critical scenarios # Use for air-gapped deployments or privacy-critical scenarios
model_path = "/opt/provisioning/models/llama-3-70b" context_length = 8192
server_url = "http://localhost:11434" # Ollama default model_path = "/opt/provisioning/models/llama-3-70b"
context_length = 8192 num_gpu_layers = 40 # GPU acceleration
num_gpu_layers = 40 # GPU acceleration server_url = "http://localhost:11434" # Ollama default
# ============================================================================ # ============================================================================
# Model Context Protocol (MCP) Server # Model Context Protocol (MCP) Server
# ============================================================================ # ============================================================================
[ai.mcp] [ai.mcp]
# MCP server configuration # MCP server configuration
enabled = true enabled = true
server_url = "http://localhost:9000" max_retries = 3
timeout = 30 server_url = "http://localhost:9000"
max_retries = 3 timeout = 30
# Tool calling configuration # Tool calling configuration
[ai.mcp.tools] [ai.mcp.tools]
enabled = true enabled = true
# Available tools for LLM # Available tools for LLM
# Tools provide structured actions the LLM can invoke # Tools provide structured actions the LLM can invoke
tools = [ tools = [
"nickel_validate", # Validate Nickel configuration "nickel_validate", # Validate Nickel configuration
"schema_query", # Query Nickel schema information "schema_query", # Query Nickel schema information
"config_generate", # Generate configuration snippets "config_generate", # Generate configuration snippets
"cedar_check", # Check Cedar authorization policies "cedar_check", # Check Cedar authorization policies
"deployment_status", # Query deployment status "deployment_status", # Query deployment status
"log_analyze", # Analyze deployment logs "log_analyze", # Analyze deployment logs
] ]
# ============================================================================ # ============================================================================
# Retrieval-Augmented Generation (RAG) # Retrieval-Augmented Generation (RAG)
# ============================================================================ # ============================================================================
[ai.rag] [ai.rag]
# Enable RAG system # Enable RAG system
enabled = true enabled = true
# Vector Store Configuration # Vector Store Configuration
# Options: "qdrant" | "milvus" | "pgvector" | "chromadb" # Options: "qdrant" | "milvus" | "pgvector" | "chromadb"
vector_store = "qdrant" collection_name = "provisioning-knowledge"
vector_store_url = "http://localhost:6333" vector_store = "qdrant"
collection_name = "provisioning-knowledge" vector_store_url = "http://localhost:6333"
# Embedding Model # Embedding Model
# OpenAI: "text-embedding-3-large", "text-embedding-3-small" # OpenAI: "text-embedding-3-large", "text-embedding-3-small"
# Local: "all-MiniLM-L6-v2", "bge-large-en-v1.5" # Local: "all-MiniLM-L6-v2", "bge-large-en-v1.5"
embedding_model = "text-embedding-3-large" embedding_api_key = "env:OPENAI_API_KEY" # For OpenAI embeddings
embedding_api_key = "env:OPENAI_API_KEY" # For OpenAI embeddings embedding_model = "text-embedding-3-large"
# Document Chunking # Document Chunking
chunk_size = 512 # Characters per chunk chunk_overlap = 50 # Overlap between chunks
chunk_overlap = 50 # Overlap between chunks chunk_size = 512 # Characters per chunk
max_chunks_per_query = 10 # Top-k retrieval max_chunks_per_query = 10 # Top-k retrieval
# ============================================================================ # ============================================================================
# RAG Index Configuration # RAG Index Configuration
# ============================================================================ # ============================================================================
[ai.rag.index] [ai.rag.index]
# What to index for RAG retrieval # What to index for RAG retrieval
# Index Nickel schemas (RECOMMENDED: true) # Index Nickel schemas (RECOMMENDED: true)
# Provides AI with schema definitions and contracts # Provides AI with schema definitions and contracts
schemas = true schemas = true
schemas_path = "provisioning/schemas" schemas_path = "provisioning/schemas"
# Index documentation (RECOMMENDED: true) # Index documentation (RECOMMENDED: true)
# Provides AI with user guides and best practices # Provides AI with user guides and best practices
docs = true docs = true
docs_path = "docs" docs_path = "docs"
# Index past deployments (RECOMMENDED: true) # Index past deployments (RECOMMENDED: true)
# AI learns from successful deployment patterns # AI learns from successful deployment patterns
deployments = true deployments = true
deployments_path = "workspaces" deployments_path = "workspaces"
# Index best practices (RECOMMENDED: true) # Index best practices (RECOMMENDED: true)
# Inject organizational patterns and conventions # Inject organizational patterns and conventions
best_practices = true best_practices = true
best_practices_path = ".claude/patterns" best_practices_path = ".claude/patterns"
# Index deployment logs (WARNING: Privacy concerns) # Index deployment logs (WARNING: Privacy concerns)
# Logs may contain sensitive data, enable only if sanitized # Logs may contain sensitive data, enable only if sanitized
logs = false logs = false
logs_retention_days = 30 logs_retention_days = 30
# Reindexing schedule # Reindexing schedule
auto_reindex = true auto_reindex = true
reindex_interval_hours = 24 reindex_interval_hours = 24
# ============================================================================ # ============================================================================
# Security and Access Control # Security and Access Control
# ============================================================================ # ============================================================================
[ai.security] [ai.security]
# Cedar policy store for AI access control # Cedar policy store for AI access control
cedar_policy_store = "/etc/provisioning/cedar-policies/ai" cedar_policy_store = "/etc/provisioning/cedar-policies/ai"
# AI cannot suggest secret values (CRITICAL: keep true) # AI cannot suggest secret values (CRITICAL: keep true)
# AI can suggest secret names/paths but not retrieve actual secrets # AI can suggest secret names/paths but not retrieve actual secrets
max_secret_suggestions = 0 max_secret_suggestions = 0
# Require human approval for critical operations (CRITICAL: keep true) # Require human approval for critical operations (CRITICAL: keep true)
# Operations requiring approval: # Operations requiring approval:
# - Deployments to production # - Deployments to production
# - Configuration changes affecting security # - Configuration changes affecting security
# - Secret rotation # - Secret rotation
# - Infrastructure deletion # - Infrastructure deletion
require_human_approval = true require_human_approval = true
# Audit all AI operations (CRITICAL: keep true) # Audit all AI operations (CRITICAL: keep true)
# Log every AI request, response, and action # Log every AI request, response, and action
audit_all_operations = true audit_all_operations = true
# Data sanitization before sending to LLM # Data sanitization before sending to LLM
# Remove sensitive data from prompts # Remove sensitive data from prompts
[ai.security.sanitization] [ai.security.sanitization]
sanitize_secrets = true # Remove secret values sanitize_credentials = true # Remove passwords, API keys
sanitize_pii = true # Remove personally identifiable info sanitize_ip_addresses = false # Keep for troubleshooting
sanitize_credentials = true # Remove passwords, API keys sanitize_pii = true # Remove personally identifiable info
sanitize_ip_addresses = false # Keep for troubleshooting sanitize_secrets = true # Remove secret values
# Allowed data for LLM # Allowed data for LLM
allowed_data = [ allowed_data = [
"nickel_schemas", # Schema definitions (public) "nickel_schemas", # Schema definitions (public)
"documentation", # User docs (public) "documentation", # User docs (public)
"error_messages", # Validation errors (sanitized) "error_messages", # Validation errors (sanitized)
"resource_names", # Infrastructure resource identifiers "resource_names", # Infrastructure resource identifiers
] ]
# Forbidden data for LLM (NEVER send to external LLM) # Forbidden data for LLM (NEVER send to external LLM)
forbidden_data = [ forbidden_data = [
"secret_values", # Passwords, API keys, tokens "secret_values", # Passwords, API keys, tokens
"private_keys", # SSH keys, TLS keys, encryption keys "private_keys", # SSH keys, TLS keys, encryption keys
"pii", # Email addresses, names, phone numbers "pii", # Email addresses, names, phone numbers
"credentials", # Authentication credentials "credentials", # Authentication credentials
"session_tokens", # User session data "session_tokens", # User session data
] ]
# ============================================================================ # ============================================================================
# Rate Limiting and Cost Control # Rate Limiting and Cost Control
# ============================================================================ # ============================================================================
[ai.rate_limiting] [ai.rate_limiting]
# Per-user rate limits # Per-user rate limits
requests_per_minute = 60 requests_per_day = 2000
requests_per_hour = 500 requests_per_hour = 500
requests_per_day = 2000 requests_per_minute = 60
# Token limits (to control LLM API costs) # Token limits (to control LLM API costs)
tokens_per_day = 1000000 # 1M tokens/day tokens_per_day = 1000000 # 1M tokens/day
tokens_per_month = 30000000 # 30M tokens/month tokens_per_month = 30000000 # 30M tokens/month
# Cost limits (USD) # Cost limits (USD)
cost_limit_per_day = "100.00" cost_limit_per_day = "100.00"
cost_limit_per_month = "2000.00" cost_limit_per_month = "2000.00"
# Alert thresholds # Alert thresholds
cost_alert_threshold = 0.8 # Alert at 80% of limit cost_alert_threshold = 0.8 # Alert at 80% of limit
# Rate limit exceeded behavior # Rate limit exceeded behavior
# Options: "queue" | "reject" | "throttle" # Options: "queue" | "reject" | "throttle"
exceed_behavior = "queue" exceed_behavior = "queue"
max_queue_size = 100 max_queue_size = 100
# ============================================================================ # ============================================================================
# Caching # Caching
# ============================================================================ # ============================================================================
[ai.caching] [ai.caching]
# Enable response caching to reduce LLM API calls # Enable response caching to reduce LLM API calls
enabled = true enabled = true
# Cache TTL (time-to-live) # Cache TTL (time-to-live)
ttl = "1h" ttl = "1h"
# Cache backend # Cache backend
# Options: "redis" | "memcached" | "in-memory" # Options: "redis" | "memcached" | "in-memory"
backend = "redis" backend = "redis"
redis_url = "redis://localhost:6379" redis_url = "redis://localhost:6379"
# Cache key strategy # Cache key strategy
# "prompt" = Cache by exact prompt (high precision, low hit rate) # "prompt" = Cache by exact prompt (high precision, low hit rate)
# "semantic" = Cache by semantic similarity (lower precision, high hit rate) # "semantic" = Cache by semantic similarity (lower precision, high hit rate)
cache_strategy = "semantic" cache_strategy = "semantic"
semantic_similarity_threshold = 0.95 semantic_similarity_threshold = 0.95
# Cache statistics # Cache statistics
track_hit_rate = true log_cache_misses = false
log_cache_misses = false track_hit_rate = true
# ============================================================================ # ============================================================================
# Observability and Monitoring # Observability and Monitoring
# ============================================================================ # ============================================================================
[ai.observability] [ai.observability]
# Logging level for AI operations # Logging level for AI operations
# Options: "trace" | "debug" | "info" | "warn" | "error" # Options: "trace" | "debug" | "info" | "warn" | "error"
log_level = "info" log_level = "info"
# Trace all AI requests (detailed logging) # Trace all AI requests (detailed logging)
# WARNING: Generates large log volume # WARNING: Generates large log volume
trace_all_requests = true trace_all_requests = true
# Store conversation history (for debugging and learning) # Store conversation history (for debugging and learning)
store_conversations = true conversation_retention_days = 30
conversation_retention_days = 30 store_conversations = true
# Metrics collection # Metrics collection
[ai.observability.metrics] [ai.observability.metrics]
enabled = true enabled = true
export_format = "prometheus" # "prometheus" | "opentelemetry" export_format = "prometheus" # "prometheus" | "opentelemetry"
export_port = 9090 export_port = 9090
# Metrics to collect # Metrics to collect
metrics = [ metrics = [
"request_count", # Total AI requests "request_count", # Total AI requests
"request_duration", # Latency histogram "request_duration", # Latency histogram
"token_usage", # Input/output tokens "token_usage", # Input/output tokens
@@ -314,225 +314,225 @@ metrics = [
"cache_hit_rate", # Cache effectiveness "cache_hit_rate", # Cache effectiveness
"validation_success_rate", # Generated config validity "validation_success_rate", # Generated config validity
"human_approval_rate", # How often humans approve AI output "human_approval_rate", # How often humans approve AI output
] ]
# Distributed tracing # Distributed tracing
[ai.observability.tracing] [ai.observability.tracing]
enabled = true enabled = true
jaeger_endpoint = "http://localhost:14268/api/traces" jaeger_endpoint = "http://localhost:14268/api/traces"
sample_rate = 0.1 # Sample 10% of requests sample_rate = 0.1 # Sample 10% of requests
# ============================================================================ # ============================================================================
# AI Agent Configuration (typdialog-ag) # AI Agent Configuration (typdialog-ag)
# ============================================================================ # ============================================================================
[ai.agents] [ai.agents]
# WARNING: Autonomous agents can execute multi-step workflows # WARNING: Autonomous agents can execute multi-step workflows
# Enable with caution, only for trusted users # Enable with caution, only for trusted users
# Enable AI agents globally # Enable AI agents globally
enabled = false enabled = false
# Maximum iterations per agent execution # Maximum iterations per agent execution
# Prevents infinite loops # Prevents infinite loops
max_iterations = 20 max_iterations = 20
# Agent timeout (seconds) # Agent timeout (seconds)
timeout = 300 timeout = 300
# Require approval for each agent action (RECOMMENDED: true) # Require approval for each agent action (RECOMMENDED: true)
# If false, agent executes entire workflow autonomously # If false, agent executes entire workflow autonomously
require_step_approval = true require_step_approval = true
# Agent types # Agent types
[ai.agents.types] [ai.agents.types]
# Provisioning agent: End-to-end infrastructure setup # Provisioning agent: End-to-end infrastructure setup
provisioning_agent = false provisioning_agent = false
# Troubleshooting agent: Diagnose and fix deployment issues # Troubleshooting agent: Diagnose and fix deployment issues
troubleshooting_agent = true troubleshooting_agent = true
# Optimization agent: Analyze and improve configurations # Optimization agent: Analyze and improve configurations
optimization_agent = true optimization_agent = true
# Security audit agent: Review configs for vulnerabilities # Security audit agent: Review configs for vulnerabilities
security_audit_agent = true security_audit_agent = true
# ============================================================================ # ============================================================================
# Configuration Generation (typdialog-prov-gen) # Configuration Generation (typdialog-prov-gen)
# ============================================================================ # ============================================================================
[ai.config_generation] [ai.config_generation]
# Default schema for generated configs # Default schema for generated configs
default_schema = "workspace" default_schema = "workspace"
# Validation mode # Validation mode
# "strict" = Reject any invalid config # "strict" = Reject any invalid config
# "permissive" = Allow configs with warnings # "permissive" = Allow configs with warnings
validation_mode = "strict" validation_mode = "strict"
# Best practice injection # Best practice injection
# Automatically add security/performance best practices # Automatically add security/performance best practices
inject_best_practices = true inject_best_practices = true
# Template usage # Template usage
# Use pre-defined templates as starting points # Use pre-defined templates as starting points
use_templates = true template_directory = "provisioning/templates"
template_directory = "provisioning/templates" use_templates = true
# ============================================================================ # ============================================================================
# Form Assistance (typdialog-ai) # Form Assistance (typdialog-ai)
# ============================================================================ # ============================================================================
[ai.form_assistance] [ai.form_assistance]
# Real-time suggestions as user types # Real-time suggestions as user types
real_time_suggestions = true real_time_suggestions = true
# Minimum characters before triggering suggestions # Minimum characters before triggering suggestions
min_chars_for_suggestions = 3 min_chars_for_suggestions = 3
# Maximum suggestions per field # Maximum suggestions per field
max_suggestions = 5 max_suggestions = 5
# Suggestion confidence threshold (0.0-1.0) # Suggestion confidence threshold (0.0-1.0)
# Only show suggestions with confidence above threshold # Only show suggestions with confidence above threshold
confidence_threshold = 0.7 confidence_threshold = 0.7
# Natural language form filling # Natural language form filling
# User can describe entire form in plain English # User can describe entire form in plain English
nl_form_filling = true nl_form_filling = true
# ============================================================================ # ============================================================================
# Environment-Specific Overrides # Environment-Specific Overrides
# ============================================================================ # ============================================================================
# Development environment # Development environment
[ai.environments.dev] [ai.environments.dev]
enabled = true cost_limit_per_day = "10.00"
provider = "openai" # Cheaper for dev enabled = true
model = "gpt-4-turbo" model = "gpt-4-turbo"
require_human_approval = false # Faster iteration provider = "openai" # Cheaper for dev
cost_limit_per_day = "10.00" require_human_approval = false # Faster iteration
# Staging environment # Staging environment
[ai.environments.staging] [ai.environments.staging]
enabled = true cost_limit_per_day = "50.00"
provider = "anthropic" enabled = true
model = "claude-sonnet-4" model = "claude-sonnet-4"
require_human_approval = true provider = "anthropic"
cost_limit_per_day = "50.00" require_human_approval = true
# Production environment # Production environment
[ai.environments.production] [ai.environments.production]
enabled = true autonomous_agents = false # NEVER enable in production
provider = "anthropic" cost_limit_per_day = "100.00"
model = "claude-sonnet-4" enabled = true
require_human_approval = true # ALWAYS true for production model = "claude-sonnet-4"
autonomous_agents = false # NEVER enable in production provider = "anthropic"
cost_limit_per_day = "100.00" require_human_approval = true # ALWAYS true for production
# ============================================================================ # ============================================================================
# Integration with Other Services # Integration with Other Services
# ============================================================================ # ============================================================================
[ai.integration] [ai.integration]
# Orchestrator integration # Orchestrator integration
orchestrator_url = "https://orchestrator.example.com" orchestrator_api_key = "env:ORCHESTRATOR_API_KEY"
orchestrator_api_key = "env:ORCHESTRATOR_API_KEY" orchestrator_url = "https://orchestrator.example.com"
# SecretumVault integration (for secret name suggestions only) # SecretumVault integration (for secret name suggestions only)
secretum_vault_url = "https://vault.example.com:8200" secretum_vault_token = "env:VAULT_TOKEN"
secretum_vault_token = "env:VAULT_TOKEN" secretum_vault_url = "https://vault.example.com:8200"
# AI can query secret names/paths but NEVER values # AI can query secret names/paths but NEVER values
# Typdialog Web UI integration # Typdialog Web UI integration
typdialog_url = "https://forms.provisioning.example.com" typdialog_url = "https://forms.provisioning.example.com"
typdialog_websocket_enabled = true typdialog_websocket_enabled = true
# ============================================================================ # ============================================================================
# Advanced Settings # Advanced Settings
# ============================================================================ # ============================================================================
[ai.advanced] [ai.advanced]
# Prompt engineering # Prompt engineering
system_prompt_template = "provisioning/ai/prompts/system.txt" system_prompt_template = "provisioning/ai/prompts/system.txt"
user_prompt_template = "provisioning/ai/prompts/user.txt" user_prompt_template = "provisioning/ai/prompts/user.txt"
# Context window management # Context window management
max_context_tokens = 100000 # Claude Sonnet 4 context window context_truncation_strategy = "sliding_window" # "sliding_window" | "summarize"
context_truncation_strategy = "sliding_window" # "sliding_window" | "summarize" max_context_tokens = 100000 # Claude Sonnet 4 context window
# Streaming responses # Streaming responses
enable_streaming = true enable_streaming = true
stream_chunk_size = 100 # Characters per chunk stream_chunk_size = 100 # Characters per chunk
# Concurrent requests # Concurrent requests
max_concurrent_requests = 10 max_concurrent_requests = 10
# ============================================================================ # ============================================================================
# Experimental Features (Use at Your Own Risk) # Experimental Features (Use at Your Own Risk)
# ============================================================================ # ============================================================================
[ai.experimental] [ai.experimental]
# Multi-agent collaboration # Multi-agent collaboration
# Multiple AI agents work together on complex tasks # Multiple AI agents work together on complex tasks
multi_agent_collaboration = false multi_agent_collaboration = false
# Reinforcement learning from human feedback (RLHF) # Reinforcement learning from human feedback (RLHF)
# Learn from user corrections to improve over time # Learn from user corrections to improve over time
rlhf_enabled = false rlhf_enabled = false
# Fine-tuning on deployment history # Fine-tuning on deployment history
# Train custom models on organization-specific patterns # Train custom models on organization-specific patterns
fine_tuning = false fine_tuning = false
fine_tuning_dataset_path = "provisioning/ai/fine-tuning-data" fine_tuning_dataset_path = "provisioning/ai/fine-tuning-data"
# ============================================================================ # ============================================================================
# Compliance and Legal # Compliance and Legal
# ============================================================================ # ============================================================================
[ai.compliance] [ai.compliance]
# Data residency requirements # Data residency requirements
# Ensure LLM provider complies with data residency laws # Ensure LLM provider complies with data residency laws
data_residency = "us" # "us" | "eu" | "local" data_residency = "us" # "us" | "eu" | "local"
# GDPR compliance mode # GDPR compliance mode
gdpr_mode = false gdpr_data_retention_days = 90
gdpr_data_retention_days = 90 gdpr_mode = false
# SOC 2 compliance logging # SOC 2 compliance logging
soc2_logging = false soc2_logging = false
# Terms of service acceptance # Terms of service acceptance
# Must explicitly accept LLM provider TOS # Must explicitly accept LLM provider TOS
tos_accepted = false tos_accepted = false
tos_version = "2025-01-08" tos_version = "2025-01-08"
# IMPORTANT NOTES: # IMPORTANT NOTES:
# #
# 1. API Keys: NEVER hardcode API keys. Always use environment variables. # 1. API Keys: NEVER hardcode API keys. Always use environment variables.
# Example: api_key = "env:ANTHROPIC_API_KEY" # Example: api_key = "env:ANTHROPIC_API_KEY"
# #
# 2. Security: Keep require_human_approval = true for production. # 2. Security: Keep require_human_approval = true for production.
# AI-generated configs must be reviewed by humans. # AI-generated configs must be reviewed by humans.
# #
# 3. Costs: Monitor LLM API usage. Set appropriate cost_limit_per_day. # 3. Costs: Monitor LLM API usage. Set appropriate cost_limit_per_day.
# Default limits are conservative but may need adjustment. # Default limits are conservative but may need adjustment.
# #
# 4. Privacy: For sensitive workloads, use local models (no external API calls). # 4. Privacy: For sensitive workloads, use local models (no external API calls).
# Set provider = "local" and configure local model path. # Set provider = "local" and configure local model path.
# #
# 5. RAG Index: Regularly reindex to keep AI knowledge up-to-date. # 5. RAG Index: Regularly reindex to keep AI knowledge up-to-date.
# Set auto_reindex = true and adjust reindex_interval_hours. # Set auto_reindex = true and adjust reindex_interval_hours.
# #
# 6. Cedar Policies: Define fine-grained AI access control in Cedar. # 6. Cedar Policies: Define fine-grained AI access control in Cedar.
# Location: /etc/provisioning/cedar-policies/ai # Location: /etc/provisioning/cedar-policies/ai
# #
# 7. Audit Logs: AI operations are security-critical. Keep audit_all_operations = true. # 7. Audit Logs: AI operations are security-critical. Keep audit_all_operations = true.
# Logs stored in: /var/log/provisioning/ai-audit.log # Logs stored in: /var/log/provisioning/ai-audit.log
# #
# 8. Agents: Autonomous agents are powerful but risky. # 8. Agents: Autonomous agents are powerful but risky.
# Enable only for specific use cases, never globally in production. # Enable only for specific use cases, never globally in production.
# Version: 1.0 # Version: 1.0
# Last Updated: 2025-01-08 # Last Updated: 2025-01-08

View File

@@ -2,21 +2,21 @@
enabled = false enabled = false
redact_sensitive = true redact_sensitive = true
[control_center.audit.storage] [control_center.audit.storage]
immutable = false immutable = false
retention_days = 90 retention_days = 90
[control_center.compliance] [control_center.compliance]
enabled = false enabled = false
encryption_required = false encryption_required = false
[control_center.compliance.data_retention] [control_center.compliance.data_retention]
audit_log_days = 2555 audit_log_days = 2555
policy_years = 7 policy_years = 7
[control_center.compliance.validation] [control_center.compliance.validation]
enabled = false enabled = false
interval_hours = 24 interval_hours = 24
[control_center.database] [control_center.database]
backend = "rocksdb" backend = "rocksdb"
@ -40,78 +40,78 @@ format = "&"
level = "&" level = "&"
outputs = ["stdout"] outputs = ["stdout"]
[control_center.logging.fields] [control_center.logging.fields]
caller = false caller = false
hostname = true hostname = true
pid = true pid = true
service_name = true service_name = true
stack_trace = false stack_trace = false
timestamp = true timestamp = true
[control_center.logging.file] [control_center.logging.file]
compress = false compress = false
max_age = 30 max_age = 30
max_backups = 10 max_backups = 10
max_size = 104857600 max_size = 104857600
path = "/var/log/provisioning/service.log" path = "/var/log/provisioning/service.log"
[control_center.logging.performance] [control_center.logging.performance]
enabled = false enabled = false
memory_info = false memory_info = false
slow_threshold = 1000 slow_threshold = 1000
[control_center.logging.sampling] [control_center.logging.sampling]
enabled = false enabled = false
initial = 100 initial = 100
thereafter = 100 thereafter = 100
[control_center.logging.syslog] [control_center.logging.syslog]
protocol = "udp" protocol = "udp"
[control_center.monitoring] [control_center.monitoring]
enabled = false enabled = false
[control_center.monitoring.alerting] [control_center.monitoring.alerting]
enabled = false enabled = false
[control_center.monitoring.health_check] [control_center.monitoring.health_check]
enabled = false enabled = false
endpoint = "/health" endpoint = "/health"
healthy_threshold = 2 healthy_threshold = 2
interval = 30 interval = 30
timeout = 5000 timeout = 5000
type = "&" type = "&"
unhealthy_threshold = 3 unhealthy_threshold = 3
[control_center.monitoring.metrics] [control_center.monitoring.metrics]
buffer_size = 1000 buffer_size = 1000
enabled = false enabled = false
interval = 60 interval = 60
prometheus_path = "/metrics" prometheus_path = "/metrics"
retention_days = 30 retention_days = 30
[control_center.monitoring.resources] [control_center.monitoring.resources]
alert_threshold = 80 alert_threshold = 80
cpu = false cpu = false
disk = false disk = false
memory = false memory = false
network = false network = false
[control_center.monitoring.tracing] [control_center.monitoring.tracing]
enabled = false enabled = false
sample_rate = 0.1 sample_rate = 0.1
[control_center.policy] [control_center.policy]
enabled = true enabled = true
[control_center.policy.cache] [control_center.policy.cache]
enabled = true enabled = true
max_policies = 10000 max_policies = 10000
ttl = 3600 ttl = 3600
[control_center.policy.versioning] [control_center.policy.versioning]
enabled = true enabled = true
max_versions = 20 max_versions = 20
[control_center.rbac] [control_center.rbac]
attribute_based = false attribute_based = false
@ -120,10 +120,10 @@ dynamic_roles = false
enabled = true enabled = true
hierarchy = true hierarchy = true
[control_center.rbac.roles] [control_center.rbac.roles]
admin = true admin = true
operator = true operator = true
viewer = true viewer = true
[control_center.security.cors] [control_center.security.cors]
allow_credentials = false allow_credentials = false
@ -176,15 +176,15 @@ workers = 4
audit_enabled = false audit_enabled = false
enabled = true enabled = true
[control_center.users.registration] [control_center.users.registration]
auto_assign_role = "user" auto_assign_role = "user"
enabled = true enabled = true
requires_approval = false requires_approval = false
[control_center.users.sessions] [control_center.users.sessions]
absolute_timeout = 86400 absolute_timeout = 86400
idle_timeout = 3600 idle_timeout = 3600
max_active = 5 max_active = 5
[control_center.workspace] [control_center.workspace]
enabled = true enabled = true

View File

@ -2,85 +2,85 @@
# High-availability, multi-source, multi-registry production deployment # High-availability, multi-source, multi-registry production deployment
[server] [server]
enable_compression = true
enable_cors = true
host = "0.0.0.0" host = "0.0.0.0"
port = 8082 port = 8082
workers = 8 workers = 8
enable_cors = true
enable_compression = true
# Primary internal Gitea instance # Primary internal Gitea instance
[[sources.gitea]] [[sources.gitea]]
id = "primary-internal-gitea" id = "primary-internal-gitea"
url = "https://gitea.internal.company.com"
organization = "platform-extensions" organization = "platform-extensions"
token_path = "/etc/secrets/gitea-primary-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/gitea-primary-token.txt"
url = "https://gitea.internal.company.com"
verify_ssl = true verify_ssl = true
# Secondary internal Gitea (failover) # Secondary internal Gitea (failover)
[[sources.gitea]] [[sources.gitea]]
id = "secondary-internal-gitea" id = "secondary-internal-gitea"
url = "https://gitea-secondary.internal.company.com"
organization = "platform-extensions" organization = "platform-extensions"
token_path = "/etc/secrets/gitea-secondary-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/gitea-secondary-token.txt"
url = "https://gitea-secondary.internal.company.com"
verify_ssl = true verify_ssl = true
# Forgejo for community extensions # Forgejo for community extensions
[[sources.forgejo]] [[sources.forgejo]]
id = "enterprise-forgejo" id = "enterprise-forgejo"
url = "https://forge.company.com"
organization = "platform" organization = "platform"
token_path = "/etc/secrets/forgejo-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/forgejo-token.txt"
url = "https://forge.company.com"
verify_ssl = true verify_ssl = true
# GitHub organization # GitHub organization
[[sources.github]] [[sources.github]]
id = "company-github" id = "company-github"
organization = "company-platform" organization = "company-platform"
token_path = "/etc/secrets/github-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/github-token.txt"
verify_ssl = true verify_ssl = true
# Primary enterprise OCI registry (Zot) # Primary enterprise OCI registry (Zot)
[[distributions.oci]] [[distributions.oci]]
id = "primary-oci-zot" id = "primary-oci-zot"
registry = "zot.internal.company.com"
namespace = "platform/extensions" namespace = "platform/extensions"
registry = "zot.internal.company.com"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# Secondary enterprise OCI registry (Harbor) # Secondary enterprise OCI registry (Harbor)
[[distributions.oci]] [[distributions.oci]]
id = "secondary-oci-harbor"
registry = "harbor.internal.company.com"
namespace = "platform"
auth_token_path = "/etc/secrets/harbor-token.txt" auth_token_path = "/etc/secrets/harbor-token.txt"
id = "secondary-oci-harbor"
namespace = "platform"
registry = "harbor.internal.company.com"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# Public Docker Hub for external distribution # Public Docker Hub for external distribution
[[distributions.oci]] [[distributions.oci]]
id = "public-docker-hub"
registry = "docker.io"
namespace = "company-open-source"
auth_token_path = "/etc/secrets/docker-hub-token.txt" auth_token_path = "/etc/secrets/docker-hub-token.txt"
id = "public-docker-hub"
namespace = "company-open-source"
registry = "docker.io"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# Public GHCR for open-source projects # Public GHCR for open-source projects
[[distributions.oci]] [[distributions.oci]]
id = "public-ghcr"
registry = "ghcr.io"
namespace = "company-open-source"
auth_token_path = "/etc/secrets/ghcr-token.txt" auth_token_path = "/etc/secrets/ghcr-token.txt"
id = "public-ghcr"
namespace = "company-open-source"
registry = "ghcr.io"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# Caching configuration for high-traffic enterprise environment # Caching configuration for high-traffic enterprise environment
[cache] [cache]
capacity = 5000 capacity = 5000
ttl_seconds = 600
enable_metadata_cache = true
enable_list_cache = true enable_list_cache = true
enable_metadata_cache = true
ttl_seconds = 600

View File

@ -3,87 +3,87 @@
# multiple Git-based sources (Gitea, Forgejo, GitHub) and multiple OCI registries # multiple Git-based sources (Gitea, Forgejo, GitHub) and multiple OCI registries
[server] [server]
enable_compression = true
enable_cors = false
host = "0.0.0.0" host = "0.0.0.0"
port = 8082 port = 8082
workers = 4 workers = 4
enable_cors = false
enable_compression = true
# Multiple Git-based source backends # Multiple Git-based source backends
# Internal Gitea instance for private extensions # Internal Gitea instance for private extensions
[[sources.gitea]] [[sources.gitea]]
id = "internal-gitea" id = "internal-gitea"
url = "https://gitea.internal.example.com"
organization = "provisioning" organization = "provisioning"
token_path = "/etc/secrets/gitea-internal-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/gitea-internal-token.txt"
url = "https://gitea.internal.example.com"
verify_ssl = true verify_ssl = true
# Public Gitea instance for community extensions # Public Gitea instance for community extensions
[[sources.gitea]] [[sources.gitea]]
id = "public-gitea" id = "public-gitea"
url = "https://gitea.public.example.com"
organization = "provisioning-extensions" organization = "provisioning-extensions"
token_path = "/etc/secrets/gitea-public-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/gitea-public-token.txt"
url = "https://gitea.public.example.com"
verify_ssl = true verify_ssl = true
# Forgejo sources (Git-compatible) # Forgejo sources (Git-compatible)
[[sources.forgejo]] [[sources.forgejo]]
id = "community-forgejo" id = "community-forgejo"
url = "https://forgejo.community.example.com"
organization = "provisioning" organization = "provisioning"
token_path = "/etc/secrets/forgejo-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/forgejo-token.txt"
url = "https://forgejo.community.example.com"
verify_ssl = true verify_ssl = true
# GitHub sources # GitHub sources
[[sources.github]] [[sources.github]]
id = "org-github" id = "org-github"
organization = "my-organization" organization = "my-organization"
token_path = "/etc/secrets/github-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/github-token.txt"
verify_ssl = true verify_ssl = true
# Multiple OCI distribution registries # Multiple OCI distribution registries
# Internal Zot registry # Internal Zot registry
[[distributions.oci]] [[distributions.oci]]
id = "internal-zot" id = "internal-zot"
registry = "zot.internal.example.com"
namespace = "provisioning/extensions" namespace = "provisioning/extensions"
registry = "zot.internal.example.com"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# Public Harbor registry # Public Harbor registry
[[distributions.oci]] [[distributions.oci]]
id = "public-harbor"
registry = "harbor.public.example.com"
namespace = "provisioning"
auth_token_path = "/etc/secrets/harbor-token.txt" auth_token_path = "/etc/secrets/harbor-token.txt"
id = "public-harbor"
namespace = "provisioning"
registry = "harbor.public.example.com"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# Docker Hub # Docker Hub
[[distributions.oci]] [[distributions.oci]]
id = "docker-hub"
registry = "docker.io"
namespace = "myorg"
auth_token_path = "/etc/secrets/docker-hub-token.txt" auth_token_path = "/etc/secrets/docker-hub-token.txt"
id = "docker-hub"
namespace = "myorg"
registry = "docker.io"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# GHCR (GitHub Container Registry) # GHCR (GitHub Container Registry)
[[distributions.oci]] [[distributions.oci]]
id = "ghcr"
registry = "ghcr.io"
namespace = "my-organization"
auth_token_path = "/etc/secrets/ghcr-token.txt" auth_token_path = "/etc/secrets/ghcr-token.txt"
id = "ghcr"
namespace = "my-organization"
registry = "ghcr.io"
timeout_seconds = 30 timeout_seconds = 30
verify_ssl = true verify_ssl = true
# Caching configuration # Caching configuration
[cache] [cache]
capacity = 1000 capacity = 1000
ttl_seconds = 300
enable_metadata_cache = true
enable_list_cache = true enable_list_cache = true
enable_metadata_cache = true
ttl_seconds = 300

View File

@ -3,23 +3,23 @@
# Old single-instance format (auto-migrated to multi-instance on startup) # Old single-instance format (auto-migrated to multi-instance on startup)
[server] [server]
enable_compression = true
enable_cors = false
host = "127.0.0.1" host = "127.0.0.1"
port = 8082 port = 8082
workers = 2 workers = 2
enable_cors = false
enable_compression = true
# Single Gitea backend (auto-migrated to sources.gitea[0]) # Single Gitea backend (auto-migrated to sources.gitea[0])
[gitea] [gitea]
url = "http://localhost:3000"
organization = "provisioning" organization = "provisioning"
token_path = "/etc/secrets/gitea-token.txt"
timeout_seconds = 30 timeout_seconds = 30
token_path = "/etc/secrets/gitea-token.txt"
url = "http://localhost:3000"
verify_ssl = false verify_ssl = false
# Caching configuration # Caching configuration
[cache] [cache]
capacity = 100 capacity = 100
ttl_seconds = 300
enable_metadata_cache = true
enable_list_cache = true enable_list_cache = true
enable_metadata_cache = true
ttl_seconds = 300

View File

@ -3,15 +3,15 @@ metrics = false
operation_timeout = 1800000 operation_timeout = 1800000
parallel_limit = 5 parallel_limit = 5
[orchestrator.batch.checkpointing] [orchestrator.batch.checkpointing]
enabled = true enabled = true
interval = 100 interval = 100
max_checkpoints = 10 max_checkpoints = 10
[orchestrator.batch.rollback] [orchestrator.batch.rollback]
enabled = true enabled = true
max_rollback_depth = 5 max_rollback_depth = 5
strategy = "checkpoint_based" strategy = "checkpoint_based"
[orchestrator.extensions] [orchestrator.extensions]
auto_load = false auto_load = false
@ -25,66 +25,66 @@ format = "&"
level = "&" level = "&"
outputs = ["stdout"] outputs = ["stdout"]
[orchestrator.logging.fields] [orchestrator.logging.fields]
caller = false caller = false
hostname = true hostname = true
pid = true pid = true
service_name = true service_name = true
stack_trace = false stack_trace = false
timestamp = true timestamp = true
[orchestrator.logging.file] [orchestrator.logging.file]
compress = false compress = false
max_age = 30 max_age = 30
max_backups = 10 max_backups = 10
max_size = 104857600 max_size = 104857600
path = "/var/log/provisioning/service.log" path = "/var/log/provisioning/service.log"
[orchestrator.logging.performance] [orchestrator.logging.performance]
enabled = false enabled = false
memory_info = false memory_info = false
slow_threshold = 1000 slow_threshold = 1000
[orchestrator.logging.sampling] [orchestrator.logging.sampling]
enabled = false enabled = false
initial = 100 initial = 100
thereafter = 100 thereafter = 100
[orchestrator.logging.syslog] [orchestrator.logging.syslog]
protocol = "udp" protocol = "udp"
[orchestrator.monitoring] [orchestrator.monitoring]
enabled = false enabled = false
[orchestrator.monitoring.alerting] [orchestrator.monitoring.alerting]
enabled = false enabled = false
[orchestrator.monitoring.health_check] [orchestrator.monitoring.health_check]
enabled = false enabled = false
endpoint = "/health" endpoint = "/health"
healthy_threshold = 2 healthy_threshold = 2
interval = 30 interval = 30
timeout = 5000 timeout = 5000
type = "&" type = "&"
unhealthy_threshold = 3 unhealthy_threshold = 3
[orchestrator.monitoring.metrics] [orchestrator.monitoring.metrics]
buffer_size = 1000 buffer_size = 1000
enabled = false enabled = false
interval = 60 interval = 60
prometheus_path = "/metrics" prometheus_path = "/metrics"
retention_days = 30 retention_days = 30
[orchestrator.monitoring.resources] [orchestrator.monitoring.resources]
alert_threshold = 80 alert_threshold = 80
cpu = false cpu = false
disk = false disk = false
memory = false memory = false
network = false network = false
[orchestrator.monitoring.tracing] [orchestrator.monitoring.tracing]
enabled = false enabled = false
sample_rate = 0.1 sample_rate = 0.1
[orchestrator.queue] [orchestrator.queue]
max_concurrent_tasks = 5 max_concurrent_tasks = 5
@ -95,9 +95,9 @@ retry_attempts = 3
retry_delay = 5000 retry_delay = 5000
task_timeout = 3600000 task_timeout = 3600000
[orchestrator.queue.dead_letter_queue] [orchestrator.queue.dead_letter_queue]
enabled = true enabled = true
max_size = 1000 max_size = 1000
[orchestrator.server] [orchestrator.server]
graceful_shutdown = true graceful_shutdown = true
@ -113,11 +113,11 @@ workers = 4
backend = "filesystem" backend = "filesystem"
path = "/var/lib/provisioning/orchestrator/data" path = "/var/lib/provisioning/orchestrator/data"
[orchestrator.storage.cache] [orchestrator.storage.cache]
enabled = true enabled = true
eviction_policy = "lru" eviction_policy = "lru"
ttl = 3600 ttl = 3600
type = "in_memory" type = "in_memory"
[orchestrator.workspace] [orchestrator.workspace]
enabled = true enabled = true

View File

@ -3,15 +3,15 @@ metrics = false
operation_timeout = 1800000 operation_timeout = 1800000
parallel_limit = 5 parallel_limit = 5
[orchestrator.batch.checkpointing] [orchestrator.batch.checkpointing]
enabled = true enabled = true
interval = 100 interval = 100
max_checkpoints = 10 max_checkpoints = 10
[orchestrator.batch.rollback] [orchestrator.batch.rollback]
enabled = true enabled = true
max_rollback_depth = 5 max_rollback_depth = 5
strategy = "checkpoint_based" strategy = "checkpoint_based"
[orchestrator.extensions] [orchestrator.extensions]
auto_load = false auto_load = false
@ -25,66 +25,66 @@ format = "&"
level = "&" level = "&"
outputs = ["stdout"] outputs = ["stdout"]
[orchestrator.logging.fields] [orchestrator.logging.fields]
caller = false caller = false
hostname = true hostname = true
pid = true pid = true
service_name = true service_name = true
stack_trace = false stack_trace = false
timestamp = true timestamp = true
[orchestrator.logging.file] [orchestrator.logging.file]
compress = false compress = false
max_age = 30 max_age = 30
max_backups = 10 max_backups = 10
max_size = 104857600 max_size = 104857600
path = "/var/log/provisioning/service.log" path = "/var/log/provisioning/service.log"
[orchestrator.logging.performance] [orchestrator.logging.performance]
enabled = false enabled = false
memory_info = false memory_info = false
slow_threshold = 1000 slow_threshold = 1000
[orchestrator.logging.sampling] [orchestrator.logging.sampling]
enabled = false enabled = false
initial = 100 initial = 100
thereafter = 100 thereafter = 100
[orchestrator.logging.syslog] [orchestrator.logging.syslog]
protocol = "udp" protocol = "udp"
[orchestrator.monitoring] [orchestrator.monitoring]
enabled = false enabled = false
[orchestrator.monitoring.alerting] [orchestrator.monitoring.alerting]
enabled = false enabled = false
[orchestrator.monitoring.health_check] [orchestrator.monitoring.health_check]
enabled = false enabled = false
endpoint = "/health" endpoint = "/health"
healthy_threshold = 2 healthy_threshold = 2
interval = 30 interval = 30
timeout = 5000 timeout = 5000
type = "&" type = "&"
unhealthy_threshold = 3 unhealthy_threshold = 3
[orchestrator.monitoring.metrics] [orchestrator.monitoring.metrics]
buffer_size = 1000 buffer_size = 1000
enabled = false enabled = false
interval = 60 interval = 60
prometheus_path = "/metrics" prometheus_path = "/metrics"
retention_days = 30 retention_days = 30
[orchestrator.monitoring.resources] [orchestrator.monitoring.resources]
alert_threshold = 80 alert_threshold = 80
cpu = false cpu = false
disk = false disk = false
memory = false memory = false
network = false network = false
[orchestrator.monitoring.tracing] [orchestrator.monitoring.tracing]
enabled = false enabled = false
sample_rate = 0.1 sample_rate = 0.1
[orchestrator.queue] [orchestrator.queue]
max_concurrent_tasks = 5 max_concurrent_tasks = 5
@ -95,9 +95,9 @@ retry_attempts = 3
retry_delay = 5000 retry_delay = 5000
task_timeout = 3600000 task_timeout = 3600000
[orchestrator.queue.dead_letter_queue] [orchestrator.queue.dead_letter_queue]
enabled = true enabled = true
max_size = 1000 max_size = 1000
[orchestrator.server] [orchestrator.server]
graceful_shutdown = true graceful_shutdown = true
@ -113,11 +113,11 @@ workers = 4
backend = "filesystem" backend = "filesystem"
path = "/var/lib/provisioning/orchestrator/data" path = "/var/lib/provisioning/orchestrator/data"
[orchestrator.storage.cache] [orchestrator.storage.cache]
enabled = true enabled = true
eviction_policy = "lru" eviction_policy = "lru"
ttl = 3600 ttl = 3600
type = "in_memory" type = "in_memory"
[orchestrator.workspace] [orchestrator.workspace]
enabled = true enabled = true