chore: add prov-gen crate for typedialog generation

parent 5218bf8867
commit 34508cddf4
54 crates/typedialog-prov-gen/Cargo.toml Normal file
@@ -0,0 +1,54 @@
[package]
name = "typedialog-prov-gen"
version.workspace = true
edition.workspace = true
authors.workspace = true
repository.workspace = true
license.workspace = true

[dependencies]
# Workspace dependencies
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
toml = { workspace = true }
anyhow = { workspace = true }
thiserror = { workspace = true }
clap = { workspace = true }
async-trait = { workspace = true }
tokio = { workspace = true }
futures = { workspace = true }
tera = { workspace = true }
chrono = { workspace = true }
rand = { workspace = true }
tempfile = { workspace = true }
dirs = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }

# Internal dependencies (workspace path)
typedialog-core = { path = "../typedialog-core", features = ["ai_backend"] }
typedialog-ai = { path = "../typedialog-ai" }

# Additional workspace dependencies
cargo_toml = { workspace = true }
uuid = { workspace = true }
regex = { workspace = true }
strum = { workspace = true }
strum_macros = { workspace = true }

[dev-dependencies]
tempfile = { workspace = true }

[[bin]]
name = "typedialog-prov-gen"
path = "src/main.rs"

[package.metadata.binstall]
pkg-url = "{ repo }/releases/download/v{ version }/typedialog-{ target }.tar.gz"
bin-dir = "bin/{ bin }"
pkg-fmt = "tgz"

[lib]
name = "typedialog_prov_gen"
path = "src/lib.rs"
5 crates/typedialog-prov-gen/src/ai/mod.rs Normal file
@@ -0,0 +1,5 @@
//! AI integration for interactive wizard and RAG retrieval.

pub mod wizard;

pub use wizard::InteractiveWizard;
53 crates/typedialog-prov-gen/src/ai/wizard.rs Normal file
@@ -0,0 +1,53 @@
//! Mode C: Interactive AI-powered wizard for project configuration.

use crate::error::Result;
use crate::models::{
    ConfigField, DomainFeature, FieldType, InfrastructureSpec, ProjectSpec, ProjectType,
};

/// Interactive wizard using typedialog-ai for conversational generation.
pub struct InteractiveWizard;

impl InteractiveWizard {
    /// Run the interactive wizard to build a ProjectSpec.
    pub async fn run(project_name: Option<String>) -> Result<ProjectSpec> {
        // Fallback implementation without AI (suitable for local testing).
        // In production, this would integrate with typedialog-ai for conversational generation.

        let name = project_name.unwrap_or_else(|| "my-project".to_string());

        // Simple defaults for wizard mode
        let spec = ProjectSpec {
            name,
            project_type: ProjectType::WebService,
            infrastructure: InfrastructureSpec::default(),
            domain_features: vec![DomainFeature::new("basic_config".to_string())],
            constraints: Vec::new(),
        };

        Ok(spec)
    }

    /// AI-assisted feature design (stub for typedialog-ai integration).
    #[allow(dead_code)]
    fn suggest_features_with_ai(_project_description: &str) -> Vec<DomainFeature> {
        // TODO: Integrate with the typedialog-ai RAG system. This would:
        // 1. Send the project description to an LLM
        // 2. Retrieve similar examples via RAG
        // 3. Generate feature suggestions based on patterns
        Vec::new()
    }

    /// Conversational field generation (stub for typedialog-ai integration).
    #[allow(dead_code)]
    fn generate_fields_conversationally(_feature_name: &str) -> Vec<ConfigField> {
        // TODO: Implement a multi-turn conversation using typedialog-ai
        // that asks clarifying questions and suggests field types.
        vec![ConfigField::new(
            "placeholder".to_string(),
            FieldType::Text,
            "Placeholder field".to_string(),
        )]
    }
}
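For reference, a minimal sketch of driving the wizard from async code. This is not part of the commit; it assumes the `ai` module is public at the crate root and that tokio's macros feature is enabled:

    use typedialog_prov_gen::ai::InteractiveWizard;

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // With no name given, the fallback implementation uses "my-project".
        let spec = InteractiveWizard::run(Some("demo-service".to_string())).await?;
        println!("{} ({:?})", spec.name, spec.project_type);
        Ok(())
    }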
156 crates/typedialog-prov-gen/src/cli/generate.rs Normal file
@@ -0,0 +1,156 @@
//! Generate command: orchestrates the provisioning generation pipeline.

use crate::error::Result;
use crate::input::{CargoIntrospector, ConfigLoader, NickelSchemaLoader};
use crate::models::ProjectSpec;
use std::path::PathBuf;
use tracing::{debug, info};

/// The Generate command orchestrates the entire provisioning generation pipeline.
pub struct GenerateCommand;

impl GenerateCommand {
    /// Execute the generate command with the specified parameters.
    pub async fn execute(
        mode: &str,
        input: Option<PathBuf>,
        output: PathBuf,
        project: Option<String>,
        dry_run: bool,
    ) -> Result<()> {
        info!("Starting provisioning generation (mode: {})", mode);

        // Step 1: Load or infer ProjectSpec based on input mode
        let spec = Self::load_spec(mode, input, project).await?;

        debug!(
            "Loaded ProjectSpec: {} ({:?})",
            spec.name, spec.project_type
        );

        // Step 2: Validate the spec
        spec.validate().map_err(|errors| {
            crate::error::ProvisioningGenError::Other(format!(
                "Invalid specification: {}",
                errors.join(", ")
            ))
        })?;

        info!("ProjectSpec validated successfully");

        // Step 3: Generate provisioning structure
        if dry_run {
            info!("DRY RUN: Would generate to {}", output.display());
            info!("Project: {}", spec.name);
            info!("Type: {:?}", spec.project_type);
            info!(
                "Features: {:?}",
                spec.domain_features
                    .iter()
                    .map(|f| &f.name)
                    .collect::<Vec<_>>()
            );
            return Ok(());
        }

        // Ensure the output directory exists
        std::fs::create_dir_all(&output).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to create output directory: {}",
                e
            ))
        })?;

        info!("Generating provisioning structure to {}", output.display());

        // Execute the 7-layer generation pipeline in order
        // Layer 1: Constraints (required by validators and fragments)
        use crate::generator::{
            ConstraintGenerator, DefaultsGenerator, FragmentGenerator, SchemaGenerator,
            ScriptGenerator, ValidatorGenerator,
        };

        ConstraintGenerator::generate(&spec, &output)?;
        debug!("✓ Constraints layer");

        // Layer 2: Schemas (domain types)
        SchemaGenerator::generate(&spec, &output)?;
        debug!("✓ Schemas layer");

        // Layer 3: Validators (validation logic)
        ValidatorGenerator::generate(&spec, &output)?;
        debug!("✓ Validators layer");

        // Layer 4: Defaults (sensible defaults)
        DefaultsGenerator::generate(&spec, &output)?;
        debug!("✓ Defaults layer");

        // Layer 5: Fragments (form UI components)
        FragmentGenerator::generate(&spec, &output)?;
        debug!("✓ Fragments layer");

        // Layer 6: Scripts (orchestration)
        ScriptGenerator::generate(&spec, &output)?;
        debug!("✓ Scripts layer");

        // TODO: Layer 7: JSON output generation

        info!("Provisioning generation completed successfully!");
        info!("Generated structure at: {}", output.display());

        Ok(())
    }

    /// Load ProjectSpec from the specified input mode.
    async fn load_spec(
        mode: &str,
        input: Option<PathBuf>,
        project_override: Option<String>,
    ) -> Result<ProjectSpec> {
        match mode.to_lowercase().as_str() {
            "cargo" => {
                let cargo_path = input.unwrap_or_else(|| PathBuf::from("Cargo.toml"));
                debug!("Loading from Cargo.toml: {}", cargo_path.display());
                CargoIntrospector::analyze(&cargo_path)
            }

            "config" => {
                let config_path = input.ok_or_else(|| {
                    crate::error::ProvisioningGenError::Other(
                        "Config mode requires --input parameter".to_string(),
                    )
                })?;
                debug!("Loading from config file: {}", config_path.display());
                ConfigLoader::load(&config_path)
            }

            "nickel" => {
                let schema_path = input.ok_or_else(|| {
                    crate::error::ProvisioningGenError::Other(
                        "Nickel mode requires --input parameter".to_string(),
                    )
                })?;
                debug!("Loading from Nickel schema: {}", schema_path.display());
                NickelSchemaLoader::load(&schema_path)
            }

            "wizard" => {
                use crate::ai::InteractiveWizard;
                debug!("Starting interactive wizard");
                InteractiveWizard::run(project_override.clone()).await
            }

            other => Err(crate::error::ProvisioningGenError::Other(format!(
                "Unknown mode: {}. Use: cargo, config, nickel, or wizard",
                other
            ))),
        }
        .map(|mut spec| {
            // Apply project name override if provided
            if let Some(name) = project_override {
                spec.name = name;
            }
            spec
        })
    }
}
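A minimal sketch of invoking the command programmatically against the signature above (hypothetical caller; argument values are illustrative):

    use std::path::PathBuf;
    use typedialog_prov_gen::cli::GenerateCommand;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Dry run: logs what would be generated without writing files.
        GenerateCommand::execute(
            "cargo",                           // mode: cargo | config | nickel | wizard
            Some(PathBuf::from("Cargo.toml")), // manifest to introspect
            PathBuf::from("./provisioning"),   // output directory
            None,                              // no project name override
            true,                              // dry_run
        )
        .await?;
        Ok(())
    }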
5 crates/typedialog-prov-gen/src/cli/mod.rs Normal file
@@ -0,0 +1,5 @@
//! CLI command handlers for provisioning generation.

pub mod generate;

pub use generate::GenerateCommand;
215 crates/typedialog-prov-gen/src/config.rs Normal file
@@ -0,0 +1,215 @@
//! Configuration loader for typedialog-prov-gen.
//!
//! Loads configuration from, in order:
//! 1. ~/.config/typedialog/prov-gen/{TYPEDIALOG_ENV}.toml
//! 2. ~/.config/typedialog/prov-gen/config.toml
//! 3. ./config/prov-gen/default.toml (project-local)
//! 4. Hardcoded defaults

use crate::error::Result;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    pub provisioning: ProvisioningConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProvisioningConfig {
    pub output_dir: String,
    pub default_providers: Vec<String>,
    pub generation: GenerationConfig,
    pub templates: TemplatesConfig,
    pub infrastructure: InfrastructureConfig,
    pub nickel: NickelConfig,
    pub ai: AiConfig,
    pub logging: LoggingConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GenerationConfig {
    pub overwrite: bool,
    pub dry_run: bool,
    pub verbose: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TemplatesConfig {
    pub base_path: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub custom_path: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct InfrastructureConfig {
    pub environment: String,
    pub region: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NickelConfig {
    pub validate_schemas: bool,
    pub generate_defaults: bool,
    pub use_constraints: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AiConfig {
    pub enabled: bool,
    pub provider: String,
    pub model: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoggingConfig {
    pub level: String,
    pub file: bool,
}

impl Config {
    /// Load configuration from an explicit path, the standard locations, or defaults.
    pub fn load(config_path: Option<&Path>) -> Result<Self> {
        // If an explicit config is provided, use it exclusively
        if let Some(path) = config_path {
            return Self::from_file(path);
        }

        // Try environment-specific config
        if let Ok(env) = std::env::var("TYPEDIALOG_ENV") {
            if let Some(config_dir) = dirs::config_dir() {
                let config_path = config_dir
                    .join("typedialog")
                    .join("prov-gen")
                    .join(format!("{}.toml", env));
                if config_path.exists() {
                    return Self::from_file(&config_path);
                }
            }
        }

        // Try default user config
        if let Some(config_dir) = dirs::config_dir() {
            let config_path = config_dir
                .join("typedialog")
                .join("prov-gen")
                .join("config.toml");
            if config_path.exists() {
                return Self::from_file(&config_path);
            }
        }

        // Try project config
        let project_config_path = Path::new("config/prov-gen/default.toml");
        if project_config_path.exists() {
            return Self::from_file(project_config_path);
        }

        // Return hardcoded defaults
        Ok(Self::default())
    }

    /// Load configuration from a specific file.
    fn from_file(path: &Path) -> Result<Self> {
        let content = std::fs::read_to_string(path)?;
        // toml::de::Error converts into ProvisioningGenError via its #[from] impl.
        Ok(toml::from_str(&content)?)
    }

    /// Get the absolute path to the templates directory.
    pub fn templates_dir(&self) -> PathBuf {
        if let Some(custom) = &self.provisioning.templates.custom_path {
            return Self::expand_path(custom);
        }

        let base = &self.provisioning.templates.base_path;

        // Expand ~ to the home directory
        let expanded = Self::expand_path(base);

        // If the expanded path is absolute and exists, use it
        if expanded.is_absolute() && expanded.exists() {
            return expanded;
        }

        // Try relative to the project
        let project_path = Path::new(".").join(base);
        if project_path.exists() {
            return project_path;
        }

        // Try relative to the binary location
        if let Ok(exe_path) = std::env::current_exe() {
            if let Some(parent) = exe_path.parent() {
                let relative_path = parent
                    .join("..")
                    .join("share")
                    .join("typedialog-prov-gen")
                    .join(base);
                if relative_path.exists() {
                    return relative_path;
                }
            }
        }

        // Return the expanded path as-is (will fail at runtime if not found)
        expanded
    }

    /// Expand ~ to the home directory.
    fn expand_path(path: &str) -> PathBuf {
        if path.starts_with("~/") || path == "~" {
            if let Some(home_dir) = dirs::home_dir() {
                let suffix = if path == "~" {
                    String::new()
                } else {
                    path[2..].to_string()
                };
                return home_dir.join(suffix);
            }
        }
        PathBuf::from(path)
    }
}

impl Default for Config {
    fn default() -> Self {
        Self {
            provisioning: ProvisioningConfig {
                output_dir: "./provisioning".to_string(),
                default_providers: vec!["aws".to_string(), "hetzner".to_string()],
                generation: GenerationConfig {
                    overwrite: false,
                    dry_run: false,
                    verbose: false,
                },
                templates: TemplatesConfig {
                    base_path: "~/.config/typedialog/prov-gen/templates".to_string(),
                    custom_path: None,
                },
                infrastructure: InfrastructureConfig {
                    environment: "development".to_string(),
                    region: "us-east-1".to_string(),
                },
                nickel: NickelConfig {
                    validate_schemas: true,
                    generate_defaults: true,
                    use_constraints: true,
                },
                ai: AiConfig {
                    enabled: false,
                    provider: "claude".to_string(),
                    model: "claude-3-5-sonnet-20241022".to_string(),
                },
                logging: LoggingConfig {
                    level: "info".to_string(),
                    file: false,
                },
            },
        }
    }
}
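Derived from the structs and Default impl above, a config file this loader accepts would look like the following (values are the hardcoded defaults):

    [provisioning]
    output_dir = "./provisioning"
    default_providers = ["aws", "hetzner"]

    [provisioning.generation]
    overwrite = false
    dry_run = false
    verbose = false

    [provisioning.templates]
    base_path = "~/.config/typedialog/prov-gen/templates"

    [provisioning.infrastructure]
    environment = "development"
    region = "us-east-1"

    [provisioning.nickel]
    validate_schemas = true
    generate_defaults = true
    use_constraints = true

    [provisioning.ai]
    enabled = false
    provider = "claude"
    model = "claude-3-5-sonnet-20241022"

    [provisioning.logging]
    level = "info"
    file = false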
210 crates/typedialog-prov-gen/src/error.rs Normal file
@@ -0,0 +1,210 @@
//! Error types for provisioning generation.
//!
//! Follows the M-ERRORS-CANONICAL-STRUCTS guideline: specific error types instead of generic enums.

use std::path::PathBuf;
use thiserror::Error;

pub type Result<T> = std::result::Result<T, ProvisioningGenError>;

/// Root error type combining all provisioning generation errors.
#[derive(Debug, Error)]
pub enum ProvisioningGenError {
    /// Cargo.toml introspection failed
    #[error("Cargo introspection failed: {0}")]
    CargoIntrospection(#[from] CargoIntrospectionError),

    /// Project spec loading failed
    #[error("Config loading failed: {0}")]
    ConfigLoading(#[from] ConfigLoadingError),

    /// Nickel schema loading failed
    #[error("Nickel schema loading failed: {0}")]
    NickelSchemaLoading(#[from] NickelSchemaLoadingError),

    /// Template rendering failed
    #[error("Template rendering failed: {0}")]
    TemplateRender(#[from] TemplateRenderError),

    /// Schema generation failed
    #[error("Schema generation failed: {0}")]
    SchemaGeneration(#[from] SchemaGenerationError),

    /// Fragment generation failed
    #[error("Fragment generation failed: {0}")]
    FragmentGeneration(#[from] FragmentGenerationError),

    /// Validator generation failed
    #[error("Validator generation failed: {0}")]
    ValidatorGeneration(#[from] ValidatorGenerationError),

    /// Constraint generation failed
    #[error("Constraint generation failed: {0}")]
    ConstraintGeneration(#[from] ConstraintGenerationError),

    /// File I/O error
    #[error("File I/O error: {0}")]
    Io(#[from] std::io::Error),

    /// JSON serialization/deserialization error
    #[error("JSON error: {0}")]
    Json(#[from] serde_json::Error),

    /// TOML serialization/deserialization error
    #[error("TOML error: {0}")]
    Toml(#[from] toml::de::Error),

    /// Generic error
    #[error("{0}")]
    Other(String),
}

/// Cargo.toml introspection errors.
#[derive(Debug)]
pub struct CargoIntrospectionError {
    pub cargo_path: PathBuf,
    pub reason: String,
}

impl std::fmt::Display for CargoIntrospectionError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to introspect {}: {}",
            self.cargo_path.display(),
            self.reason
        )
    }
}

impl std::error::Error for CargoIntrospectionError {}

/// Config loading errors.
#[derive(Debug)]
pub struct ConfigLoadingError {
    pub config_path: PathBuf,
    pub reason: String,
}

impl std::fmt::Display for ConfigLoadingError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to load config from {}: {}",
            self.config_path.display(),
            self.reason
        )
    }
}

impl std::error::Error for ConfigLoadingError {}

/// Nickel schema loading errors.
#[derive(Debug)]
pub struct NickelSchemaLoadingError {
    pub schema_path: PathBuf,
    pub reason: String,
}

impl std::fmt::Display for NickelSchemaLoadingError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to load Nickel schema from {}: {}",
            self.schema_path.display(),
            self.reason
        )
    }
}

impl std::error::Error for NickelSchemaLoadingError {}

/// Template rendering errors.
#[derive(Debug)]
pub struct TemplateRenderError {
    pub template_name: String,
    pub reason: String,
}

impl std::fmt::Display for TemplateRenderError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to render template '{}': {}",
            self.template_name, self.reason
        )
    }
}

impl std::error::Error for TemplateRenderError {}

/// Schema generation errors.
#[derive(Debug)]
pub struct SchemaGenerationError {
    pub feature_name: String,
    pub reason: String,
}

impl std::fmt::Display for SchemaGenerationError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to generate schema for feature '{}': {}",
            self.feature_name, self.reason
        )
    }
}

impl std::error::Error for SchemaGenerationError {}

/// Fragment generation errors.
#[derive(Debug)]
pub struct FragmentGenerationError {
    pub fragment_name: String,
    pub reason: String,
}

impl std::fmt::Display for FragmentGenerationError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to generate fragment '{}': {}",
            self.fragment_name, self.reason
        )
    }
}

impl std::error::Error for FragmentGenerationError {}

/// Validator generation errors.
#[derive(Debug)]
pub struct ValidatorGenerationError {
    pub feature_name: String,
    pub reason: String,
}

impl std::fmt::Display for ValidatorGenerationError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Failed to generate validators for feature '{}': {}",
            self.feature_name, self.reason
        )
    }
}

impl std::error::Error for ValidatorGenerationError {}

/// Constraint generation errors.
#[derive(Debug)]
pub struct ConstraintGenerationError {
    pub reason: String,
}

impl std::fmt::Display for ConstraintGenerationError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Failed to generate constraints: {}", self.reason)
    }
}

impl std::error::Error for ConstraintGenerationError {}
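A small sketch of consuming these types at a call site, assuming the `error` module is public at the crate root:

    use typedialog_prov_gen::error::{ProvisioningGenError, Result};

    fn report(res: Result<()>) {
        match res {
            Ok(()) => println!("generation succeeded"),
            // Specific variants carry their structured source errors
            Err(ProvisioningGenError::CargoIntrospection(e)) => eprintln!("cargo: {}", e),
            Err(ProvisioningGenError::Io(e)) => eprintln!("io: {}", e),
            Err(other) => eprintln!("error: {}", other),
        }
    }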
83 crates/typedialog-prov-gen/src/generator/constraint_generator.rs Normal file
@@ -0,0 +1,83 @@
//! Constraint generator: produces constraints.toml from domain features.

use crate::error::Result;
use crate::models::ProjectSpec;
use std::path::Path;

/// Generates constraints.toml from ProjectSpec.
pub struct ConstraintGenerator;

impl ConstraintGenerator {
    /// Generate the constraints.toml file.
    pub fn generate(spec: &ProjectSpec, output_dir: impl AsRef<Path>) -> Result<()> {
        let output_dir = output_dir.as_ref();
        tracing::info!("Generating constraints for project: {}", spec.name);

        let mut constraints_content = String::new();

        // Add header
        constraints_content.push_str(&format!(
            "# Constraint definitions for {}\n# Single source of truth for validation rules\n\n",
            spec.name
        ));

        // Generate constraint sections for each feature
        for feature in &spec.domain_features {
            constraints_content.push_str(&format!("[feature.{}]\n", feature.name));
            constraints_content.push_str("# Field constraints\n\n");

            for field in &feature.fields {
                if field.min.is_some() || field.max.is_some() {
                    constraints_content
                        .push_str(&format!("[feature.{}.{}]\n", feature.name, field.name));

                    if let Some(min) = field.min {
                        constraints_content.push_str(&format!("min = {}\n", min));
                    }
                    if let Some(max) = field.max {
                        constraints_content.push_str(&format!("max = {}\n", max));
                    }

                    constraints_content.push('\n');
                }
            }
        }

        // Add global constraints from the spec
        if !spec.constraints.is_empty() {
            constraints_content.push_str("\n# Global constraints\n\n");

            for constraint in &spec.constraints {
                constraints_content.push_str(&format!("[constraint.\"{}\"]\n", constraint.path));

                if let Some(min) = constraint.min_items {
                    constraints_content.push_str(&format!("min_items = {}\n", min));
                }
                if let Some(max) = constraint.max_items {
                    constraints_content.push_str(&format!("max_items = {}\n", max));
                }

                if constraint.unique {
                    constraints_content.push_str("unique = true\n");
                    if let Some(unique_key) = &constraint.unique_key {
                        constraints_content.push_str(&format!("unique_key = \"{}\"\n", unique_key));
                    }
                }

                constraints_content.push('\n');
            }
        }

        // Write constraints file
        let constraints_file = output_dir.join("constraints.toml");
        std::fs::write(&constraints_file, constraints_content).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to write constraints file: {}",
                e
            ))
        })?;

        tracing::info!("Generated constraints file: {}", constraints_file.display());
        Ok(())
    }
}
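Tracing the format! calls above, the emitted constraints.toml for a feature with one bounded field comes out roughly like this (feature and values illustrative):

    # Constraint definitions for my-project
    # Single source of truth for validation rules

    [feature.http_server]
    # Field constraints

    [feature.http_server.timeout_seconds]
    min = 1
    max = 300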
83 crates/typedialog-prov-gen/src/generator/defaults_generator.rs Normal file
@@ -0,0 +1,83 @@
//! Defaults generator: produces default configuration values in Nickel.

use crate::error::Result;
use crate::models::{FieldType, ProjectSpec};
use std::path::Path;

/// Generates Nickel defaults from domain features.
pub struct DefaultsGenerator;

impl DefaultsGenerator {
    /// Generate defaults for all domain features.
    pub fn generate(spec: &ProjectSpec, output_dir: impl AsRef<Path>) -> Result<()> {
        let output_dir = output_dir.as_ref();
        tracing::info!("Generating defaults for project: {}", spec.name);

        // Ensure the defaults directory exists
        let defaults_dir = output_dir.join("defaults");
        std::fs::create_dir_all(&defaults_dir).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to create defaults directory: {}",
                e
            ))
        })?;

        // Generate defaults for each feature
        for feature in &spec.domain_features {
            let mut defaults_content = String::new();

            defaults_content.push_str(&format!(
                "# Default configuration for {} feature\n# Generated for project: {}\n\n",
                feature.name, spec.name
            ));

            defaults_content.push_str(&format!("let {} = {{\n", feature.name));

            for field in &feature.fields {
                defaults_content.push_str(&format!("  # {}\n", field.prompt));

                if let Some(default) = &field.default {
                    defaults_content.push_str(&format!("  {} = {},\n", field.name, default));
                } else {
                    // Generate sensible defaults based on the field type
                    let default_val = Self::generate_default_value(field);
                    defaults_content.push_str(&format!(
                        "  {} = {}, # No default provided\n",
                        field.name, default_val
                    ));
                }
            }

            defaults_content.push_str("}\n\n");

            // Write defaults file
            let defaults_file = defaults_dir.join(format!("{}.ncl", feature.name));
            std::fs::write(&defaults_file, defaults_content).map_err(|e| {
                crate::error::ProvisioningGenError::Other(format!(
                    "Failed to write defaults file: {}",
                    e
                ))
            })?;

            tracing::debug!("Generated defaults for feature: {}", feature.name);
        }

        tracing::info!("Successfully generated defaults");
        Ok(())
    }

    /// Generate a sensible default value for a field type.
    fn generate_default_value(field: &crate::models::ConfigField) -> String {
        match field.field_type {
            FieldType::Text => "\"\"".to_string(),
            FieldType::Number => "0".to_string(),
            FieldType::Password => "\"\"".to_string(),
            FieldType::Confirm => "false".to_string(),
            FieldType::Select => "\"\"".to_string(),
            FieldType::MultiSelect => "[]".to_string(),
            FieldType::Editor => "\"\"".to_string(),
            FieldType::Date => "\"\"".to_string(),
            FieldType::RepeatingGroup => "[]".to_string(),
        }
    }
}
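Following the string building above, a generated defaults/http_server.ncl would come out roughly as (feature and fields illustrative):

    # Default configuration for http_server feature
    # Generated for project: my-project

    let http_server = {
      # Server bind address
      bind_address = "0.0.0.0:8080",
      # Request timeout
      timeout_seconds = 30,
    }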
123 crates/typedialog-prov-gen/src/generator/fragment_generator.rs Normal file
@@ -0,0 +1,123 @@
//! Fragment generator: produces TypeDialog form fragments from domain features.

use crate::error::Result;
use crate::models::{FieldType, ProjectSpec};
use std::path::Path;

/// Generates TypeDialog form fragments from domain features.
pub struct FragmentGenerator;

impl FragmentGenerator {
    /// Generate form fragments for all domain features.
    pub fn generate(spec: &ProjectSpec, output_dir: impl AsRef<Path>) -> Result<()> {
        let output_dir = output_dir.as_ref();
        tracing::info!("Generating form fragments for project: {}", spec.name);

        // Ensure the fragments directory exists
        let fragments_dir = output_dir.join("fragments");
        std::fs::create_dir_all(&fragments_dir).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to create fragments directory: {}",
                e
            ))
        })?;

        // Generate fragments for each feature
        for feature in &spec.domain_features {
            let mut fragment_content = String::new();

            fragment_content.push_str(&format!(
                "# Form fragment for {} feature\n# Auto-generated for project: {}\n\n",
                feature.name, spec.name
            ));

            fragment_content.push_str(&format!("[section.{}]\n", feature.name));

            if let Some(desc) = &feature.description {
                fragment_content.push_str(&format!("description = \"{}\"\n", desc));
            }

            fragment_content.push('\n');

            // Generate field definitions for this feature
            for field in &feature.fields {
                fragment_content.push_str(&format!("[[section.{}.fields]]\n", feature.name));

                fragment_content.push_str(&format!("name = \"{}\"\n", field.name));
                fragment_content.push_str(&format!("prompt = \"{}\"\n", field.prompt));
                fragment_content.push_str(&format!(
                    "type = \"{}\"\n",
                    Self::field_type_to_form_type(&field.field_type)
                ));

                if let Some(help) = &field.help {
                    fragment_content.push_str(&format!("help = \"{}\"\n", help));
                }

                if let Some(placeholder) = &field.placeholder {
                    fragment_content.push_str(&format!("placeholder = \"{}\"\n", placeholder));
                }

                if !field.required {
                    fragment_content.push_str("required = false\n");
                }

                if field.sensitive {
                    fragment_content.push_str("sensitive = true\n");
                    if let Some(backend) = &field.encryption_backend {
                        fragment_content
                            .push_str(&format!("encryption_backend = \"{}\"\n", backend));
                    }
                }

                if !field.options.is_empty() {
                    fragment_content.push_str("options = [\n");
                    for option in &field.options {
                        fragment_content.push_str(&format!("  \"{}\",\n", option));
                    }
                    fragment_content.push_str("]\n");
                }

                if field.min.is_some() || field.max.is_some() {
                    if let Some(min) = field.min {
                        fragment_content.push_str(&format!("min = {}\n", min));
                    }
                    if let Some(max) = field.max {
                        fragment_content.push_str(&format!("max = {}\n", max));
                    }
                }

                fragment_content.push('\n');
            }

            // Write fragment file
            let fragment_file = fragments_dir.join(format!("{}-section.toml", feature.name));
            std::fs::write(&fragment_file, fragment_content).map_err(|e| {
                crate::error::ProvisioningGenError::Other(format!(
                    "Failed to write fragment file: {}",
                    e
                ))
            })?;

            tracing::debug!("Generated fragment for feature: {}", feature.name);
        }

        tracing::info!("Successfully generated form fragments");
        Ok(())
    }

    /// Map ProjectSpec field types to TypeDialog form field types.
    fn field_type_to_form_type(field_type: &FieldType) -> &'static str {
        match field_type {
            FieldType::Text => "text",
            FieldType::Number => "number",
            FieldType::Password => "password",
            FieldType::Confirm => "confirm",
            FieldType::Select => "select",
            FieldType::MultiSelect => "multi_select",
            FieldType::Editor => "editor",
            FieldType::Date => "date",
            FieldType::RepeatingGroup => "repeating_group",
        }
    }
}
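Given the pushes above, a generated fragments/http_server-section.toml would look roughly like (feature and values illustrative):

    # Form fragment for http_server feature
    # Auto-generated for project: my-project

    [section.http_server]
    description = "HTTP/REST API server"

    [[section.http_server.fields]]
    name = "bind_address"
    prompt = "Server bind address"
    type = "text"
    help = "Format: IP:PORT (e.g., 0.0.0.0:8080)"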
15 crates/typedialog-prov-gen/src/generator/mod.rs Normal file
@@ -0,0 +1,15 @@
//! Code generators for provisioning structure.

pub mod constraint_generator;
pub mod defaults_generator;
pub mod fragment_generator;
pub mod schema_generator;
pub mod script_generator;
pub mod validator_generator;

pub use constraint_generator::ConstraintGenerator;
pub use defaults_generator::DefaultsGenerator;
pub use fragment_generator::FragmentGenerator;
pub use schema_generator::SchemaGenerator;
pub use script_generator::ScriptGenerator;
pub use validator_generator::ValidatorGenerator;
220 crates/typedialog-prov-gen/src/generator/schema_generator.rs Normal file
@@ -0,0 +1,220 @@
//! Schema generator: produces Nickel schemas from domain features.

use crate::error::Result;
use crate::models::*;
use std::path::Path;

/// Generates Nickel schema files (.ncl) from domain features.
pub struct SchemaGenerator;

impl SchemaGenerator {
    /// Generate schemas for all domain features.
    pub fn generate(spec: &ProjectSpec, output_dir: impl AsRef<Path>) -> Result<()> {
        let output_dir = output_dir.as_ref();
        tracing::info!(
            "Generating schemas for project: {} with {} features",
            spec.name,
            spec.domain_features.len()
        );

        // Ensure the schemas directory exists
        let schemas_dir = output_dir.join("schemas");
        std::fs::create_dir_all(&schemas_dir).map_err(|e| crate::error::SchemaGenerationError {
            feature_name: "root".to_string(),
            reason: format!("Failed to create schemas directory: {}", e),
        })?;

        // Generate a schema file for each domain feature
        for feature in &spec.domain_features {
            Self::generate_feature_schema(spec, feature, &schemas_dir)?;
        }

        // Generate a main schema that imports all features
        Self::generate_main_schema(spec, output_dir)?;

        tracing::info!("Successfully generated schemas for project: {}", spec.name);
        Ok(())
    }

    /// Generate a Nickel schema file for a single domain feature.
    fn generate_feature_schema(
        spec: &ProjectSpec,
        feature: &DomainFeature,
        schemas_dir: &Path,
    ) -> Result<()> {
        tracing::debug!("Generating schema for feature: {}", feature.name);

        let mut schema_content = String::new();

        // Add file header and imports
        schema_content.push_str(&format!(
            "# Schema for {} feature\n# Generated for project: {}\n\n",
            feature.name, spec.name
        ));

        // Define the feature record
        schema_content.push_str(&format!("let {} = {{\n", feature.name));

        // Add fields to the record
        for field in &feature.fields {
            schema_content.push_str(&Self::generate_field_schema(field)?);
        }

        schema_content.push_str("}\n\n");

        // Add validators for fields with constraints
        if let Some(constraints) = &feature.constraints {
            for path in constraints.keys() {
                schema_content.push_str(&format!("# Constraint for {}\n", path));
            }
        }

        // Write the schema file
        let schema_file = schemas_dir.join(format!("{}.ncl", feature.name));
        std::fs::write(&schema_file, schema_content).map_err(|e| {
            crate::error::SchemaGenerationError {
                feature_name: feature.name.clone(),
                reason: format!(
                    "Failed to write schema file {}: {}",
                    schema_file.display(),
                    e
                ),
            }
        })?;

        tracing::debug!("Generated schema file: {}", schema_file.display());
        Ok(())
    }

    /// Generate Nickel schema syntax for a single field.
    fn generate_field_schema(field: &ConfigField) -> Result<String> {
        let mut field_def = String::new();

        // Add a field comment if help text exists
        if let Some(help) = &field.help {
            field_def.push_str(&format!("  # {}\n", help));
        }

        // Field name and type
        let nickel_type = Self::map_field_type_to_nickel(&field.field_type);
        let required_marker = if field.required { "" } else { "?" };

        field_def.push_str(&format!(
            "  {}{} | {},\n",
            field.name, required_marker, nickel_type
        ));

        // Add a default value comment if present
        if let Some(default) = &field.default {
            field_def.push_str(&format!("  # default: {}\n", default));
        }

        Ok(field_def)
    }

    /// Map ProjectSpec field types to Nickel type annotations.
    fn map_field_type_to_nickel(field_type: &FieldType) -> &'static str {
        match field_type {
            FieldType::Text => "String",
            FieldType::Number => "Number",
            FieldType::Password => "String",
            FieldType::Confirm => "Bool",
            FieldType::Select => "String",
            FieldType::MultiSelect => "[String]",
            FieldType::Editor => "String",
            FieldType::Date => "String",
            FieldType::RepeatingGroup => "[_]",
        }
    }

    /// Generate the main schema file that imports and assembles all features.
    fn generate_main_schema(spec: &ProjectSpec, output_dir: &Path) -> Result<()> {
        tracing::debug!("Generating main schema for project: {}", spec.name);

        let mut main_schema = String::new();

        // Add header
        main_schema.push_str(&format!(
            "# Main configuration schema for {}\n# Generated for provisioning setup\n\n",
            spec.name
        ));

        // Add infrastructure configuration
        main_schema.push_str(&Self::generate_infrastructure_schema(&spec.infrastructure)?);

        // Import all feature schemas
        main_schema.push_str("\n# Domain features\n");
        for feature in &spec.domain_features {
            main_schema.push_str(&format!(
                "let {} = (import \"./schemas/{}.ncl\").{}\n",
                feature.name, feature.name, feature.name
            ));
        }

        // Add the main configuration record
        main_schema.push_str("\n# Main configuration object\n{\n  config = {\n");

        for feature in &spec.domain_features {
            main_schema.push_str(&format!("    {}: {},\n", feature.name, feature.name));
        }

        main_schema.push_str("  },\n}\n");

        // Write main schema file
        let main_file = output_dir.join("config.ncl");
        std::fs::write(&main_file, main_schema).map_err(|e| {
            crate::error::SchemaGenerationError {
                feature_name: "config".to_string(),
                reason: format!("Failed to write main schema {}: {}", main_file.display(), e),
            }
        })?;

        tracing::debug!("Generated main schema file: {}", main_file.display());
        Ok(())
    }

    /// Generate Nickel schema for the infrastructure configuration.
    fn generate_infrastructure_schema(infra: &InfrastructureSpec) -> Result<String> {
        let mut infra_schema = String::new();

        infra_schema.push_str("# Infrastructure configuration\n");
        infra_schema.push_str("let infrastructure = {\n");

        if infra.ssh {
            infra_schema.push_str("  ssh | Bool = true,\n");
        }

        if let Some(db) = &infra.database {
            infra_schema.push_str(&format!(
                "  database = {{\n    type | String = \"{:?}\",\n    required | Bool = {},\n  }},\n",
                db.db_type, db.required
            ));
        }

        if !infra.providers.is_empty() {
            infra_schema.push_str("  providers | [String] = [");
            let provider_strs: Vec<String> = infra
                .providers
                .iter()
                .map(|p| format!("\"{}\"", format!("{:?}", p).to_lowercase()))
                .collect();
            infra_schema.push_str(&provider_strs.join(", "));
            infra_schema.push_str("],\n");
        }

        if !infra.monitoring.is_empty() {
            infra_schema.push_str("  monitoring | [String] = [");
            let monitoring_strs: Vec<String> = infra
                .monitoring
                .iter()
                .map(|m| format!("\"{}\"", format!("{:?}", m).to_lowercase()))
                .collect();
            infra_schema.push_str(&monitoring_strs.join(", "));
            infra_schema.push_str("],\n");
        }

        infra_schema.push_str("}\n");

        Ok(infra_schema)
    }
}
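Assembling the pieces from generate_main_schema, the emitted config.ncl would read roughly as follows for a single illustrative feature:

    # Main configuration schema for my-project
    # Generated for provisioning setup

    # Infrastructure configuration
    let infrastructure = {
      providers | [String] = ["lxd"],
    }

    # Domain features
    let http_server = (import "./schemas/http_server.ncl").http_server

    # Main configuration object
    {
      config = {
        http_server: http_server,
      },
    }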
150 crates/typedialog-prov-gen/src/generator/script_generator.rs Normal file
@@ -0,0 +1,150 @@
//! Script generator: produces bash and nushell orchestration scripts.

use crate::error::Result;
use crate::models::ProjectSpec;
use std::path::Path;

/// Generates orchestration scripts for provisioning.
pub struct ScriptGenerator;

impl ScriptGenerator {
    /// Generate bash and nushell scripts for provisioning orchestration.
    pub fn generate(spec: &ProjectSpec, output_dir: impl AsRef<Path>) -> Result<()> {
        let output_dir = output_dir.as_ref();
        tracing::info!(
            "Generating orchestration scripts for project: {}",
            spec.name
        );

        // Ensure the scripts directory exists
        let scripts_dir = output_dir.join("scripts");
        std::fs::create_dir_all(&scripts_dir).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to create scripts directory: {}",
                e
            ))
        })?;

        // Generate bash scripts
        Self::generate_bash_scripts(spec, &scripts_dir)?;

        // Generate nushell scripts
        Self::generate_nushell_scripts(spec, &scripts_dir)?;

        tracing::info!("Successfully generated orchestration scripts");
        Ok(())
    }

    /// Generate bash orchestration scripts.
    fn generate_bash_scripts(spec: &ProjectSpec, scripts_dir: &Path) -> Result<()> {
        // Config loading script. Note the ${{...}} escapes: format! collapses
        // doubled braces, so the emitted script contains ${CONFIG_DIR:-.}.
        let config_script = format!(
            "#!/bin/bash\n\
             # Load and validate configuration for {}\n\
             set -euo pipefail\n\n\
             CONFIG_DIR=\"${{CONFIG_DIR:-.}}\"\n\
             \n\
             # Load configuration from JSON\n\
             load_config() {{\n\
             local config_file=\"$1\"\n\
             if [[ ! -f \"$config_file\" ]]; then\n\
             echo \"Error: Configuration file not found: $config_file\" >&2\n\
             exit 1\n\
             fi\n\
             cat \"$config_file\"\n\
             }}\n\
             \n\
             # Validate using Nickel\n\
             validate_config() {{\n\
             local config_file=\"$1\"\n\
             nickel eval --raw \"$config_file\" > /dev/null 2>&1 || {{\n\
             echo \"Error: Configuration validation failed for $config_file\" >&2\n\
             exit 1\n\
             }}\n\
             }}\n\
             \n\
             # Main\n\
             main() {{\n\
             local config_file=\"${{CONFIG_DIR}}/config.json\"\n\
             load_config \"$config_file\"\n\
             validate_config \"$config_file\"\n\
             echo \"Configuration loaded and validated successfully\"\n\
             }}\n\
             \n\
             main \"$@\"\n",
            spec.name
        );

        let config_script_path = scripts_dir.join("config.sh");
        std::fs::write(&config_script_path, config_script).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to write config script: {}",
                e
            ))
        })?;

        // Make executable
        #[cfg(unix)]
        {
            use std::os::unix::fs::PermissionsExt;
            let perms = std::fs::Permissions::from_mode(0o755);
            std::fs::set_permissions(&config_script_path, perms).ok();
        }

        tracing::debug!("Generated bash config script");
        Ok(())
    }

    /// Generate nushell orchestration scripts.
    fn generate_nushell_scripts(spec: &ProjectSpec, scripts_dir: &Path) -> Result<()> {
        // Config loading script in nushell. No explicit call to main is
        // emitted: nushell invokes `def main` automatically when the script
        // runs, passing any command-line arguments.
        let config_script = format!(
            "#!/usr/bin/env nu\n\
             # Load and validate configuration for {} (nushell version)\n\n\
             def load_config [config_file: path] {{\n\
             if ($config_file | path exists) {{\n\
             open $config_file\n\
             }} else {{\n\
             error make {{\n\
             msg: $\"Configuration file not found: ($config_file)\"\n\
             }}\n\
             }}\n\
             }}\n\
             \n\
             def validate_config [config_file: path] {{\n\
             let config = (load_config $config_file)\n\
             # TODO: Validate against Nickel schema\n\
             $config\n\
             }}\n\
             \n\
             def main [--config_dir: path = \".\"] {{\n\
             let config_file = ($config_dir | path join config.json)\n\
             let config = (validate_config $config_file)\n\
             print $\"Configuration loaded: ($config_file)\"\n\
             $config\n\
             }}\n",
            spec.name
        );

        let config_script_path = scripts_dir.join("config.nu");
        std::fs::write(&config_script_path, config_script).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to write nushell config script: {}",
                e
            ))
        })?;

        // Make executable
        #[cfg(unix)]
        {
            use std::os::unix::fs::PermissionsExt;
            let perms = std::fs::Permissions::from_mode(0o755);
            std::fs::set_permissions(&config_script_path, perms).ok();
        }

        tracing::debug!("Generated nushell config script");
        Ok(())
    }
}
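Once format! collapses the doubled braces and the line continuations strip the leading whitespace, the emitted scripts/config.sh begins like this (project name illustrative):

    #!/bin/bash
    # Load and validate configuration for my-project
    set -euo pipefail

    CONFIG_DIR="${CONFIG_DIR:-.}"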
154 crates/typedialog-prov-gen/src/generator/validator_generator.rs Normal file
@@ -0,0 +1,154 @@
//! Validator generator: produces Nickel validators from constraints.

use crate::error::Result;
use crate::models::{FieldType, ProjectSpec};
use std::path::Path;

/// Generates validator Nickel files from constraints.
pub struct ValidatorGenerator;

impl ValidatorGenerator {
    /// Generate validators for all domain features.
    pub fn generate(spec: &ProjectSpec, output_dir: impl AsRef<Path>) -> Result<()> {
        let output_dir = output_dir.as_ref();
        tracing::info!("Generating validators for project: {}", spec.name);

        // Ensure the validators directory exists
        let validators_dir = output_dir.join("validators");
        std::fs::create_dir_all(&validators_dir).map_err(|e| {
            crate::error::ProvisioningGenError::Other(format!(
                "Failed to create validators directory: {}",
                e
            ))
        })?;

        // Generate validators for each feature
        for feature in &spec.domain_features {
            let mut validator_content = String::new();

            validator_content.push_str(&format!(
                "# Validators for {} feature\n# Generated for project: {}\n\n",
                feature.name, spec.name
            ));

            // Add field-specific validators
            for field in &feature.fields {
                validator_content.push_str(&Self::generate_field_validator(field)?);
            }

            // Write validator file
            let validator_file = validators_dir.join(format!("{}.ncl", feature.name));
            std::fs::write(&validator_file, validator_content).map_err(|e| {
                crate::error::ProvisioningGenError::Other(format!(
                    "Failed to write validator file: {}",
                    e
                ))
            })?;

            tracing::debug!("Generated validator for feature: {}", feature.name);
        }

        tracing::info!("Successfully generated validators");
        Ok(())
    }

    /// Generate a validator function for a single field.
    fn generate_field_validator(field: &crate::models::ConfigField) -> Result<String> {
        let mut validator = String::new();

        validator.push_str(&format!("# Validator for field: {}\n", field.name));

        match field.field_type {
            FieldType::Text => {
                validator.push_str(&format!("let validate_{} = fun value => (\n", field.name));
                validator.push_str("  (std.is_string value) &&\n");

                if let Some(min) = field.min {
                    validator.push_str(&format!("  ((std.string.length value) >= {}) &&\n", min));
                }
                if let Some(max) = field.max {
                    validator.push_str(&format!("  ((std.string.length value) <= {})\n", max));
                } else {
                    validator.push_str("  true\n");
                }

                validator.push_str(")\n\n");
            }

            FieldType::Number => {
                validator.push_str(&format!("let validate_{} = fun value => (\n", field.name));
                validator.push_str("  (std.is_number value) &&\n");

                if let Some(min) = field.min {
                    validator.push_str(&format!("  (value >= {}) &&\n", min));
                }
                if let Some(max) = field.max {
                    validator.push_str(&format!("  (value <= {})\n", max));
                } else {
                    validator.push_str("  true\n");
                }

                validator.push_str(")\n\n");
            }

            FieldType::Password => {
                validator.push_str(&format!("let validate_{} = fun value => (\n", field.name));
                validator.push_str("  (std.is_string value) &&\n");
                validator
                    .push_str("  ((std.string.length value) >= 8) # Minimum password length\n");
                validator.push_str(")\n\n");
            }

            FieldType::Confirm => {
                validator.push_str(&format!(
                    "let validate_{} = fun value => std.is_bool value\n\n",
                    field.name
                ));
            }

            FieldType::Select | FieldType::MultiSelect => {
                if !field.options.is_empty() {
                    validator.push_str(&format!("let validate_{} = fun value => (\n", field.name));
                    validator.push_str("  let valid_options = [");

                    let options_str = field
                        .options
                        .iter()
                        .map(|opt| format!("\"{}\"", opt))
                        .collect::<Vec<_>>()
                        .join(", ");

                    validator.push_str(&options_str);
                    validator.push_str("] in\n");
                    // std.array.elem (not std.arrays) matches the stdlib module
                    // name used by the other branches.
                    validator.push_str("  std.array.elem value valid_options\n");
                    validator.push_str(")\n\n");
                }
            }

            FieldType::RepeatingGroup => {
                validator.push_str(&format!("let validate_{} = fun value => (\n", field.name));
                validator.push_str("  (std.is_array value) &&\n");

                if let Some(min) = field.min {
                    validator.push_str(&format!("  ((std.array.length value) >= {}) &&\n", min));
                }
                if let Some(max) = field.max {
                    validator.push_str(&format!("  ((std.array.length value) <= {})\n", max));
                } else {
                    validator.push_str("  true\n");
                }

                validator.push_str(")\n\n");
            }

            _ => {
                validator.push_str(&format!(
                    "let validate_{} = fun value => true # No specific validation\n\n",
                    field.name
                ));
            }
        }

        Ok(validator)
    }
}
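For a bounded numeric field, the Number branch above emits roughly this into validators/<feature>.ncl (field name and bounds illustrative):

    # Validator for field: timeout_seconds
    let validate_timeout_seconds = fun value => (
      (std.is_number value) &&
      (value >= 1) &&
      (value <= 300)
    )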
324
crates/typedialog-prov-gen/src/input/cargo_introspector.rs
Normal file
324
crates/typedialog-prov-gen/src/input/cargo_introspector.rs
Normal file
@ -0,0 +1,324 @@
|
||||
//! Mode A: Cargo.toml introspection for automatic feature detection.
|
||||
|
||||
use crate::error::{CargoIntrospectionError, Result};
|
||||
use crate::models::*;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
|
||||
/// Analyzes Cargo.toml to infer domain features and project configuration.
|
||||
pub struct CargoIntrospector;
|
||||
|
||||
/// Dependency-to-feature mapping heuristics.
|
||||
impl CargoIntrospector {
|
||||
/// Analyze a Cargo.toml file to extract project information.
|
||||
pub fn analyze(cargo_path: impl AsRef<Path>) -> Result<ProjectSpec> {
|
||||
let cargo_path = cargo_path.as_ref();
|
||||
|
||||
if !cargo_path.exists() {
|
||||
return Err(CargoIntrospectionError {
|
||||
cargo_path: cargo_path.to_path_buf(),
|
||||
reason: "File does not exist".to_string(),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
let manifest =
|
||||
cargo_toml::Manifest::from_path(cargo_path).map_err(|e| CargoIntrospectionError {
|
||||
cargo_path: cargo_path.to_path_buf(),
|
||||
reason: format!("Failed to parse: {}", e),
|
||||
})?;
|
||||
|
||||
let package = manifest
|
||||
.package
|
||||
.as_ref()
|
||||
.ok_or_else(|| CargoIntrospectionError {
|
||||
cargo_path: cargo_path.to_path_buf(),
|
||||
reason: "Package section missing in Cargo.toml".to_string(),
|
||||
})?;
|
||||
|
||||
let name = package.name.clone();
|
||||
|
||||
// Determine project type from analysis
|
||||
let project_type = Self::infer_project_type(&manifest);
|
||||
|
||||
// Infer infrastructure requirements
|
||||
let infrastructure = Self::infer_infrastructure(&manifest);
|
||||
|
||||
// Infer domain features
|
||||
let domain_features = Self::infer_domain_features(&manifest);
|
||||
|
||||
// Build constraints from inferred features
|
||||
let constraints = Self::infer_constraints(&domain_features);
|
||||
|
||||
let spec = ProjectSpec {
|
||||
name,
|
||||
project_type,
|
||||
infrastructure,
|
||||
domain_features,
|
||||
constraints,
|
||||
};
|
||||
|
||||
// Validate the spec
|
||||
spec.validate().map_err(|errors| CargoIntrospectionError {
|
||||
cargo_path: cargo_path.to_path_buf(),
|
||||
reason: format!("Invalid spec generated: {}", errors.join(", ")),
|
||||
})?;
|
||||
|
||||
Ok(spec)
|
||||
}
|
||||
|
||||
/// Infer project type from manifest metadata.
|
||||
fn infer_project_type(manifest: &cargo_toml::Manifest) -> ProjectType {
|
||||
// Check description field first
|
||||
if let Some(package) = &manifest.package {
|
||||
if let Some(description) = &package.description {
|
||||
// Inheritable<String> - use Debug or convert to string
|
||||
let desc_str = format!("{:?}", description);
|
||||
let desc_lower = desc_str.to_lowercase();
|
||||
if desc_lower.contains("service") || desc_lower.contains("api") {
|
||||
return ProjectType::WebService;
|
||||
} else if desc_lower.contains("microservice") {
|
||||
return ProjectType::Microservice;
|
||||
} else if desc_lower.contains("tool") || desc_lower.contains("cli") {
|
||||
return ProjectType::CliTool;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check dependencies for web frameworks
|
||||
let deps = Self::collect_all_dependencies(manifest);
|
||||
if deps.contains_key("axum")
|
||||
|| deps.contains_key("actix-web")
|
||||
|| deps.contains_key("rocket")
|
||||
|| deps.contains_key("warp")
|
||||
|| deps.contains_key("tide")
|
||||
{
|
||||
return ProjectType::WebService;
|
||||
}
|
||||
|
||||
// Check for CLI frameworks
|
||||
if deps.contains_key("clap") || deps.contains_key("structopt") {
|
||||
return ProjectType::CliTool;
|
||||
}
|
||||
|
||||
// Default based on presence of binaries
|
||||
// manifest.bin is a BTreeMap, not Option. Check if not empty.
|
||||
if !manifest.bin.is_empty() {
|
||||
ProjectType::CliTool
|
||||
} else {
|
||||
ProjectType::Library
|
||||
}
|
||||
}
|
||||
|
||||
    /// Infer infrastructure requirements from dependencies.
    fn infer_infrastructure(manifest: &cargo_toml::Manifest) -> InfrastructureSpec {
        let deps = Self::collect_all_dependencies(manifest);

        let ssh = deps.contains_key("openssh-keys") || deps.contains_key("ssh2");

        // Check the specific drivers before the generic `sqlx`/`rusqlite`
        // fallback; otherwise the MySQL and Postgres branches are unreachable.
        let database = if deps.contains_key("postgres") {
            Some(DatabaseSpec {
                db_type: DatabaseType::Postgres,
                required: true,
            })
        } else if deps.contains_key("mysql") {
            Some(DatabaseSpec {
                db_type: DatabaseType::Mysql,
                required: true,
            })
        } else if deps.contains_key("sqlx") || deps.contains_key("rusqlite") {
            Some(DatabaseSpec {
                db_type: DatabaseType::Sqlite,
                required: true,
            })
        } else {
            None
        };

        let mut providers = Vec::new();
        if deps.contains_key("aws-config") {
            providers.push(CloudProvider::Aws);
        }
        if deps.contains_key("google-cloudkms1") {
            providers.push(CloudProvider::Gcp);
        }
        if providers.is_empty() {
            providers.push(CloudProvider::Lxd);
        }

        let mut monitoring = Vec::new();
        if deps.contains_key("prometheus") {
            monitoring.push(MonitoringType::Prometheus);
        }
        if deps.contains_key("grafana") {
            monitoring.push(MonitoringType::Grafana);
        }

        InfrastructureSpec {
            ssh,
            database,
            providers,
            monitoring,
        }
    }

    /// Infer domain features from dependencies and features.
    fn infer_domain_features(manifest: &cargo_toml::Manifest) -> Vec<DomainFeature> {
        let deps = Self::collect_all_dependencies(manifest);
        let mut features = Vec::new();

        // HTTP server detection
        if deps.contains_key("axum")
            || deps.contains_key("actix-web")
            || deps.contains_key("rocket")
        {
            let mut http_feature = DomainFeature::new("http_server".to_string());
            http_feature.description = Some("HTTP/REST API server".to_string());

            http_feature = http_feature
                .with_field(
                    ConfigField::new(
                        "bind_address".to_string(),
                        FieldType::Text,
                        "Server bind address".to_string(),
                    )
                    .with_default(serde_json::json!("0.0.0.0:8080"))
                    .with_help("Format: IP:PORT (e.g., 0.0.0.0:8080)"),
                )
                .with_field(
                    ConfigField::new(
                        "timeout_seconds".to_string(),
                        FieldType::Number,
                        "Request timeout".to_string(),
                    )
                    .with_default(serde_json::json!(30))
                    .with_help("Timeout in seconds for HTTP requests"),
                );

            features.push(http_feature);
        }

        // Authentication detection
        if deps.contains_key("jsonwebtoken") || deps.contains_key("oauth2") {
            let mut auth_feature = DomainFeature::new("authentication".to_string());
            auth_feature.description = Some("User authentication".to_string());

            auth_feature = auth_feature.with_field(
                ConfigField::new(
                    "jwt_secret".to_string(),
                    FieldType::Password,
                    "JWT signing secret".to_string(),
                )
                .with_help("Secret key for JWT token signing")
                .sensitive("age"),
            );

            features.push(auth_feature);
        }

        // Caching detection
        if deps.contains_key("redis") || deps.contains_key("memcache") {
            let mut cache_feature = DomainFeature::new("caching".to_string());
            cache_feature.description = Some("Caching layer".to_string());

            cache_feature = cache_feature
                .with_field(
                    ConfigField::new(
                        "cache_enabled".to_string(),
                        FieldType::Confirm,
                        "Enable caching".to_string(),
                    )
                    .with_default(serde_json::json!(true)),
                )
                .with_field(
                    ConfigField::new(
                        "cache_ttl_seconds".to_string(),
                        FieldType::Number,
                        "Cache TTL".to_string(),
                    )
                    .with_default(serde_json::json!(3600))
                    .with_help("Time to live in seconds"),
                );

            features.push(cache_feature);
        }

        // Default: at least one feature (basic configuration)
        if features.is_empty() {
            let mut basic = DomainFeature::new("basic_config".to_string());
            basic.description = Some("Basic project configuration".to_string());
            basic = basic.with_field(
                ConfigField::new(
                    "config_version".to_string(),
                    FieldType::Text,
                    "Configuration version".to_string(),
                )
                .with_default(serde_json::json!("1.0"))
                .optional(),
            );
            features.push(basic);
        }

        features
    }

    /// Infer constraints from domain features.
    fn infer_constraints(features: &[DomainFeature]) -> Vec<Constraint> {
        let mut constraints = Vec::new();

        for feature in features {
            for field in &feature.fields {
                // Only create constraints for repeating group fields with min/max bounds
                if field.field_type == FieldType::RepeatingGroup {
                    if let (Some(min), Some(max)) = (field.min, field.max) {
                        let constraint =
                            Constraint::new(format!("{}.{}", feature.name, field.name))
                                .with_min_items(min)
                                .with_max_items(max);

                        constraints.push(constraint);
                    }
                }
            }
        }

        constraints
    }

    /// Collect all dependencies (both regular and dev).
    fn collect_all_dependencies(manifest: &cargo_toml::Manifest) -> HashMap<String, String> {
        let mut deps = HashMap::new();

        // Dependencies is a BTreeMap, not Option
        for name in manifest.dependencies.keys() {
            deps.insert(name.clone(), "dependency".to_string());
        }

        // Dev dependencies is a BTreeMap, not Option
        for name in manifest.dev_dependencies.keys() {
            deps.insert(name.clone(), "dev-dependency".to_string());
        }

        deps
    }
}

#[cfg(test)]
mod tests {
    #[test]
    fn test_project_type_inference() {
        // Tests for project type detection would go here
        // Requires fixture Cargo.toml files
    }

    #[test]
    fn test_infrastructure_inference() {
        // Tests for infrastructure detection
    }

    #[test]
    fn test_domain_features_inference() {
        // Tests for feature detection
    }
}

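// A minimal usage sketch for this introspector (hypothetical caller code; the
// path is illustrative):
//
//     let spec = CargoIntrospector::analyze("./Cargo.toml")?;
//     println!("inferred spec: {}", spec);
//
// `analyze` is the only public entry point here; the `infer_*` helpers are
// internal heuristics over the collected dependency map.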
374
crates/typedialog-prov-gen/src/input/config_loader.rs
Normal file
@ -0,0 +1,374 @@
//! Mode B: Load project specification from explicit config.toml.

use crate::error::{ConfigLoadingError, Result};
use crate::models::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::Path;

/// Loads ProjectSpec from an explicit project-spec.toml configuration file.
pub struct ConfigLoader;

/// TOML-serializable configuration spec (mirrors ProjectSpec for file I/O).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConfigSpec {
    /// Project name
    pub name: String,

    /// Project type
    pub project_type: String,

    /// Infrastructure configuration
    #[serde(default)]
    pub infrastructure: InfrastructureConfig,

    /// Domain features
    #[serde(default)]
    pub features: Vec<FeatureConfig>,

    /// Constraints
    #[serde(default)]
    pub constraints: Vec<ConstraintConfig>,
}

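// Shape of the TOML that `ConfigSpec` deserializes (keys follow the structs in
// this file; the concrete values are illustrative only):
//
//     name = "my-service"
//     project_type = "web-service"
//
//     [infrastructure]
//     ssh = true
//     providers = ["lxd"]
//
//     [infrastructure.database]
//     type = "sqlite"
//
//     [[features]]
//     name = "http_server"
//
//     [[features.fields]]
//     name = "bind_address"
//     type = "text"
//     prompt = "Server bind address"
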
/// Infrastructure configuration in TOML format.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct InfrastructureConfig {
    /// SSH support
    #[serde(default)]
    pub ssh: bool,

    /// Database configuration
    pub database: Option<DatabaseConfig>,

    /// Cloud providers
    #[serde(default)]
    pub providers: Vec<String>,

    /// Monitoring tools
    #[serde(default)]
    pub monitoring: Vec<String>,
}

/// Database configuration in TOML format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseConfig {
    /// Database type (sqlite, mysql, postgres)
    pub r#type: String,

    /// Is database required?
    #[serde(default = "default_true")]
    pub required: bool,
}

fn default_true() -> bool {
    true
}

/// Domain feature configuration in TOML format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeatureConfig {
    /// Feature name
    pub name: String,

    /// Human-readable description
    pub description: Option<String>,

    /// Configuration fields
    #[serde(default)]
    pub fields: Vec<FieldConfig>,

    /// Feature-specific constraints
    #[serde(default)]
    pub constraints: HashMap<String, ConstraintConfigValue>,
}

/// Configuration field in TOML format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FieldConfig {
    /// Field name
    pub name: String,

    /// Field type
    pub r#type: String,

    /// Human-readable prompt
    pub prompt: String,

    /// Is field required?
    #[serde(default = "default_true")]
    pub required: bool,

    /// Default value
    pub default: Option<serde_json::Value>,

    /// Help text
    pub help: Option<String>,

    /// Placeholder value
    pub placeholder: Option<String>,

    /// For select fields: available options
    #[serde(default)]
    pub options: Vec<String>,

    /// Min value or array length
    pub min: Option<u32>,

    /// Max value or array length
    pub max: Option<u32>,

    /// Item fragment for repeating groups
    pub item_fragment: Option<String>,

    /// Is field encrypted/sensitive?
    #[serde(default)]
    pub sensitive: bool,

    /// Encryption backend
    pub encryption_backend: Option<String>,

    /// Encryption config (key-value pairs)
    #[serde(default)]
    pub encryption_config: HashMap<String, String>,
}

/// Constraint configuration in TOML format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConstraintConfig {
    /// Constraint path (e.g., "feature.field")
    pub path: String,

    /// Minimum items (for arrays)
    pub min_items: Option<u32>,

    /// Maximum items (for arrays)
    pub max_items: Option<u32>,

    /// Unique constraint field
    pub unique: Option<String>,
}

/// Feature constraint value type.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ConstraintConfigValue {
    /// Simple string constraint
    String(String),
    /// Numeric constraint
    Number(u32),
    /// Constraint object
    Object(HashMap<String, serde_json::Value>),
}

impl ConfigLoader {
    /// Load ProjectSpec from config file.
    pub fn load(config_path: impl AsRef<Path>) -> Result<ProjectSpec> {
        let config_path = config_path.as_ref();

        if !config_path.exists() {
            return Err(ConfigLoadingError {
                config_path: config_path.to_path_buf(),
                reason: "File does not exist".to_string(),
            }
            .into());
        }

        let config_content =
            std::fs::read_to_string(config_path).map_err(|e| ConfigLoadingError {
                config_path: config_path.to_path_buf(),
                reason: format!("Failed to read file: {}", e),
            })?;

        let config_spec: ConfigSpec =
            toml::from_str(&config_content).map_err(|e| ConfigLoadingError {
                config_path: config_path.to_path_buf(),
                reason: format!("Failed to parse TOML: {}", e),
            })?;

        Self::convert_to_project_spec(config_spec, config_path)
    }

    /// Convert ConfigSpec to ProjectSpec.
    fn convert_to_project_spec(config: ConfigSpec, config_path: &Path) -> Result<ProjectSpec> {
        // Parse project type
        let project_type = match config.project_type.to_lowercase().as_str() {
            "web" | "webservice" | "web-service" => ProjectType::WebService,
            "cli" | "clitool" | "cli-tool" => ProjectType::CliTool,
            "micro" | "microservice" => ProjectType::Microservice,
            "lib" | "library" => ProjectType::Library,
            _ => ProjectType::Custom,
        };

        // Convert infrastructure
        let infrastructure = Self::convert_infrastructure(&config.infrastructure)?;

        // Convert domain features
        let domain_features = Self::convert_features(&config.features)?;

        // Convert constraints
        let constraints = Self::convert_constraints(&config.constraints)?;

        let spec = ProjectSpec {
            name: config.name,
            project_type,
            infrastructure,
            domain_features,
            constraints,
        };

        // Validate the spec
        spec.validate().map_err(|errors| ConfigLoadingError {
            config_path: config_path.to_path_buf(),
            reason: format!("Invalid config: {}", errors.join(", ")),
        })?;

        Ok(spec)
    }

    /// Convert infrastructure configuration.
    fn convert_infrastructure(infra: &InfrastructureConfig) -> Result<InfrastructureSpec> {
        let database = match &infra.database {
            Some(db) => Some(DatabaseSpec {
                db_type: match db.r#type.to_lowercase().as_str() {
                    "sqlite" | "sql" => DatabaseType::Sqlite,
                    "mysql" => DatabaseType::Mysql,
                    "postgres" | "postgresql" | "pg" => DatabaseType::Postgres,
                    "redis" => DatabaseType::Redis,
                    other => {
                        return Err(ConfigLoadingError {
                            config_path: std::path::PathBuf::new(),
                            reason: format!("Unknown database type: {}", other),
                        }
                        .into());
                    }
                },
                required: db.required,
            }),
            None => None,
        };

        let providers = infra
            .providers
            .iter()
            .filter_map(|p| match p.to_lowercase().as_str() {
                "lxd" => Some(CloudProvider::Lxd),
                "hetzner" => Some(CloudProvider::Hetzner),
                "aws" => Some(CloudProvider::Aws),
                "gcp" | "google" => Some(CloudProvider::Gcp),
                "azure" => Some(CloudProvider::Azure),
                _ => None,
            })
            .collect::<Vec<_>>();

        let monitoring = infra
            .monitoring
            .iter()
            .filter_map(|m| match m.to_lowercase().as_str() {
                "prometheus" => Some(MonitoringType::Prometheus),
                "grafana" => Some(MonitoringType::Grafana),
                "cloudwatch" => Some(MonitoringType::CloudWatch),
                "stackdriver" => Some(MonitoringType::StackDriver),
                _ => None,
            })
            .collect::<Vec<_>>();

        Ok(InfrastructureSpec {
            ssh: infra.ssh,
            database,
            providers,
            monitoring,
        })
    }

    /// Convert domain features.
    fn convert_features(features: &[FeatureConfig]) -> Result<Vec<DomainFeature>> {
        features
            .iter()
            .map(|f| {
                let fields = f
                    .fields
                    .iter()
                    .map(Self::convert_field)
                    .collect::<Result<Vec<_>>>()?;

                Ok(DomainFeature {
                    name: f.name.clone(),
                    description: f.description.clone(),
                    fields,
                    constraints: None,
                })
            })
            .collect()
    }

    /// Convert a configuration field.
    fn convert_field(field: &FieldConfig) -> Result<ConfigField> {
        let field_type = match field.r#type.to_lowercase().as_str() {
            "text" => FieldType::Text,
            "number" | "num" => FieldType::Number,
            "password" | "secret" => FieldType::Password,
            "confirm" | "checkbox" => FieldType::Confirm,
            "select" => FieldType::Select,
            "multiselect" | "multi-select" => FieldType::MultiSelect,
            "editor" => FieldType::Editor,
            "date" => FieldType::Date,
            "repeating" | "repeating-group" => FieldType::RepeatingGroup,
            other => {
                return Err(ConfigLoadingError {
                    config_path: std::path::PathBuf::new(),
                    reason: format!("Unknown field type: {}", other),
                }
                .into());
            }
        };

        let mut encryption_config = HashMap::new();
        for (k, v) in &field.encryption_config {
            encryption_config.insert(k.clone(), v.clone());
        }

        Ok(ConfigField {
            name: field.name.clone(),
            field_type,
            prompt: field.prompt.clone(),
            required: field.required,
            default: field.default.clone(),
            help: field.help.clone(),
            placeholder: field.placeholder.clone(),
            options: field.options.clone(),
            min: field.min,
            max: field.max,
            item_fragment: field.item_fragment.clone(),
            sensitive: field.sensitive,
            encryption_backend: field.encryption_backend.clone(),
            encryption_config: if encryption_config.is_empty() {
                None
            } else {
                Some(encryption_config)
            },
        })
    }

    /// Convert constraints.
    fn convert_constraints(constraints: &[ConstraintConfig]) -> Result<Vec<Constraint>> {
        constraints
            .iter()
            .map(|c| {
                let mut constraint = Constraint::new(c.path.clone());

                if let Some(min) = c.min_items {
                    constraint = constraint.with_min_items(min);
                }
                if let Some(max) = c.max_items {
                    constraint = constraint.with_max_items(max);
                }
                if let Some(unique) = &c.unique {
                    constraint = constraint.with_unique(unique.clone());
                }

                Ok(constraint)
            })
            .collect()
    }
}
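
// Constraint entries in project-spec.toml map onto `Constraint` like so
// (values illustrative, mirroring the builder API above):
//
//     [[constraints]]
//     path = "tracker.udp_trackers"
//     min_items = 1
//     max_items = 4
//     unique = "bind_address"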
9
crates/typedialog-prov-gen/src/input/mod.rs
Normal file
@ -0,0 +1,9 @@
//! Input modes for loading project specifications.

pub mod cargo_introspector;
pub mod config_loader;
pub mod nickel_schema_loader;

pub use cargo_introspector::CargoIntrospector;
pub use config_loader::ConfigLoader;
pub use nickel_schema_loader::NickelSchemaLoader;
300
crates/typedialog-prov-gen/src/input/nickel_schema_loader.rs
Normal file
@ -0,0 +1,300 @@
//! Mode D: Augment existing Nickel schema to infer ProjectSpec.

use crate::error::{NickelSchemaLoadingError, Result};
use crate::models::*;
use std::path::Path;

/// Analyzes existing Nickel schema to infer ProjectSpec.
pub struct NickelSchemaLoader;

impl NickelSchemaLoader {
    /// Load ProjectSpec by analyzing an existing Nickel schema file.
    ///
    /// This mode augments existing Nickel schemas by:
    /// 1. Parsing the schema structure
    /// 2. Extracting field definitions
    /// 3. Inferring domain features from configuration records
    /// 4. Building constraints from contract definitions
    pub fn load(schema_path: impl AsRef<Path>) -> Result<ProjectSpec> {
        let schema_path = schema_path.as_ref();

        if !schema_path.exists() {
            return Err(NickelSchemaLoadingError {
                schema_path: schema_path.to_path_buf(),
                reason: "Schema file does not exist".to_string(),
            }
            .into());
        }

        // Extract the project name from the schema filename (without extension)
        let project_name = schema_path
            .file_stem()
            .and_then(|s| s.to_str())
            .map(|s| s.to_string())
            .unwrap_or_else(|| "generated-project".to_string());

        // Read the schema file
        let schema_content =
            std::fs::read_to_string(schema_path).map_err(|e| NickelSchemaLoadingError {
                schema_path: schema_path.to_path_buf(),
                reason: format!("Failed to read file: {}", e),
            })?;

        // Parse schema structure using basic pattern matching
        Self::parse_schema(&project_name, &schema_content, schema_path)
    }

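    // A sketch of the kind of Nickel input these line-based heuristics can
    // pick apart (a record name plus `field | Type,` lines; illustrative, not
    // a fixture shipped with this commit):
    //
    //     let DatabaseConfig = {
    //       host | String,
    //       port | Number,
    //       tls_enabled | Bool,
    //     }
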
    /// Parse Nickel schema content to extract field definitions.
    fn parse_schema(project_name: &str, content: &str, schema_path: &Path) -> Result<ProjectSpec> {
        // Extract features from top-level record definitions
        let domain_features = Self::extract_features(content)?;

        // Treat handler/endpoint hints as a web service; otherwise default to Library
        let project_type = if content.contains("fn handle") || content.contains("endpoint") {
            ProjectType::WebService
        } else {
            ProjectType::Library
        };

        // Try to extract infrastructure hints from documentation comments
        let infrastructure = Self::extract_infrastructure_hints(content)?;

        // Extract constraints from field definitions
        let constraints = Self::extract_constraints(content)?;

        let spec = ProjectSpec {
            name: project_name.to_string(),
            project_type,
            infrastructure,
            domain_features,
            constraints,
        };

        // Validate the spec
        spec.validate().map_err(|errors| NickelSchemaLoadingError {
            schema_path: schema_path.to_path_buf(),
            reason: format!("Invalid schema: {}", errors.join(", ")),
        })?;

        Ok(spec)
    }

    /// Extract domain features from Nickel record definitions.
    fn extract_features(content: &str) -> Result<Vec<DomainFeature>> {
        let mut features = Vec::new();

        // Simple pattern: look for lines like `field_name | FieldType,`
        // This is a basic extraction; full Nickel parsing would be more complex
        let lines: Vec<&str> = content.lines().collect();

        let mut current_feature: Option<DomainFeature> = None;
        let mut in_record = false;

        for line in lines.iter() {
            let trimmed = line.trim();

            // Detect record start: `let RecordName = {` (skip `#` Nickel comments)
            if trimmed.contains("= {") && !trimmed.starts_with('#') && !trimmed.starts_with("//") {
                if let Some(name) = Self::extract_record_name(trimmed) {
                    if let Some(prev_feature) = current_feature.take() {
                        features.push(prev_feature);
                    }
                    in_record = true;
                    current_feature = Some(DomainFeature::new(name));
                }
            }

            // Detect record end: `}`
            if trimmed == "}" && in_record {
                if let Some(feature) = current_feature.take() {
                    features.push(feature);
                }
                in_record = false;
            }

            // Extract field definitions while in a record
            if in_record {
                if let Some((field_name, field_type)) = Self::extract_field_definition(trimmed) {
                    let field = ConfigField::new(
                        field_name.clone(),
                        field_type,
                        format!("Configure {}", field_name),
                    );

                    if let Some(ref mut feature) = current_feature {
                        feature.fields.push(field);
                    }
                }
            }
        }

        // Push any remaining feature
        if let Some(feature) = current_feature.take() {
            features.push(feature);
        }

        // Default: at least one basic feature
        if features.is_empty() {
            features.push(DomainFeature::new("schema_config".to_string()));
        }

        Ok(features)
    }

    /// Extract record name from a definition line.
    fn extract_record_name(line: &str) -> Option<String> {
        // Pattern: `let RecordName = {`
        if let Some(start) = line.find("let ") {
            let rest = &line[start + 4..];
            if let Some(end) = rest.find(' ') {
                return Some(rest[..end].trim().to_string());
            }
        }
        None
    }

    /// Extract field definition from a Nickel line.
    fn extract_field_definition(line: &str) -> Option<(String, FieldType)> {
        // Skip empty lines and comments (Nickel uses `#`; `//` tolerated)
        if line.is_empty() || line.starts_with('#') || line.starts_with("//") {
            return None;
        }

        // Pattern: `field_name | Type,`
        if let Some(pipe_pos) = line.find('|') {
            let field_name = line[..pipe_pos].trim().to_string();

            let field_type = if line.contains("String") {
                FieldType::Text
            } else if line.contains("Number") || line.contains("Int") {
                FieldType::Number
            } else if line.contains("Bool") {
                FieldType::Confirm
            } else if line.contains("Array") {
                FieldType::RepeatingGroup
            } else {
                FieldType::Text // Default
            };

            return Some((field_name, field_type));
        }

        None
    }

    /// Extract infrastructure hints from comments and imports.
    fn extract_infrastructure_hints(content: &str) -> Result<InfrastructureSpec> {
        let mut infrastructure = InfrastructureSpec::default();

        // Check for database hints
        if content.contains("database") || content.contains("Database") {
            infrastructure.database = Some(DatabaseSpec {
                db_type: if content.contains("postgres") {
                    DatabaseType::Postgres
                } else if content.contains("mysql") {
                    DatabaseType::Mysql
                } else {
                    DatabaseType::Sqlite
                },
                required: true,
            });
        }

        // Check for SSH hints
        if content.contains("ssh") || content.contains("SSH") || content.contains("keypair") {
            infrastructure.ssh = true;
        }

        // Check for cloud provider hints
        if content.contains("aws") || content.contains("AWS") {
            infrastructure.providers.push(CloudProvider::Aws);
        }
        if content.contains("gcp") || content.contains("google") {
            infrastructure.providers.push(CloudProvider::Gcp);
        }

        // Default to LXD if no providers specified
        if infrastructure.providers.is_empty() {
            infrastructure.providers.push(CloudProvider::Lxd);
        }

        // Check for monitoring hints
        if content.contains("prometheus") {
            infrastructure.monitoring.push(MonitoringType::Prometheus);
        }
        if content.contains("grafana") {
            infrastructure.monitoring.push(MonitoringType::Grafana);
        }

        Ok(infrastructure)
    }

    /// Extract constraints from field definitions.
    fn extract_constraints(content: &str) -> Result<Vec<Constraint>> {
        let mut constraints = Vec::new();

        // Look for patterns like `min_length = 5`, `max_length = 100`
        for line in content.lines() {
            if line.contains("min_length") {
                if let Ok(min_val) = Self::extract_numeric_constraint(line, "min_length") {
                    let constraint =
                        Constraint::new("field.min".to_string()).with_min_items(min_val);
                    constraints.push(constraint);
                }
            }

            if line.contains("max_length") {
                if let Ok(max_val) = Self::extract_numeric_constraint(line, "max_length") {
                    let constraint =
                        Constraint::new("field.max".to_string()).with_max_items(max_val);
                    constraints.push(constraint);
                }
            }
        }

        Ok(constraints)
    }

    /// Extract numeric constraint value from a line.
    fn extract_numeric_constraint(line: &str, key: &str) -> Result<u32> {
        if let Some(pos) = line.find(key) {
            let rest = &line[pos + key.len()..];
            // Look for pattern: `= number` or `: number`; strip the separator
            // and surrounding whitespace before scanning for digits, so that
            // ` = 5` parses as well as `=5`.
            let value_part =
                rest.trim_start_matches(|c: char| c == '=' || c == ':' || c.is_whitespace());
            if let Some(first_num) = value_part
                .split(|c: char| !c.is_numeric())
                .find(|s| !s.is_empty())
            {
                if let Ok(num) = first_num.parse::<u32>() {
                    return Ok(num);
                }
            }
        }
        Err(crate::error::NickelSchemaLoadingError {
            schema_path: std::path::PathBuf::new(),
            reason: format!("Could not extract constraint from: {}", line),
        }
        .into())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_record_name_extraction() {
        let line = "let DatabaseConfig = {";
        assert_eq!(
            NickelSchemaLoader::extract_record_name(line),
            Some("DatabaseConfig".to_string())
        );
    }

    #[test]
    fn test_field_definition_extraction() {
        let line = "host | String,";
        let (name, field_type) = NickelSchemaLoader::extract_field_definition(line).unwrap();
        assert_eq!(name, "host");
        assert_eq!(field_type, FieldType::Text);
    }
}
44
crates/typedialog-prov-gen/src/lib.rs
Normal file
@ -0,0 +1,44 @@
//! Provisioning generator for typedialog projects.
//!
//! Generates complete provisioning/ directory structures with 7-layer validation
//! (Forms → Constraints → Values → Validators → Schemas → Defaults → JSON)
//! from project specifications using TypeDialog + Nickel.

pub mod ai;
pub mod cli;
pub mod config;
pub mod error;
pub mod generator;
pub mod input;
pub mod models;
pub mod template;

pub use error::{ProvisioningGenError, Result};
pub use models::{ConfigField, DomainFeature, InfrastructureSpec, ProjectSpec};

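// A minimal usage sketch for the crate entry point (hypothetical caller; the
// spec would normally come from one of the input modes):
//
//     let spec = input::CargoIntrospector::analyze("./Cargo.toml")?;
//     generate(spec, "./provisioning").await?;
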
/// Run provisioning generation from project specification.
pub async fn generate(spec: ProjectSpec, output_dir: impl AsRef<std::path::Path>) -> Result<()> {
    let output_dir = output_dir.as_ref();

    // Ensure output directory exists
    std::fs::create_dir_all(output_dir)?;

    // Generation pipeline:
    // 1. Constraints (required by fragments and validators)
    // 2. Schemas (domain-specific types)
    // 3. Validators (validation logic using constraints)
    // 4. Defaults (sensible defaults)
    // 5. Fragments (form UI sections using constraints)
    // 6. Scripts (orchestration bash + nushell)
    // 7. Infrastructure (copy generic templates)
    // 8. Form (assemble main config-form.toml)
    // 9. README (documentation)

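    // A hypothetical wiring of those steps (the `generator::*` helpers named
    // here are placeholders for modules this commit only declares, not
    // finished APIs):
    //
    //     generator::constraints::generate(&spec, output_dir)?;
    //     generator::schemas::generate(&spec, output_dir)?;
    //     generator::fragments::generate(&spec, output_dir)?;
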
    tracing::info!(
        "Starting provisioning generation for project: {}",
        spec.name
    );
    tracing::info!("Output directory: {}", output_dir.display());

    Ok(())
}
227
crates/typedialog-prov-gen/src/main.rs
Normal file
@ -0,0 +1,227 @@
//! CLI entry point for typedialog-prov-gen.

use clap::{Parser, Subcommand};
use std::path::PathBuf;

#[derive(Parser, Debug)]
#[command(name = "typedialog-prov-gen")]
#[command(about = "Generate provisioning structures for typedialog projects", long_about = None)]
struct Cli {
    /// Subcommand to execute
    #[command(subcommand)]
    command: Commands,

    /// Configuration file (TOML)
    ///
    /// If provided, uses this file exclusively.
    /// If not provided, searches: ~/.config/typedialog/prov-gen/{TYPEDIALOG_ENV}.toml → ~/.config/typedialog/prov-gen/config.toml → defaults
    #[arg(global = true, short = 'c', long, value_name = "FILE")]
    config: Option<PathBuf>,

    /// Enable verbose logging
    #[arg(short, long, global = true)]
    verbose: bool,
}

#[derive(Subcommand, Debug)]
enum Commands {
    /// Generate provisioning structure from project specification
    Generate {
        /// Input mode: cargo | config | wizard | nickel
        #[arg(short, long)]
        mode: String,

        /// Input file path (for modes that require files)
        #[arg(short, long)]
        input: Option<PathBuf>,

        /// Output directory for generated files
        #[arg(short, long, default_value = "./provisioning")]
        output: PathBuf,

        /// Project name override
        #[arg(short, long)]
        project: Option<String>,

        /// Overwrite existing files
        #[arg(short, long)]
        force: bool,

        /// Dry run (show what would be generated)
        #[arg(long)]
        dry_run: bool,
    },

    /// Validate a provisioning structure
    Validate {
        /// Path to provisioning directory
        #[arg(value_name = "DIR")]
        path: PathBuf,
    },

    /// List available templates
    Templates {
        /// Output format: json, yaml, text, toml
        #[arg(short, long, value_name = "FORMAT", default_value = "text")]
        format: String,
    },

    /// Show help for a specific input mode
    ModeHelp {
        /// Mode name (cargo, config, wizard, nickel)
        #[arg(value_name = "MODE")]
        mode: Option<String>,
    },
}

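// Example invocations (flags as declared above; paths illustrative):
//
//     typedialog-prov-gen generate --mode cargo --input ./Cargo.toml --output ./provisioning
//     typedialog-prov-gen templates --format json
//     typedialog-prov-gen mode-help cargo
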
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let cli = Cli::parse();

    // Initialize logging
    let level = if cli.verbose {
        tracing::metadata::LevelFilter::DEBUG
    } else {
        tracing::metadata::LevelFilter::INFO
    };
    tracing_subscriber::fmt().with_max_level(level).init();

    match cli.command {
        Commands::Generate {
            mode,
            input,
            output,
            project,
            force,
            dry_run,
        } => {
            use typedialog_prov_gen::cli::GenerateCommand;
            use typedialog_prov_gen::config::Config;

            // Load configuration
            let _config = match Config::load(cli.config.as_deref()) {
                Ok(cfg) => cfg,
                Err(e) => {
                    eprintln!("Failed to load configuration: {}", e);
                    std::process::exit(1);
                }
            };

            if !force
                && output.exists()
                && std::fs::read_dir(&output)
                    .ok()
                    .is_some_and(|mut dir| dir.next().is_some())
            {
                eprintln!("Output directory already exists. Use --force to overwrite");
                std::process::exit(1);
            }

            if let Err(e) = GenerateCommand::execute(&mode, input, output, project, dry_run).await
            {
                eprintln!("Generation failed: {}", e);
                std::process::exit(1);
            }
        }

        Commands::Validate { path: _ } => {
            // TODO: Implement validation command
            eprintln!("Validation command not yet implemented");
            std::process::exit(1);
        }

        Commands::Templates { format } => {
            use typedialog_prov_gen::config::Config;
            use typedialog_prov_gen::template::TemplateLoader;

            match Config::load(cli.config.as_deref()) {
                Ok(config) => match TemplateLoader::new(&config) {
                    Ok(loader) => match loader.list_templates() {
                        Ok(categories) => match format.as_str() {
                            "json" => {
                                if let Ok(json) = serde_json::to_string_pretty(&categories) {
                                    println!("{}", json);
                                } else {
                                    eprintln!("Failed to serialize templates to JSON");
                                    std::process::exit(1);
                                }
                            }
                            "yaml" => {
                                if let Ok(yaml) = serde_yaml::to_string(&categories) {
                                    println!("{}", yaml);
                                } else {
                                    eprintln!("Failed to serialize templates to YAML");
                                    std::process::exit(1);
                                }
                            }
                            "toml" => {
                                use serde::Serialize;
                                #[derive(Serialize)]
                                struct TemplatesWrapper {
                                    categories: Vec<typedialog_prov_gen::template::TemplateCategory>,
                                }
                                let wrapped = TemplatesWrapper { categories };
                                if let Ok(toml) = toml::to_string_pretty(&wrapped) {
                                    println!("{}", toml);
                                } else {
                                    eprintln!("Failed to serialize templates to TOML");
                                    std::process::exit(1);
                                }
                            }
                            "text" => {
                                println!("From: {}\n", loader.templates_dir().display());
                                println!("📋 Available Template Categories:\n");
                                for category in categories {
                                    println!(
                                        "{} {}",
                                        category.name.to_uppercase(),
                                        category.description
                                    );
                                    for template in &category.templates {
                                        println!("  • {}", template);
                                    }
                                    println!();
                                }
                            }
                            _ => {
                                eprintln!(
                                    "Unknown format: {}. Supported: json, yaml, text, toml",
                                    format
                                );
                                std::process::exit(1);
                            }
                        },
                        Err(e) => {
                            eprintln!("Failed to list templates: {}", e);
                            std::process::exit(1);
                        }
                    },
                    Err(e) => {
                        eprintln!("Failed to initialize template loader: {}", e);
                        std::process::exit(1);
                    }
                },
                Err(e) => {
                    eprintln!("Failed to load configuration: {}", e);
                    std::process::exit(1);
                }
            }
        }

        Commands::ModeHelp { mode } => match mode.as_deref() {
            Some("cargo") => {
                println!("Mode: cargo\n\nAnalyze Cargo.toml to infer project configuration")
            }
            Some("config") => println!("Mode: config\n\nLoad explicit project-spec.toml"),
            Some("wizard") => println!("Mode: wizard\n\nInteractive AI-powered wizard"),
            Some("nickel") => println!("Mode: nickel\n\nAugment existing Nickel schema"),
            _ => {
                println!("Available modes:");
                println!("  cargo  - Analyze Cargo.toml (automatic feature detection)");
                println!("  config - Load explicit project-spec.toml");
                println!("  wizard - Interactive AI-powered wizard");
                println!("  nickel - Augment existing Nickel schema");
            }
        },
    }

    Ok(())
}
8
crates/typedialog-prov-gen/src/models/mod.rs
Normal file
@ -0,0 +1,8 @@
//! Data models for provisioning generation.

pub mod project_spec;

pub use project_spec::{
    CloudProvider, ConfigField, Constraint, DatabaseSpec, DatabaseType, DomainFeature, FieldType,
    InfrastructureSpec, MonitoringType, ProjectSpec, ProjectType,
};
546
crates/typedialog-prov-gen/src/models/project_spec.rs
Normal file
@ -0,0 +1,546 @@
//! Central data model for provisioning generation.
//!
//! ProjectSpec is the normalized intermediate representation that all input modes
//! (Cargo, Config, Wizard, Nickel) converge to. It defines what gets generated.

use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Complete project specification for provisioning generation.
///
/// This is the single source of truth that all input modes normalize to.
/// All generation happens from this spec.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProjectSpec {
    /// Project name (lowercase, dashes allowed, no leading numbers)
    pub name: String,

    /// Project type (web-service, cli-tool, microservice, library)
    pub project_type: ProjectType,

    /// Infrastructure requirements (SSH, database, providers, monitoring)
    pub infrastructure: InfrastructureSpec,

    /// Domain-specific features and their configuration fields
    pub domain_features: Vec<DomainFeature>,

    /// Validation constraints (array sizes, uniqueness rules, etc.)
    pub constraints: Vec<Constraint>,
}

impl ProjectSpec {
    /// Create a new ProjectSpec with minimal required fields.
    pub fn new(name: String, project_type: ProjectType) -> Self {
        Self {
            name,
            project_type,
            infrastructure: InfrastructureSpec::default(),
            domain_features: Vec::new(),
            constraints: Vec::new(),
        }
    }

    /// Validate the spec for completeness and consistency.
    pub fn validate(&self) -> Result<(), Vec<String>> {
        let mut errors = Vec::new();

        // Project name validation
        if self.name.is_empty() {
            errors.push("Project name cannot be empty".to_string());
        } else if !self.name.chars().next().unwrap().is_lowercase() {
            errors.push("Project name must start with lowercase letter".to_string());
        }

        // Domain features validation
        if self.domain_features.is_empty() {
            errors.push("At least one domain feature must be defined".to_string());
        }

        for feature in &self.domain_features {
            if let Err(feature_errors) = feature.validate() {
                errors.extend(feature_errors);
            }
        }

        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }
}

impl std::fmt::Display for ProjectSpec {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Project {} ({:?}): {} features, {} constraints",
            self.name,
            self.project_type,
            self.domain_features.len(),
            self.constraints.len()
        )
    }
}

/// Project type classification.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ProjectType {
    /// Web service (HTTP API, REST)
    WebService,
    /// Command-line tool
    CliTool,
    /// Microservice (containerized, distributed)
    Microservice,
    /// Shared library
    Library,
    /// Custom type (user-specified)
    Custom,
}

impl std::fmt::Display for ProjectType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::WebService => write!(f, "web-service"),
            Self::CliTool => write!(f, "cli-tool"),
            Self::Microservice => write!(f, "microservice"),
            Self::Library => write!(f, "library"),
            Self::Custom => write!(f, "custom"),
        }
    }
}

/// Infrastructure requirements.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct InfrastructureSpec {
    /// SSH/remote access required
    pub ssh: bool,

    /// Database configuration
    pub database: Option<DatabaseSpec>,

    /// Cloud providers to support
    pub providers: Vec<CloudProvider>,

    /// Optional monitoring features
    pub monitoring: Vec<MonitoringType>,
}

/// Database configuration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DatabaseSpec {
    pub db_type: DatabaseType,
    pub required: bool,
}

/// Supported database types.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum DatabaseType {
    Sqlite,
    Mysql,
    Postgres,
    Redis,
}

impl std::fmt::Display for DatabaseType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Sqlite => write!(f, "sqlite"),
            Self::Mysql => write!(f, "mysql"),
            Self::Postgres => write!(f, "postgres"),
            Self::Redis => write!(f, "redis"),
        }
    }
}

/// Cloud providers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CloudProvider {
    Lxd,
    Hetzner,
    Aws,
    Gcp,
    Azure,
}

impl std::fmt::Display for CloudProvider {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Lxd => write!(f, "lxd"),
            Self::Hetzner => write!(f, "hetzner"),
            Self::Aws => write!(f, "aws"),
            Self::Gcp => write!(f, "gcp"),
            Self::Azure => write!(f, "azure"),
        }
    }
}

/// Monitoring types.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum MonitoringType {
    Prometheus,
    Grafana,
    CloudWatch,
    StackDriver,
}

impl std::fmt::Display for MonitoringType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Prometheus => write!(f, "prometheus"),
            Self::Grafana => write!(f, "grafana"),
            Self::CloudWatch => write!(f, "cloudwatch"),
            Self::StackDriver => write!(f, "stackdriver"),
        }
    }
}

/// Domain-specific feature with its configuration fields.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DomainFeature {
    /// Feature name (e.g., "http_server", "authentication", "caching")
    pub name: String,

    /// Human-readable description
    pub description: Option<String>,

    /// Configuration fields for this feature
    pub fields: Vec<ConfigField>,

    /// Constraints specific to this feature (e.g., array bounds)
    pub constraints: Option<HashMap<String, FeatureConstraint>>,
}

impl DomainFeature {
    /// Create a new feature with no fields.
    pub fn new(name: String) -> Self {
        Self {
            name,
            description: None,
            fields: Vec::new(),
            constraints: None,
        }
    }

    /// Add a configuration field to this feature.
    pub fn with_field(mut self, field: ConfigField) -> Self {
        self.fields.push(field);
        self
    }

    /// Validate the feature for completeness.
    pub fn validate(&self) -> Result<(), Vec<String>> {
        let mut errors = Vec::new();

        if self.name.is_empty() {
            errors.push("Feature name cannot be empty".to_string());
        }

        for field in &self.fields {
            if let Err(field_errors) = field.validate() {
                errors.extend(
                    field_errors
                        .iter()
                        .map(|e| format!("Feature '{}': {}", self.name, e)),
                );
            }
        }

        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }
}

impl std::fmt::Display for DomainFeature {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Feature: {} ({} fields)", self.name, self.fields.len())
    }
}

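// Builder-style construction sketch (mirrors how the Cargo introspector
// assembles features; values illustrative):
//
//     let feature = DomainFeature::new("http_server".to_string())
//         .with_field(ConfigField::new(
//             "bind_address".to_string(),
//             FieldType::Text,
//             "Server bind address".to_string(),
//         ));
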
/// Configuration field definition.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConfigField {
    /// Field name (matches Nickel path element)
    pub name: String,

    /// Field type (text, number, select, etc.)
    pub field_type: FieldType,

    /// Human-readable prompt
    pub prompt: String,

    /// Whether field is required
    pub required: bool,

    /// Default value (as JSON)
    pub default: Option<serde_json::Value>,

    /// Help text for the user
    pub help: Option<String>,

    /// Optional placeholder value
    pub placeholder: Option<String>,

    /// For select/multi_select: available options
    pub options: Vec<String>,

    /// Min value (for numbers/arrays)
    pub min: Option<u32>,

    /// Max value (for numbers/arrays)
    pub max: Option<u32>,

    /// For repeating groups: item fragment path
    pub item_fragment: Option<String>,

    /// Is this field encrypted?
    pub sensitive: bool,

    /// Encryption backend if sensitive
    pub encryption_backend: Option<String>,

    /// Encryption config if sensitive
    pub encryption_config: Option<HashMap<String, String>>,
}

impl ConfigField {
    /// Create a new configuration field.
    pub fn new(name: String, field_type: FieldType, prompt: String) -> Self {
        Self {
            name,
            field_type,
            prompt,
            required: true,
            default: None,
            help: None,
            placeholder: None,
            options: Vec::new(),
            min: None,
            max: None,
            item_fragment: None,
            sensitive: false,
            encryption_backend: None,
            encryption_config: None,
        }
    }

    /// Mark field as optional.
    pub fn optional(mut self) -> Self {
        self.required = false;
        self
    }

    /// Set a default value.
    pub fn with_default(mut self, default: serde_json::Value) -> Self {
        self.default = Some(default);
        self
    }

    /// Set help text.
    pub fn with_help(mut self, help: impl Into<String>) -> Self {
        self.help = Some(help.into());
        self
    }

    /// Mark as sensitive (encrypted).
    pub fn sensitive(mut self, backend: impl Into<String>) -> Self {
        self.sensitive = true;
        self.encryption_backend = Some(backend.into());
        self
    }

    /// Validate the field.
    pub fn validate(&self) -> Result<(), Vec<String>> {
        let mut errors = Vec::new();

        if self.name.is_empty() {
            errors.push("Field name cannot be empty".to_string());
        }

        if self.prompt.is_empty() {
            errors.push(format!("Field '{}': prompt cannot be empty", self.name));
        }

        // Validate min/max constraints
        if let (Some(min), Some(max)) = (self.min, self.max) {
            if min > max {
                errors.push(format!(
                    "Field '{}': min ({}) > max ({})",
                    self.name, min, max
                ));
            }
        }

        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }
}

impl std::fmt::Display for ConfigField {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}: {:?}", self.name, self.field_type)
    }
}

/// Field type for form UI rendering and JSON serialization.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum FieldType {
    Text,
    Number,
    Password,
    Confirm,
    Select,
    MultiSelect,
    Editor,
    Date,
    RepeatingGroup,
}

impl std::fmt::Display for FieldType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Text => write!(f, "text"),
            Self::Number => write!(f, "number"),
            Self::Password => write!(f, "password"),
            Self::Confirm => write!(f, "confirm"),
            Self::Select => write!(f, "select"),
            Self::MultiSelect => write!(f, "multi_select"),
            Self::Editor => write!(f, "editor"),
            Self::Date => write!(f, "date"),
            Self::RepeatingGroup => write!(f, "repeatinggroup"),
        }
    }
}

/// Constraint on a feature (array bounds, uniqueness, etc.).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Constraint {
    /// Path in TOML (e.g., "tracker.udp_trackers")
    pub path: String,

    /// Minimum array items
    pub min_items: Option<u32>,

    /// Maximum array items
    pub max_items: Option<u32>,

    /// Items must be unique
    pub unique: bool,

    /// Uniqueness key field (if unique=true)
    pub unique_key: Option<String>,
}

impl Constraint {
    /// Create a new constraint for a path.
    pub fn new(path: String) -> Self {
        Self {
            path,
            min_items: None,
            max_items: None,
            unique: false,
            unique_key: None,
        }
    }

    /// Set minimum items constraint.
    pub fn with_min_items(mut self, min: u32) -> Self {
        self.min_items = Some(min);
        self
    }

    /// Set maximum items constraint.
    pub fn with_max_items(mut self, max: u32) -> Self {
        self.max_items = Some(max);
        self
    }

    /// Set uniqueness constraint.
    pub fn with_unique(mut self, key: impl Into<String>) -> Self {
        self.unique = true;
        self.unique_key = Some(key.into());
        self
    }
}

impl std::fmt::Display for Constraint {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "Constraint({})", self.path)
    }
}

/// Feature-specific constraint.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FeatureConstraint {
    pub min_items: Option<u32>,
    pub max_items: Option<u32>,
    pub unique: bool,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_project_spec_creation() {
        let spec = ProjectSpec::new("my-service".to_string(), ProjectType::WebService);
        assert_eq!(spec.name, "my-service");
        assert_eq!(spec.project_type, ProjectType::WebService);
    }

    #[test]
    fn test_project_spec_validation_fails_without_features() {
        let spec = ProjectSpec::new("my-service".to_string(), ProjectType::WebService);
        assert!(spec.validate().is_err());
    }

    #[test]
    fn test_domain_feature_creation() {
        let feature = DomainFeature::new("http_server".to_string());
        assert_eq!(feature.name, "http_server");
        assert!(feature.fields.is_empty());
    }

    #[test]
    fn test_config_field_creation() {
        let field = ConfigField::new(
            "bind_address".to_string(),
            FieldType::Text,
            "Server bind address".to_string(),
        );
        assert_eq!(field.name, "bind_address");
        assert!(field.required);
        assert!(!field.sensitive);
    }

    #[test]
    fn test_config_field_sensitive() {
        let field = ConfigField::new(
            "api_token".to_string(),
            FieldType::Password,
            "API token".to_string(),
        )
        .sensitive("age");

        assert!(field.sensitive);
        assert_eq!(field.encryption_backend, Some("age".to_string()));
    }

    #[test]
    fn test_constraint_creation() {
        let constraint = Constraint::new("tracker.udp_trackers".to_string())
            .with_min_items(1)
            .with_max_items(4)
            .with_unique("bind_address");

        assert_eq!(constraint.min_items, Some(1));
        assert_eq!(constraint.max_items, Some(4));
        assert!(constraint.unique);
    }
}
108
crates/typedialog-prov-gen/src/template/loader.rs
Normal file
@ -0,0 +1,108 @@
//! Template loader and renderer.

use crate::config::Config;
use crate::error::Result;
use std::collections::BTreeMap;
use std::fs;
use std::path::PathBuf;

/// Loads and renders Tera templates for code generation.
pub struct TemplateLoader {
    path: PathBuf,
}

/// Template category with its templates.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct TemplateCategory {
    pub name: String,
    pub description: String,
    pub templates: Vec<String>,
}

impl TemplateLoader {
    /// Load template library from configuration.
    pub fn new(config: &Config) -> Result<Self> {
        let path = config.templates_dir();
        Ok(TemplateLoader { path })
    }

    /// Get the templates directory path.
    pub fn templates_dir(&self) -> &std::path::PathBuf {
        &self.path
    }

    /// List all available template categories and their templates.
    pub fn list_templates(&self) -> Result<Vec<TemplateCategory>> {
        let templates_dir = &self.path;
        let mut categories = Vec::new();

        if !templates_dir.exists() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                format!(
                    "Templates directory not found: {}",
                    templates_dir.display()
                ),
            )
            .into());
        }

        let category_names = vec![
            "defaults",
            "domain",
            "fragments",
            "schemas",
            "scripts",
            "validators",
        ];
        let category_descriptions = BTreeMap::from([
            ("defaults", "Default value templates for fields"),
            ("domain", "Domain model and schema generation"),
            ("fragments", "Reusable code fragments"),
            ("schemas", "Schema validation and definition"),
            ("scripts", "Infrastructure and deployment scripts"),
            ("validators", "Field validation templates"),
        ]);

        for category_name in category_names {
            let category_dir = templates_dir.join(category_name);
            if !category_dir.exists() {
                continue;
            }

            let mut templates = Vec::new();
            for entry in fs::read_dir(&category_dir)? {
                let entry = entry?;
                let path = entry.path();
                if path.is_file() {
                    if let Some(filename) = path.file_name() {
                        if let Some(name) = filename.to_str() {
                            templates.push(name.to_string());
                        }
                    }
                }
            }

            templates.sort();

            let description = category_descriptions
                .get(category_name)
                .unwrap_or(&"")
                .to_string();

            categories.push(TemplateCategory {
                name: category_name.to_string(),
                description,
                templates,
            });
        }

        Ok(categories)
    }
|
||||
/// Render a template with given context.
|
||||
pub fn render(&self, _template_name: &str, _context: &tera::Context) -> Result<String> {
|
||||
// TODO: Implement template rendering
|
||||
Ok(String::new())
|
||||
}
|
||||
}
|
||||
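Note: `render` above is left as a TODO. A minimal sketch of what a Tera-backed implementation could look like; the glob pattern, the free-standing helper, and returning `tera::Error` directly (instead of the crate's own error type) are assumptions for illustration, not part of this commit:

use tera::{Context, Tera};

// Hypothetical helper, not this commit's actual implementation: load the
// whole template library with a glob and delegate rendering to Tera.
fn render_template(
    templates_dir: &std::path::Path,
    template_name: &str,
    context: &Context,
) -> Result<String, tera::Error> {
    // e.g. "<templates_dir>/**/*" picks up defaults/, fragments/, schemas/, ...
    let pattern = format!("{}/**/*", templates_dir.display());
    let tera = Tera::new(&pattern)?;
    // Template names are paths relative to the glob root, e.g. "fragments/mysql.toml".
    tera.render(template_name, context)
}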
5
crates/typedialog-prov-gen/src/template/mod.rs
Normal file
@ -0,0 +1,5 @@
//! Template system for code generation.

pub mod loader;

pub use loader::{TemplateCategory, TemplateLoader};
12
crates/typedialog-prov-gen/templates/defaults/database.ncl
Normal file
@ -0,0 +1,12 @@
# Database Defaults
# Default values with type contracts applied

let schemas = import "../schemas/database.ncl" in

{
  # Default database: SQLite (simple, file-based)
  database | schemas.Database = {
    driver = "sqlite3",
    database_name = "tracker.db",
  },
}
@ -0,0 +1,11 @@
# Environment Defaults
# Default values with type contracts applied

let schemas = import "../schemas/environment.ncl" in

{
  environment | schemas.Environment = {
    # name is required from user (no default)
    # instance_name is optional
  },
}
17
crates/typedialog-prov-gen/templates/defaults/features.ncl
Normal file
@ -0,0 +1,17 @@
# Features Defaults
# Default values with type contracts applied
# Note: Features are completely optional - user decides what to enable/configure

let schemas = import "../schemas/features.ncl" in

{
  features | schemas.Features = {
    # Prometheus: optional structure (user provides if needed)
    # Default: not specified (optional in schema)
    # prometheus = {...}

    # Grafana: optional structure (user provides if needed)
    # Default: not specified (optional in schema)
    # grafana = {...}
  },
}
13
crates/typedialog-prov-gen/templates/defaults/provider.ncl
Normal file
@ -0,0 +1,13 @@
# Provider Defaults
# No defaults provided (user must supply provider configuration)

let schemas = import "../schemas/provider.ncl" in

{
  # provider field must be completely supplied by user
  # Example (user would provide):
  # provider | schemas.Provider = {
  #   provider = "lxd",
  #   profile_name = "my-profile",
  # }
}
17
crates/typedialog-prov-gen/templates/defaults/ssh.ncl
Normal file
@ -0,0 +1,17 @@
# SSH Defaults
# Default values with type contracts applied

let schemas = import "../schemas/ssh.ncl" in

{
  ssh_credentials | schemas.SshCredentials = {
    # private_key_path must be provided by user (no default)
    # public_key_path must be provided by user (no default)

    # Default SSH username: "torrust" (project convention)
    username = "torrust",

    # Default SSH port: 22 (standard SSH port)
    port = 22,
  },
}
@ -0,0 +1,11 @@
# Default configuration values for {{ feature_name }} domain
# Provides sensible defaults for {{ feature_description | default(value="domain feature") }}
# Auto-generated for project: {{ project_name }}

{
  {{ feature_name }} = {
    {%- for field in fields %}
    {{ field.name }} = {{ field.default | default(value=field.default_by_type) }},
    {%- endfor %}
  },
}
@ -0,0 +1,48 @@
# Form fragment for {{ feature_name }} feature
# Auto-generated for project: {{ project_name }}

[section.{{ feature_name }}]
{%- if description %}
description = "{{ description }}"
{%- endif %}

{%- for field in fields %}

[[section.{{ feature_name }}.fields]]
name = "{{ field.name }}"
prompt = "{{ field.prompt }}"
type = "{{ field.type }}"
{%- if field.help %}
help = "{{ field.help }}"
{%- endif %}
{%- if field.placeholder %}
placeholder = "{{ field.placeholder }}"
{%- endif %}
{%- if field.default %}
default = "{{ field.default }}"
{%- endif %}
{%- if not field.required %}
required = false
{%- endif %}
{%- if field.sensitive %}
sensitive = true
{%- if field.encryption_backend %}
encryption_backend = "{{ field.encryption_backend }}"
{%- endif %}
{%- endif %}
{%- if field.options %}
options = [
{%- for option in field.options %}
    "{{ option }}",
{%- endfor %}
]
{%- endif %}
{%- if field.min or field.max %}
{%- if field.min %}
min = {{ field.min }}
{%- endif %}
{%- if field.max %}
max = {{ field.max }}
{%- endif %}
{%- endif %}
{%- endfor %}
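For reference, a sketch of the `tera::Context` this fragment template expects. The key names (`project_name`, `feature_name`, `fields` and the per-field attributes) mirror the placeholders above; the concrete values are illustrative only:

use serde_json::json;
use tera::Context;

fn main() {
    let mut context = Context::new();
    context.insert("project_name", "my-project");
    context.insert("feature_name", "tracker");
    context.insert("description", "Tracker configuration");
    // Each entry carries the subset of field attributes the template tests for;
    // absent keys (options, min, max, ...) simply skip their blocks.
    context.insert(
        "fields",
        &json!([{
            "name": "bind_address",
            "prompt": "Server bind address",
            "type": "text",
            "help": "Address and port to listen on",
            "required": true,
            "sensitive": false
        }]),
    );
}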
@ -0,0 +1,16 @@
# Domain Schema for {{ feature_name }}
# Type contract for {{ feature_description | default(value="feature configuration") }}
# Auto-generated for project: {{ project_name }}
{%- if imports %}
{%- for import in imports %}
let {{ import.name }} = import "{{ import.path }}" in
{%- endfor %}
{% endif %}

{
  {{- field_name | capitalize }} = {
    {%- for field in fields %}
    {{ field.name }} | {{ field.nickel_type }}{%- if field.optional %} | optional{%- endif %}{%- if field.doc %}, # {{ field.doc }}{%- else %},{%- endif %}
    {%- endfor %}
  },
}
@ -0,0 +1,57 @@
# Validator functions for {{ feature_name }} domain
# Type-specific validation rules for {{ feature_description | default(value="domain feature") }}
# Auto-generated for project: {{ project_name }}
{%- if imports %}
{%- for import in imports %}
let {{ import.name }} = import "{{ import.path }}" in
{%- endfor %}

{% endif %}
{
  {%- for field in fields %}

  # Validator for {{ field.name }}
  {%- if field.doc %} - {{ field.doc }}{%- endif %}
  validate_{{ field.name }} = fun value => (
    {%- if field.type == "Text" %}
    (std.is_string value) &&
    (std.string.length value > 0)
    {%- if field.min_length %} &&
    (std.string.length value >= {{ field.min_length }})
    {%- endif %}
    {%- if field.max_length %} &&
    (std.string.length value <= {{ field.max_length }})
    {%- endif %}
    {%- elif field.type == "Number" %}
    (std.is_number value)
    {%- if field.min %} &&
    (value >= {{ field.min }})
    {%- endif %}
    {%- if field.max %} &&
    (value <= {{ field.max }})
    {%- endif %}
    {%- elif field.type == "Confirm" %}
    (std.is_bool value)
    {%- elif field.type == "Password" %}
    (std.is_string value) &&
    (std.string.length value > 0)
    {%- if field.min_length %} &&
    (std.string.length value >= {{ field.min_length }})
    {%- endif %}
    {%- elif field.type == "Select" or field.type == "MultiSelect" %}
    (std.is_string value) &&
    (std.array.elem value {{ field.options | json_encode }})
    {%- elif field.type == "RepeatingGroup" %}
    (std.is_array value)
    {%- if field.min_items %} &&
    (std.array.length value >= {{ field.min_items }})
    {%- endif %}
    {%- if field.max_items %} &&
    (std.array.length value <= {{ field.max_items }})
    {%- endif %}
    {%- else %}
    true
    {%- endif %}
  ),
  {%- endfor %}
}
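The validator template can be exercised without the full loader. A minimal sketch using `Tera::one_off` on a trimmed-down Number-field rule; the field name and bounds are made up for illustration:

use tera::{Context, Tera};

fn main() -> Result<(), tera::Error> {
    // A reduced version of the Number branch above, inlined as a one-off template.
    let template = r#"validate_{{ name }} = fun value => (
  (std.is_number value) &&
  (value >= {{ min }}) &&
  (value <= {{ max }})
)"#;
    let mut ctx = Context::new();
    ctx.insert("name", "port");
    ctx.insert("min", &1);
    ctx.insert("max", &65535);
    // `false` disables HTML autoescaping, which is irrelevant for Nickel output.
    let rendered = Tera::one_off(template, &ctx, false)?;
    println!("{rendered}");
    Ok(())
}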
@ -0,0 +1,86 @@
name = "mysql_fragment"

[[elements]]
name = "mysql_header"
type = "section_header"
title = "💾 MySQL Database Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "mysql_host"
type = "text"
prompt = "MySQL host"
placeholder = "localhost"
default = "localhost"
required = true
help = "MySQL server hostname or IP address"
nickel_path = ["tracker", "core", "database", "mysql_host"]
nickel_alias = "mysql_host"

[[elements]]
name = "mysql_port"
type = "text"
prompt = "MySQL port"
placeholder = "3306"
default = "3306"
required = true
help = "MySQL server port (default 3306). Must be between 1-65535."
nickel_path = ["tracker", "core", "database", "mysql_port"]
nickel_alias = "mysql_port"

[[elements]]
name = "mysql_database_name"
type = "text"
prompt = "Database name"
placeholder = "torrust_tracker"
default = "torrust_tracker"
required = true
help = "Name of the MySQL database"
nickel_path = ["tracker", "core", "database", "database_name"]
nickel_alias = "mysql_database_name"

[[elements]]
name = "mysql_username"
type = "text"
prompt = "Database username"
placeholder = "tracker_user"
default = "tracker_user"
required = true
help = "MySQL username for authentication"
nickel_path = ["tracker", "core", "database", "mysql_username"]
nickel_alias = "mysql_username"

[[elements]]
name = "mysql_password"
type = "password"
prompt = "Database password"
required = true
help = "MySQL password for authentication"
nickel_path = ["tracker", "core", "database", "mysql_password"]
nickel_alias = "mysql_password"
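Each element's `nickel_path` names where its answer should land in the generated Nickel record. A sketch of that nesting step under this reading; the helper name and the use of `serde_json` values are illustrative, not this commit's actual plumbing:

use serde_json::{Map, Value};

// Hypothetical helper: wrap a single answer in nested objects following
// its nickel_path, outermost key first.
fn nest_answer(path: &[&str], answer: Value) -> Value {
    path.iter().rev().fold(answer, |acc, key| {
        let mut obj = Map::new();
        obj.insert((*key).to_string(), acc);
        Value::Object(obj)
    })
}

fn main() {
    let nested = nest_answer(
        &["tracker", "core", "database", "mysql_host"],
        Value::String("localhost".into()),
    );
    assert_eq!(nested["tracker"]["core"]["database"]["mysql_host"], "localhost");
}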
@ -0,0 +1,24 @@
name = "sqlite_fragment"

[[elements]]
name = "sqlite_header"
type = "section_header"
title = "💾 SQLite Database Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "sqlite_database_name"
type = "text"
prompt = "Database filename"
placeholder = "tracker.db"
default = "tracker.db"
required = true
help = "Name of the SQLite database file (will be created in the tracker data directory)"
nickel_path = ["tracker", "core", "database", "database_name"]
nickel_alias = "sqlite_database_name"
@ -0,0 +1,31 @@
name = "environment_fragment"

[[elements]]
name = "environment_header"
type = "section_header"
title = "🏗️ Environment Identification"
border_top = true
border_bottom = true

[[elements]]
name = "environment_name"
type = "text"
prompt = "Environment name"
placeholder = "dev, staging, production, e2e-test"
required = true
help = "Lowercase letters, numbers, dashes. Cannot start with number or dash. Examples: dev, staging, e2e-config"
nickel_path = ["environment", "name"]

[[elements]]
name = "instance_name"
type = "text"
prompt = "Instance/VM name (optional)"
placeholder = "Leave empty for auto-generation: torrust-tracker-vm-{env-name}"
help = "1-63 chars, ASCII letters/numbers/dashes, no leading digit/dash, no trailing dash. Will be auto-generated if omitted."
nickel_path = ["environment", "instance_name"]
@ -0,0 +1,31 @@
name = "grafana_fragment"

[[elements]]
name = "grafana_header"
type = "section_header"
title = "📈 Grafana Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "grafana_bind_address"
type = "text"
prompt = "Grafana bind address"
placeholder = "0.0.0.0:3000"
default = "0.0.0.0:3000"
help = "Address and port for Grafana. Format: IP:PORT (e.g., 0.0.0.0:3000)"
nickel_path = ["grafana", "bind_address"]

[[elements]]
name = "grafana_admin_password"
type = "password"
prompt = "Grafana admin password"
required = true
help = "Admin password for Grafana access. Keep this secure!"
nickel_path = ["grafana", "admin_password"]
@ -0,0 +1,32 @@
name = "prometheus_fragment"

[[elements]]
name = "prometheus_header"
type = "section_header"
title = "📊 Prometheus Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "prometheus_bind_address"
type = "text"
prompt = "Prometheus bind address"
placeholder = "0.0.0.0:9090"
default = "0.0.0.0:9090"
help = "Address and port for Prometheus. Format: IP:PORT (e.g., 0.0.0.0:9090)"
nickel_path = ["prometheus", "bind_address"]

[[elements]]
name = "prometheus_scrape_interval"
type = "text"
prompt = "Scrape interval (seconds)"
placeholder = "15"
default = "15"
help = "How often Prometheus should scrape metrics (in seconds). Default: 15 seconds."
nickel_path = ["prometheus", "scrape_interval"]
@ -0,0 +1,145 @@
name = "aws_provider_fragment"

[[elements]]
name = "aws_header"
type = "section_header"
title = "☁️ AWS Cloud Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "aws_access_key_id"
type = "text"
prompt = "AWS Access Key ID"
required = true
help = "Your AWS IAM Access Key ID for authentication"
nickel_path = ["provider", "aws_access_key_id"]

[[elements]]
name = "aws_secret_access_key"
type = "password"
prompt = "AWS Secret Access Key"
required = true
help = "Your AWS IAM Secret Access Key (will be masked)"
nickel_path = ["provider", "aws_secret_access_key"]

[[elements]]
name = "aws_region"
type = "select"
prompt = "AWS Region"
options = [
    { value = "us-east-1", label = "US East (N. Virginia)" },
    { value = "us-east-2", label = "US East (Ohio)" },
    { value = "us-west-1", label = "US West (N. California)" },
    { value = "us-west-2", label = "US West (Oregon)" },
    { value = "eu-west-1", label = "Europe (Ireland)" },
    { value = "eu-west-2", label = "Europe (London)" },
    { value = "eu-west-3", label = "Europe (Paris)" },
    { value = "eu-central-1", label = "Europe (Frankfurt)" },
    { value = "eu-north-1", label = "Europe (Stockholm)" },
    { value = "ap-northeast-1", label = "Asia Pacific (Tokyo)" },
    { value = "ap-northeast-2", label = "Asia Pacific (Seoul)" },
    { value = "ap-northeast-3", label = "Asia Pacific (Osaka)" },
    { value = "ap-southeast-1", label = "Asia Pacific (Singapore)" },
    { value = "ap-southeast-2", label = "Asia Pacific (Sydney)" },
    { value = "ap-south-1", label = "Asia Pacific (Mumbai)" },
    { value = "sa-east-1", label = "South America (São Paulo)" },
    { value = "ca-central-1", label = "Canada (Central)" },
]
default = "us-east-1"
required = true
help = "AWS region where resources will be deployed"
nickel_path = ["provider", "aws_region"]

[[elements]]
name = "aws_instance_type"
type = "select"
prompt = "EC2 Instance Type"
options = [
    { value = "t3.micro", label = "t3.micro - 2 vCPU, 1 GB RAM (Free tier)" },
    { value = "t3.small", label = "t3.small - 2 vCPU, 2 GB RAM" },
    { value = "t3.medium", label = "t3.medium - 2 vCPU, 4 GB RAM" },
    { value = "t3.large", label = "t3.large - 2 vCPU, 8 GB RAM" },
    { value = "t3.xlarge", label = "t3.xlarge - 4 vCPU, 16 GB RAM" },
    { value = "t3.2xlarge", label = "t3.2xlarge - 8 vCPU, 32 GB RAM" },
    { value = "m5.large", label = "m5.large - 2 vCPU, 8 GB RAM" },
    { value = "m5.xlarge", label = "m5.xlarge - 4 vCPU, 16 GB RAM" },
    { value = "m5.2xlarge", label = "m5.2xlarge - 8 vCPU, 32 GB RAM" },
    { value = "m5.4xlarge", label = "m5.4xlarge - 16 vCPU, 64 GB RAM" },
    { value = "c5.large", label = "c5.large - 2 vCPU, 4 GB RAM (compute optimized)" },
    { value = "c5.xlarge", label = "c5.xlarge - 4 vCPU, 8 GB RAM (compute optimized)" },
    { value = "r5.large", label = "r5.large - 2 vCPU, 16 GB RAM (memory optimized)" },
    { value = "r5.xlarge", label = "r5.xlarge - 4 vCPU, 32 GB RAM (memory optimized)" },
]
default = "t3.medium"
required = true
help = "EC2 instance type (determines CPU, RAM, and pricing)"
nickel_path = ["provider", "aws_instance_type"]

[[elements]]
name = "aws_ami"
type = "select"
prompt = "Amazon Machine Image (AMI)"
options = [
    { value = "ami-ubuntu-24-04", label = "Ubuntu 24.04 LTS (Latest)" },
    { value = "ami-ubuntu-22-04", label = "Ubuntu 22.04 LTS" },
    { value = "ami-ubuntu-20-04", label = "Ubuntu 20.04 LTS" },
    { value = "ami-debian-12", label = "Debian 12 (Bookworm)" },
    { value = "ami-debian-11", label = "Debian 11 (Bullseye)" },
    { value = "ami-amazon-linux-2", label = "Amazon Linux 2" },
    { value = "ami-amazon-linux-2023", label = "Amazon Linux 2023" },
]
default = "ami-ubuntu-24-04"
required = true
help = "Operating system image for EC2 instances"
nickel_path = ["provider", "aws_ami"]

[[elements]]
name = "aws_vpc_cidr"
type = "text"
prompt = "VPC CIDR Block"
default = "10.0.0.0/16"
required = true
help = "CIDR block for the VPC (e.g., 10.0.0.0/16)"
nickel_path = ["provider", "aws_vpc_cidr"]

[[elements]]
name = "aws_subnet_cidr"
type = "text"
prompt = "Subnet CIDR Block"
default = "10.0.1.0/24"
required = true
help = "CIDR block for the subnet (e.g., 10.0.1.0/24)"
nickel_path = ["provider", "aws_subnet_cidr"]

[[elements]]
name = "aws_ssh_key_name"
type = "text"
prompt = "SSH Key Pair Name"
required = true
help = "Name of the EC2 SSH key pair for instance access"
nickel_path = ["provider", "aws_ssh_key_name"]
@ -0,0 +1,305 @@
name = "azure_provider_fragment"

[[elements]]
name = "azure_header"
type = "section_header"
title = "☁️ Microsoft Azure Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "azure_subscription_id"
type = "text"
prompt = "Azure Subscription ID"
required = true
help = "Your Azure subscription ID (GUID format)"
nickel_path = ["provider", "azure_subscription_id"]

[[elements]]
name = "azure_tenant_id"
type = "text"
prompt = "Azure Tenant ID"
required = true
help = "Your Azure Active Directory tenant ID"
nickel_path = ["provider", "azure_tenant_id"]

[[elements]]
name = "azure_client_id"
type = "text"
prompt = "Service Principal Client ID"
required = true
help = "Client ID of the service principal (app registration)"
nickel_path = ["provider", "azure_client_id"]

[[elements]]
name = "azure_client_secret"
type = "password"
prompt = "Service Principal Client Secret"
required = true
help = "Client secret for authentication (will be masked)"
nickel_path = ["provider", "azure_client_secret"]

[[elements]]
name = "azure_location"
type = "select"
prompt = "Azure Region"
options = [
    { value = "eastus", label = "East US - Virginia, USA" },
    { value = "eastus2", label = "East US 2 - Virginia, USA" },
    { value = "westus", label = "West US - California, USA" },
    { value = "westus2", label = "West US 2 - Washington, USA" },
    { value = "westus3", label = "West US 3 - Arizona, USA" },
    { value = "centralus", label = "Central US - Iowa, USA" },
    { value = "northcentralus", label = "North Central US - Illinois, USA" },
    { value = "southcentralus", label = "South Central US - Texas, USA" },
    { value = "northeurope", label = "North Europe - Ireland" },
    { value = "westeurope", label = "West Europe - Netherlands" },
    { value = "francecentral", label = "France Central - Paris" },
    { value = "germanywestcentral", label = "Germany West Central - Frankfurt" },
    { value = "switzerlandnorth", label = "Switzerland North - Zurich" },
    { value = "uksouth", label = "UK South - London" },
    { value = "ukwest", label = "UK West - Cardiff" },
    { value = "norwayeast", label = "Norway East - Oslo" },
    { value = "swedencentral", label = "Sweden Central - Gävle" },
    { value = "eastasia", label = "East Asia - Hong Kong" },
    { value = "southeastasia", label = "Southeast Asia - Singapore" },
    { value = "japaneast", label = "Japan East - Tokyo" },
    { value = "japanwest", label = "Japan West - Osaka" },
    { value = "koreacentral", label = "Korea Central - Seoul" },
    { value = "australiaeast", label = "Australia East - Sydney" },
    { value = "australiasoutheast", label = "Australia Southeast - Melbourne" },
    { value = "canadacentral", label = "Canada Central - Toronto" },
    { value = "canadaeast", label = "Canada East - Quebec" },
    { value = "brazilsouth", label = "Brazil South - São Paulo" },
    { value = "southafricanorth", label = "South Africa North - Johannesburg" },
    { value = "uaenorth", label = "UAE North - Dubai" },
    { value = "centralindia", label = "Central India - Pune" },
    { value = "southindia", label = "South India - Chennai" },
]
default = "westeurope"
required = true
help = "Azure region where resources will be deployed"
nickel_path = ["provider", "azure_location"]

[[elements]]
name = "azure_resource_group_name"
type = "text"
prompt = "Resource Group name"
required = true
help = "Name of the Azure Resource Group (will be created if it doesn't exist)"
nickel_path = ["provider", "azure_resource_group_name"]

[[elements]]
name = "azure_vm_size"
type = "select"
prompt = "Virtual Machine Size"
options = [
    { value = "Standard_B1s", label = "Standard_B1s - 1 vCPU, 1 GB RAM (burstable)" },
    { value = "Standard_B1ms", label = "Standard_B1ms - 1 vCPU, 2 GB RAM (burstable)" },
    { value = "Standard_B2s", label = "Standard_B2s - 2 vCPU, 4 GB RAM (burstable)" },
    { value = "Standard_B2ms", label = "Standard_B2ms - 2 vCPU, 8 GB RAM (burstable)" },
    { value = "Standard_B4ms", label = "Standard_B4ms - 4 vCPU, 16 GB RAM (burstable)" },
    { value = "Standard_D2s_v3", label = "Standard_D2s_v3 - 2 vCPU, 8 GB RAM (general purpose)" },
    { value = "Standard_D4s_v3", label = "Standard_D4s_v3 - 4 vCPU, 16 GB RAM (general purpose)" },
    { value = "Standard_D8s_v3", label = "Standard_D8s_v3 - 8 vCPU, 32 GB RAM (general purpose)" },
    { value = "Standard_E2s_v3", label = "Standard_E2s_v3 - 2 vCPU, 16 GB RAM (memory optimized)" },
    { value = "Standard_E4s_v3", label = "Standard_E4s_v3 - 4 vCPU, 32 GB RAM (memory optimized)" },
    { value = "Standard_F2s_v2", label = "Standard_F2s_v2 - 2 vCPU, 4 GB RAM (compute optimized)" },
    { value = "Standard_F4s_v2", label = "Standard_F4s_v2 - 4 vCPU, 8 GB RAM (compute optimized)" },
]
default = "Standard_B2s"
required = true
help = "Azure VM size (determines CPU, RAM, and pricing)"
nickel_path = ["provider", "azure_vm_size"]

[[elements]]
name = "azure_image_publisher"
type = "select"
prompt = "Image Publisher"
options = [
    { value = "Canonical", label = "Canonical (Ubuntu)" },
    { value = "Debian", label = "Debian (Debian Linux)" },
    { value = "RedHat", label = "Red Hat (RHEL)" },
    { value = "OpenLogic", label = "OpenLogic (CentOS)" },
    { value = "AlmaLinux", label = "AlmaLinux Foundation" },
    { value = "MicrosoftWindowsServer", label = "Microsoft (Windows Server)" },
]
default = "Canonical"
required = true
help = "Publisher of the VM image"
nickel_path = ["provider", "azure_image_publisher"]

[[elements]]
name = "azure_image_offer"
type = "select"
prompt = "Image Offer"
options = [
    { value = "0001-com-ubuntu-server-jammy", label = "Ubuntu Server 22.04 LTS" },
    { value = "0001-com-ubuntu-server-focal", label = "Ubuntu Server 20.04 LTS" },
    { value = "debian-11", label = "Debian 11" },
    { value = "debian-12", label = "Debian 12" },
    { value = "RHEL", label = "Red Hat Enterprise Linux" },
    { value = "CentOS", label = "CentOS" },
    { value = "almalinux", label = "AlmaLinux" },
]
default = "0001-com-ubuntu-server-jammy"
required = true
help = "Specific offer from the publisher (must match publisher)"
nickel_path = ["provider", "azure_image_offer"]

[[elements]]
name = "azure_image_sku"
type = "select"
prompt = "Image SKU"
options = [
    { value = "22_04-lts-gen2", label = "Ubuntu 22.04 LTS Gen2" },
    { value = "20_04-lts-gen2", label = "Ubuntu 20.04 LTS Gen2" },
    { value = "11-gen2", label = "Debian 11 Gen2" },
    { value = "12-gen2", label = "Debian 12 Gen2" },
    { value = "9-lvm-gen2", label = "RHEL 9 Gen2" },
    { value = "8-lvm-gen2", label = "RHEL 8 Gen2" },
]
default = "22_04-lts-gen2"
required = true
help = "SKU (version) of the image (must match offer)"
nickel_path = ["provider", "azure_image_sku"]

[[elements]]
name = "azure_os_disk_size_gb"
type = "text"
prompt = "OS Disk size (GB)"
default = "30"
required = true
help = "Size of the OS disk in GB (minimum 30 GB)"
nickel_path = ["provider", "azure_os_disk_size_gb"]

[[elements]]
name = "azure_os_disk_type"
type = "select"
prompt = "OS Disk type"
options = [
    { value = "Standard_LRS", label = "Standard_LRS - Standard HDD (locally redundant)" },
    { value = "StandardSSD_LRS", label = "StandardSSD_LRS - Standard SSD (locally redundant)" },
    { value = "Premium_LRS", label = "Premium_LRS - Premium SSD (locally redundant)" },
    { value = "UltraSSD_LRS", label = "UltraSSD_LRS - Ultra SSD (highest performance)" },
]
default = "StandardSSD_LRS"
required = true
help = "Type of managed disk for OS volume"
nickel_path = ["provider", "azure_os_disk_type"]

[[elements]]
name = "azure_admin_username"
type = "text"
prompt = "Admin username"
default = "azureuser"
required = true
help = "Administrator username for the VM"
nickel_path = ["provider", "azure_admin_username"]

[[elements]]
name = "azure_ssh_public_key"
type = "text"
prompt = "SSH public key"
required = true
help = "SSH public key for authentication (e.g., ~/.ssh/id_rsa.pub content)"
nickel_path = ["provider", "azure_ssh_public_key"]

[[elements]]
name = "azure_vnet_address_space"
type = "text"
prompt = "Virtual Network address space"
default = "10.0.0.0/16"
required = true
help = "Address space for the Virtual Network (CIDR notation)"
nickel_path = ["provider", "azure_vnet_address_space"]

[[elements]]
name = "azure_subnet_address_prefix"
type = "text"
prompt = "Subnet address prefix"
default = "10.0.1.0/24"
required = true
help = "Address prefix for the subnet (CIDR notation)"
nickel_path = ["provider", "azure_subnet_address_prefix"]

[[elements]]
name = "azure_enable_public_ip"
type = "confirm"
prompt = "Assign public IP address?"
default = true
help = "Assign a public IP to the VM for external access"
nickel_path = ["provider", "azure_enable_public_ip"]

[[elements]]
name = "azure_enable_accelerated_networking"
type = "confirm"
prompt = "Enable accelerated networking?"
default = false
help = "Enable SR-IOV for better network performance (requires compatible VM size)"
nickel_path = ["provider", "azure_enable_accelerated_networking"]

[[elements]]
name = "azure_enable_boot_diagnostics"
type = "confirm"
prompt = "Enable boot diagnostics?"
default = true
help = "Enable boot diagnostics for troubleshooting"
nickel_path = ["provider", "azure_enable_boot_diagnostics"]
@ -0,0 +1,256 @@
name = "gcp_provider_fragment"

[[elements]]
name = "gcp_header"
type = "section_header"
title = "☁️ Google Cloud Platform (GCP) Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "gcp_project_id"
type = "text"
prompt = "GCP Project ID"
required = true
help = "Your Google Cloud Project ID (e.g., my-project-123456)"
nickel_path = ["provider", "gcp_project_id"]

[[elements]]
name = "gcp_credentials_file"
type = "text"
prompt = "Service Account credentials file path"
required = true
help = "Path to service account JSON key file (e.g., ~/.gcp/credentials.json)"
nickel_path = ["provider", "gcp_credentials_file"]

[[elements]]
name = "gcp_region"
type = "select"
prompt = "GCP Region"
options = [
    { value = "us-central1", label = "US-CENTRAL1 - Iowa, USA" },
    { value = "us-east1", label = "US-EAST1 - South Carolina, USA" },
    { value = "us-east4", label = "US-EAST4 - Northern Virginia, USA" },
    { value = "us-west1", label = "US-WEST1 - Oregon, USA" },
    { value = "us-west2", label = "US-WEST2 - Los Angeles, USA" },
    { value = "us-west3", label = "US-WEST3 - Salt Lake City, USA" },
    { value = "us-west4", label = "US-WEST4 - Las Vegas, USA" },
    { value = "europe-west1", label = "EUROPE-WEST1 - Belgium" },
    { value = "europe-west2", label = "EUROPE-WEST2 - London, UK" },
    { value = "europe-west3", label = "EUROPE-WEST3 - Frankfurt, Germany" },
    { value = "europe-west4", label = "EUROPE-WEST4 - Netherlands" },
    { value = "europe-west6", label = "EUROPE-WEST6 - Zurich, Switzerland" },
    { value = "europe-north1", label = "EUROPE-NORTH1 - Finland" },
    { value = "asia-east1", label = "ASIA-EAST1 - Taiwan" },
    { value = "asia-east2", label = "ASIA-EAST2 - Hong Kong" },
    { value = "asia-northeast1", label = "ASIA-NORTHEAST1 - Tokyo, Japan" },
    { value = "asia-northeast2", label = "ASIA-NORTHEAST2 - Osaka, Japan" },
    { value = "asia-northeast3", label = "ASIA-NORTHEAST3 - Seoul, South Korea" },
    { value = "asia-south1", label = "ASIA-SOUTH1 - Mumbai, India" },
    { value = "asia-southeast1", label = "ASIA-SOUTHEAST1 - Singapore" },
    { value = "asia-southeast2", label = "ASIA-SOUTHEAST2 - Jakarta, Indonesia" },
    { value = "australia-southeast1", label = "AUSTRALIA-SOUTHEAST1 - Sydney, Australia" },
    { value = "southamerica-east1", label = "SOUTHAMERICA-EAST1 - São Paulo, Brazil" },
]
default = "europe-west3"
required = true
help = "GCP region where resources will be deployed"
nickel_path = ["provider", "gcp_region"]

[[elements]]
name = "gcp_zone"
type = "select"
prompt = "Availability Zone"
options = [
    { value = "a", label = "Zone A (primary)" },
    { value = "b", label = "Zone B" },
    { value = "c", label = "Zone C" },
    { value = "d", label = "Zone D (if available)" },
]
default = "a"
required = true
help = "Zone within the selected region (e.g., 'a' for europe-west3-a)"
nickel_path = ["provider", "gcp_zone"]

[[elements]]
name = "gcp_machine_type"
type = "select"
prompt = "Machine Type"
options = [
    { value = "e2-micro", label = "e2-micro - 2 vCPU (shared), 1 GB RAM (Free tier)" },
    { value = "e2-small", label = "e2-small - 2 vCPU (shared), 2 GB RAM" },
    { value = "e2-medium", label = "e2-medium - 2 vCPU (shared), 4 GB RAM" },
    { value = "e2-standard-2", label = "e2-standard-2 - 2 vCPU, 8 GB RAM" },
    { value = "e2-standard-4", label = "e2-standard-4 - 4 vCPU, 16 GB RAM" },
    { value = "e2-standard-8", label = "e2-standard-8 - 8 vCPU, 32 GB RAM" },
    { value = "n1-standard-1", label = "n1-standard-1 - 1 vCPU, 3.75 GB RAM" },
    { value = "n1-standard-2", label = "n1-standard-2 - 2 vCPU, 7.5 GB RAM" },
    { value = "n1-standard-4", label = "n1-standard-4 - 4 vCPU, 15 GB RAM" },
    { value = "n2-standard-2", label = "n2-standard-2 - 2 vCPU, 8 GB RAM (newer generation)" },
    { value = "n2-standard-4", label = "n2-standard-4 - 4 vCPU, 16 GB RAM (newer generation)" },
    { value = "n2-standard-8", label = "n2-standard-8 - 8 vCPU, 32 GB RAM (newer generation)" },
    { value = "n2-highmem-2", label = "n2-highmem-2 - 2 vCPU, 16 GB RAM (memory optimized)" },
    { value = "n2-highmem-4", label = "n2-highmem-4 - 4 vCPU, 32 GB RAM (memory optimized)" },
    { value = "c2-standard-4", label = "c2-standard-4 - 4 vCPU, 16 GB RAM (compute optimized)" },
    { value = "c2-standard-8", label = "c2-standard-8 - 8 vCPU, 32 GB RAM (compute optimized)" },
]
default = "e2-medium"
required = true
help = "GCP Compute Engine machine type (determines CPU, RAM, and pricing)"
nickel_path = ["provider", "gcp_machine_type"]

[[elements]]
name = "gcp_image_family"
type = "select"
prompt = "Image Family"
options = [
    { value = "ubuntu-2404-lts", label = "Ubuntu 24.04 LTS (Latest)" },
    { value = "ubuntu-2204-lts", label = "Ubuntu 22.04 LTS" },
    { value = "ubuntu-2004-lts", label = "Ubuntu 20.04 LTS" },
    { value = "debian-12", label = "Debian 12 (Bookworm)" },
    { value = "debian-11", label = "Debian 11 (Bullseye)" },
    { value = "rocky-linux-9", label = "Rocky Linux 9" },
    { value = "rocky-linux-8", label = "Rocky Linux 8" },
    { value = "rhel-9", label = "Red Hat Enterprise Linux 9" },
    { value = "rhel-8", label = "Red Hat Enterprise Linux 8" },
    { value = "centos-stream-9", label = "CentOS Stream 9" },
]
default = "ubuntu-2404-lts"
required = true
help = "Operating system image family for the instance"
nickel_path = ["provider", "gcp_image_family"]

[[elements]]
name = "gcp_image_project"
type = "select"
prompt = "Image Project"
options = [
    { value = "ubuntu-os-cloud", label = "ubuntu-os-cloud (Ubuntu images)" },
    { value = "debian-cloud", label = "debian-cloud (Debian images)" },
    { value = "rocky-linux-cloud", label = "rocky-linux-cloud (Rocky Linux)" },
    { value = "rhel-cloud", label = "rhel-cloud (Red Hat)" },
    { value = "centos-cloud", label = "centos-cloud (CentOS)" },
]
default = "ubuntu-os-cloud"
required = true
help = "GCP project that provides the image"
nickel_path = ["provider", "gcp_image_project"]

[[elements]]
name = "gcp_disk_size_gb"
type = "text"
prompt = "Boot disk size (GB)"
default = "20"
required = true
help = "Boot disk size in GB (minimum 10 GB)"
nickel_path = ["provider", "gcp_disk_size_gb"]

[[elements]]
name = "gcp_disk_type"
type = "select"
prompt = "Disk type"
options = [
    { value = "pd-standard", label = "pd-standard - Standard persistent disk (HDD)" },
    { value = "pd-balanced", label = "pd-balanced - Balanced persistent disk (SSD, recommended)" },
    { value = "pd-ssd", label = "pd-ssd - SSD persistent disk (high performance)" },
    { value = "pd-extreme", label = "pd-extreme - Extreme persistent disk (highest IOPS)" },
]
default = "pd-balanced"
required = true
help = "Type of persistent disk for boot volume"
nickel_path = ["provider", "gcp_disk_type"]

[[elements]]
name = "gcp_network_name"
type = "text"
prompt = "VPC Network name"
default = "default"
required = true
help = "Name of the VPC network (use 'default' for default network)"
nickel_path = ["provider", "gcp_network_name"]

[[elements]]
name = "gcp_subnetwork_name"
type = "text"
prompt = "Subnetwork name"
default = "default"
required = true
help = "Name of the subnetwork within the VPC"
nickel_path = ["provider", "gcp_subnetwork_name"]

[[elements]]
name = "gcp_enable_external_ip"
type = "confirm"
prompt = "Enable external IP address?"
default = true
help = "Assign a public IP address to the instance"
nickel_path = ["provider", "gcp_enable_external_ip"]

[[elements]]
name = "gcp_preemptible"
type = "confirm"
prompt = "Use preemptible instance?"
default = false
help = "Preemptible instances are cheaper but can be stopped by GCP (not for production)"
nickel_path = ["provider", "gcp_preemptible"]

[[elements]]
name = "gcp_enable_deletion_protection"
type = "confirm"
prompt = "Enable deletion protection?"
default = false
help = "Prevent accidental deletion of the instance"
nickel_path = ["provider", "gcp_enable_deletion_protection"]

[[elements]]
name = "gcp_ssh_keys"
type = "text"
prompt = "SSH public key (optional)"
required = false
help = "SSH public key for instance access (leave empty to skip)"
nickel_path = ["provider", "gcp_ssh_keys"]
@ -0,0 +1,89 @@
name = "hetzner_provider_fragment"

[[elements]]
name = "hetzner_header"
type = "section_header"
title = "☁️ Hetzner Cloud Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "hetzner_api_token"
type = "password"
prompt = "Hetzner API token"
required = true
help = "Your Hetzner Cloud API token for authentication"
nickel_path = ["provider", "hetzner_api_token"]

[[elements]]
name = "hetzner_server_type"
type = "select"
prompt = "Server type"
options = [
    { value = "cx11", label = "CX11 - 1 vCPU, 1 GB RAM" },
    { value = "cx21", label = "CX21 - 2 vCPU, 4 GB RAM" },
    { value = "cx31", label = "CX31 - 2 vCPU, 8 GB RAM" },
    { value = "cx41", label = "CX41 - 4 vCPU, 16 GB RAM" },
    { value = "cx51", label = "CX51 - 8 vCPU, 32 GB RAM" },
    { value = "cpx11", label = "CPX11 - 2 vCPU (dedicated), 4 GB RAM" },
    { value = "cpx21", label = "CPX21 - 4 vCPU (dedicated), 8 GB RAM" },
    { value = "cpx31", label = "CPX31 - 8 vCPU (dedicated), 16 GB RAM" },
    { value = "cx22", label = "CX22 - 2 vCPU, 4 GB RAM" },
    { value = "cx32", label = "CX32 - 2 vCPU, 8 GB RAM" },
    { value = "cx42", label = "CX42 - 4 vCPU, 16 GB RAM" },
    { value = "cx52", label = "CX52 - 8 vCPU, 32 GB RAM" },
]
default = "cx22"
required = true
help = "Hetzner Cloud server instance type"
nickel_path = ["provider", "hetzner_server_type"]

[[elements]]
name = "hetzner_location"
type = "select"
prompt = "Datacenter location"
options = [
    { value = "fsn1", label = "FSN1 - Falkenstein, Germany" },
    { value = "fsn1-dc14", label = "FSN1-DC14 - Falkenstein 14, Germany" },
    { value = "nbg1", label = "NBG1 - Nuremberg, Germany" },
    { value = "nbg1-dc3", label = "NBG1-DC3 - Nuremberg 3, Germany" },
    { value = "hel1", label = "HEL1 - Helsinki, Finland" },
    { value = "hel1-dc8", label = "HEL1-DC8 - Helsinki 8, Finland" },
    { value = "ash", label = "ASH - Ashburn, Virginia USA" },
    { value = "ash-dc1", label = "ASH-DC1 - Ashburn 1, Virginia USA" },
    { value = "hil", label = "HIL - Hillsboro, Oregon USA" },
    { value = "hil-dc1", label = "HIL-DC1 - Hillsboro 1, Oregon USA" },
]
default = "nbg1"
required = true
help = "Hetzner datacenter location"
nickel_path = ["provider", "hetzner_location"]

[[elements]]
name = "hetzner_image"
type = "select"
prompt = "Operating system image"
options = [
    { value = "ubuntu-24.04", label = "Ubuntu 24.04 LTS (Latest)" },
    { value = "ubuntu-22.04", label = "Ubuntu 22.04 LTS" },
    { value = "ubuntu-20.04", label = "Ubuntu 20.04 LTS" },
    { value = "debian-12", label = "Debian 12 (Bookworm)" },
    { value = "debian-11", label = "Debian 11 (Bullseye)" },
    { value = "debian-10", label = "Debian 10 (Buster)" },
]
default = "ubuntu-24.04"
required = true
help = "OS image to use for the server"
nickel_path = ["provider", "hetzner_image"]
@ -0,0 +1,247 @@
name = "lxd_provider_fragment"

[[elements]]
name = "lxd_header"
type = "section_header"
title = "🖥️ LXD Container/VM Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "lxd_remote"
type = "select"
prompt = "LXD Remote"
options = [
    { value = "local", label = "local - Local LXD server" },
    { value = "remote", label = "remote - Remote LXD server" },
]
default = "local"
required = true
help = "Use local LXD or connect to remote LXD server"
nickel_path = ["provider", "lxd_remote"]

[[elements]]
name = "lxd_remote_address"
type = "text"
prompt = "Remote server address (if remote)"
required = false
help = "Address of remote LXD server (e.g., https://lxd.example.com:8443)"
nickel_path = ["provider", "lxd_remote_address"]

[[elements]]
name = "lxd_remote_password"
type = "password"
prompt = "Remote server password (if remote)"
required = false
help = "Trust password for remote LXD server (will be masked)"
nickel_path = ["provider", "lxd_remote_password"]

[[elements]]
name = "lxd_instance_type"
type = "select"
prompt = "Instance Type"
options = [
    { value = "container", label = "Container - Lightweight, shared kernel" },
    { value = "virtual-machine", label = "Virtual Machine - Full VM with own kernel" },
]
default = "container"
required = true
help = "Run as container (fast, lightweight) or virtual machine (isolated)"
nickel_path = ["provider", "lxd_instance_type"]

[[elements]]
name = "lxd_image"
type = "select"
prompt = "Base Image"
options = [
    { value = "ubuntu:24.04", label = "Ubuntu 24.04 LTS (Noble)" },
    { value = "ubuntu:22.04", label = "Ubuntu 22.04 LTS (Jammy)" },
    { value = "ubuntu:20.04", label = "Ubuntu 20.04 LTS (Focal)" },
    { value = "debian:12", label = "Debian 12 (Bookworm)" },
    { value = "debian:11", label = "Debian 11 (Bullseye)" },
    { value = "alpine:3.19", label = "Alpine Linux 3.19 (minimal)" },
    { value = "alpine:3.18", label = "Alpine Linux 3.18" },
    { value = "rockylinux:9", label = "Rocky Linux 9" },
    { value = "rockylinux:8", label = "Rocky Linux 8" },
    { value = "archlinux", label = "Arch Linux (rolling release)" },
]
default = "ubuntu:24.04"
required = true
help = "Operating system image for the instance"
nickel_path = ["provider", "lxd_image"]

[[elements]]
name = "lxd_instance_name"
type = "text"
prompt = "Instance name"
required = true
help = "Unique name for the LXD instance"
nickel_path = ["provider", "lxd_instance_name"]

[[elements]]
name = "lxd_cpu_limit"
type = "text"
prompt = "CPU limit (cores)"
default = "2"
required = false
help = "Number of CPU cores (e.g., 2 or leave empty for unlimited)"
nickel_path = ["provider", "lxd_cpu_limit"]

[[elements]]
name = "lxd_memory_limit"
type = "text"
prompt = "Memory limit"
default = "2GB"
required = false
help = "Memory limit (e.g., 2GB, 4GB, or leave empty for unlimited)"
nickel_path = ["provider", "lxd_memory_limit"]

[[elements]]
name = "lxd_disk_size"
type = "text"
prompt = "Root disk size"
default = "10GB"
required = false
help = "Root disk size (e.g., 10GB, 20GB, or leave empty for default)"
nickel_path = ["provider", "lxd_disk_size"]

[[elements]]
name = "lxd_storage_pool"
type = "select"
prompt = "Storage pool"
options = [
    { value = "default", label = "default - Default storage pool" },
    { value = "dir", label = "dir - Directory-backed pool" },
    { value = "zfs", label = "zfs - ZFS pool (best performance)" },
    { value = "btrfs", label = "btrfs - Btrfs pool" },
    { value = "lvm", label = "lvm - LVM pool" },
]
default = "default"
required = true
help = "Storage pool for the instance root disk"
nickel_path = ["provider", "lxd_storage_pool"]

[[elements]]
name = "lxd_network"
type = "select"
prompt = "Network"
options = [
    { value = "lxdbr0", label = "lxdbr0 - Default LXD bridge (NAT)" },
    { value = "host", label = "host - Direct host networking" },
    { value = "macvlan", label = "macvlan - MAC VLAN (bridge to physical)" },
]
default = "lxdbr0"
required = true
help = "Network configuration for the instance"
nickel_path = ["provider", "lxd_network"]

[[elements]]
name = "lxd_ipv4_address"
type = "text"
prompt = "Static IPv4 address (optional)"
required = false
help = "Assign static IPv4 (e.g., 10.0.0.100) or leave empty for DHCP"
nickel_path = ["provider", "lxd_ipv4_address"]

[[elements]]
name = "lxd_ipv6_address"
type = "text"
prompt = "Static IPv6 address (optional)"
required = false
help = "Assign static IPv6 or leave empty for auto"
nickel_path = ["provider", "lxd_ipv6_address"]

[[elements]]
name = "lxd_profiles"
type = "text"
prompt = "Additional profiles (comma-separated)"
default = "default"
required = false
help = "LXD profiles to apply (e.g., default,docker)"
nickel_path = ["provider", "lxd_profiles"]

[[elements]]
name = "lxd_enable_nesting"
type = "confirm"
prompt = "Enable nesting (for Docker/LXD inside)?"
default = false
help = "Allow running containers/VMs inside this instance"
nickel_path = ["provider", "lxd_enable_nesting"]

[[elements]]
name = "lxd_privileged"
type = "confirm"
prompt = "Run as privileged container?"
default = false
help = "Run container without user namespace isolation (less secure)"
nickel_path = ["provider", "lxd_privileged"]

[[elements]]
name = "lxd_autostart"
type = "confirm"
prompt = "Auto-start on boot?"
default = true
help = "Automatically start instance when LXD daemon starts"
nickel_path = ["provider", "lxd_autostart"]

[[elements]]
name = "lxd_cloud_init"
type = "text"
prompt = "Cloud-init user data (optional)"
required = false
help = "Path to cloud-init configuration file (leave empty to skip)"
nickel_path = ["provider", "lxd_cloud_init"]
@ -0,0 +1,149 @@
name = "upcloud_provider_fragment"

[[elements]]
name = "upcloud_header"
type = "section_header"
title = "☁️ UpCloud Configuration"
border_top = true
border_bottom = true

[[elements]]
name = "upcloud_username"
type = "text"
prompt = "UpCloud username"
required = true
help = "Your UpCloud account username for API authentication"
nickel_path = ["provider", "upcloud_username"]

[[elements]]
name = "upcloud_password"
type = "password"
prompt = "UpCloud password"
required = true
help = "Your UpCloud account password (will be masked)"
nickel_path = ["provider", "upcloud_password"]

[[elements]]
name = "upcloud_zone"
type = "select"
prompt = "Availability zone"
options = [
    { value = "fi-hel1", label = "FI-HEL1 - Helsinki, Finland" },
    { value = "fi-hel2", label = "FI-HEL2 - Helsinki, Finland (Secondary)" },
    { value = "de-fra1", label = "DE-FRA1 - Frankfurt, Germany" },
    { value = "uk-lon1", label = "UK-LON1 - London, United Kingdom" },
    { value = "nl-ams1", label = "NL-AMS1 - Amsterdam, Netherlands" },
    { value = "us-chi1", label = "US-CHI1 - Chicago, USA" },
    { value = "us-nyc1", label = "US-NYC1 - New York, USA" },
    { value = "us-sjo1", label = "US-SJO1 - San Jose, USA" },
    { value = "sg-sin1", label = "SG-SIN1 - Singapore" },
    { value = "au-syd1", label = "AU-SYD1 - Sydney, Australia" },
    { value = "es-mad1", label = "ES-MAD1 - Madrid, Spain" },
    { value = "pl-waw1", label = "PL-WAW1 - Warsaw, Poland" },
]
default = "de-fra1"
required = true
help = "UpCloud zone where resources will be deployed"
nickel_path = ["provider", "upcloud_zone"]

[[elements]]
name = "upcloud_plan"
type = "select"
prompt = "Server plan"
options = [
    { value = "1xCPU-1GB", label = "1xCPU-1GB - 1 vCPU, 1 GB RAM, 25 GB SSD" },
    { value = "1xCPU-2GB", label = "1xCPU-2GB - 1 vCPU, 2 GB RAM, 50 GB SSD" },
    { value = "2xCPU-4GB", label = "2xCPU-4GB - 2 vCPU, 4 GB RAM, 80 GB SSD" },
    { value = "4xCPU-8GB", label = "4xCPU-8GB - 4 vCPU, 8 GB RAM, 160 GB SSD" },
    { value = "6xCPU-16GB", label = "6xCPU-16GB - 6 vCPU, 16 GB RAM, 320 GB SSD" },
    { value = "8xCPU-32GB", label = "8xCPU-32GB - 8 vCPU, 32 GB RAM, 640 GB SSD" },
    { value = "12xCPU-48GB", label = "12xCPU-48GB - 12 vCPU, 48 GB RAM, 960 GB SSD" },
    { value = "16xCPU-64GB", label = "16xCPU-64GB - 16 vCPU, 64 GB RAM, 1280 GB SSD" },
    { value = "20xCPU-96GB", label = "20xCPU-96GB - 20 vCPU, 96 GB RAM, 1920 GB SSD" },
    { value = "20xCPU-128GB", label = "20xCPU-128GB - 20 vCPU, 128 GB RAM, 2048 GB SSD" },
]
default = "2xCPU-4GB"
required = true
help = "UpCloud server plan (determines CPU, RAM, and storage)"
nickel_path = ["provider", "upcloud_plan"]

[[elements]]
name = "upcloud_template"
type = "select"
prompt = "Operating system template"
options = [
    { value = "Ubuntu Server 24.04 LTS (Noble Numbat)", label = "Ubuntu 24.04 LTS (Latest)" },
    { value = "Ubuntu Server 22.04 LTS (Jammy Jellyfish)", label = "Ubuntu 22.04 LTS" },
    { value = "Ubuntu Server 20.04 LTS (Focal Fossa)", label = "Ubuntu 20.04 LTS" },
    { value = "Debian 12 (Bookworm)", label = "Debian 12 (Bookworm)" },
    { value = "Debian 11 (Bullseye)", label = "Debian 11 (Bullseye)" },
    { value = "Debian 10 (Buster)", label = "Debian 10 (Buster)" },
    { value = "Rocky Linux 9", label = "Rocky Linux 9" },
    { value = "Rocky Linux 8", label = "Rocky Linux 8" },
    { value = "AlmaLinux 9", label = "AlmaLinux 9" },
    { value = "AlmaLinux 8", label = "AlmaLinux 8" },
]
default = "Ubuntu Server 24.04 LTS (Noble Numbat)"
required = true
help = "Operating system template for the server"
nickel_path = ["provider", "upcloud_template"]

[[elements]]
name = "upcloud_hostname"
type = "text"
prompt = "Server hostname"
required = true
help = "Hostname for the UpCloud server"
nickel_path = ["provider", "upcloud_hostname"]

[[elements]]
name = "upcloud_storage_size"
type = "text"
prompt = "Storage size (GB)"
default = "25"
required = true
help = "Additional storage size in GB (beyond plan default)"
nickel_path = ["provider", "upcloud_storage_size"]

[[elements]]
name = "upcloud_private_networking"
type = "confirm"
prompt = "Enable private networking?"
default = true
help = "Enable UpCloud private networking (SDN) for this server"
nickel_path = ["provider", "upcloud_private_networking"]

[[elements]]
name = "upcloud_backups"
type = "confirm"
prompt = "Enable automated backups?"
default = false
help = "Enable automated daily backups (additional cost)"
nickel_path = ["provider", "upcloud_backups"]
@ -0,0 +1,56 @@
name = "ssh_fragment"

[[elements]]
name = "ssh_header"
type = "section_header"
title = "🔐 SSH Credentials"
border_top = true
border_bottom = true

[[elements]]
name = "ssh_private_key_path"
type = "text"
prompt = "Private key path"
placeholder = "~/.ssh/id_rsa"
required = true
help = "Absolute or relative path to SSH private key file"
nickel_path = [
    "ssh_credentials",
    "private_key_path",
]

[[elements]]
name = "ssh_public_key_path"
type = "text"
prompt = "Public key path"
placeholder = "~/.ssh/id_rsa.pub"
required = true
help = "Absolute or relative path to SSH public key file"
nickel_path = [
    "ssh_credentials",
    "public_key_path",
]

[[elements]]
name = "ssh_username"
type = "text"
prompt = "SSH username"
placeholder = "torrust"
default = "torrust"
help = "Linux username for SSH access. Defaults to 'torrust'. Must be 1-32 characters, starting with a letter or underscore."
nickel_path = [
    "ssh_credentials",
    "username",
]

[[elements]]
name = "ssh_port"
type = "text"
prompt = "SSH port"
placeholder = "22"
default = "22"
help = "SSH port number (default 22). Must be between 1 and 65535."
nickel_path = [
    "ssh_credentials",
    "port",
]
16
crates/typedialog-prov-gen/templates/schemas/database.ncl
Normal file
@ -0,0 +1,16 @@
# Database Schema
# Type contracts for database configuration
# Supports SQLite (file-based) or MySQL (server-based) databases

{
  # Database base contract with optional fields
  # Validation logic in config layer ensures required fields per driver type
  Database = {
    driver | String,
    database_name | String | optional,
    host | String | optional,
    port | Number | optional,
    username | String | optional,
    password | String | optional,
  },
}
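For orientation, a hedged sketch (not part of this commit) of a SQLite-flavoured value the `Database` contract above would accept; the file name is an assumption:

```nickel
# Illustrative only: applying the Database contract to an assumed SQLite value.
let schemas = import "database.ncl" in
{
  driver = "sqlite3",
  database_name = "tracker.db", # host/port/credentials stay omitted for SQLite
} | schemas.Database
```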
@ -0,0 +1,9 @@
# Environment Schema
# Type contract for environment identification

{
  Environment = {
    name | String | optional, # Provided by user (no default)
    instance_name | String | optional, # Optional, auto-generated if not provided
  },
}
24
crates/typedialog-prov-gen/templates/schemas/features.ncl
Normal file
@ -0,0 +1,24 @@
# Features Schema
# Type contracts for optional features (Prometheus, Grafana)

{
  # Prometheus monitoring configuration (all optional - user decides what to set)
  PrometheusConfig = {
    enabled | Bool | optional,
    bind_address | String | optional,
    scrape_interval | Number | optional,
  },

  # Grafana visualization configuration (all optional - user decides what to set)
  GrafanaConfig = {
    enabled | Bool | optional,
    bind_address | String | optional,
    admin_password | String | optional,
  },

  # All optional features (user provides what they want)
  Features = {
    prometheus | PrometheusConfig | optional,
    grafana | GrafanaConfig | optional,
  },
}
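A hedged sketch of a value the `Features` contract would accept; the bind address and interval below are assumptions, not defaults shipped by the commit:

```nickel
# Illustrative only: Prometheus enabled, Grafana left out (it is optional).
let schemas = import "features.ncl" in
{
  prometheus = {
    enabled = true,
    bind_address = "0.0.0.0:9090", # assumed address
    scrape_interval = 15,
  },
} | schemas.Features
```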
19
crates/typedialog-prov-gen/templates/schemas/provider.ncl
Normal file
@ -0,0 +1,19 @@
# Provider Schema
# Type contracts for infrastructure provider configuration
# Supports LXD (local) or Hetzner Cloud (managed) providers
# Note: No defaults for provider - user must supply completely

{
  # Provider record: LXD and Hetzner fields (all optional - user chooses one)
  Provider = {
    provider | String | optional, # Provided by user
    # LXD fields
    profile_name | String | optional,
    # Hetzner fields
    api_token
      | doc "Hetzner API token"
      | String | optional,
    server_type | String | optional,
    location | String | optional,
    image | String | optional,
  },
}
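A hedged sketch of a Hetzner-shaped value for the `Provider` contract; every concrete value below is an assumption:

```nickel
# Illustrative only: one provider is chosen, the LXD field is simply omitted.
let schemas = import "provider.ncl" in
{
  provider = "hetzner",
  api_token = "example-token", # placeholder, not a real credential
  server_type = "cx22",
  location = "fsn1",
  image = "ubuntu-24.04",
} | schemas.Provider
```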
14
crates/typedialog-prov-gen/templates/schemas/ssh.ncl
Normal file
@ -0,0 +1,14 @@
# SSH Credentials Schema
# Type contract for SSH authentication configuration

{
  SshCredentials = {
    # Required (usually provided by user)
    private_key_path | String | optional,
    public_key_path | String | optional,

    # Optional (have defaults)
    username | String | optional,
    port | Number | optional,
  },
}
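A hedged sketch of a value for `SshCredentials` — the record the `ssh_credentials` nickel_path entries in the form fragment above map into; the key paths are assumptions:

```nickel
# Illustrative only: the shape produced once the ssh form answers are mapped in.
let schemas = import "ssh.ncl" in
{
  private_key_path = "~/.ssh/id_rsa",
  public_key_path = "~/.ssh/id_rsa.pub",
  username = "torrust",
  port = 22,
} | schemas.SshCredentials
```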
195
crates/typedialog-prov-gen/templates/scripts/config.nu
Executable file
@ -0,0 +1,195 @@
#!/usr/bin/env nu
# Torrust Tracker Environment Configuration Wizard (Nushell variant)
# Main orchestration script for the configuration wizard workflow
#
# Compliance: .claude/guidelines/nushell/NUSHELL_COMPLIANCE_CHECKLIST.md
# - No try-catch: Uses `do { } | complete` pattern
# - No let mut: Pure immutable transformations
# - Function signatures with explicit types
# - External commands prefixed with `^`
# - String interpolation: ($var) for variables, ($expr) for expressions
#
# This script:
# 1. Verifies TypeDialog and Nickel are installed
# 2. Launches interactive TypeDialog form
# 3. Converts JSON output to Nickel configuration
# 4. Validates with Nickel validators
# 5. Exports final JSON to envs/ directory
#
# Usage:
#   ./provisioning/scripts/config.nu

# Check if a command exists
def check-command [cmd: string]: nothing -> bool {
    (do { ^which $cmd } | complete).exit_code == 0
}

# Print section header
def print-header [msg: string]: nothing -> nothing {
    print "═══════════════════════════════════════════════════════════"
    print $"🎯 ($msg)"
    print "═══════════════════════════════════════════════════════════"
    print ""
}

# Print step message with progress
def print-step [step: string, total: string, msg: string]: nothing -> nothing {
    print $"📝 Step ($step)/($total): ($msg)..."
}

# Print success message
def print-success [msg: string]: nothing -> nothing {
    print $"✅ ($msg)"
}

# Print info message
def print-info [msg: string]: nothing -> nothing {
    print $"ℹ️ ($msg)"
}

# Print error message to stderr
def print-error [msg: string]: nothing -> nothing {
    print -e $"❌ ($msg)"
}

# Verify dependencies are installed
def verify-dependencies []: nothing -> bool {
    print "Checking dependencies..."
    print ""

    let deps = [
        {name: "typedialog", install: "cargo install typedialog"}
        {name: "nickel", install: "cargo install nickel-lang-cli"}
        {name: "jq", install: "brew install jq (or apt-get install jq)"}
    ]

    let missing = ($deps | where {|dep| not (check-command $dep.name)})

    if ($missing | is-not-empty) {
        print-error "Missing dependencies:"
        $missing | each {|dep|
            print -e $" - ($dep.name): ($dep.install)"
        }
        return false
    }

    print-success "All dependencies available"
    print ""
    return true
}

# Main wizard function
def main []: nothing -> nothing {
    print-header "Torrust Tracker - Environment Configuration Wizard"

    # Verify dependencies
    if not (verify-dependencies) {
        exit 1
    }

    # Get directory paths - the script is expected to be in provisioning/scripts/
    # Defaults to the directory containing this script; override with TORRUST_SCRIPT_DIR
    let script_dir = ($env.TORRUST_SCRIPT_DIR? | default $env.FILE_PWD)
    let provisioning_dir = ($script_dir | path dirname)
    let project_root = ($provisioning_dir | path dirname)
    let envs_dir = ($project_root | path join "envs")
    let values_dir = ($provisioning_dir | path join "values")
    let form_path = ($provisioning_dir | path join "config-form.toml")

    # Create directories if they don't exist
    ^mkdir -p $envs_dir
    ^mkdir -p $values_dir

    # Step 1: Run TypeDialog form
    print-step "1" "4" "Collecting configuration via interactive form"

    let temp_output = $"/tmp/typedialog-output-(date now | format date '%s').json"

    # TypeDialog outputs to stdout, redirect to file
    ^typedialog $form_path | save --force $temp_output

    # Check if output file has content (--raw avoids parsing a possibly empty JSON file)
    if (not ($temp_output | path exists)) or (open --raw $temp_output | is-empty) {
        print-error "TypeDialog output is empty. Wizard cancelled."
        exit 1
    }

    print-success "Configuration collected"
    print ""

    # Step 2: Extract environment name
    print-step "2" "4" "Processing configuration"

    let config = (open $temp_output)
    let env_name = ($config.environment_name? | default "")

    if ($env_name | is-empty) {
        print-error "Could not extract environment name from form output"
        exit 1
    }

    print-info $"Environment name: ($env_name)"

    let values_file = ($values_dir | path join $"($env_name).ncl")
    let json_file = ($envs_dir | path join $"($env_name).json")

    # Step 3: Convert JSON to Nickel
    print-step "3" "4" "Converting to Nickel configuration"

    let converter_script = ($script_dir | path join "json-to-nickel.nu")
    ^nu -c $"source '($converter_script)'; main '($temp_output)' '($values_file)'"

    if not ($values_file | path exists) {
        print-error "Nickel file generation failed"
        exit 1
    }

    print-success $"Nickel configuration generated: ($values_file)"
    print ""

    # Validate Nickel before export (not a numbered step)
    print-info "Validating Nickel configuration..."

    let validate_script = ($script_dir | path join "validate-nickel.nu")
    ^nu -c $"source '($validate_script)'; main '($values_file)'"

    print-success "Nickel validation passed"
    print ""

    # Step 4: Export Nickel to JSON
    print-step "4" "4" "Exporting to JSON format"

    let exporter_script = ($script_dir | path join "nickel-to-json.nu")
    ^nu -c $"source '($exporter_script)'; main '($values_file)' '($json_file)'"

    if not ($json_file | path exists) {
        print-error "JSON export failed"
        exit 1
    }

    print-success $"JSON configuration exported: ($json_file)"
    print ""

    # Cleanup temporary file
    ^rm -f $temp_output

    # Success summary
    print-header "Configuration Generation Complete!"
    print ""

    print-info "Generated files:"
    print $" - Nickel: ($values_file)"
    print $" - JSON: ($json_file)"
    print ""

    print-info "Next steps:"
    print $" 1. Review configuration: cat ($json_file) | jq ."
    print $" 2. Create environment: cargo run --bin torrust-tracker-deployer -- create environment --env-file ($json_file)"
    print $" 3. Provision: cargo run --bin torrust-tracker-deployer -- provision ($env_name)"
    print ""
}

# Nushell invokes `main` automatically when the script is executed,
# so no explicit trailing call is needed (one would run main twice).
198
crates/typedialog-prov-gen/templates/scripts/config.sh
Executable file
@ -0,0 +1,198 @@
#!/bin/bash
# Torrust Tracker Environment Configuration Wizard (Bash variant)
# Main orchestration script for the configuration wizard workflow
#
# This script:
# 1. Verifies TypeDialog and Nickel are installed
# 2. Launches interactive TypeDialog form
# 3. Converts JSON output to Nickel configuration
# 4. Validates with Nickel validators
# 5. Exports final JSON to envs/ directory
#
# Usage:
#   ./provisioning/scripts/config.sh

set -euo pipefail

# ============================================================================
# CONFIGURATION
# ============================================================================

readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly PROVISIONING_DIR="$(dirname "$SCRIPT_DIR")"
readonly PROJECT_ROOT="$(dirname "$PROVISIONING_DIR")"
readonly ENVS_DIR="${PROJECT_ROOT}/envs"
readonly VALUES_DIR="${PROVISIONING_DIR}/values"
readonly FORM_PATH="${PROVISIONING_DIR}/config-form.toml"
readonly SCRIPTS_DIR="$SCRIPT_DIR"

# ============================================================================
# UTILITY FUNCTIONS
# ============================================================================

print_header() {
    echo "═══════════════════════════════════════════════════════════"
    echo "🎯 $1"
    echo "═══════════════════════════════════════════════════════════"
    echo ""
}

print_step() {
    echo "📝 Step $1/$2: $3..."
}

print_success() {
    echo "✅ $1"
}

print_error() {
    echo "❌ $1" >&2
}

print_info() {
    echo "ℹ️ $1"
}

# Check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# ============================================================================
# DEPENDENCY VERIFICATION
# ============================================================================

verify_dependencies() {
    print_header "Checking Dependencies"

    local missing_deps=()

    if ! command_exists "typedialog"; then
        missing_deps+=("typedialog (install with: cargo install typedialog)")
    fi

    if ! command_exists "nickel"; then
        missing_deps+=("nickel (install with: cargo install nickel-lang-cli)")
    fi

    if ! command_exists "jq"; then
        missing_deps+=("jq (install with: brew install jq or apt-get install jq)")
    fi

    if [[ ${#missing_deps[@]} -gt 0 ]]; then
        print_error "Missing dependencies:"
        for dep in "${missing_deps[@]}"; do
            echo " - $dep" >&2
        done
        return 1
    fi

    print_success "All dependencies available"
    echo ""
}

# ============================================================================
# MAIN WORKFLOW
# ============================================================================

main() {
    print_header "Torrust Tracker - Environment Configuration Wizard"

    # Step 0: Verify dependencies
    if ! verify_dependencies; then
        exit 1
    fi

    # Ensure directories exist
    mkdir -p "$ENVS_DIR"
    mkdir -p "$VALUES_DIR"

    # Step 1: Run TypeDialog form
    print_step "1" "4" "Collecting configuration via interactive form"

    local temp_output
    temp_output=$(mktemp)
    trap "rm -f '$temp_output'" EXIT

    if ! typedialog run "$FORM_PATH" > "$temp_output" 2>&1; then
        print_error "TypeDialog form failed"
        cat "$temp_output" >&2
        exit 1
    fi

    if [[ ! -s "$temp_output" ]]; then
        print_error "TypeDialog output is empty. Wizard cancelled."
        exit 1
    fi

    print_success "Configuration collected"
    echo ""

    # Step 2: Extract environment name
    print_step "2" "4" "Processing configuration"

    local env_name
    env_name=$(jq -r '.environment_name' "$temp_output")

    # jq -r prints the literal string "null" when the key is missing
    if [[ -z "$env_name" || "$env_name" == "null" ]]; then
        print_error "Could not extract environment name from form output"
        exit 1
    fi

    print_info "Environment name: $env_name"

    local values_file="${VALUES_DIR}/${env_name}.ncl"
    local json_file="${ENVS_DIR}/${env_name}.json"

    # Step 3: Convert JSON to Nickel
    print_step "3" "4" "Converting to Nickel configuration"

    if ! bash "$SCRIPTS_DIR/json-to-nickel.sh" "$temp_output" "$values_file"; then
        print_error "Nickel file generation failed"
        exit 1
    fi

    print_success "Nickel configuration generated: $values_file"
    echo ""

    # Validate Nickel before export (not a numbered step)
    print_info "Validating Nickel configuration..."

    if ! nickel eval "$values_file" > /dev/null 2>&1; then
        print_error "Nickel validation failed"
        # Re-run to show the errors; `|| true` keeps set -e from exiting early
        nickel eval "$values_file" >&2 || true
        exit 1
    fi

    print_success "Nickel validation passed"
    echo ""

    # Step 4: Export Nickel to JSON
    print_step "4" "4" "Exporting to JSON format"

    if ! bash "$SCRIPTS_DIR/nickel-to-json.sh" "$values_file" "$json_file"; then
        print_error "JSON export failed"
        exit 1
    fi

    print_success "JSON configuration exported: $json_file"
    echo ""

    # Success summary
    print_header "Configuration Generation Complete!"
    echo ""

    print_info "Generated files:"
    echo " - Nickel: $values_file"
    echo " - JSON: $json_file"
    echo ""

    print_info "Next steps:"
    echo " 1. Review configuration: cat '$json_file' | jq ."
    echo " 2. Create environment: cargo run --bin torrust-tracker-deployer -- create environment --env-file '$json_file'"
    echo " 3. Provision: cargo run --bin torrust-tracker-deployer -- provision '$env_name'"
    echo ""
}

# Run main function
main "$@"
254
crates/typedialog-prov-gen/templates/scripts/json-to-nickel.nu
Executable file
@ -0,0 +1,254 @@
#!/usr/bin/env nu
# Convert TypeDialog JSON output to Nickel configuration file (Nushell variant)
#
# Compliance: .claude/guidelines/nushell/NUSHELL_COMPLIANCE_CHECKLIST.md
# - Function signatures with explicit types
# - No try-catch: Uses `do { } | complete`
# - External commands prefixed with `^`
# - String interpolation: ($var) for variables, ($expr) for expressions
#
# Usage:
#   nu ./json-to-nickel.nu <input.json> <output.ncl>

# Extract value from JSON with optional default, normalized to a string
def extract-json [json: record, key: string, default: string = ""]: nothing -> string {
    let maybe_value = ($json | get --optional $key)
    # `into string` normalizes booleans/numbers so later `== "true"` checks work
    if ($maybe_value == null) { $default } else { $maybe_value | into string }
}

def main [input_json: string, output_nickel: string]: nothing -> nothing {
    if not ($input_json | path exists) {
        print -e $"Error: Input JSON file not found: ($input_json)"
        exit 1
    }

    # Load JSON
    let config = (open $input_json)

    # Extract environment section
    let env_name = (extract-json $config "environment_name")
    let instance_name = (extract-json $config "instance_name")

    # Extract provider section
    let provider = (extract-json $config "provider")

    # Extract provider-specific values
    let lxd_profile = if ($provider == "lxd") {
        extract-json $config "lxd_profile_name"
    } else {
        ""
    }

    let hetzner_token = if ($provider == "hetzner") {
        extract-json $config "hetzner_api_token"
    } else {
        ""
    }

    let hetzner_server = if ($provider == "hetzner") {
        extract-json $config "hetzner_server_type"
    } else {
        ""
    }

    let hetzner_location = if ($provider == "hetzner") {
        extract-json $config "hetzner_location"
    } else {
        ""
    }

    let hetzner_image = if ($provider == "hetzner") {
        extract-json $config "hetzner_image"
    } else {
        ""
    }

    # Extract SSH section
    let ssh_private_key = (extract-json $config "ssh_private_key_path")
    let ssh_public_key = (extract-json $config "ssh_public_key_path")
    let ssh_username = (extract-json $config "ssh_username" "torrust")
    let ssh_port = (extract-json $config "ssh_port" "22")

    # Extract database section
    let database_driver = (extract-json $config "database_driver")

    let sqlite_db = if ($database_driver == "sqlite3") {
        extract-json $config "sqlite_database_name"
    } else {
        ""
    }

    let mysql_host = if ($database_driver == "mysql") {
        extract-json $config "mysql_host"
    } else {
        ""
    }

    let mysql_port = if ($database_driver == "mysql") {
        extract-json $config "mysql_port"
    } else {
        ""
    }

    let mysql_db = if ($database_driver == "mysql") {
        extract-json $config "mysql_database_name"
    } else {
        ""
    }

    let mysql_user = if ($database_driver == "mysql") {
        extract-json $config "mysql_username"
    } else {
        ""
    }

    let mysql_pass = if ($database_driver == "mysql") {
        extract-json $config "mysql_password"
    } else {
        ""
    }

    # Extract tracker section
    let tracker_private = (extract-json $config "tracker_private_mode" "false")
    let udp_bind = (extract-json $config "udp_tracker_bind_address")
    let http_bind = (extract-json $config "http_tracker_bind_address")
    let api_bind = (extract-json $config "http_api_bind_address")
    let api_token = (extract-json $config "http_api_admin_token")

    # Extract features section
    let enable_prometheus = (extract-json $config "enable_prometheus" "false")
    let enable_grafana = (extract-json $config "enable_grafana" "false")

    let prometheus_bind = if ($enable_prometheus == "true") {
        extract-json $config "prometheus_bind_address"
    } else {
        ""
    }

    let prometheus_interval = if ($enable_prometheus == "true") {
        extract-json $config "prometheus_scrape_interval" "15"
    } else {
        ""
    }

    let grafana_bind = if ($enable_grafana == "true") {
        extract-json $config "grafana_bind_address"
    } else {
        ""
    }

    let grafana_pass = if ($enable_grafana == "true") {
        extract-json $config "grafana_admin_password"
    } else {
        ""
    }

    # Build Nickel configuration
    let timestamp = (date now | format date '%Y-%m-%dT%H:%M:%SZ')

    # Build instance_name section
    let instance_section = if ($instance_name != "") {
        $" instance_name = validators_instance.ValidInstanceName \"($instance_name)\",\n"
    } else {
        ""
    }

    # Build provider section (does NOT include environment closing brace)
    let provider_section = if ($provider == "lxd") {
        $" provider = {\n provider = \"lxd\",\n profile_name = validators_instance.ValidInstanceName \"($lxd_profile)\",\n },"
    } else if ($provider == "hetzner") {
        $" provider = {\n provider = \"hetzner\",\n api_token = \"($hetzner_token)\",\n server_type = \"($hetzner_server)\",\n location = \"($hetzner_location)\",\n image = \"($hetzner_image)\",\n },"
    } else {
        ""
    }

    # Build database section
    let database_section = if ($database_driver == "sqlite3") {
        $" {\n driver = \"sqlite3\",\n database_name = \"($sqlite_db)\",\n },"
    } else if ($database_driver == "mysql") {
        $" {\n driver = \"mysql\",\n host = \"($mysql_host)\",\n port = validators_common.ValidPort ($mysql_port),\n database_name = \"($mysql_db)\",\n username = \"($mysql_user)\",\n password = \"($mysql_pass)\",\n },"
    } else {
        ""
    }

    # Build prometheus section
    let prometheus_section = if ($enable_prometheus == "true") {
        $" bind_address = \"($prometheus_bind)\",\n scrape_interval = ($prometheus_interval),"
    } else {
        ""
    }

    # Build grafana section
    let grafana_section = if ($enable_grafana == "true") {
        $" bind_address = \"($grafana_bind)\",\n admin_password = \"($grafana_pass)\","
    } else {
        ""
    }

    # Construct the complete Nickel file - using pure string interpolation
    let nickel_content = $"# Environment configuration \(generated from TypeDialog\)
# Generated: ($timestamp)

let schemas = import \"../schemas/environment.ncl\" in
let defaults = import \"../defaults/environment.ncl\" in
let validators = import \"../validators/environment.ncl\" in
let validators_instance = import \"../validators/instance.ncl\" in
let validators_username = import \"../validators/username.ncl\" in
let validators_common = import \"../validators/common.ncl\" in
let validators_network = import \"../validators/network.ncl\" in

let user_config = {
environment = {
name = validators.ValidEnvironmentName \"($env_name)\",
($instance_section) },

($provider_section)
ssh_credentials = {
private_key_path = \"($ssh_private_key)\",
public_key_path = \"($ssh_public_key)\",
username = validators_username.ValidUsername \"($ssh_username)\",
port = validators_common.ValidPort ($ssh_port),
},

tracker = {
core = {
private = ($tracker_private),
database =
($database_section)
},
udp_trackers = [
{ bind_address = validators_network.ValidBindAddress \"($udp_bind)\" },
],
http_trackers = [
{ bind_address = validators_network.ValidBindAddress \"($http_bind)\" },
],
http_api = {
bind_address = validators_network.ValidBindAddress \"($api_bind)\",
admin_token = \"($api_token)\",
},
},

features = {
prometheus = {
enabled = ($enable_prometheus),
($prometheus_section)
},
grafana = {
enabled = ($enable_grafana),
($grafana_section)
},
},
} in

# Merge defaults with user config
defaults & user_config
"

    # Write to file
    $nickel_content | save --force $output_nickel

    print $"✅ Nickel file generated: ($output_nickel)"
}

# Script is a library - call main directly:
#   nu -c 'source ./json-to-nickel.nu; main "input.json" "output.ncl"'
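To make the converter's output concrete, a hedged sketch of roughly what a generated values file looks like for an LXD + SQLite run; the environment and profile names are assumptions:

```nickel
# Illustrative only: rough shape of a generated values file (LXD + SQLite).
let defaults = import "../defaults/environment.ncl" in
let validators = import "../validators/environment.ncl" in
let validators_instance = import "../validators/instance.ncl" in

let user_config = {
  environment = { name = validators.ValidEnvironmentName "dev" },
  provider = {
    provider = "lxd",
    profile_name = validators_instance.ValidInstanceName "torrust-dev",
  },
} in

# Merge defaults with user config
defaults & user_config
```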
246
crates/typedialog-prov-gen/templates/scripts/json-to-nickel.sh
Executable file
@ -0,0 +1,246 @@
#!/bin/bash
# Convert TypeDialog JSON output to Nickel configuration file
#
# This script takes JSON output from TypeDialog and generates a Nickel
# configuration file that merges user values with schemas/defaults/validators.
#
# Usage:
#   ./json-to-nickel.sh <input.json> <output.ncl>
#
# Arguments:
#   input.json - JSON file from TypeDialog (required)
#   output.ncl - Nickel output file (required)

set -euo pipefail

if [[ $# -lt 2 ]]; then
    echo "Usage: $0 <input.json> <output.ncl>" >&2
    exit 1
fi

readonly INPUT_JSON="$1"
readonly OUTPUT_NICKEL="$2"

if [[ ! -f "$INPUT_JSON" ]]; then
    echo "Error: Input JSON file not found: $INPUT_JSON" >&2
    exit 1
fi

# ============================================================================
# EXTRACT VALUES FROM JSON
# ============================================================================

extract_json() {
    local key="$1"
    local default="${2:-}"
    jq -r ".${key} // \"${default}\"" "$INPUT_JSON"
}

# Environment section
ENV_NAME=$(extract_json "environment_name")
INSTANCE_NAME=$(extract_json "instance_name" "")

# Provider section
PROVIDER=$(extract_json "provider")

# Provider-specific values
if [[ "$PROVIDER" == "lxd" ]]; then
    LXD_PROFILE=$(extract_json "lxd_profile_name")
elif [[ "$PROVIDER" == "hetzner" ]]; then
    HETZNER_TOKEN=$(extract_json "hetzner_api_token")
    HETZNER_SERVER=$(extract_json "hetzner_server_type")
    HETZNER_LOCATION=$(extract_json "hetzner_location")
    HETZNER_IMAGE=$(extract_json "hetzner_image")
fi

# SSH section
SSH_PRIVATE_KEY=$(extract_json "ssh_private_key_path")
SSH_PUBLIC_KEY=$(extract_json "ssh_public_key_path")
SSH_USERNAME=$(extract_json "ssh_username" "torrust")
SSH_PORT=$(extract_json "ssh_port" "22")

# Database section
DATABASE_DRIVER=$(extract_json "database_driver")

if [[ "$DATABASE_DRIVER" == "sqlite3" ]]; then
    SQLITE_DB=$(extract_json "sqlite_database_name")
elif [[ "$DATABASE_DRIVER" == "mysql" ]]; then
    MYSQL_HOST=$(extract_json "mysql_host")
    MYSQL_PORT=$(extract_json "mysql_port")
    MYSQL_DB=$(extract_json "mysql_database_name")
    MYSQL_USER=$(extract_json "mysql_username")
    MYSQL_PASS=$(extract_json "mysql_password")
fi

# Tracker section
TRACKER_PRIVATE=$(extract_json "tracker_private_mode" "false")
UDP_BIND=$(extract_json "udp_tracker_bind_address")
HTTP_BIND=$(extract_json "http_tracker_bind_address")
API_BIND=$(extract_json "http_api_bind_address")
API_TOKEN=$(extract_json "http_api_admin_token")

# Features section
ENABLE_PROMETHEUS=$(extract_json "enable_prometheus" "false")
ENABLE_GRAFANA=$(extract_json "enable_grafana" "false")

if [[ "$ENABLE_PROMETHEUS" == "true" ]]; then
    PROMETHEUS_BIND=$(extract_json "prometheus_bind_address")
    PROMETHEUS_INTERVAL=$(extract_json "prometheus_scrape_interval" "15")
fi

if [[ "$ENABLE_GRAFANA" == "true" ]]; then
    GRAFANA_BIND=$(extract_json "grafana_bind_address")
    GRAFANA_PASS=$(extract_json "grafana_admin_password")
fi

# ============================================================================
# GENERATE NICKEL FILE
# ============================================================================

# Unquoted delimiter so $(date -Iseconds) expands into the generated header
cat > "$OUTPUT_NICKEL" <<NICKEL_TEMPLATE
# Environment configuration (generated from TypeDialog)
# Generated: $(date -Iseconds)

let schemas = import "../schemas/environment.ncl" in
let defaults = import "../defaults/environment.ncl" in
let validators = import "../validators/environment.ncl" in
let validators_instance = import "../validators/instance.ncl" in
let validators_username = import "../validators/username.ncl" in
let validators_common = import "../validators/common.ncl" in
let validators_network = import "../validators/network.ncl" in

let user_config = {
NICKEL_TEMPLATE

# Append environment section
cat >> "$OUTPUT_NICKEL" <<EOF
environment = {
name = validators.ValidEnvironmentName "$ENV_NAME",
EOF

if [[ -n "$INSTANCE_NAME" ]]; then
    cat >> "$OUTPUT_NICKEL" <<EOF
instance_name = validators_instance.ValidInstanceName "$INSTANCE_NAME",
EOF
fi

cat >> "$OUTPUT_NICKEL" <<'EOF'
},

EOF

# Append provider section
if [[ "$PROVIDER" == "lxd" ]]; then
    cat >> "$OUTPUT_NICKEL" <<EOF
provider = {
provider = "lxd",
profile_name = validators_instance.ValidInstanceName "$LXD_PROFILE",
},

EOF
elif [[ "$PROVIDER" == "hetzner" ]]; then
    cat >> "$OUTPUT_NICKEL" <<EOF
provider = {
provider = "hetzner",
api_token = "$HETZNER_TOKEN",
server_type = "$HETZNER_SERVER",
location = "$HETZNER_LOCATION",
image = "$HETZNER_IMAGE",
},

EOF
fi

# Append SSH section
cat >> "$OUTPUT_NICKEL" <<EOF
ssh_credentials = {
private_key_path = "$SSH_PRIVATE_KEY",
public_key_path = "$SSH_PUBLIC_KEY",
username = validators_username.ValidUsername "$SSH_USERNAME",
port = validators_common.ValidPort $SSH_PORT,
},

EOF

# Append tracker core + database section
cat >> "$OUTPUT_NICKEL" <<EOF
tracker = {
core = {
private = $TRACKER_PRIVATE,
database =
EOF

if [[ "$DATABASE_DRIVER" == "sqlite3" ]]; then
    cat >> "$OUTPUT_NICKEL" <<EOF
{
driver = "sqlite3",
database_name = "$SQLITE_DB",
},
EOF
elif [[ "$DATABASE_DRIVER" == "mysql" ]]; then
    cat >> "$OUTPUT_NICKEL" <<EOF
{
driver = "mysql",
host = "$MYSQL_HOST",
port = validators_common.ValidPort $MYSQL_PORT,
database_name = "$MYSQL_DB",
username = "$MYSQL_USER",
password = "$MYSQL_PASS",
},
EOF
fi

# Append tracker services
cat >> "$OUTPUT_NICKEL" <<EOF
},
udp_trackers = [
{ bind_address = validators_network.ValidBindAddress "$UDP_BIND" },
],
http_trackers = [
{ bind_address = validators_network.ValidBindAddress "$HTTP_BIND" },
],
http_api = {
bind_address = validators_network.ValidBindAddress "$API_BIND",
admin_token = "$API_TOKEN",
},
},

EOF

# Append features section
cat >> "$OUTPUT_NICKEL" <<EOF
features = {
prometheus = {
enabled = $ENABLE_PROMETHEUS,
EOF

if [[ "$ENABLE_PROMETHEUS" == "true" ]]; then
    cat >> "$OUTPUT_NICKEL" <<EOF
bind_address = "$PROMETHEUS_BIND",
scrape_interval = $PROMETHEUS_INTERVAL,
EOF
fi

cat >> "$OUTPUT_NICKEL" <<EOF
},
grafana = {
enabled = $ENABLE_GRAFANA,
EOF

if [[ "$ENABLE_GRAFANA" == "true" ]]; then
    cat >> "$OUTPUT_NICKEL" <<EOF
bind_address = "$GRAFANA_BIND",
admin_password = "$GRAFANA_PASS",
EOF
fi

cat >> "$OUTPUT_NICKEL" <<'EOF'
},
},
} in

# Merge defaults with user config
defaults & user_config
EOF

echo "✅ Nickel file generated: $OUTPUT_NICKEL"
38
crates/typedialog-prov-gen/templates/scripts/nickel-to-json.nu
Executable file
@ -0,0 +1,38 @@
#!/usr/bin/env nu
# Export Nickel configuration to JSON format (Nushell variant)
#
# Compliance: .claude/guidelines/nushell/NUSHELL_COMPLIANCE_CHECKLIST.md
# - Function signatures with explicit types
# - External commands prefixed with `^`
# - String interpolation: ($var) for variables
#
# Usage:
#   ./nickel-to-json.nu <input.ncl> <output.json>

def main [input_nickel: string, output_json: string]: nothing -> nothing {
    if not ($input_nickel | path exists) {
        print -e $"Error: Input Nickel file not found: ($input_nickel)"
        exit 1
    }

    # Export Nickel to JSON
    ^nickel export --format json $input_nickel | save --force $output_json

    # Verify output was generated
    if not ($output_json | path exists) {
        print -e "❌ Nickel export failed"
        exit 1
    }

    let file_size = (ls $output_json | get size.0)
    if ($file_size == 0b) {
        # 0b is the zero-byte filesize literal (filesize values don't compare to bare ints)
        print -e "❌ Nickel export produced empty output"
        ^rm -f $output_json
        exit 1
    }

    print $"✅ JSON exported: ($output_json)"
}

# Script is a library - call main directly:
#   nu -c 'source ./nickel-to-json.nu; main "input.ncl" "output.json"'
43
crates/typedialog-prov-gen/templates/scripts/nickel-to-json.sh
Executable file
@ -0,0 +1,43 @@
#!/bin/bash
# Export Nickel configuration to JSON format
#
# This script evaluates a Nickel configuration file and exports it as JSON.
# The resulting JSON is suitable for use with the Torrust Tracker Deployer.
#
# Usage:
#   ./nickel-to-json.sh <input.ncl> <output.json>
#
# Arguments:
#   input.ncl   - Nickel configuration file (required)
#   output.json - JSON output file (required)

set -euo pipefail

if [[ $# -lt 2 ]]; then
    echo "Usage: $0 <input.ncl> <output.json>" >&2
    exit 1
fi

readonly INPUT_NICKEL="$1"
readonly OUTPUT_JSON="$2"

if [[ ! -f "$INPUT_NICKEL" ]]; then
    echo "Error: Input Nickel file not found: $INPUT_NICKEL" >&2
    exit 1
fi

# Export Nickel to JSON using nickel CLI
if ! nickel export --format json "$INPUT_NICKEL" > "$OUTPUT_JSON" 2>&1; then
    echo "❌ Nickel export failed" >&2
    cat "$OUTPUT_JSON" >&2
    rm -f "$OUTPUT_JSON"
    exit 1
fi

if [[ ! -s "$OUTPUT_JSON" ]]; then
    echo "❌ Nickel export produced empty output" >&2
    rm -f "$OUTPUT_JSON"
    exit 1
fi

echo "✅ JSON exported: $OUTPUT_JSON"
35
crates/typedialog-prov-gen/templates/scripts/validate-nickel.nu
Executable file
@ -0,0 +1,35 @@
#!/usr/bin/env nu
# Validate Nickel configuration file (Nushell variant)
#
# Compliance: .claude/guidelines/nushell/NUSHELL_COMPLIANCE_CHECKLIST.md
# - Function signatures with explicit types
# - External commands prefixed with `^`
# - String interpolation: ($var) for variables
#
# Usage:
#   ./validate-nickel.nu <config.ncl>

def main [input_nickel: string]: nothing -> nothing {
    if not ($input_nickel | path exists) {
        print -e $"❌ Error: File not found: ($input_nickel)"
        exit 1
    }

    print $"Validating Nickel configuration: ($input_nickel)"
    print ""

    let validate_result = (do { ^nickel eval $input_nickel } | complete)

    if $validate_result.exit_code == 0 {
        print "✅ Nickel configuration is valid"
        exit 0
    } else {
        print "❌ Nickel validation failed. Errors:"
        print ""
        # Nickel reports errors on stderr, not stdout
        print $validate_result.stderr
        exit 1
    }
}

# Script is a library - call main directly:
#   nu -c 'source ./validate-nickel.nu; main "config.ncl"'
39
crates/typedialog-prov-gen/templates/scripts/validate-nickel.sh
Executable file
@ -0,0 +1,39 @@
#!/bin/bash
# Validate Nickel configuration file
#
# This script evaluates a Nickel configuration file to check for syntax errors
# and validation failures. If validation succeeds, all values are validated
# according to the defined validators.
#
# Usage:
#   ./validate-nickel.sh <config.ncl>
#
# Arguments:
#   config.ncl - Nickel configuration file to validate (required)

set -euo pipefail

if [[ $# -lt 1 ]]; then
    echo "Usage: $0 <config.ncl>" >&2
    exit 1
fi

readonly INPUT_NICKEL="$1"

if [[ ! -f "$INPUT_NICKEL" ]]; then
    echo "❌ Error: File not found: $INPUT_NICKEL" >&2
    exit 1
fi

echo "Validating Nickel configuration: $INPUT_NICKEL"
echo ""

if nickel eval "$INPUT_NICKEL" > /dev/null 2>&1; then
    echo "✅ Nickel configuration is valid"
    exit 0
else
    echo "❌ Nickel validation failed. Errors:"
    echo ""
    # Re-run to show the errors; `|| true` keeps set -e from exiting early
    nickel eval "$INPUT_NICKEL" || true
    exit 1
fi
63
crates/typedialog-prov-gen/templates/validators/common.ncl
Normal file
@ -0,0 +1,63 @@
# Common Validators
# Utility functions used by all validators

{
  # Validates port number (must be 1-65535, rejects 0 explicitly)
  #
  # Port 0 is explicitly rejected per project ADR:
  # "Port 0 means 'any available port' in most systems, which is unsuitable
  # for production configuration where we need predictable listening addresses."
  #
  # Args:
  #   port: Number - port number to validate
  # Returns:
  #   port if valid
  # Throws:
  #   Error if port is not in valid range
  ValidPort = fun port =>
    let port_num = if std.is_number port then port else std.string.to_number port in
    if port_num >= 1 && port_num <= 65535 then
      port_num
    else
      std.fail_with "Port must be 1-65535 (0 not supported), got %{std.string.from_number port_num}",

  # Validates non-empty string
  #
  # Args:
  #   s: String - string to validate
  # Returns:
  #   s if non-empty
  # Throws:
  #   Error if string is empty
  NonEmptyString = fun s =>
    if std.string.length s > 0 then s
    else std.fail_with "String cannot be empty",

  # Validates bind address format (IP:PORT)
  #
  # Args:
  #   addr: String - address in format "IP:PORT"
  # Returns:
  #   addr if valid
  # Throws:
  #   Error if format is invalid or port is out of range
  ValidBindAddress = fun addr =>
    # Simple regex check for IP:PORT format
    # Pattern: one or more digits/dots followed by colon followed by digits
    let is_valid_format = std.string.is_match "^[0-9.]+:[0-9]+$" addr in

    if !is_valid_format then
      std.fail_with "Bind address must be IP:PORT format, got '%{addr}'"
    else
      # Extract port and validate range
      let parts = std.string.split ":" addr in
      let last_index = (std.array.length parts) - 1 in
      let port_str = std.array.at last_index parts in
      let port_num = std.string.to_number port_str in

      if port_num >= 1 && port_num <= 65535 then
        addr
      else
        std.fail_with "Port in bind address must be 1-65535, got %{port_str}",
}
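A hedged sketch of exercising the common validators; the concrete port and address are assumptions:

```nickel
# Illustrative only: valid inputs pass through, invalid ones abort evaluation.
let common = import "common.ncl" in
{
  port = common.ValidPort 6969,                  # evaluates to 6969
  bind = common.ValidBindAddress "0.0.0.0:7070", # evaluates to the address
  # common.ValidPort 0 would fail with the "0 not supported" message
}
```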
124
crates/typedialog-prov-gen/templates/validators/environment.ncl
Normal file
@ -0,0 +1,124 @@
# Environment Name Validators
# Mirrors validation from src/domain/environment/name.rs

let helpers = import "common.ncl" in

{
  # Validates EnvironmentName according to Rust domain rules
  #
  # Rust reference: src/domain/environment/name.rs
  #
  # Rules (MUST match Rust exactly):
  # 1. Non-empty
  # 2. Lowercase only (a-z, 0-9, -)
  # 3. Cannot start with digit
  # 4. Cannot start with dash
  # 5. Cannot end with dash
  # 6. No consecutive dashes
  # 7. Cannot contain uppercase letters
  #
  # Valid examples: dev, staging, production, e2e-config, test-integration
  # Invalid examples: Dev, 1dev, -dev, dev-, dev--test
  #
  # Args:
  #   name: String - environment name to validate
  # Returns:
  #   name if valid
  # Throws:
  #   Error with specific rule violation
  ValidEnvironmentName = fun name =>
    # Rule 1: Check if empty
    let check_empty =
      if name == "" then
        std.fail_with "Environment name cannot be empty.

Valid format: lowercase letters, numbers, and dashes only.
Examples: dev, staging, production, e2e-config, test-integration"
      else
        name
    in

    # Rule 3: Check if starts with digit
    let check_digit =
      if std.string.is_match "^[0-9]" name then
        std.fail_with "Environment name '%{name}' is invalid: starts with a number (for InstanceName compatibility).

Valid format: lowercase letters, numbers, and dashes only.
Examples: dev, staging, production, e2e-config, test-integration"
      else
        name
    in

    # Rule 7: Check for uppercase letters
    let check_uppercase =
      if std.string.is_match "[A-Z]" name then
        let uppercase_chars =
          std.string.characters name
          |> std.array.filter (fun c => std.string.is_match "^[A-Z]$" c)
          |> std.string.join ""
        in
        std.fail_with "Environment name '%{name}' is invalid: contains uppercase letters: %{uppercase_chars}.

Valid format: lowercase letters, numbers, and dashes only.
Examples: dev, staging, production, e2e-config, test-integration"
      else
        name
    in

    # Collect invalid characters (not in a-z, 0-9, -)
    let invalid_chars_list =
      std.string.characters name
      |> std.array.filter (fun c => !std.string.is_match "^[a-z0-9-]$" c)
    in

    # Rule 2 & other chars: Check for invalid characters
    let check_chars =
      if std.array.length invalid_chars_list > 0 then
        let unique_invalid =
          invalid_chars_list
          |> std.array.fold_left
            (fun acc c => if std.array.elem c acc then acc else acc @ [c])
            []
          |> std.string.join ""
        in
        std.fail_with "Environment name '%{name}' is invalid: contains invalid characters: %{unique_invalid}.

Valid format: lowercase letters, numbers, and dashes only.
Examples: dev, staging, production, e2e-config, test-integration"
      else
        name
    in

    # Rule 4: Check for leading dash
    let check_leading_dash =
      if std.string.is_match "^-" name then
        std.fail_with "Environment name '%{name}' is invalid: starts with dash.

Valid format: lowercase letters, numbers, and dashes only.
Examples: dev, staging, production, e2e-config, test-integration"
      else
        name
    in

    # Rule 5: Check for trailing dash
    let check_trailing_dash =
      if std.string.is_match "-$" name then
        std.fail_with "Environment name '%{name}' is invalid: ends with dash.

Valid format: lowercase letters, numbers, and dashes only.
Examples: dev, staging, production, e2e-config, test-integration"
      else
        name
    in

    # Rule 6: Check for consecutive dashes
    let check_consecutive =
      if std.string.contains "--" name then
        std.fail_with "Environment name '%{name}' is invalid: contains consecutive dashes.

Valid format: lowercase letters, numbers, and dashes only.
Examples: dev, staging, production, e2e-config, test-integration"
      else
        name
    in

    # Force each check in order (Nickel is lazy; unused `let _ =` bindings would never run)
    std.seq check_empty
      (std.seq check_digit
        (std.seq check_uppercase
          (std.seq check_chars
            (std.seq check_leading_dash
              (std.seq check_trailing_dash
                (std.seq check_consecutive name)))))),
}
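A hedged sketch of the validator in use, mirroring how the generated values files call it; the names are assumptions:

```nickel
# Illustrative only: "e2e-config" satisfies all seven rules.
let validators = import "environment.ncl" in
validators.ValidEnvironmentName "e2e-config"
# "dev--test" (consecutive dashes) or "Dev" (uppercase) would fail instead
```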
73
crates/typedialog-prov-gen/templates/validators/instance.ncl
Normal file
@ -0,0 +1,73 @@
# Instance Name Validators (LXD naming rules)
# Mirrors validation from src/domain/instance_name.rs

{
  # Validates InstanceName according to LXD requirements
  #
  # Rust reference: src/domain/instance_name.rs
  # Use cases: LXD virtual machine names, Docker container names
  #
  # Rules (MUST match Rust exactly):
  # 1. Non-empty
  # 2. 1-63 characters maximum
  # 3. ASCII letters, numbers, dashes only
  # 4. Cannot start with digit or dash
  # 5. Cannot end with dash
  # 6. Cannot contain uppercase or special characters
  #
  # Valid examples: test-instance, vm-prod, app01, a
  # Invalid examples: 1test, -test, test-, test@instance, test_instance
  #
  # Args:
  #   name: String - instance name to validate
  # Returns:
  #   name if valid
  # Throws:
  #   Error with specific rule violation
  ValidInstanceName = fun name =>
    let len = std.string.length name in

    # Rule 1: Check if empty
    let check_empty =
      if name == "" then
        std.fail_with "Instance name cannot be empty"
      else
        name
    in

    # Rule 2: Check length (max 63 characters)
    let check_length =
      if len > 63 then
        std.fail_with "Instance name must be 63 characters or less, got %{std.string.from_number len} characters"
      else
        name
    in

    # Rule 3: Check for invalid characters (only ASCII alphanumeric + dashes allowed)
    let invalid_chars_list =
      std.string.characters name
      |> std.array.filter (fun c => !std.string.is_match "^[a-zA-Z0-9-]$" c)
    in
    let check_chars =
      if std.array.length invalid_chars_list > 0 then
        std.fail_with "Instance name must contain only ASCII letters, numbers, and dashes"
      else
        name
    in

    # Rule 4: Check first character (cannot be digit or dash)
    let check_first =
      let first_char = std.string.characters name |> std.array.at 0 in
      if std.string.is_match "^[0-9-]" first_char then
        std.fail_with "Instance name must not start with a digit or dash"
      else
        name
    in

    # Rule 5: Check last character (cannot be dash)
    let check_last =
      if std.string.is_match "-$" name then
        std.fail_with "Instance name must not end with a dash"
      else
        name
    in

    # Force each check in order (Nickel is lazy; unused `let _ =` bindings would never run)
    std.seq check_empty
      (std.seq check_length
        (std.seq check_chars
          (std.seq check_first
            (std.seq check_last name)))),
}
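A hedged one-liner showing the LXD name check; the names are assumptions:

```nickel
# Illustrative only: "vm-prod" passes; "test-" would fail rule 5 (trailing dash).
let validators = import "instance.ncl" in
validators.ValidInstanceName "vm-prod"
```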
12
crates/typedialog-prov-gen/templates/validators/network.ncl
Normal file
@ -0,0 +1,12 @@
# Network Validators
# Validators for network addresses, ports, and connectivity configuration

let helpers = import "common.ncl" in

{
  # Re-export common network validators
  ValidPort = helpers.ValidPort,
  ValidBindAddress = helpers.ValidBindAddress,

  # Additional network validators can be added here
}
32
crates/typedialog-prov-gen/templates/validators/paths.ncl
Normal file
@ -0,0 +1,32 @@
# Path Validators
# Validators for file paths and configuration paths

{
  # Validates SSH key path (non-empty string)
  #
  # Note: This validator does not check if the file actually exists.
  # File existence is verified when the configuration is used by the
  # Rust application or when provisioning occurs.
  #
  # Args:
  #   path: String - path to SSH key file
  # Returns:
  #   path if valid format
  # Throws:
  #   Error if path is empty
  ValidSshKeyPath = fun path =>
    if std.string.length path > 0 then path
    else std.fail_with "SSH key path cannot be empty",

  # Validates generic file path (non-empty string)
  #
  # Args:
  #   path: String - file path to validate
  # Returns:
  #   path if valid format
  # Throws:
  #   Error if path is empty
  ValidPath = fun path =>
    if std.string.length path > 0 then path
    else std.fail_with "Path cannot be empty",
}
65
crates/typedialog-prov-gen/templates/validators/username.ncl
Normal file
@ -0,0 +1,65 @@
# Username Validators (Linux system username rules)
# Mirrors validation from src/shared/username.rs

{
  # Validates Username according to Linux system requirements
  #
  # Rust reference: src/shared/username.rs
  # Used for: SSH authentication, system user creation, process ownership
  #
  # Rules (MUST match Rust exactly):
  # 1. Non-empty
  # 2. 1-32 characters maximum
  # 3. Must start with letter (a-z, A-Z) or underscore (_)
  # 4. Subsequent chars: letters, digits, underscores, hyphens
  # 5. Case-sensitive (allows uppercase, unlike EnvironmentName)
  #
  # Valid examples: torrust, _service, Deploy_USER, user-123, Admin
  # Invalid examples: 123user, -user, user@domain, user.name
  #
  # Args:
  #   username: String - username to validate
  # Returns:
  #   username if valid
  # Throws:
  #   Error with specific rule violation
  ValidUsername = fun username =>
    let len = std.string.length username in

    # Rule 1: Check if empty
    let check_empty =
      if username == "" then
        std.fail_with "Username cannot be empty"
      else
        username
    in

    # Rule 2: Check length (1-32 characters)
    let check_length =
      if len > 32 then
        std.fail_with "Username must be 32 characters or less, got %{std.string.from_number len} characters"
      else
        username
    in

    # Rule 3: Check first character (must be letter or underscore)
    let check_first =
      let first_char = std.string.characters username |> std.array.at 0 in
      if !std.string.is_match "^[a-zA-Z_]" first_char then
        std.fail_with "Username must start with a letter (a-z, A-Z) or underscore (_)"
      else
        username
    in

    # Rule 4: Check all characters (letters, digits, underscores, hyphens only)
    let invalid_chars_list =
      std.string.characters username
      |> std.array.filter (fun c => !std.string.is_match "^[a-zA-Z0-9_-]$" c)
    in
    let check_chars =
      if std.array.length invalid_chars_list > 0 then
        std.fail_with "Username must contain only letters, digits, underscores, and hyphens"
      else
        username
    in

    # Force each check in order (Nickel is lazy; unused `let _ =` bindings would never run)
    std.seq check_empty
      (std.seq check_length
        (std.seq check_first
          (std.seq check_chars username))),
}
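A hedged one-liner contrasting usernames with environment names (uppercase is allowed here); the example names are assumptions:

```nickel
# Illustrative only: mixed case is fine for usernames; "123user" would fail rule 3.
let validators = import "username.ncl" in
validators.ValidUsername "Deploy_USER"
```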
80
crates/typedialog-prov-gen/tests/integration_test.rs
Normal file
@ -0,0 +1,80 @@
//! Integration tests for provisioning generator.

use typedialog_prov_gen::input::CargoIntrospector;
use typedialog_prov_gen::models::{ProjectSpec, ProjectType};

#[test]
fn test_cargo_introspector_sample_project() {
    // Create a simple test manifest
    let manifest_content = r#"
[package]
name = "test-package"
version = "0.1.0"
edition = "2021"

[dependencies]
axum = "0.7"
"#;

    use std::io::Write;
    use tempfile::NamedTempFile;

    let mut temp_file = NamedTempFile::new().expect("Failed to create temp file");
    temp_file
        .write_all(manifest_content.as_bytes())
        .expect("Failed to write to temp file");

    let result = CargoIntrospector::analyze(temp_file.path());
    assert!(result.is_ok(), "Failed to analyze Cargo.toml");

    let spec = result.unwrap();
    assert_eq!(spec.name, "test-package");
    assert_eq!(spec.project_type, ProjectType::WebService);
}

#[test]
fn test_project_spec_validation() {
    use typedialog_prov_gen::models::DomainFeature;

    let spec = ProjectSpec {
        name: "test-project".to_string(),
        project_type: ProjectType::WebService,
        infrastructure: Default::default(),
        domain_features: vec![DomainFeature::new("basic".to_string())],
        constraints: vec![],
    };

    let result = spec.validate();
    assert!(result.is_ok(), "Valid spec should pass validation");
}

#[test]
fn test_domain_feature_validation() {
    use typedialog_prov_gen::models::DomainFeature;

    let feature = DomainFeature::new(String::new());
    let result = feature.validate();
    assert!(
        result.is_err(),
        "Feature with empty name should fail validation"
    );

    let feature = DomainFeature::new("valid_feature".to_string());
    let result = feature.validate();
    assert!(result.is_ok(), "Valid feature should pass validation");
}

#[test]
fn test_config_field_creation() {
    use typedialog_prov_gen::models::{ConfigField, FieldType};

    let field = ConfigField::new(
        "test_field".to_string(),
        FieldType::Text,
        "Test field prompt".to_string(),
    );

    assert_eq!(field.name, "test_field");
    assert_eq!(field.field_type, FieldType::Text);
    assert!(field.required);
}
@ -1,217 +0,0 @@
# Encryption Testing - Quick Start

## TL;DR

```bash
# 1. Setup services (Age already configured, RustyVault requires Docker)
./scripts/encryption-test-setup.sh

# 2. Load environment
source /tmp/typedialog-env.sh

# 3. Test redaction (no service) - Simple example
typedialog form examples/08-encryption/simple-login.toml --redact --format json

# 4. Test Age encryption (requires ~/.age/key.txt)
typedialog form examples/08-encryption/simple-login.toml \
  --encrypt --backend age \
  --key-file ~/.age/key.txt \
  --format json

# 5. Full feature demo (all encryption features)
typedialog form examples/08-encryption/credentials.toml --redact --format json

# 6. Run all integration tests
cargo test --test nickel_integration test_encryption -- --nocapture
```

## Example Forms

### Simple Login Form (`examples/08-encryption/simple-login.toml`)

Minimal example for quick testing (a sketch of its TOML follows this list):
- `username` (plaintext)
- `password` (sensitive, auto-detected from type)

**Use this for**:
- Quick verification of redaction
- Basic Age encryption testing
- First-time setup validation
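
A plausible sketch of `simple-login.toml`, inferred from the field list above (the shipped example file may differ in details such as prompts):

```toml
name = "simple_login"
display_mode = "complete"

[[fields]]
name = "username"
type = "text"
prompt = "Username"

[[fields]]
# FieldType::Password is auto-detected as sensitive; no explicit flag needed
name = "password"
type = "password"
prompt = "Password"
```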

### Full Credentials Form (`examples/08-encryption/credentials.toml`)

Comprehensive example demonstrating all encryption features:
- Non-sensitive fields: username, email, company
- Auto-detected sensitive: password, confirm_password (FieldType::Password)
- Explicitly marked sensitive: api_token, ssh_key, database_url
- Field-level backends: vault_token (RustyVault config; an excerpt follows this list)
- Override: demo_password (type=password but NOT sensitive)

**Use this for**:
- Testing field-level sensitivity control
- Field-specific encryption backend configuration
- Demonstrating RustyVault setup
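
An illustrative excerpt of the `vault_token` field with its field-level RustyVault backend (the field name and backend come from the list above; the prompt text and config values are assumptions following the TOML conventions used later in this guide):

```toml
[[fields]]
name = "vault_token"
type = "password"
prompt = "Vault token"
sensitive = true
encryption_backend = "rustyvault"

[fields.encryption_config]
vault_addr = "http://localhost:8200"
key_path = "transit/keys/typedialog-key"
```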

### Nickel Schema (`examples/08-encryption/nickel-secrets.ncl`)

Demonstrates encryption in Nickel schema language:
- `Sensitive Backend="age"` annotations
- Key path specification
- Nested structure with sensitive fields

**Use this for**:
- Understanding Nickel contract syntax (a small sample follows this list)
- Converting Nickel schemas to TOML forms
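
A small sample of the annotation style (modeled on the Nickel syntax shown in the unified-architecture doc; the shipped schema is more elaborate):

```nickel
{
  database = {
    # Sensitive field encrypted with the Age backend and an explicit key path
    password | Sensitive Backend="age" Key="~/.age/key.txt" = "",
  },
}
```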

See `examples/08-encryption/README.md` for detailed examples and testing instructions.

## Current Status

✅ **Age (Local encryption)** - Ready to test
- Public key: Generated automatically
- Private key: `~/.age/key.txt`
- No service required, uses CLI tool
- Forms ready: `simple-login.toml`, `credentials.toml`

✅ **Redaction** - Fully functional
- Works without any encryption service
- Auto-detects sensitive fields from FieldType::Password
- Field-level control with explicit `sensitive` flag (see the override sketch after this list)
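
For example, the explicit flag overrides auto-detection in either direction; this hedged sketch mirrors the `demo_password` override described above:

```toml
[[fields]]
# type = "password" would normally auto-detect as sensitive;
# the explicit flag forces it to stay plaintext (demo use only)
name = "demo_password"
type = "password"
prompt = "Demo password"
sensitive = false
```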

⏳ **RustyVault (HTTP service)** - Framework ready, tests pending
- Needs: Docker or manual build
- Service: `http://localhost:8200`
- API: Transit secrets engine
- Configuration demo: `credentials.toml` vault_token field

## Test Results

**Tests passing (redaction, metadata mapping):**
```
cargo test --test nickel_integration test_encryption
```

Output:
```
running 5 tests
test test_encryption_metadata_parsing ... ok
test test_encryption_metadata_in_nickel_field ... ok
test test_encryption_auto_detection_from_field_type ... ok
test test_encryption_roundtrip_with_redaction ... ok
test test_encryption_metadata_to_field_definition ... ok

test result: ok. 5 passed; 0 failed
```

All tests use the example forms for verification.

## Next Steps for Full Encryption Testing

### 1. Create test forms with encryption

**test_form_age.toml:**
```toml
name = "age_test"
display_mode = "complete"

[[fields]]
name = "username"
type = "text"
prompt = "Username"
sensitive = false

[[fields]]
name = "password"
type = "password"
prompt = "Password"
sensitive = true
encryption_backend = "age"

[fields.encryption_config]
key = "~/.age/key.txt"
```

### 2. Test Age encryption manually

```bash
# Generate test message
echo "test-secret-123" > /tmp/test.txt

# Get public key (the key file stores it as a "# public key: ..." comment)
PUBLIC_KEY=$(grep "public key:" ~/.age/key.txt | cut -d' ' -f4)

# Encrypt
age -r "$PUBLIC_KEY" /tmp/test.txt > /tmp/test.age

# Decrypt
age -d -i ~/.age/key.txt /tmp/test.age
# Output: test-secret-123
```

### 3. Implement Age roundtrip test

File: `crates/typedialog-core/tests/encryption_roundtrip.rs`

```rust
#[test]
fn test_age_encrypt_decrypt_roundtrip() {
    use std::collections::HashMap;

    use serde_json::json;
    use typedialog_core::helpers::{transform_results, EncryptionContext};
    // FieldDefinition import path assumed; adjust to its actual location.
    use typedialog_core::FieldDefinition;

    let mut results = HashMap::new();
    results.insert("secret".to_string(), json!("my-password"));

    let field = FieldDefinition {
        name: "secret".to_string(),
        sensitive: Some(true),
        encryption_backend: Some("age".to_string()),
        encryption_config: Some({
            let mut m = HashMap::new();
            m.insert("key".to_string(), "~/.age/key.txt".to_string());
            m
        }),
        ..Default::default()
    };

    // Encrypt
    let context = EncryptionContext::encrypt_with("age", Default::default());
    let encrypted = transform_results(&results, &[field.clone()], &context, None)
        .expect("Encryption failed");

    // Verify ciphertext
    let ciphertext = encrypted.get("secret").unwrap().as_str().unwrap();
    assert!(ciphertext.starts_with("age1"), "Should be Age format");
    assert_ne!(ciphertext, "my-password", "Should be encrypted");
    // NOTE: the decrypt-and-compare half of the roundtrip is still to be added.
}
```

### 4. Test with RustyVault (optional, requires Docker)

```bash
# Pull RustyVault image
docker pull rustyvault:latest

# Re-run setup script
./scripts/encryption-test-setup.sh

# Test encryption with vault
typedialog form test_form_age.toml \
  --encrypt --backend rustyvault \
  --vault-addr http://localhost:8200 \
  --vault-token root \
  --vault-key-path transit/keys/typedialog-key \
  --format json
```

## Verification Checklist

- [ ] Age installed: `age --version`
- [ ] Age keys generated: `cat ~/.age/key.txt`
- [ ] Test redaction: `typedialog form ... --redact`
- [ ] Run encryption tests: `cargo test --test nickel_integration test_encryption`
- [ ] All 5 tests passing
- [ ] (Optional) Docker available for RustyVault
- [ ] (Optional) RustyVault running: `curl http://localhost:8200/v1/sys/health`

## Documentation

Full setup guide: See `docs/ENCRYPTION-SERVICES-SETUP.md`
@ -1,695 +0,0 @@
# HOW-TO: Configure and Run Encryption Services for typedialog

## Overview

This guide walks through setting up **Age** (local file-based encryption) and **RustyVault** (HTTP-based encryption service) to test the typedialog encryption pipeline end-to-end.

**Service Matrix:**

| Backend | Type | Setup Complexity | Network | Requires |
|---------|------|------------------|---------|----------|
| **Age** | Local file-based | Trivial | None | age CLI tool |
| **RustyVault** | HTTP vault server | Moderate | localhost:8200 | Docker or manual build |
| **SOPS** | External tool | Complex | Varies | sops CLI + backends |

This guide covers Age (trivial) and RustyVault (moderate). SOPS is skipped for now.

---

## Part 1: Age Backend (Local File Encryption)

### What is Age?

Age is a simple, modern encryption tool using X25519 keys. It is well suited to development because:
- No daemon/service required
- Keys stored as plaintext files
- Single binary

### Installation

**macOS (via Homebrew):**
```bash
brew install age
```

**Linux (Ubuntu/Debian):**
```bash
sudo apt-get install age
```

**Manual (any OS):**
```bash
# Download from https://github.com/FiloSottile/age/releases
# Extract and add to PATH
tar xzf age-v1.1.1-linux-amd64.tar.gz
sudo mv age/age /usr/local/bin/
sudo mv age/age-keygen /usr/local/bin/
```

**Verify installation:**
```bash
age --version
# age v1.1.1
```

### Generate Age Key Pair

Age uses a single private key file that contains both public and private components. The public key is derived from the private key.

**Generate keys for testing:**
```bash
# Create a test directory
mkdir -p ~/.age

# Generate private key
age-keygen -o ~/.age/key.txt

# Output will show:
# Public key: age1...xxx (save this, shown in file)
# Written to /home/user/.age/key.txt
```

**Verify key generation:**
```bash
# Check private key exists
cat ~/.age/key.txt
# Output: AGE-SECRET-KEY-1XXXX...

# Extract public key (the key file stores it as a "# public key: ..." comment)
grep "public key:" ~/.age/key.txt | cut -d' ' -f4
```

### Test Age Encryption Locally

**Create a test plaintext file:**
```bash
echo "This is a secret message" > test_message.txt
```

**Encrypt with age:**
```bash
# Get public key from private key
PUBLIC_KEY=$(grep "public key:" ~/.age/key.txt | cut -d' ' -f4)

# Encrypt
age -r "$PUBLIC_KEY" test_message.txt > test_message.age

# Verify ciphertext is unreadable
cat test_message.age
# Output: AGE-ENCRYPTION-V1...binary...
```

**Decrypt with age:**
```bash
# Decrypt (will prompt for passphrase if key is encrypted)
age -d -i ~/.age/key.txt test_message.age

# Output: This is a secret message
```

### Configure typedialog to Use Age

**Environment variables:**
```bash
export AGE_KEY_FILE="$HOME/.age/key.txt"
```

**CLI flags:**
```bash
# Redact mode (no encryption needed)
typedialog form examples/08-encryption/simple-login.toml --redact --format json

# Encrypt mode (requires Age backend)
typedialog form examples/08-encryption/simple-login.toml --encrypt --backend age --key-file ~/.age/key.txt --format json
```

See `examples/08-encryption/README.md` for more example forms and test cases.

**TOML form configuration:**
```toml
[[fields]]
name = "password"
type = "password"
prompt = "Enter password"
sensitive = true
encryption_backend = "age"

[fields.encryption_config]
key = "~/.age/key.txt"
```

---

## Part 2: RustyVault Backend (HTTP Service)

### What is RustyVault?

RustyVault is a Rust implementation of HashiCorp Vault's Transit API:
- HTTP-based encryption/decryption service
- Suitable for production environments
- API-compatible with Vault Transit secrets engine

### Installation & Setup

**Option A: Docker (Recommended for testing)**

RustyVault provides official Docker images. Check availability:
```bash
# Search Docker Hub
docker search rustyvault

# Or build from source
git clone https://github.com/Tongsuo-Project/RustyVault.git
cd RustyVault
docker build -t rustyvault:latest .
```

**Option B: Manual Build (if Docker not available)**

```bash
# Prerequisites: Rust toolchain
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# Clone and build
git clone https://github.com/Tongsuo-Project/RustyVault.git
cd RustyVault
cargo build --release

# Binary at: target/release/rustyvault
```

### Run RustyVault Service

**Using Docker (single command):**
```bash
docker run -d \
  --name rustyvault \
  -p 8200:8200 \
  -e RUSTYVAULT_LOG_LEVEL=info \
  rustyvault:latest

# Verify it started
docker logs rustyvault | head -20
```

**Using local binary:**
```bash
# Create config directory
mkdir -p ~/.rustyvault
cd ~/.rustyvault

# Create minimal config (rustyvault.toml)
cat > config.toml <<'EOF'
[server]
address = "127.0.0.1:8200"
tls_disable = true

[backend]
type = "inmem" # In-memory storage (ephemeral)
EOF

# Run service
~/RustyVault/target/release/rustyvault server -c config.toml
```

**Verify service is running:**
```bash
# In another terminal
curl -s http://localhost:8200/v1/sys/health | jq .
# Should return health status JSON
```

### Configure RustyVault for Encryption

**Initialize RustyVault (first time only):**
```bash
# Initialize: the response carries both the unseal keys and the root token
INIT_RESPONSE=$(curl -s -X POST http://localhost:8200/v1/sys/init \
  -d '{"secret_shares": 1, "secret_threshold": 1}')
VAULT_INIT=$(echo "$INIT_RESPONSE" | jq -r '.keys[0]')
ROOT_TOKEN=$(echo "$INIT_RESPONSE" | jq -r '.root_token')

# Unseal vault
curl -s -X PUT http://localhost:8200/v1/sys/unseal \
  -d "{\"key\": \"$VAULT_INIT\"}" > /dev/null

export VAULT_TOKEN="$ROOT_TOKEN"
```

**Enable Transit secrets engine:**
```bash
curl -s -X POST http://localhost:8200/v1/sys/mounts/transit \
  -H "X-Vault-Token: $VAULT_TOKEN" \
  -d '{"type": "transit"}' | jq .
```

**Create encryption key:**
```bash
curl -s -X POST http://localhost:8200/v1/transit/keys/typedialog-key \
  -H "X-Vault-Token: $VAULT_TOKEN" \
  -d '{}' | jq .

# Verify key created
curl -s http://localhost:8200/v1/transit/keys/typedialog-key \
  -H "X-Vault-Token: $VAULT_TOKEN" | jq .
```

### Test RustyVault Encryption

**Encrypt data via HTTP:**
```bash
# Plaintext (base64 encoded)
PLAINTEXT=$(echo -n "my-secret-password" | base64)

curl -s -X POST http://localhost:8200/v1/transit/encrypt/typedialog-key \
  -H "X-Vault-Token: $VAULT_TOKEN" \
  -d "{\"plaintext\": \"$PLAINTEXT\"}" | jq .data.ciphertext
```

**Decrypt data via HTTP:**
```bash
# From encryption output above
CIPHERTEXT="vault:v1:..."

curl -s -X POST http://localhost:8200/v1/transit/decrypt/typedialog-key \
  -H "X-Vault-Token: $VAULT_TOKEN" \
  -d "{\"ciphertext\": \"$CIPHERTEXT\"}" | jq -r .data.plaintext | base64 -d
```

### Configure typedialog to Use RustyVault

**Environment variables:**
```bash
export VAULT_ADDR="http://localhost:8200"
export VAULT_TOKEN="s.xxxx..." # Token from above
```

**CLI flags:**
```bash
typedialog form examples/08-encryption/credentials.toml \
  --encrypt \
  --backend rustyvault \
  --vault-addr http://localhost:8200 \
  --vault-token "s.xxxx..." \
  --vault-key-path "transit/keys/typedialog-key" \
  --format json
```

This form includes field-level RustyVault configuration in the `vault_token` field.

**TOML form configuration:**
```toml
[[fields]]
name = "password"
type = "password"
prompt = "Enter password"
sensitive = true
encryption_backend = "rustyvault"

[fields.encryption_config]
vault_addr = "http://localhost:8200"
vault_token = "s.xxxx..."
key_path = "transit/keys/typedialog-key"
```

---

## Part 3: Complete Integration Test Workflow

### Script: Setup Everything

Create `scripts/encryption-test-setup.sh`:

```bash
#!/usr/bin/env bash
set -e

echo "=== typedialog Encryption Services Setup ==="

# Age Setup
echo "1. Setting up Age..."
if ! command -v age &> /dev/null; then
  echo "  ✗ age not installed. Run: brew install age"
  exit 1
fi

mkdir -p ~/.age
if [ ! -f ~/.age/key.txt ]; then
  echo "  → Generating Age keys..."
  age-keygen -o ~/.age/key.txt
fi
export AGE_KEY_FILE="$HOME/.age/key.txt"
echo "  ✓ Age configured at: $AGE_KEY_FILE"

# RustyVault Setup (Docker)
echo ""
echo "2. Setting up RustyVault (Docker)..."
if ! command -v docker &> /dev/null; then
  echo "  ⚠ Docker not installed, skipping RustyVault"
  echo "  → Install Docker or skip RustyVault tests"
else
  if ! docker ps | grep -q rustyvault; then
    echo "  → Starting RustyVault container..."
    docker run -d \
      --name rustyvault \
      -p 8200:8200 \
      -e RUSTYVAULT_LOG_LEVEL=info \
      rustyvault:latest
    sleep 2
  fi

  # Initialize vault: the init response carries the unseal keys and root token
  echo "  → Initializing RustyVault..."
  INIT_RESPONSE=$(curl -s -X POST http://localhost:8200/v1/sys/init \
    -d '{"secret_shares": 1, "secret_threshold": 1}')
  VAULT_INIT=$(echo "$INIT_RESPONSE" | jq -r '.keys[0]')

  curl -s -X PUT http://localhost:8200/v1/sys/unseal \
    -d "{\"key\": \"$VAULT_INIT\"}" > /dev/null

  # Root token comes from the init response (fall back to "root" on re-runs)
  export VAULT_TOKEN=$(echo "$INIT_RESPONSE" | jq -r '.root_token // "root"')
  export VAULT_ADDR="http://localhost:8200"

  # Enable transit
  curl -s -X POST http://localhost:8200/v1/sys/mounts/transit \
    -H "X-Vault-Token: $VAULT_TOKEN" \
    -d '{"type": "transit"}' > /dev/null 2>&1 || true

  # Create key
  curl -s -X POST http://localhost:8200/v1/transit/keys/typedialog-key \
    -H "X-Vault-Token: $VAULT_TOKEN" \
    -d '{}' > /dev/null 2>&1 || true

  echo "  ✓ RustyVault running at: http://localhost:8200"
  echo "  ✓ Token: $VAULT_TOKEN"
fi

echo ""
echo "=== Setup Complete ==="
echo ""
echo "Test Age encryption:"
echo "  typedialog form test.toml --encrypt --backend age --key-file ~/.age/key.txt"
echo ""
echo "Test RustyVault encryption:"
echo "  export VAULT_ADDR='http://localhost:8200'"
echo "  export VAULT_TOKEN='$VAULT_TOKEN'"
echo "  typedialog form test.toml --encrypt --backend rustyvault --vault-key-path 'transit/keys/typedialog-key'"
```

**Make executable and run:**
```bash
chmod +x scripts/encryption-test-setup.sh
./scripts/encryption-test-setup.sh
```

### Test Case 1: Age Redaction (No Service Required)

**Option A: Use pre-built example (Recommended)**
```bash
typedialog form examples/08-encryption/simple-login.toml --redact --format json

# Expected output:
# {"username": "alice", "password": "[REDACTED]"}
```

**Option B: Create form manually**
```bash
# Create test form
cat > test_redaction.toml <<'EOF'
name = "test_form"
display_mode = "complete"

[[fields]]
name = "username"
type = "text"
prompt = "Username"

[[fields]]
name = "password"
type = "password"
prompt = "Password"
sensitive = true
EOF

# Test redaction (requires no service)
typedialog form test_redaction.toml --redact --format json
```

### Test Case 2: Age Encryption (Service Not Required, Key File Required)

**Option A: Use pre-built example (Recommended)**
```bash
# Prerequisites: Age key generated (from setup script)
./scripts/encryption-test-setup.sh

# Test with simple form
typedialog form examples/08-encryption/simple-login.toml \
  --encrypt --backend age --key-file ~/.age/key.txt --format json

# Expected output: password field contains age ciphertext
# {"username": "alice", "password": "age1muz6ah54ew9am7mzmy0m4w5..."}

# Or test with full credentials form
typedialog form examples/08-encryption/credentials.toml \
  --encrypt --backend age --key-file ~/.age/key.txt --format json
```

**Option B: Create form manually**
```bash
# Generate Age key if not exists
mkdir -p ~/.age
if [ ! -f ~/.age/key.txt ]; then
  age-keygen -o ~/.age/key.txt
fi

# Create test form
cat > test_age_encrypt.toml <<'EOF'
name = "test_form"
display_mode = "complete"

[[fields]]
name = "username"
type = "text"
prompt = "Username"

[[fields]]
name = "password"
type = "password"
prompt = "Password"
sensitive = true
encryption_backend = "age"

[fields.encryption_config]
key = "~/.age/key.txt"
EOF

# Test encryption (requires Age key file)
typedialog form test_age_encrypt.toml --encrypt --backend age --key-file ~/.age/key.txt --format json
```

### Test Case 3: RustyVault Encryption (Service Required)

**Prerequisites: RustyVault running**
```bash
# Start RustyVault and setup (requires Docker)
./scripts/encryption-test-setup.sh

# Verify service is healthy
curl http://localhost:8200/v1/sys/health | jq .
```

**Option A: Use pre-built example (Recommended)**
```bash
# Export Vault credentials
export VAULT_ADDR="http://localhost:8200"
export VAULT_TOKEN="root"

# Test with simple form
typedialog form examples/08-encryption/simple-login.toml \
  --encrypt --backend rustyvault \
  --vault-key-path "transit/keys/typedialog-key" \
  --format json

# Expected output: password field contains vault ciphertext
# {"username": "alice", "password": "vault:v1:K8..."}

# Or test with full credentials form (demonstrates field-level config)
typedialog form examples/08-encryption/credentials.toml \
  --encrypt --backend rustyvault \
  --vault-key-path "transit/keys/typedialog-key" \
  --format json
```

**Option B: Create form manually**
```bash
cat > test_vault_encrypt.toml <<'EOF'
name = "test_form"
display_mode = "complete"

[[fields]]
name = "username"
type = "text"
prompt = "Username"

[[fields]]
name = "password"
type = "password"
prompt = "Password"
sensitive = true
encryption_backend = "rustyvault"

[fields.encryption_config]
vault_addr = "http://localhost:8200"
key_path = "transit/keys/typedialog-key"
EOF

# Test encryption with RustyVault
export VAULT_TOKEN="s.xxxx" # From setup output
export VAULT_ADDR="http://localhost:8200"

typedialog form test_vault_encrypt.toml \
  --encrypt \
  --backend rustyvault \
  --vault-addr http://localhost:8200 \
  --vault-token "$VAULT_TOKEN" \
  --vault-key-path "transit/keys/typedialog-key" \
  --format json

# Expected output: password field contains vault ciphertext
# {"username": "alice", "password": "vault:v1:..."}
```

---

## Part 4: Run Actual Integration Tests

### Test Case: Age Roundtrip (Encrypt → Decrypt)

Once Age is set up, these test scenarios validate the pipeline:

**Scenario 1: Redaction works (no encryption service)**
```bash
cargo test --test nickel_integration test_encryption_roundtrip_with_redaction -- --nocapture

# Expected: PASS - redacts sensitive fields
```

**Scenario 2: Metadata mapping works**
```bash
cargo test --test nickel_integration test_encryption_metadata_to_field_definition -- --nocapture

# Expected: PASS - EncryptionMetadata maps to FieldDefinition
```

**Scenario 3: Auto-detection of password fields**
```bash
cargo test --test nickel_integration test_encryption_auto_detection_from_field_type -- --nocapture

# Expected: PASS - Password fields auto-marked as sensitive
```

### Run All Encryption Tests

```bash
cargo test --test nickel_integration test_encryption -- --nocapture
```

**Current status:**
- ✅ 5 tests passing (redaction, metadata mapping)
- ⏳ 0 tests for actual Age encryption roundtrip (not yet implemented)
- ⏳ 0 tests for RustyVault integration (backend not implemented)

---

## Part 5: Troubleshooting

### Age Issues

**Problem: `age: command not found`**
```bash
# Install age
brew install age       # macOS
sudo apt install age   # Linux
```

**Problem: Permission denied on ~/.age/key.txt**
```bash
chmod 600 ~/.age/key.txt
```

**Problem: Invalid key format**
```bash
# Regenerate keys
rm ~/.age/key.txt
age-keygen -o ~/.age/key.txt
```

### RustyVault Issues

**Problem: Docker container won't start**
```bash
# Check logs
docker logs rustyvault

# Remove and restart
docker rm -f rustyvault
docker run -d --name rustyvault -p 8200:8200 rustyvault:latest
```

**Problem: Vault initialization fails**
```bash
# Check if vault is responding
curl -s http://localhost:8200/v1/sys/health

# If not, restart container
docker restart rustyvault
```

**Problem: Transit API not working**
```bash
# Verify token
echo $VAULT_TOKEN

# Check auth
curl -s http://localhost:8200/v1/sys/mounts \
  -H "X-Vault-Token: $VAULT_TOKEN"
```

**Problem: Can't connect from typedialog**
```bash
# Verify network
curl -s http://localhost:8200/v1/sys/health | jq .

# Check environment variables
echo $VAULT_ADDR
echo $VAULT_TOKEN

# Test encryption endpoint
curl -s -X POST http://localhost:8200/v1/transit/encrypt/typedialog-key \
  -H "X-Vault-Token: $VAULT_TOKEN" \
  -d '{"plaintext": "dGVzdA=="}' | jq .
```

---

## Part 6: Next Steps

Once services are running, implement:

1. **test_age_encrypt_roundtrip** - Encrypt with Age, decrypt, verify plaintext
2. **test_rustyvault_encrypt_roundtrip** - Encrypt with RustyVault, decrypt, verify
3. **test_cli_encrypt_age** - Run `typedialog form --encrypt --backend age`, verify output is ciphertext
4. **test_cli_encrypt_rustyvault** - Run `typedialog form --encrypt --backend rustyvault`, verify output is ciphertext
5. **Integration test script** - Single script that tests all pipelines end-to-end

---

## References

- **Age**: https://github.com/FiloSottile/age
- **RustyVault**: https://github.com/Tongsuo-Project/RustyVault
- **HashiCorp Vault Transit**: https://www.vaultproject.io/api-docs/secret/transit
@ -1,438 +0,0 @@
# Unified Encryption Architecture

This document explains the updated encryption architecture for typedialog and how it integrates with the unified encryption system from prov-ecosystem.

## Overview

typedialog now uses a **unified encryption API** from the `encrypt` crate, eliminating backend-specific code and enabling support for multiple encryption backends (Age, SOPS, SecretumVault, AWS/GCP/Azure KMS) through a single API.

**Key Benefits:**
- Single code path supports all backends
- Configuration-driven backend selection
- Multi-backend support in TOML and Nickel schemas
- Post-quantum cryptography ready (via SecretumVault)
- Cleaner, more maintainable code

## Architecture Changes

### Before: Direct Backend Instantiation

```rust
// OLD: Direct Age backend instantiation
use encrypt::backend::age::AgeBackend;

let backend = AgeBackend::with_defaults()?;
let ciphertext = backend.encrypt(&plaintext)?;

// To support other backends, separate code paths were needed:
match backend_name {
    "sops" => { /* SOPS code */ }
    "rustyvault" => { /* RustyVault code */ }
    _ => { /* ... */ }
}
```

### After: Unified API with BackendSpec

```rust
// NEW: Configuration-driven, backend-agnostic
use encrypt::{encrypt, BackendSpec};

// Same code for all backends
let spec = BackendSpec::age_default();
let ciphertext = encrypt(&plaintext, &spec)?;

// Or SOPS
let spec = BackendSpec::sops();
let ciphertext = encrypt(&plaintext, &spec)?;

// Or KMS
let spec = BackendSpec::aws_kms(region, key_id);
let ciphertext = encrypt(&plaintext, &spec)?;
```

## Integration Points

### 1. TOML Form Configuration

No changes required for existing TOML configurations. The system transparently uses the new API:

```toml
[[fields]]
name = "password"
type = "password"
sensitive = true
encryption_backend = "age"
encryption_config = { key_file = "~/.age/key.txt" }
```

The `encryption_bridge` module automatically converts this to `BackendSpec::age(...)` and uses the unified API.

### 2. Nickel Schema Integration

Nickel support remains unchanged but now uses the unified backend system:

```nickel
{
  # Age backend for development
  dev_password | Sensitive Backend="age" Key="~/.age/key.txt" = "",

  # SOPS for staging
  staging_secret | Sensitive Backend="sops" = "",

  # SecretumVault Transit Engine for production (post-quantum)
  prod_token | Sensitive Backend="secretumvault"
    Vault="https://vault.internal:8200"
    Key="app-key" = "",

  # AWS KMS for cloud-native deployments
  aws_secret | Sensitive Backend="awskms"
    Region="us-east-1"
    KeyId="arn:aws:kms:..." = "",
}
```

### 3. Internal Encryption Function

The `transform_sensitive_value()` function now uses the unified API:

```rust
// File: crates/typedialog-core/src/helpers.rs

fn transform_sensitive_value(
    value: &Value,
    field: &FieldDefinition,
    context: &EncryptionContext,
    _global_config: Option<&EncryptionDefaults>,
) -> Result<Value> {
    // Convert field definition to BackendSpec
    let spec = crate::encryption_bridge::field_to_backend_spec(field, None)?;

    // Use unified API
    let plaintext = serde_json::to_string(value)?;
    let ciphertext = encrypt::encrypt(&plaintext, &spec)?;

    Ok(Value::String(ciphertext))
}
```

## New Bridge Module

The new `encryption_bridge.rs` module provides seamless conversion:

```rust
// File: crates/typedialog-core/src/encryption_bridge.rs

pub fn field_to_backend_spec(
    field: &FieldDefinition,
    default_backend: Option<&str>,
) -> Result<encrypt::BackendSpec>
```

**Conversion Logic:**
1. Reads `field.encryption_backend` (or uses the default)
2. Extracts `field.encryption_config` (backend-specific settings)
3. Validates required configuration for the backend
4. Returns `BackendSpec` ready for use with `encrypt::encrypt()`

**Supported Backends:**
- ✓ Age (with custom key paths)
- ✓ SOPS (minimal config)
- ✓ SecretumVault (vault_addr, vault_token, key_name)
- ✓ AWS KMS (region, key_id)
- ✓ GCP KMS (project_id, key_ring, crypto_key, location)
- ✓ Azure KMS (vault_name, tenant_id)

## Configuration Changes

### Cargo.toml

No changes required - encryption support includes all commonly used backends by default.

To customize backends:

```toml
[dependencies]
# Default: age + other major backends
typedialog-core = { path = "...", features = ["encryption"] }

# Only Age
typedialog-core = { path = "...", features = ["encryption"] }
# (Age is default in encrypt crate)

# Custom selection
typedialog-core = { path = "...", features = ["encryption"] }
# Depends on encrypt crate configuration
```

### Environment Variables

Backend-specific configuration via environment:

**Age:**
```bash
# Uses ~/.age/key.txt by default
# Or specify via field config: encryption_config = { key_file = "/custom/path" }
```

**SOPS:**
```bash
# Uses .sops.yaml in current/parent directories
```

**SecretumVault:**
```bash
export VAULT_ADDR="https://vault.internal:8200"
export VAULT_TOKEN="hvs.CAAA..."
```

**AWS KMS:**
```bash
export AWS_REGION="us-east-1"
# AWS credentials from standard chain (env vars, ~/.aws/credentials, IAM roles)
```

**GCP KMS:**
```bash
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"
```

**Azure KMS:**
```bash
# Azure CLI authentication or environment variables
```

## Error Handling

Enhanced error messages guide users through troubleshooting:

```
Error: Encryption failed: Backend 'age' not available.
Enable feature 'age' in Cargo.toml

Error: Encryption failed: SecretumVault backend requires
vault_addr in encryption_config

Error: Encryption failed: AWS KMS backend requires region
in encryption_config
```

## Testing

### Running Encryption Tests

```bash
# All encryption tests
cargo test --features encryption

# Only encryption integration tests
cargo test --features encryption --test encryption_integration

# Specific test
cargo test --features encryption test_age_roundtrip_encrypt_decrypt
```

### Test Results

All 15 encryption integration tests pass:
- ✓ 7 encryption behavior tests
- ✓ 8 Age backend roundtrip tests

```
running 15 tests
test encryption_tests::test_explicit_non_sensitive_overrides_password_type ... ok
test encryption_tests::test_auto_detect_password_field_as_sensitive ... ok
test encryption_tests::test_redaction_preserves_non_sensitive ... ok
test encryption_tests::test_multiple_sensitive_fields ... ok
test encryption_tests::test_redaction_in_json_output ... ok
test encryption_tests::test_unknown_backend_error ... ok
test encryption_tests::test_redaction_in_yaml_output ... ok
test age_roundtrip_tests::test_age_backend_availability ... ok
test age_roundtrip_tests::test_age_invalid_ciphertext_fails ... ok
test age_roundtrip_tests::test_age_encryption_produces_ciphertext ... ok
test age_roundtrip_tests::test_age_roundtrip_encrypt_decrypt ... ok
test age_roundtrip_tests::test_age_handles_empty_string ... ok
test age_roundtrip_tests::test_age_handles_unicode ... ok
test age_roundtrip_tests::test_age_encryption_different_ciphertexts ... ok
test age_roundtrip_tests::test_age_handles_large_values ... ok

test result: ok. 15 passed; 0 failed
```

## Migration Path

### For Existing Users

**No action required.** The system is backward compatible:

1. Existing TOML forms work unchanged
2. Existing Nickel schemas work unchanged
3. Internal implementation now uses unified API
4. No visible changes to users

### For New Deployments

Can now use additional backends:

```toml
# Now supports more backends in single codebase
[[fields]]
name = "secret"
type = "text"
sensitive = true
encryption_backend = "secretumvault" # Or awskms, gcpkms, azurekms
encryption_config = { vault_addr = "...", vault_token = "..." }
```

### For Code Extending typedialog

If extending typedialog with custom encryption logic:

```rust
// OLD: Manual backend instantiation (still works)
use encrypt::backend::age::AgeBackend;
let backend = AgeBackend::new(pub_key, priv_key)?;

// NEW: Use bridge module + unified API (recommended)
use encrypt::encrypt;
use typedialog_core::encryption_bridge;

let spec = encryption_bridge::field_to_backend_spec(&field, None)?;
let ciphertext = encrypt(&plaintext, &spec)?;
```

## Multi-Backend Support

### Same Code, Different Configs

```toml
# Development (Age)
[[fields]]
name = "db_password"
sensitive = true
encryption_backend = "age"

# Production (SecretumVault - post-quantum)
[[fields]]
name = "db_password"
sensitive = true
encryption_backend = "secretumvault"
encryption_config = { vault_addr = "https://vault.prod:8200", vault_token = "..." }
```

Same Rust code handles both without changes.

### CLI Overrides

```bash
# Override backend from command line
typedialog form config.toml --encrypt --backend secretumvault

# Use with environment variables
export VAULT_ADDR="https://vault.internal:8200"
export VAULT_TOKEN="hvs.CAAA..."
typedialog form config.toml --encrypt --backend secretumvault
```

## Feature Flags

Backends are feature-gated in the encrypt crate:

```rust
// With feature enabled
#[cfg(feature = "age")]
{
    let spec = BackendSpec::age_default();
    encrypt(&plaintext, &spec)?; // Works
}

// Without feature
#[cfg(not(feature = "age"))]
{
    let spec = BackendSpec::age_default();
    encrypt(&plaintext, &spec)?; // Returns: Backend 'age' not available
}
```

## Post-Quantum Cryptography

### SecretumVault Transit Engine

For post-quantum cryptography support, use SecretumVault with ML-KEM/ML-DSA:

```toml
[[fields]]
name = "pqc_secret"
sensitive = true
encryption_backend = "secretumvault"

[fields.encryption_config]
vault_addr = "https://pq-vault.internal:8200"
vault_token = "hvs.CAAA..."
key_name = "pqc-key" # Uses ML-KEM encapsulation, ML-DSA signatures
```

**Requirements:**
- SecretumVault server configured with Transit Engine
- Post-quantum crypto backend enabled (aws-lc-rs or Tongsuo)

## Troubleshooting

### "Backend not available" Error

**Problem:** Encryption fails with "Backend 'age' not available"

**Solution:** The feature may not be enabled in the encrypt crate. Check:

```bash
# Check available backends
cargo build --features encryption --verbose

# Look for feature compilation
# It should show: "Compiling encrypt ... with features: age,..."
```

### "Invalid ciphertext" After Update

**Problem:** Old ciphertexts fail to decrypt

**Solution:** The Age format has not changed. Verify:
1. The same Age key is used
2. The ciphertext format is valid (hex-encoded)
3. Key file permissions: `chmod 600 ~/.age/key.txt`

### Form TOML Backward Compatibility

**Problem:** Existing TOML forms stop working after update

**Solution:** No breaking changes. Forms should work as-is. If not:

1. Verify the encryption_backend name is valid
2. Check the encryption_config required fields
3. Test with: `cargo test --features encryption`

## Testing with Mock Backend

For faster tests without real encryption keys:

```bash
# In development/testing
cargo test --features test-util
```

MockBackend provides deterministic encryption for CI/CD:

```rust
#[cfg(test)]
use encrypt::test_util::MockBackend;

let backend = MockBackend::new();
let ct = backend.encrypt("secret")?;
// Fast, reproducible, no real keys needed
```

## See Also

- [ENCRYPTION-QUICK-START.md](ENCRYPTION-QUICK-START.md) - Getting started with encryption
- [ENCRYPTION-SERVICES-SETUP.md](ENCRYPTION-SERVICES-SETUP.md) - Setting up encryption services
- [../../prov-ecosystem/docs/guides/ENCRYPTION.md](../../prov-ecosystem/docs/guides/ENCRYPTION.md) - Comprehensive encryption guide
- [encryption_bridge.rs](crates/typedialog-core/src/encryption_bridge.rs) - Bridge module source
- [../../prov-ecosystem/crates/encrypt](../../prov-ecosystem/crates/encrypt) - encrypt crate source
@ -1,31 +0,0 @@
# English translations (alternative TOML format)

[forms.registration]
title = "User Registration"
description = "Create a new user account"
username-label = "Username"
username-prompt = "Please enter a username"
username-placeholder = "user123"
email-label = "Email Address"
email-prompt = "Please enter your email address"
email-placeholder = "user@example.com"

[forms.registration.roles]
admin = "Administrator"
user = "Regular User"
guest = "Guest"
developer = "Developer"

[forms.employee-onboarding]
title = "Employee Onboarding"
description = "Complete your onboarding process"
welcome = "Welcome to the team!"
full-name-prompt = "What is your full name?"
department-prompt = "Which department are you joining?"
start-date-prompt = "What is your start date?"

[forms.feedback]
title = "Feedback Form"
overall-satisfaction-prompt = "How satisfied are you with our service?"
improvement-prompt = "What could we improve?"
contact-prompt = "Can we contact you with follow-up questions?"
@ -1,37 +0,0 @@
# English translations for common form fields

## Registration form
registration-title = User Registration
registration-description = Create a new user account
registration-username-label = Username
registration-username-prompt = Please enter a username
registration-username-placeholder = user123
registration-email-label = Email Address
registration-email-prompt = Please enter your email address
registration-email-placeholder = user@example.com
registration-password-label = Password
registration-password-prompt = Please enter a password
registration-password-placeholder = ••••••••
registration-confirm-label = I agree to the terms and conditions
registration-confirm-prompt = Do you agree to the terms and conditions?

## Role selection
role-prompt = Please select your role
role-admin = Administrator
role-user = Regular User
role-guest = Guest
role-developer = Developer

## Common actions
action-submit = Submit
action-cancel = Cancel
action-next = Next
action-previous = Previous
action-confirm = Confirm
action-decline = Decline

## Common validation messages
error-required = This field is required
error-invalid-email = Please enter a valid email address
error-password-too-short = Password must be at least 8 characters
error-passwords-mismatch = Passwords do not match
@ -1,31 +0,0 @@
# Traducciones al español (formato TOML alternativo)

[forms.registration]
title = "Registro de Usuario"
description = "Crear una nueva cuenta de usuario"
username-label = "Nombre de usuario"
username-prompt = "Por favor, ingrese su nombre de usuario"
username-placeholder = "usuario123"
email-label = "Correo electrónico"
email-prompt = "Por favor, ingrese su correo electrónico"
email-placeholder = "usuario@ejemplo.com"

[forms.registration.roles]
admin = "Administrador"
user = "Usuario Regular"
guest = "Invitado"
developer = "Desarrollador"

[forms.employee-onboarding]
title = "Incorporación de Empleado"
description = "Complete su proceso de incorporación"
welcome = "¡Bienvenido al equipo!"
full-name-prompt = "¿Cuál es su nombre completo?"
department-prompt = "¿A cuál departamento se está uniendo?"
start-date-prompt = "¿Cuál es su fecha de inicio?"

[forms.feedback]
title = "Formulario de Retroalimentación"
overall-satisfaction-prompt = "¿Cuán satisfecho está con nuestro servicio?"
improvement-prompt = "¿Qué podríamos mejorar?"
contact-prompt = "¿Podemos contactarlo con preguntas de seguimiento?"
@ -1,37 +0,0 @@
# Traducciones al español para formularios comunes

## Formulario de registro
registration-title = Registro de Usuario
registration-description = Crear una nueva cuenta de usuario
registration-username-label = Nombre de usuario
registration-username-prompt = Por favor, ingrese su nombre de usuario
registration-username-placeholder = usuario123
registration-email-label = Correo electrónico
registration-email-prompt = Por favor, ingrese su correo electrónico
registration-email-placeholder = usuario@ejemplo.com
registration-password-label = Contraseña
registration-password-prompt = Por favor, ingrese su contraseña
registration-password-placeholder = ••••••••
registration-confirm-label = Acepto los términos y condiciones
registration-confirm-prompt = ¿Acepta los términos y condiciones?

## Selección de rol
role-prompt = Por favor, seleccione su rol
role-admin = Administrador
role-user = Usuario Regular
role-guest = Invitado
role-developer = Desarrollador

## Acciones comunes
action-submit = Enviar
action-cancel = Cancelar
action-next = Siguiente
action-previous = Anterior
action-confirm = Confirmar
action-decline = Rechazar

## Mensajes de validación comunes
error-required = Este campo es requerido
error-invalid-email = Por favor, ingrese una dirección de correo válida
error-password-too-short = La contraseña debe tener al menos 8 caracteres
error-passwords-mismatch = Las contraseñas no coinciden