Keyboard shortcuts

Press ← or → to navigate between chapters

Press S or / to search in the book

Press ? to show this help

Press Esc to hide this help

Integration Patterns

Overview

Provisioning implements sophisticated integration patterns to coordinate between its hybrid Rust/Nushell architecture, manage multi-provider workflows, and enable extensible functionality. This document outlines the key integration patterns, their implementations, and best practices.

Core Integration Patterns

1. Hybrid Language Integration

Rust-to-Nushell Communication Pattern

Use Case: Orchestrator invoking business logic operations

Implementation:

use tokio::process::Command;
use serde_json;

/// Executes a Nushell workflow script out-of-process and parses its
/// JSON output into a `WorkflowResult`.
///
/// The workflow module is loaded from `core/nulib/workflows/{workflow}.nu`
/// and invoked with `args` joined as a single command line.
///
/// # Errors
/// Fails if the `nu` process cannot be spawned or if stdout is not
/// valid `WorkflowResult` JSON.
///
/// NOTE(review): the child's exit status is never checked — a failing
/// workflow that prints nothing would surface as a JSON parse error
/// rather than a process error. Confirm whether status should be
/// inspected before deserializing.
/// NOTE(review): `args` are interpolated into the command string
/// unquoted; arguments containing spaces or quotes would be re-split
/// by Nushell — confirm callers pre-sanitize.
pub async fn execute_nushell_workflow(
    workflow: &str,
    args: &[String]
) -> Result<WorkflowResult, Error> {
    let mut cmd = Command::new("nu");
    cmd.arg("-c")
       .arg(format!("use core/nulib/workflows/{}.nu *; {}", workflow, args.join(" ")));

    let output = cmd.output().await?;
    // Workflows communicate back via a single JSON document on stdout.
    let result: WorkflowResult = serde_json::from_slice(&output.stdout)?;
    Ok(result)
}

Data Exchange Format:

{
    "status": "success" | "error" | "partial",
    "result": {
        "operation": "server_create",
        "resources": ["server-001", "server-002"],
        "metadata": { ... }
    },
    "error": null | { "code": "ERR001", "message": "..." },
    "context": { "workflow_id": "wf-123", "step": 2 }
}

Nushell-to-Rust Communication Pattern

Use Case: Business logic submitting workflows to orchestrator

Implementation:

# Submit a workflow definition to the orchestrator's REST API and
# return the parsed JSON response as a record.
#
# Fix: `http post` takes the request body as a positional argument and
# headers via flags — the original passed a `{ headers, body }` record,
# which would have been serialized and sent *as* the body. `http post`
# also parses a JSON response automatically, so no trailing `from json`
# is needed.
def submit-workflow [workflow: record] -> record {
    let payload = $workflow | to json

    http post --content-type application/json "http://localhost:9090/workflows/submit" $payload
}

API Contract:

{
    "workflow_id": "wf-456",
    "name": "multi_cloud_deployment",
    "operations": [...],
    "dependencies": { ... },
    "configuration": { ... }
}

2. Provider Abstraction Pattern

Standard Provider Interface

Purpose: Uniform API across different cloud providers

Interface Definition:

# Standard provider interface that all providers must implement
# Every provider module (aws, upcloud, local, ...) exports these four
# commands; callers dispatch to a provider simply by `use`-ing its
# module, so signatures must match exactly across providers.

# Return all servers managed by this provider, one row per server.
export def list-servers [] -> table {
    # Provider-specific implementation
}

# Create a server from a provider-agnostic config record and return
# the created server's record (id, name, state, ...).
export def create-server [config: record] -> record {
    # Provider-specific implementation
}

# Destroy the server identified by `id`. Produces no output.
export def delete-server [id: string] -> nothing {
    # Provider-specific implementation
}

# Fetch the current record for a single server by `id`.
export def get-server [id: string] -> record {
    # Provider-specific implementation
}

Configuration Integration:

# Per-provider connection settings; one table per provider name.
# `timeout` values are in seconds.
[providers.aws]
region = "us-west-2"
credentials_profile = "default"
timeout = 300

[providers.upcloud]
zone = "de-fra1"
api_endpoint = "https://api.upcloud.com"
timeout = 180

# The local provider drives Docker directly instead of a cloud API.
[providers.local]
docker_socket = "/var/run/docker.sock"
network_mode = "bridge"

Provider Discovery and Loading

# Discover installed providers on disk and report each one's config
# and connectivity status. A provider is any directory matching
# providers/<name>/nulib.
def load-providers [] -> table {
    let provider_dirs = glob "providers/*/nulib"

    $provider_dirs
    | each { |dir|
        # $dir looks like "providers/aws/nulib": strip the trailing
        # "nulib" component with `path dirname`, then take the basename
        # to get the provider name ("aws").
        # Fix: the original ran `path basename` first, which reduced the
        # path to just "nulib" and made the subsequent dirname/basename
        # yield an empty provider name.
        let provider_name = $dir | path dirname | path basename
        let provider_config = get-provider-config $provider_name

        {
            name: $provider_name,
            path: $dir,
            config: $provider_config,
            available: (test-provider-connectivity $provider_name)
        }
    }
}

3. Configuration Resolution Pattern

Hierarchical Configuration Loading

Implementation:

# Build the effective configuration by layering overrides on top of the
# defaults (later layers win), then expand template variables:
#   defaults < user < environment < runtime
def resolve-configuration [context: record] -> record {
    # Optional per-user overrides; a missing file contributes nothing.
    let user_layer = if ("config.user.toml" | path exists) {
        open config.user.toml
    } else { {} }

    # Optional environment-specific overrides, selected by
    # $env.PROVISIONING_ENV (e.g. config.prod.toml).
    let env_layer = if ($env.PROVISIONING_ENV? | is-not-empty) {
        let candidate = $"config.($env.PROVISIONING_ENV).toml"
        if ($candidate | path exists) { open $candidate } else { {} }
    } else { {} }

    # Fold every layer onto the defaults in precedence order.
    let merged = [
        $user_layer
        $env_layer
        ($context.runtime_config? | default {})
    ]
    | reduce --fold (open config.defaults.toml) { |layer, acc|
        $acc | merge $layer
    }

    interpolate-variables $merged
}

Variable Interpolation Pattern

# Expand {{...}} template placeholders anywhere inside the configuration.
# The record is round-tripped through JSON so replacement reaches
# arbitrarily nested string values.
def interpolate-variables [config: record] -> record {
    let interpolations = {
        "{{paths.base}}": ($env.PWD),
        "{{env.HOME}}": ($env.HOME),
        "{{now.date}}": (date now | format date "%Y-%m-%d"),
        "{{git.branch}}": (git branch --show-current | str trim)
    }

    # Apply every placeholder in one fold instead of one hard-coded
    # `str replace` line per key — adding a new interpolation now only
    # requires extending the record above (the original duplicated each
    # key in two places).
    $interpolations
    | transpose placeholder value
    | reduce --fold ($config | to json) { |it, acc|
        $acc | str replace --all $it.placeholder $it.value
    }
    | from json
}

4. Workflow Orchestration Patterns

Dependency Resolution Pattern

Use Case: Managing complex workflow dependencies

Implementation (Rust):

use petgraph::{Graph, Direction};
use std::collections::HashMap;

/// Resolves a valid execution order for workflow operations from a
/// directed dependency graph.
pub struct DependencyResolver {
    // Directed graph over operation names; edge a -> b means a sorts
    // before b in the computed execution order (see add_dependency).
    graph: Graph<String, ()>,
    // Operation name -> node index, so repeated names reuse one node.
    node_map: HashMap<String, petgraph::graph::NodeIndex>,
}

impl DependencyResolver {
    /// Returns operation names in a valid topological execution order.
    ///
    /// # Errors
    /// Returns `Error::CyclicDependency` if the graph contains a cycle.
    pub fn resolve_execution_order(&self) -> Result<Vec<String>, Error> {
        // `toposort` already yields the final ordering; the binding is
        // never mutated, so the original `let mut topo` triggered an
        // unused-mut warning.
        let topo = petgraph::algo::toposort(&self.graph, None)
            .map_err(|_| Error::CyclicDependency)?;

        Ok(topo.into_iter()
            .map(|idx| self.graph[idx].clone())
            .collect())
    }

    /// Records an ordering constraint: `from` executes before `to`.
    ///
    /// NOTE(review): the edge direction means `to` depends on `from`;
    /// confirm this matches what callers intend by "dependency".
    pub fn add_dependency(&mut self, from: &str, to: &str) {
        let from_idx = self.get_or_create_node(from);
        let to_idx = self.get_or_create_node(to);
        self.graph.add_edge(from_idx, to_idx, ());
    }
}

Parallel Execution Pattern

use tokio::task::JoinSet;
use futures::stream::{FuturesUnordered, StreamExt};

pub async fn execute_parallel_batch(
    operations: Vec<Operation>,
    parallelism_limit: usize
) -> Result<Vec<OperationResult>, Error> {
    let semaphore = tokio::sync::Semaphore::new(parallelism_limit);
    let mut join_set = JoinSet::new();

    for operation in operations {
        let permit = semaphore.clone();
        join_set.spawn(async move {
            let _permit = permit.acquire().await?;
            execute_operation(operation).await
        });
    }

    let mut results = Vec::new();
    while let Some(result) = join_set.join_next().await {
        results.push(result??);
    }

    Ok(results)
}

5. State Management Patterns

Checkpoint-Based Recovery Pattern

Use Case: Reliable state persistence and recovery

Implementation:

/// Snapshot of a workflow's progress, persisted as JSON so execution
/// can resume after a crash or restart.
#[derive(Serialize, Deserialize)]
pub struct WorkflowCheckpoint {
    // Identifier of the workflow this checkpoint belongs to; also used
    // as the checkpoint's file name.
    pub workflow_id: String,
    // Index of the next (or current) step in the workflow.
    pub step: usize,
    // Operations already finished — skipped on resume.
    pub completed_operations: Vec<String>,
    // Arbitrary workflow-defined state carried between steps.
    pub current_state: serde_json::Value,
    pub metadata: HashMap<String, String>,
    // When this checkpoint was taken (UTC).
    pub timestamp: chrono::DateTime<chrono::Utc>,
}

/// Saves and restores `WorkflowCheckpoint`s as JSON files on disk.
pub struct CheckpointManager {
    // Directory holding one `<workflow_id>.json` file per workflow.
    checkpoint_dir: PathBuf,
}

impl CheckpointManager {
    pub fn save_checkpoint(&self, checkpoint: &WorkflowCheckpoint) -> Result<(), Error> {
        let checkpoint_file = self.checkpoint_dir
            .join(&checkpoint.workflow_id)
            .with_extension("json");

        let checkpoint_data = serde_json::to_string_pretty(checkpoint)?;
        std::fs::write(checkpoint_file, checkpoint_data)?;
        Ok(())
    }

    pub fn restore_checkpoint(&self, workflow_id: &str) -> Result<Option<WorkflowCheckpoint>, Error> {
        let checkpoint_file = self.checkpoint_dir
            .join(workflow_id)
            .with_extension("json");

        if checkpoint_file.exists() {
            let checkpoint_data = std::fs::read_to_string(checkpoint_file)?;
            let checkpoint = serde_json::from_str(&checkpoint_data)?;
            Ok(Some(checkpoint))
        } else {
            Ok(None)
        }
    }
}

Rollback Pattern

/// Tracks undo actions recorded during a workflow so partial progress
/// can be reverted on failure.
pub struct RollbackManager {
    // LIFO stack: actions are executed in reverse of the order they
    // were recorded (see execute_rollback).
    rollback_stack: Vec<RollbackAction>,
}

/// One reversible step recorded while a workflow mutates the system.
#[derive(Clone, Debug)]
pub enum RollbackAction {
    // Remove a cloud resource that was created by the workflow.
    DeleteResource { provider: String, resource_id: String },
    // Write back a file's previous contents.
    RestoreFile { path: PathBuf, content: String },
    // Restore a configuration key to its previous value.
    RevertConfiguration { key: String, value: serde_json::Value },
    // Escape hatch: run an arbitrary command to undo a step.
    CustomAction { command: String, args: Vec<String> },
}

impl RollbackManager {
    /// Undoes recorded actions newest-first (LIFO), so later changes
    /// are reverted before the earlier changes they were built on.
    ///
    /// # Errors
    /// Stops and returns the first error; actions after the failing
    /// one are NOT attempted.
    pub async fn execute_rollback(&self) -> Result<(), Error> {
        // Execute rollback actions in reverse order
        for action in self.rollback_stack.iter().rev() {
            match action {
                RollbackAction::DeleteResource { provider, resource_id } => {
                    self.delete_resource(provider, resource_id).await?;
                }
                RollbackAction::RestoreFile { path, content } => {
                    tokio::fs::write(path, content).await?;
                }
                // ... handle other rollback actions
                // NOTE(review): RevertConfiguration and CustomAction arms
                // are elided in this example; as written the match is
                // non-exhaustive and will not compile until they (or a
                // catch-all) are added.
            }
        }
        Ok(())
    }
}

6. Event and Messaging Patterns

Event-Driven Architecture Pattern

Use Case: Decoupled communication between components

Event Definition:

/// Events broadcast between components over the `EventBus`.
/// `Clone` is required because each broadcast subscriber receives its
/// own copy; `Serialize`/`Deserialize` allow events to cross the
/// Rust/Nushell boundary as JSON.
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum SystemEvent {
    // Workflow lifecycle notifications.
    WorkflowStarted { workflow_id: String, name: String },
    WorkflowCompleted { workflow_id: String, result: WorkflowResult },
    WorkflowFailed { workflow_id: String, error: String },
    // Provider resource lifecycle notifications.
    ResourceCreated { provider: String, resource_type: String, resource_id: String },
    ResourceDeleted { provider: String, resource_type: String, resource_id: String },
    // Emitted when a configuration key changes, with both values for auditing.
    ConfigurationChanged { key: String, old_value: serde_json::Value, new_value: serde_json::Value },
}

Event Bus Implementation:

use tokio::sync::broadcast;

/// Fan-out event bus: every subscriber receives every published event.
pub struct EventBus {
    // Broadcast sender; receivers are created on demand via subscribe().
    sender: broadcast::Sender<SystemEvent>,
}

impl EventBus {
    /// Creates a bus whose channel buffers up to `capacity` events per
    /// lagging subscriber before the oldest are dropped.
    pub fn new(capacity: usize) -> Self {
        let (sender, _) = broadcast::channel(capacity);
        Self { sender }
    }

    /// Broadcasts `event` to all current subscribers.
    ///
    /// A broadcast `send` only errors when there are zero active
    /// receivers. For a fire-and-forget bus that is a normal state
    /// (e.g. during startup before anyone subscribed), not a failure,
    /// so the original mapping to `Error::EventPublishFailed` made
    /// every early publish fail; the no-receiver case is now ignored.
    pub fn publish(&self, event: SystemEvent) -> Result<(), Error> {
        let _ = self.sender.send(event);
        Ok(())
    }

    /// Returns a receiver that observes events published after this call.
    pub fn subscribe(&self) -> broadcast::Receiver<SystemEvent> {
        self.sender.subscribe()
    }
}

7. Extension Integration Patterns

Extension Discovery and Loading

# Scan extensions/*/extension.toml manifests and return one row per
# extension that is both valid and version-compatible.
def discover-extensions [] -> table {
    let extension_dirs = glob "extensions/*/extension.toml"

    $extension_dirs
    | each { |manifest_path|
        # The extension's root is the directory containing its manifest.
        let extension_dir = $manifest_path | path dirname
        let manifest = open $manifest_path

        {
            name: $manifest.extension.name,
            version: $manifest.extension.version,
            type: $manifest.extension.type,
            path: $extension_dir,
            manifest: $manifest,
            # validate-extension / check-compatibility are expected to
            # return booleans used by the filter below.
            valid: (validate-extension $manifest),
            compatible: (check-compatibility $manifest.compatibility)
        }
    }
    # Keep only extensions that passed both checks.
    | where valid and compatible
}

Extension Interface Pattern

# Standard extension interface
# Every extension module must export these four commands; the host
# calls them during discovery, activation and shutdown.

# Describe this extension: identity, type, and the scripts that the
# host should load for each entry point.
export def extension-info [] -> record {
    {
        name: "custom-provider",
        version: "1.0.0",
        type: "provider",
        description: "Custom cloud provider integration",
        entry_points: {
            cli: "nulib/cli.nu",
            provider: "nulib/provider.nu"
        }
    }
}

# Return true when the extension's configuration and dependencies are
# usable; false prevents activation.
export def extension-validate [] -> bool {
    # Validate extension configuration and dependencies
    true
}

# Called once when the host enables the extension.
export def extension-activate [] -> nothing {
    # Perform extension activation tasks
}

# Called once when the host disables the extension; release resources here.
export def extension-deactivate [] -> nothing {
    # Perform extension cleanup tasks
}

8. API Design Patterns

REST API Standardization

Base API Structure:

use axum::{
    extract::{Path, State},
    response::Json,
    routing::{get, post, delete},
    Router,
};

/// Assembles the orchestrator's REST routes over a shared `AppState`.
/// All handlers receive the state via axum's `State` extractor.
// NOTE(review): `:id` path captures are axum <= 0.6 syntax; axum 0.7+
// switched to `{id}` — confirm against the pinned axum version.
pub fn create_api_router(state: AppState) -> Router {
    Router::new()
        .route("/health", get(health_check))
        // Collection: list existing workflows / submit a new one.
        .route("/workflows", get(list_workflows).post(create_workflow))
        // Item: fetch or cancel a single workflow by id.
        .route("/workflows/:id", get(get_workflow).delete(delete_workflow))
        .route("/workflows/:id/status", get(workflow_status))
        .route("/workflows/:id/logs", get(workflow_logs))
        .with_state(state)
}

Standard Response Format:

{
    "status": "success" | "error" | "pending",
    "data": { ... },
    "metadata": {
        "timestamp": "2025-09-26T12:00:00Z",
        "request_id": "req-123",
        "version": "3.1.0"
    },
    "error": null | {
        "code": "ERR001",
        "message": "Human readable error",
        "details": { ... }
    }
}

Error Handling Patterns

Structured Error Pattern

/// Top-level error type; `thiserror` derives `Display` from the
/// `#[error(...)]` format strings and `std::error::Error` for free.
#[derive(thiserror::Error, Debug)]
pub enum ProvisioningError {
    // Configuration loading / validation failures.
    #[error("Configuration error: {message}")]
    Configuration { message: String },

    // Errors surfaced by a specific cloud provider.
    #[error("Provider error [{provider}]: {message}")]
    Provider { provider: String, message: String },

    // Failures tied to a particular workflow run.
    #[error("Workflow error [{workflow_id}]: {message}")]
    Workflow { workflow_id: String, message: String },

    // Failures tied to a concrete resource (e.g. server/server-001).
    #[error("Resource error [{resource_type}/{resource_id}]: {message}")]
    Resource { resource_type: String, resource_id: String, message: String },
}

Error Recovery Pattern

# Run `operation` with retries and exponential backoff (1s, 2s, 4s, ...).
# Returns the operation's result on first success; raises after
# `max_attempts` consecutive failures.
#
# Fix 1: Nushell closures cannot mutate captured `mut` variables, so
# the original `catch { |error| $attempts = ... }` was invalid — the
# try/catch now returns an outcome record and state is updated outside
# the closure.
# Fix 2: `sleep` takes a duration, not the string $"($delay)ms".
def with-retry [operation: closure, max_attempts: int = 3] {
    mut attempts = 0
    mut last_error = null

    while $attempts < $max_attempts {
        let outcome = try {
            { ok: true, value: (do $operation) }
        } catch { |error|
            { ok: false, error: $error }
        }

        if $outcome.ok {
            return $outcome.value
        }

        $attempts = $attempts + 1
        $last_error = $outcome.error

        if $attempts < $max_attempts {
            # Exponential backoff: 1sec, 2sec, 4sec, ...
            sleep ((2 ** ($attempts - 1)) * 1sec)
        }
    }

    error make { msg: $"Operation failed after ($max_attempts) attempts: ($last_error)" }
}

Performance Optimization Patterns

Caching Strategy Pattern

use std::sync::Arc;
use tokio::sync::RwLock;
use std::collections::HashMap;
use chrono::{DateTime, Utc, Duration};

/// A cached value together with its absolute expiry time.
#[derive(Clone)]
pub struct CacheEntry<T> {
    pub value: T,
    // Entry is considered stale once Utc::now() passes this instant.
    pub expires_at: DateTime<Utc>,
}

/// Async TTL cache shared across tasks (Arc + RwLock: many concurrent
/// readers, exclusive writers).
pub struct Cache<T> {
    store: Arc<RwLock<HashMap<String, CacheEntry<T>>>>,
    // TTL applied to every entry inserted via set().
    default_ttl: Duration,
}

impl<T: Clone> Cache<T> {
    /// Returns a clone of the value under `key` if it exists and has
    /// not expired. Stale entries are not removed here — they simply
    /// stop being returned and remain until overwritten by `set`.
    pub async fn get(&self, key: &str) -> Option<T> {
        let store = self.store.read().await;
        store
            .get(key)
            .filter(|entry| entry.expires_at > Utc::now())
            .map(|entry| entry.value.clone())
    }

    /// Inserts (or replaces) `value` under `key`, stamped with the
    /// cache's default TTL from now.
    pub async fn set(&self, key: String, value: T) {
        let entry = CacheEntry {
            value,
            expires_at: Utc::now() + self.default_ttl,
        };

        self.store.write().await.insert(key, entry);
    }
}

Streaming Pattern for Large Data

# Transform a large file record-by-record and write the results to
# output.json, without materializing the whole dataset at once.
def process-large-dataset [source: string] -> nothing {
    # Stream processing instead of loading entire dataset
    # NOTE(review): `open` on a recognized structured format (json, csv,
    # toml, ...) parses the file eagerly, which defeats `lines`; use
    # `open --raw` if byte-level streaming is intended — confirm the
    # expected input format.
    open $source
    | lines
    | each { |line|
        # Process line individually
        $line | process-record
    }
    | save output.json
}

Testing Integration Patterns

Integration Test Pattern

// Compiled only for `cargo test`; exercises the orchestrator end to end
// rather than individual units.
#[cfg(test)]
mod integration_tests {
    use super::*;
    use tokio_test;

    // Happy path: a test workflow submitted to a freshly set-up
    // orchestrator should run to completion.
    #[tokio::test]
    async fn test_workflow_execution() {
        let orchestrator = setup_test_orchestrator().await;
        let workflow = create_test_workflow();

        let result = orchestrator.execute_workflow(workflow).await;

        assert!(result.is_ok());
        assert_eq!(result.unwrap().status, WorkflowStatus::Completed);
    }
}

These integration patterns provide the foundation for the system’s sophisticated multi-component architecture, enabling reliable, scalable, and maintainable infrastructure automation.