diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 0000000..aa73b81
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,487 @@
+# Rustelo Scripts Directory
+
+This directory contains all the utility scripts for the Rustelo framework, organized by category for easy management and maintenance.
+
+## 📁 Directory Structure
+
+```
+scripts/
+├── databases/ # Database management scripts
+├── setup/ # Project setup and installation scripts
+├── tools/ # Advanced tooling scripts
+├── utils/ # General utility scripts
+├── deploy.sh # Main deployment script
+├── install.sh # Main installation script
+└── README.md # This file
+```
+
+## 🚀 Quick Start
+
+### Using Just (Recommended)
+
+The easiest way to use these scripts is through the `justfile` commands:
+
+```bash
+# Development
+just dev # Start development server
+just build # Build project
+just test # Run tests
+
+# Database
+just db-setup # Setup database
+just db-migrate # Run migrations
+just db-backup # Create backup
+
+# Tools
+just perf-benchmark # Run performance tests
+just security-audit # Run security audit
+just monitor-health # Monitor application health
+just ci-pipeline # Run CI/CD pipeline
+```
+
+### Direct Script Usage
+
+You can also run scripts directly:
+
+```bash
+# Database operations
+./scripts/databases/db.sh setup create
+./scripts/databases/db.sh migrate run
+
+# Performance testing
+./scripts/tools/performance.sh benchmark load
+./scripts/tools/performance.sh monitor live
+
+# Security scanning
+./scripts/tools/security.sh audit full
+./scripts/tools/security.sh analyze report
+
+# Monitoring
+./scripts/tools/monitoring.sh monitor health
+./scripts/tools/monitoring.sh reports generate
+```
+
+## 📂 Script Categories
+
+### 🗄️ Database Scripts (`databases/`)
+
+Comprehensive database management and operations:
+
+- **`db.sh`** - Master database management hub
+- **`db-setup.sh`** - Database setup and initialization
+- **`db-migrate.sh`** - Migration management
+- **`db-backup.sh`** - Backup and restore operations
+- **`db-monitor.sh`** - Database monitoring and health checks
+- **`db-utils.sh`** - Database utilities and maintenance
+
+**Key Features:**
+- PostgreSQL and SQLite support
+- Automated migrations
+- Backup/restore with compression
+- Performance monitoring
+- Health checks and alerts
+- Data export/import
+- Schema management
+
+**Usage Examples:**
+```bash
+# Full database setup
+./scripts/databases/db.sh setup setup
+
+# Create backup
+./scripts/databases/db.sh backup create
+
+# Monitor database health
+./scripts/databases/db.sh monitor health
+
+# Run migrations
+./scripts/databases/db.sh migrate run
+```
+
+### 🔧 Setup Scripts (`setup/`)
+
+Project initialization and configuration:
+
+- **`install.sh`** - Main installation script
+- **`install-dev.sh`** - Development environment setup
+- **`setup_dev.sh`** - Development configuration
+- **`setup-config.sh`** - Configuration management
+- **`setup_encryption.sh`** - Encryption setup
+
+**Key Features:**
+- Multi-mode installation (dev/prod/custom)
+- Dependency management
+- Environment configuration
+- Encryption setup
+- Feature selection
+- Cross-platform support
+
+**Usage Examples:**
+```bash
+# Basic development setup
+./scripts/setup/install.sh
+
+# Production setup with TLS
+./scripts/setup/install.sh -m prod --enable-tls
+
+# Custom interactive setup
+./scripts/setup/install.sh -m custom
+```
+
+### 🛠️ Tool Scripts (`tools/`)
+
+Advanced tooling and automation:
+
+- **`performance.sh`** - Performance testing and monitoring
+- **`security.sh`** - Security scanning and auditing
+- **`ci.sh`** - CI/CD pipeline management
+- **`monitoring.sh`** - Application monitoring and observability
+
+#### Performance Tools (`performance.sh`)
+
+**Commands:**
+- `benchmark load` - Load testing
+- `benchmark stress` - Stress testing
+- `monitor live` - Real-time monitoring
+- `analyze report` - Performance analysis
+- `optimize build` - Build optimization
+
+**Features:**
+- Load and stress testing
+- Real-time performance monitoring
+- Response time analysis
+- Resource usage tracking
+- Performance reporting
+- Build optimization
+
+**Usage:**
+```bash
+# Run load test
+./scripts/tools/performance.sh benchmark load -d 60 -c 100
+
+# Live monitoring
+./scripts/tools/performance.sh monitor live
+
+# Generate report
+./scripts/tools/performance.sh analyze report
+```
+
+#### Security Tools (`security.sh`)
+
+**Commands:**
+- `audit full` - Complete security audit
+- `audit dependencies` - Dependency vulnerability scan
+- `audit secrets` - Secret scanning
+- `analyze report` - Security reporting
+
+**Features:**
+- Dependency vulnerability scanning
+- Secret detection
+- Permission auditing
+- Security header analysis
+- Configuration security checks
+- Automated fixes
+
+**Usage:**
+```bash
+# Full security audit
+./scripts/tools/security.sh audit full
+
+# Scan for secrets
+./scripts/tools/security.sh audit secrets
+
+# Fix security issues
+./scripts/tools/security.sh audit dependencies --fix
+```
+
+#### CI/CD Tools (`ci.sh`)
+
+**Commands:**
+- `pipeline run` - Full CI/CD pipeline
+- `build docker` - Docker image building
+- `test all` - Complete test suite
+- `deploy staging` - Staging deployment
+
+**Features:**
+- Complete CI/CD pipeline
+- Docker image building
+- Multi-stage testing
+- Quality checks
+- Automated deployment
+- Build reporting
+
+**Usage:**
+```bash
+# Run full pipeline
+./scripts/tools/ci.sh pipeline run
+
+# Build Docker image
+./scripts/tools/ci.sh build docker -t v1.0.0
+
+# Deploy to staging
+./scripts/tools/ci.sh deploy staging
+```
+
+#### Monitoring Tools (`monitoring.sh`)
+
+**Commands:**
+- `monitor health` - Health monitoring
+- `monitor metrics` - Metrics collection
+- `monitor logs` - Log analysis
+- `reports generate` - Monitoring reports
+
+**Features:**
+- Real-time health monitoring
+- Metrics collection and analysis
+- Log monitoring and analysis
+- System resource monitoring
+- Alert management
+- Dashboard generation
+
+**Usage:**
+```bash
+# Monitor health
+./scripts/tools/monitoring.sh monitor health -d 300
+
+# Monitor all metrics
+./scripts/tools/monitoring.sh monitor all
+
+# Generate report
+./scripts/tools/monitoring.sh reports generate
+```
+
+### 🔧 Utility Scripts (`utils/`)
+
+General-purpose utilities:
+
+- **`configure-features.sh`** - Feature configuration
+- **`build-examples.sh`** - Example building
+- **`generate_certs.sh`** - TLS certificate generation
+- **`test_encryption.sh`** - Encryption testing
+- **`demo_root_path.sh`** - Root path configuration demo
+
+## 🚀 Common Workflows
+
+### Development Workflow
+
+```bash
+# 1. Initial setup
+just setup
+
+# 2. Database setup
+just db-setup
+
+# 3. Start development
+just dev-full
+
+# 4. Run tests
+just test
+
+# 5. Quality checks
+just quality
+```
+
+### Production Deployment
+
+```bash
+# 1. Build and test
+just ci-pipeline
+
+# 2. Security audit
+just security-audit
+
+# 3. Performance testing
+just perf-benchmark
+
+# 4. Deploy to staging
+just ci-deploy-staging
+
+# 5. Deploy to production
+just ci-deploy-prod
+```
+
+### Monitoring and Maintenance
+
+```bash
+# 1. Setup monitoring
+just monitor-setup
+
+# 2. Health monitoring
+just monitor-health
+
+# 3. Performance monitoring
+just perf-monitor
+
+# 4. Security monitoring
+just security-audit
+
+# 5. Generate reports
+just monitor-report
+```
+
+## 📋 Script Conventions
+
+### Common Options
+
+Most scripts support these common options:
+
+- `--help` - Show help message
+- `--verbose` - Enable verbose output
+- `--quiet` - Suppress output
+- `--dry-run` - Show what would be done
+- `--force` - Skip confirmations
+- `--env ENV` - Specify environment
+
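+These options can be combined; for example, a cautious production run might look like this (a hypothetical invocation — check each script's `--help` for the flags it actually supports):
+
+```bash
+./scripts/databases/db.sh migrate run --env prod --dry-run --verbose
+```
+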
+### Exit Codes
+
+Scripts use standard exit codes:
+
+- `0` - Success
+- `1` - General error
+- `2` - Misuse of shell builtins
+- `126` - Command invoked cannot execute
+- `127` - Command not found
+- `128` - Invalid argument to exit
+
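+These codes can be branched on in automation; a minimal sketch:
+
+```bash
+./scripts/databases/db.sh migrate run
+status=$?
+case "$status" in
+    0)   echo "migrations applied" ;;
+    127) echo "a required command is missing from PATH" >&2 ;;
+    *)   echo "migration failed with exit code $status" >&2 ;;
+esac
+```
+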
+### Logging
+
+Scripts use consistent logging:
+
+- `[INFO]` - General information
+- `[WARN]` - Warnings
+- `[ERROR]` - Errors
+- `[SUCCESS]` - Success messages
+- `[CRITICAL]` - Critical issues
+
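+A typical run therefore produces output along these lines (illustrative only):
+
+```
+[INFO] Creating database backup
+[WARN] No backup directory found: backups
+[SUCCESS] Backup created successfully
+```
+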
+## 🔧 Configuration
+
+### Environment Variables
+
+Scripts respect these environment variables:
+
+```bash
+# General
+PROJECT_NAME=rustelo
+ENVIRONMENT=dev
+LOG_LEVEL=info
+
+# Database
+DATABASE_URL=postgresql://user:pass@localhost/db
+
+# Docker
+DOCKER_REGISTRY=docker.io
+DOCKER_IMAGE=rustelo
+DOCKER_TAG=latest
+
+# Monitoring
+METRICS_PORT=3030
+GRAFANA_PORT=3000
+PROMETHEUS_PORT=9090
+```
+
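+Any of these can be set in `.env` or overridden inline for a single invocation:
+
+```bash
+ENVIRONMENT=prod LOG_LEVEL=debug ./scripts/databases/db.sh monitor health
+```
+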
+### Configuration Files
+
+Scripts may use these configuration files:
+
+- `.env` - Environment variables
+- `Cargo.toml` - Rust project configuration
+- `package.json` - Node.js dependencies
+- `docker-compose.yml` - Docker services
+
+## 🛠️ Development
+
+### Adding New Scripts
+
+1. Create script in appropriate category directory
+2. Make executable: `chmod +x script.sh`
+3. Add to `justfile` if needed
+4. Update this README
+5. Add tests if applicable
+
+### Script Template
+
+```bash
+#!/bin/bash
+# Script Description
+# Detailed description of what the script does
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m'
+
+# Logging functions
+log() { echo -e "${GREEN}[INFO]${NC} $1"; }
+log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
+log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
+
+# Your script logic here
+main() {
+ log "Starting script..."
+ # Implementation
+ log "Script completed"
+}
+
+# Run main function
+main "$@"
+```
+
+## 📚 References
+
+- [Just Command Runner](https://just.systems/) - Task runner
+- [Bash Style Guide](https://google.github.io/styleguide/shellguide.html) - Shell scripting standards
+- [Rustelo Documentation](../README.md) - Main project documentation
+- [Docker Documentation](https://docs.docker.com/) - Container management
+- [PostgreSQL Documentation](https://www.postgresql.org/docs/) - Database management
+
+## 🆘 Troubleshooting
+
+### Common Issues
+
+1. **Permission Denied**
+ ```bash
+ chmod +x scripts/path/to/script.sh
+ ```
+
+2. **Missing Dependencies**
+ ```bash
+ just setup-deps
+ ```
+
+3. **Environment Variables Not Set**
+ ```bash
+ cp .env.example .env
+ # Edit .env with your values
+ ```
+
+4. **Database Connection Issues**
+ ```bash
+ just db-status
+ just db-setup
+ ```
+
+5. **Docker Issues**
+ ```bash
+ docker system prune -f
+ just docker-build
+ ```
+
+### Getting Help
+
+- Run any script with `--help` for usage information
+- Check the `justfile` for available commands
+- Review logs in the output directories
+- Consult the main project documentation
+
+## 🤝 Contributing
+
+1. Follow the established conventions
+2. Add appropriate error handling
+3. Include help documentation
+4. Test thoroughly
+5. Update this README
+
+For questions or issues, please consult the project documentation or create an issue.
\ No newline at end of file
diff --git a/scripts/book/theme/custom.css b/scripts/book/theme/custom.css
new file mode 100644
index 0000000..52452c3
--- /dev/null
+++ b/scripts/book/theme/custom.css
@@ -0,0 +1,179 @@
+/* Rustelo Documentation Custom Styles */
+
+:root {
+ --rustelo-primary: #e53e3e;
+ --rustelo-secondary: #3182ce;
+ --rustelo-accent: #38a169;
+ --rustelo-dark: #2d3748;
+ --rustelo-light: #f7fafc;
+}
+
+/* Custom header styling */
+.menu-title {
+ color: var(--rustelo-primary);
+ font-weight: bold;
+}
+
+/* Code block improvements */
+pre {
+ border-radius: 8px;
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+}
+
+/* Improved table styling */
+table {
+ border-collapse: collapse;
+ width: 100%;
+ margin: 1rem 0;
+}
+
+table th,
+table td {
+ border: 1px solid #e2e8f0;
+ padding: 0.75rem;
+ text-align: left;
+}
+
+table th {
+ background-color: var(--rustelo-light);
+ font-weight: 600;
+}
+
+table tr:nth-child(even) {
+ background-color: #f8f9fa;
+}
+
+/* Feature badge styling */
+.feature-badge {
+ display: inline-block;
+ padding: 0.25rem 0.5rem;
+ border-radius: 0.25rem;
+ font-size: 0.875rem;
+ font-weight: 500;
+ margin: 0.125rem;
+}
+
+.feature-badge.enabled {
+ background-color: #c6f6d5;
+ color: #22543d;
+}
+
+.feature-badge.disabled {
+ background-color: #fed7d7;
+ color: #742a2a;
+}
+
+.feature-badge.optional {
+ background-color: #fef5e7;
+ color: #744210;
+}
+
+/* Callout boxes */
+.callout {
+ padding: 1rem;
+ margin: 1rem 0;
+ border-left: 4px solid;
+ border-radius: 0 4px 4px 0;
+}
+
+.callout.note {
+ border-left-color: var(--rustelo-secondary);
+ background-color: #ebf8ff;
+}
+
+.callout.warning {
+ border-left-color: #ed8936;
+ background-color: #fffaf0;
+}
+
+.callout.tip {
+ border-left-color: var(--rustelo-accent);
+ background-color: #f0fff4;
+}
+
+.callout.danger {
+ border-left-color: var(--rustelo-primary);
+ background-color: #fff5f5;
+}
+
+/* Command line styling */
+.command-line {
+ background-color: #1a202c;
+ color: #e2e8f0;
+ padding: 1rem;
+ border-radius: 8px;
+ font-family: 'JetBrains Mono', 'Fira Code', monospace;
+ margin: 1rem 0;
+}
+
+.command-line::before {
+ content: "$ ";
+ color: #48bb78;
+ font-weight: bold;
+}
+
+/* Navigation improvements */
+.chapter li.part-title {
+ color: var(--rustelo-primary);
+ font-weight: bold;
+ margin-top: 1rem;
+}
+
+/* Search improvements */
+#searchresults mark {
+ background-color: #fef5e7;
+ color: #744210;
+}
+
+/* Mobile improvements */
+@media (max-width: 768px) {
+ .content {
+ padding: 1rem;
+ }
+
+ table {
+ font-size: 0.875rem;
+ }
+
+ .command-line {
+ font-size: 0.8rem;
+ padding: 0.75rem;
+ }
+}
+
+/* Dark theme overrides */
+.navy .callout.note {
+ background-color: #1e3a8a;
+}
+
+.navy .callout.warning {
+ background-color: #92400e;
+}
+
+.navy .callout.tip {
+ background-color: #14532d;
+}
+
+.navy .callout.danger {
+ background-color: #991b1b;
+}
+
+/* Print styles */
+@media print {
+ .nav-wrapper,
+ .page-wrapper > .page > .menu,
+ .mobile-nav-chapters,
+ .nav-chapters,
+ .sidebar-scrollbox {
+ display: none !important;
+ }
+
+ .page-wrapper > .page {
+ left: 0 !important;
+ }
+
+ .content {
+ margin-left: 0 !important;
+ max-width: none !important;
+ }
+}
diff --git a/scripts/book/theme/custom.js b/scripts/book/theme/custom.js
new file mode 100644
index 0000000..350072e
--- /dev/null
+++ b/scripts/book/theme/custom.js
@@ -0,0 +1,115 @@
+// Rustelo Documentation Custom JavaScript
+
+// Add copy buttons to code blocks
+document.addEventListener('DOMContentLoaded', function() {
+ // Add copy buttons to code blocks
+ const codeBlocks = document.querySelectorAll('pre > code');
+ codeBlocks.forEach(function(codeBlock) {
+ const pre = codeBlock.parentElement;
+ const button = document.createElement('button');
+ button.className = 'copy-button';
+ button.textContent = 'Copy';
+ button.style.cssText = `
+ position: absolute;
+ top: 8px;
+ right: 8px;
+ background: #4a5568;
+ color: white;
+ border: none;
+ padding: 4px 8px;
+ border-radius: 4px;
+ font-size: 12px;
+ cursor: pointer;
+ opacity: 0;
+ transition: opacity 0.2s;
+ `;
+
+ pre.style.position = 'relative';
+ pre.appendChild(button);
+
+ pre.addEventListener('mouseenter', function() {
+ button.style.opacity = '1';
+ });
+
+ pre.addEventListener('mouseleave', function() {
+ button.style.opacity = '0';
+ });
+
+ button.addEventListener('click', function() {
+ const text = codeBlock.textContent;
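+            // Note: the asynchronous Clipboard API is only available in
+            // secure contexts (HTTPS or localhost).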
+ navigator.clipboard.writeText(text).then(function() {
+ button.textContent = 'Copied!';
+ button.style.background = '#48bb78';
+ setTimeout(function() {
+ button.textContent = 'Copy';
+ button.style.background = '#4a5568';
+ }, 2000);
+ });
+ });
+ });
+
+ // Add feature badges
+ const content = document.querySelector('.content');
+ if (content) {
+ let html = content.innerHTML;
+
+ // Replace feature indicators
+        html = html.replace(/\[FEATURE:([^\]]+)\]/g, '<span class="feature-badge enabled">$1</span>');
+        html = html.replace(/\[OPTIONAL:([^\]]+)\]/g, '<span class="feature-badge optional">$1</span>');
+        html = html.replace(/\[DISABLED:([^\]]+)\]/g, '<span class="feature-badge disabled">$1</span>');
+
+        // Add callout boxes
+        html = html.replace(/\[NOTE\]([\s\S]*?)\[\/NOTE\]/g, '<div class="callout note">$1</div>');
+        html = html.replace(/\[WARNING\]([\s\S]*?)\[\/WARNING\]/g, '<div class="callout warning">$1</div>');
+        html = html.replace(/\[TIP\]([\s\S]*?)\[\/TIP\]/g, '<div class="callout tip">$1</div>');
+        html = html.replace(/\[DANGER\]([\s\S]*?)\[\/DANGER\]/g, '<div class="callout danger">$1</div>');
+
+ content.innerHTML = html;
+ }
+
+ // Add smooth scrolling
+ document.querySelectorAll('a[href^="#"]').forEach(anchor => {
+ anchor.addEventListener('click', function (e) {
+ e.preventDefault();
+ const target = document.querySelector(this.getAttribute('href'));
+ if (target) {
+ target.scrollIntoView({
+ behavior: 'smooth'
+ });
+ }
+ });
+ });
+});
+
+// Add keyboard shortcuts
+document.addEventListener('keydown', function(e) {
+ // Ctrl/Cmd + K to focus search
+ if ((e.ctrlKey || e.metaKey) && e.key === 'k') {
+ e.preventDefault();
+ const searchInput = document.querySelector('#searchbar');
+ if (searchInput) {
+ searchInput.focus();
+ }
+ }
+});
+
+// Add version info to footer
+document.addEventListener('DOMContentLoaded', function() {
+ const content = document.querySelector('.content');
+ if (content) {
+ const footer = document.createElement('div');
+ footer.style.cssText = `
+ margin-top: 3rem;
+ padding: 2rem 0;
+ border-top: 1px solid #e2e8f0;
+ text-align: center;
+ font-size: 0.875rem;
+ color: #718096;
+ `;
+        footer.innerHTML = `
+            <p>Built with ❤️ using mdBook</p>
+            <p>Rustelo Documentation • Last updated: ${new Date().toLocaleDateString()}</p>
+        `;
+ content.appendChild(footer);
+ }
+});
diff --git a/scripts/build-docs.sh b/scripts/build-docs.sh
new file mode 100755
index 0000000..f21924e
--- /dev/null
+++ b/scripts/build-docs.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+# Build documentation with logo assets for RUSTELO
+# This script generates cargo documentation and copies logo assets to the output directory
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Function to print colored output
+print_status() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+print_warning() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+print_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+# Check if we're in the correct directory
+if [ ! -f "Cargo.toml" ]; then
+ print_error "Cargo.toml not found. Please run this script from the project root directory."
+ exit 1
+fi
+
+if [ ! -d "logos" ]; then
+ print_error "logos directory not found. Please ensure the logos directory exists in the project root."
+ exit 1
+fi
+
+print_status "Building RUSTELO documentation with logo assets..."
+
+# Clean previous documentation build
+print_status "Cleaning previous documentation..."
+cargo clean --doc
+
+# Build documentation
+print_status "Generating cargo documentation..."
+if cargo doc --no-deps --lib --workspace --document-private-items; then
+ print_status "Documentation generated successfully"
+else
+ print_error "Failed to generate documentation"
+ exit 1
+fi
+
+# Copy logo assets to documentation output
+print_status "Copying logo assets to documentation output..."
+if [ -d "target/doc" ]; then
+ cp -r logos target/doc/
+ print_status "Logo assets copied to target/doc/logos/"
+else
+ print_error "Documentation output directory not found"
+ exit 1
+fi
+
+# Check if logos were copied successfully
+if [ -d "target/doc/logos" ] && [ "$(ls -A target/doc/logos)" ]; then
+ print_status "Logo assets verified in documentation output"
+ echo "Available logo files:"
+ ls -la target/doc/logos/
+else
+ print_warning "Logo assets may not have been copied correctly"
+fi
+
+# Display completion message
+print_status "Documentation build complete!"
+echo ""
+echo "Documentation available at: target/doc/index.html"
+echo "Logo assets available at: target/doc/logos/"
+echo ""
+echo "To view the documentation, run:"
+echo " cargo doc --open"
+echo "or open target/doc/index.html in your browser"
diff --git a/scripts/config_wizard.rhai b/scripts/config_wizard.rhai
new file mode 100644
index 0000000..340e679
--- /dev/null
+++ b/scripts/config_wizard.rhai
@@ -0,0 +1,337 @@
+// Configuration Wizard Script for Rustelo Template
+// This script interactively generates config.toml and sets Cargo.toml features
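+//
+// NOTE: `input()` used below is not a built-in Rhai function; the embedding
+// host application is expected to register it (e.g. via Engine::register_fn)
+// to read a line from stdin. `print` and `parse_int` are built in.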
+
+// Configuration structure
+let config = #{
+ features: #{},
+ server: #{},
+ database: #{},
+ auth: #{},
+ oauth: #{},
+ email: #{},
+ security: #{},
+ monitoring: #{},
+ ssl: #{},
+ cache: #{},
+ build_info: #{}
+};
+
+// Available features with descriptions
+let available_features = #{
+ auth: "Authentication and authorization system",
+ tls: "TLS/SSL support for secure connections",
+ rbac: "Role-based access control",
+ crypto: "Cryptographic utilities and encryption",
+ content_db: "Content management and database features",
+ email: "Email sending capabilities",
+ metrics: "Prometheus metrics collection",
+ examples: "Include example code and documentation",
+ production: "Production-ready configuration (includes: auth, content-db, crypto, email, metrics, tls)"
+};
+
+// Helper function to ask yes/no questions
+fn ask_yes_no(question) {
+ print(question + " (y/n): ");
+ let answer = input();
+ return answer.to_lower() == "y" || answer.to_lower() == "yes";
+}
+
+// Helper function to ask for string input
+fn ask_string(question, default_value) {
+ if default_value != "" {
+ print(question + " [" + default_value + "]: ");
+ } else {
+ print(question + ": ");
+ }
+ let answer = input();
+ return if answer == "" { default_value } else { answer };
+}
+
+// Helper function to ask for numeric input
+fn ask_number(question, default_value) {
+ print(question + " [" + default_value + "]: ");
+ let answer = input();
+ return if answer == "" { default_value } else { parse_int(answer) };
+}
+
+// Main configuration wizard
+fn run_wizard() {
+ print("=== Rustelo Configuration Wizard ===\n");
+ print("This wizard will help you configure your Rustelo application.\n");
+
+ // Ask about features
+ print("\n--- Feature Selection ---");
+ print("Select the features you want to enable:\n");
+
+ let selected_features = [];
+
+ for feature in available_features.keys() {
+ let description = available_features[feature];
+ if ask_yes_no("Enable " + feature + "? (" + description + ")") {
+ selected_features.push(feature);
+ }
+ }
+
+ config.features = selected_features;
+
+ // Basic server configuration
+ print("\n--- Server Configuration ---");
+ config.server.host = ask_string("Server host", "127.0.0.1");
+ config.server.port = ask_number("Server port", 3030);
+ config.server.environment = ask_string("Environment (dev/prod/test)", "dev");
+ config.server.workers = ask_number("Number of workers", 4);
+
+ // Database configuration (if content-db feature is enabled)
+    if selected_features.contains("content_db") {
+ print("\n--- Database Configuration ---");
+ config.database.url = ask_string("Database URL", "sqlite:rustelo.db");
+ config.database.max_connections = ask_number("Max database connections", 10);
+ config.database.enable_logging = ask_yes_no("Enable database query logging");
+ }
+
+ // Authentication configuration (if auth feature is enabled)
+ if selected_features.contains("auth") {
+ print("\n--- Authentication Configuration ---");
+ config.auth.jwt_secret = ask_string("JWT secret (leave empty for auto-generation)", "");
+ config.auth.session_timeout = ask_number("Session timeout (minutes)", 60);
+ config.auth.max_login_attempts = ask_number("Max login attempts", 5);
+ config.auth.require_email_verification = ask_yes_no("Require email verification");
+
+ // OAuth configuration
+ if ask_yes_no("Enable OAuth providers?") {
+ config.oauth.enabled = true;
+
+ if ask_yes_no("Enable Google OAuth?") {
+ config.oauth.google = #{
+ client_id: ask_string("Google OAuth Client ID", ""),
+ client_secret: ask_string("Google OAuth Client Secret", ""),
+ redirect_uri: ask_string("Google OAuth Redirect URI", "http://localhost:3030/auth/google/callback")
+ };
+ }
+
+ if ask_yes_no("Enable GitHub OAuth?") {
+ config.oauth.github = #{
+ client_id: ask_string("GitHub OAuth Client ID", ""),
+ client_secret: ask_string("GitHub OAuth Client Secret", ""),
+ redirect_uri: ask_string("GitHub OAuth Redirect URI", "http://localhost:3030/auth/github/callback")
+ };
+ }
+ }
+ }
+
+ // Email configuration (if email feature is enabled)
+ if selected_features.contains("email") {
+ print("\n--- Email Configuration ---");
+ config.email.smtp_host = ask_string("SMTP host", "localhost");
+ config.email.smtp_port = ask_number("SMTP port", 587);
+ config.email.smtp_username = ask_string("SMTP username", "");
+ config.email.smtp_password = ask_string("SMTP password", "");
+ config.email.from_email = ask_string("From email address", "noreply@localhost");
+ config.email.from_name = ask_string("From name", "Rustelo App");
+ }
+
+ // Security configuration
+ print("\n--- Security Configuration ---");
+ config.security.enable_csrf = ask_yes_no("Enable CSRF protection");
+ config.security.rate_limit_requests = ask_number("Rate limit requests per minute", 100);
+ config.security.bcrypt_cost = ask_number("BCrypt cost (4-31)", 12);
+
+ // SSL/TLS configuration (if tls feature is enabled)
+ if selected_features.contains("tls") {
+ print("\n--- SSL/TLS Configuration ---");
+ config.ssl.force_https = ask_yes_no("Force HTTPS");
+ config.ssl.cert_path = ask_string("SSL certificate path", "");
+ config.ssl.key_path = ask_string("SSL private key path", "");
+ }
+
+ // Monitoring configuration (if metrics feature is enabled)
+ if selected_features.contains("metrics") {
+ print("\n--- Monitoring Configuration ---");
+ config.monitoring.enabled = ask_yes_no("Enable monitoring");
+ if config.monitoring.enabled {
+ config.monitoring.metrics_port = ask_number("Metrics port", 9090);
+ config.monitoring.prometheus_enabled = ask_yes_no("Enable Prometheus metrics");
+ }
+ }
+
+ // Cache configuration
+ print("\n--- Cache Configuration ---");
+ config.cache.enabled = ask_yes_no("Enable caching");
+ if config.cache.enabled {
+ config.cache.type = ask_string("Cache type (memory/redis)", "memory");
+ config.cache.default_ttl = ask_number("Default TTL (seconds)", 3600);
+ }
+
+ // Build information
+ config.build_info.environment = config.server.environment;
+ config.build_info.config_version = "1.0.0";
+
+ return config;
+}
+
+// Generate TOML configuration
+fn generate_toml(config) {
+ let toml_content = "";
+
+ // Root configuration
+ toml_content += "# Rustelo Configuration File\n";
+ toml_content += "# Generated by Configuration Wizard\n\n";
+ toml_content += "root_path = \".\"\n\n";
+
+ // Features section
+ toml_content += "[features]\n";
+    for feature in config.features {
+        toml_content += feature + " = true\n";
+    }
+    toml_content += "\n";
+
+ // Server section
+ toml_content += "[server]\n";
+ toml_content += "protocol = \"http\"\n";
+ toml_content += "host = \"" + config.server.host + "\"\n";
+ toml_content += "port = " + config.server.port + "\n";
+ toml_content += "environment = \"" + config.server.environment + "\"\n";
+ toml_content += "workers = " + config.server.workers + "\n";
+ toml_content += "\n";
+
+ // Database section
+    if config.database.len() > 0 {
+ toml_content += "[database]\n";
+ toml_content += "url = \"" + config.database.url + "\"\n";
+ toml_content += "max_connections = " + config.database.max_connections + "\n";
+ toml_content += "enable_logging = " + config.database.enable_logging + "\n";
+ toml_content += "\n";
+ }
+
+ // Authentication section
+    if config.auth.len() > 0 {
+ toml_content += "[auth]\n";
+ if config.auth.jwt_secret != "" {
+ toml_content += "jwt_secret = \"" + config.auth.jwt_secret + "\"\n";
+ }
+ toml_content += "session_timeout = " + config.auth.session_timeout + "\n";
+ toml_content += "max_login_attempts = " + config.auth.max_login_attempts + "\n";
+ toml_content += "require_email_verification = " + config.auth.require_email_verification + "\n";
+ toml_content += "\n";
+ }
+
+ // OAuth section
+    if config.oauth.len() > 0 && config.oauth.enabled {
+ toml_content += "[oauth]\n";
+ toml_content += "enabled = true\n\n";
+
+ if config.oauth.google != () {
+ toml_content += "[oauth.google]\n";
+ toml_content += "client_id = \"" + config.oauth.google.client_id + "\"\n";
+ toml_content += "client_secret = \"" + config.oauth.google.client_secret + "\"\n";
+ toml_content += "redirect_uri = \"" + config.oauth.google.redirect_uri + "\"\n\n";
+ }
+
+ if config.oauth.github != () {
+ toml_content += "[oauth.github]\n";
+ toml_content += "client_id = \"" + config.oauth.github.client_id + "\"\n";
+ toml_content += "client_secret = \"" + config.oauth.github.client_secret + "\"\n";
+ toml_content += "redirect_uri = \"" + config.oauth.github.redirect_uri + "\"\n\n";
+ }
+ }
+
+ // Email section
+    if config.email.len() > 0 {
+ toml_content += "[email]\n";
+ toml_content += "smtp_host = \"" + config.email.smtp_host + "\"\n";
+ toml_content += "smtp_port = " + config.email.smtp_port + "\n";
+ toml_content += "smtp_username = \"" + config.email.smtp_username + "\"\n";
+ toml_content += "smtp_password = \"" + config.email.smtp_password + "\"\n";
+ toml_content += "from_email = \"" + config.email.from_email + "\"\n";
+ toml_content += "from_name = \"" + config.email.from_name + "\"\n";
+ toml_content += "\n";
+ }
+
+ // Security section
+ toml_content += "[security]\n";
+ toml_content += "enable_csrf = " + config.security.enable_csrf + "\n";
+ toml_content += "rate_limit_requests = " + config.security.rate_limit_requests + "\n";
+ toml_content += "bcrypt_cost = " + config.security.bcrypt_cost + "\n";
+ toml_content += "\n";
+
+ // SSL section
+    if config.ssl.len() > 0 {
+ toml_content += "[ssl]\n";
+ toml_content += "force_https = " + config.ssl.force_https + "\n";
+ if config.ssl.cert_path != "" {
+ toml_content += "cert_path = \"" + config.ssl.cert_path + "\"\n";
+ }
+ if config.ssl.key_path != "" {
+ toml_content += "key_path = \"" + config.ssl.key_path + "\"\n";
+ }
+ toml_content += "\n";
+ }
+
+ // Monitoring section
+    if config.monitoring.len() > 0 && config.monitoring.enabled {
+ toml_content += "[monitoring]\n";
+ toml_content += "enabled = true\n";
+ toml_content += "metrics_port = " + config.monitoring.metrics_port + "\n";
+ toml_content += "prometheus_enabled = " + config.monitoring.prometheus_enabled + "\n";
+ toml_content += "\n";
+ }
+
+ // Cache section
+    if config.cache.enabled {
+ toml_content += "[cache]\n";
+ toml_content += "enabled = true\n";
+ toml_content += "type = \"" + config.cache.type + "\"\n";
+ toml_content += "default_ttl = " + config.cache.default_ttl + "\n";
+ toml_content += "\n";
+ }
+
+ // Build info section
+ toml_content += "[build_info]\n";
+ toml_content += "environment = \"" + config.build_info.environment + "\"\n";
+ toml_content += "config_version = \"" + config.build_info.config_version + "\"\n";
+
+ return toml_content;
+}
+
+// Generate Cargo.toml features
+fn generate_cargo_features(selected_features) {
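+    // NOTE: wizard feature keys use underscores (e.g. content_db) while the
+    // matching Cargo features typically use hyphens (e.g. content-db), so the
+    // names may need mapping before being written into Cargo.toml.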
+ let features_line = "default = [";
+
+ for i in 0..selected_features.len() {
+ features_line += "\"" + selected_features[i] + "\"";
+ if i < selected_features.len() - 1 {
+ features_line += ", ";
+ }
+ }
+
+ features_line += "]";
+
+ return features_line;
+}
+
+// Main execution
+fn main() {
+ let config = run_wizard();
+
+ print("\n=== Configuration Summary ===");
+ print("Selected features: " + config.features);
+ print("Server: " + config.server.host + ":" + config.server.port);
+ print("Environment: " + config.server.environment);
+
+ if ask_yes_no("\nGenerate configuration files?") {
+ let toml_content = generate_toml(config);
+ let cargo_features = generate_cargo_features(config.features);
+
+ print("\n=== Generated config.toml ===");
+ print(toml_content);
+
+ print("\n=== Cargo.toml default features ===");
+ print(cargo_features);
+
+ print("\nConfiguration generated successfully!");
+ print("Copy the above content to your config.toml and update your Cargo.toml accordingly.");
+ }
+}
+
+// Run the wizard
+main();
diff --git a/scripts/databases/DATABASE_SCRIPTS.md b/scripts/databases/DATABASE_SCRIPTS.md
new file mode 100644
index 0000000..064722d
--- /dev/null
+++ b/scripts/databases/DATABASE_SCRIPTS.md
@@ -0,0 +1,533 @@
+# Database Management Scripts
+
+This directory contains a comprehensive set of shell scripts for managing your Rustelo application's database. These scripts provide convenient commands for all database operations including setup, backup, monitoring, migrations, and utilities.
+
+## Overview
+
+The database management system consists of several specialized scripts, each handling different aspects of database operations:
+
+- **`db.sh`** - Master script that provides easy access to all database tools
+- **`db-setup.sh`** - Database setup and initialization
+- **`db-backup.sh`** - Backup and restore operations
+- **`db-monitor.sh`** - Monitoring and health checks
+- **`db-migrate.sh`** - Migration management with advanced features
+- **`db-utils.sh`** - Database utilities and maintenance tasks
+
+## Quick Start
+
+### Master Script (`db.sh`)
+
+The master script provides a centralized interface to all database operations:
+
+```bash
+# Quick status check
+./scripts/databases/db.sh status
+
+# Complete health check
+./scripts/databases/db.sh health
+
+# Create backup
+./scripts/databases/db.sh backup
+
+# Run migrations
+./scripts/databases/db.sh migrate
+
+# Optimize database
+./scripts/databases/db.sh optimize
+```
+
+### Category-based Commands
+
+Use the master script with categories for specific operations:
+
+```bash
+# Database setup
+./scripts/databases/db.sh setup create
+./scripts/databases/db.sh setup migrate
+./scripts/databases/db.sh setup seed
+
+# Backup operations
+./scripts/databases/db.sh backup create
+./scripts/databases/db.sh backup restore --file backup.sql
+./scripts/databases/db.sh backup list
+
+# Monitoring
+./scripts/databases/db.sh monitor health
+./scripts/databases/db.sh monitor connections
+./scripts/databases/db.sh monitor performance
+
+# Migration management
+./scripts/databases/db.sh migrate create --name add_users
+./scripts/databases/db.sh migrate run
+./scripts/databases/db.sh migrate rollback --steps 1
+
+# Utilities
+./scripts/databases/db.sh utils size
+./scripts/databases/db.sh utils tables
+./scripts/databases/db.sh utils optimize
+```
+
+## Individual Scripts
+
+### Database Setup (`db-setup.sh`)
+
+Handles database initialization and basic operations:
+
+```bash
+# Full setup (create + migrate + seed)
+./scripts/databases/db-setup.sh setup
+
+# Individual operations
+./scripts/databases/db-setup.sh create
+./scripts/databases/db-setup.sh migrate
+./scripts/databases/db-setup.sh seed
+./scripts/databases/db-setup.sh reset --force
+
+# Database-specific setup
+./scripts/databases/db-setup.sh postgres
+./scripts/databases/db-setup.sh sqlite
+```
+
+**Features:**
+- Automatic environment detection
+- Support for PostgreSQL and SQLite
+- Seed data management
+- Database reset with safety checks
+- Environment variable management
+
+### Database Backup (`db-backup.sh`)
+
+Comprehensive backup and restore functionality:
+
+```bash
+# Create backups
+./scripts/databases/db-backup.sh backup                         # Full backup
+./scripts/databases/db-backup.sh backup --compress              # Compressed backup
+./scripts/databases/db-backup.sh backup --schema-only           # Schema only
+./scripts/databases/db-backup.sh backup --tables users,content  # Specific tables
+
+# Restore operations
+./scripts/databases/db-backup.sh restore --file backup.sql
+./scripts/databases/db-backup.sh restore --file backup.sql --force
+
+# Backup management
+./scripts/databases/db-backup.sh list                           # List backups
+./scripts/databases/db-backup.sh clean --keep-days 7            # Clean old backups
+```
+
+**Features:**
+- Multiple backup formats (SQL, custom, tar)
+- Compression support
+- Selective table backup
+- Automatic backup cleanup
+- Backup validation
+- Database cloning capabilities
+
+### Database Monitoring (`db-monitor.sh`)
+
+Real-time monitoring and health checks:
+
+```bash
+# Health checks
+./scripts/databases/db-monitor.sh health                   # Complete health check
+./scripts/databases/db-monitor.sh status                   # Quick status
+./scripts/databases/db-monitor.sh connections              # Active connections
+./scripts/databases/db-monitor.sh performance              # Performance metrics
+
+# Monitoring
+./scripts/databases/db-monitor.sh monitor --interval 30    # Continuous monitoring
+./scripts/databases/db-monitor.sh slow-queries             # Slow query analysis
+./scripts/databases/db-monitor.sh locks                    # Database locks
+
+# Maintenance
+./scripts/databases/db-monitor.sh vacuum                   # Database maintenance
+./scripts/databases/db-monitor.sh analyze                  # Update statistics
+./scripts/databases/db-monitor.sh report                   # Generate report
+```
+
+**Features:**
+- Real-time connection monitoring
+- Performance metrics tracking
+- Slow query detection
+- Lock analysis
+- Disk usage monitoring
+- Memory usage tracking
+- Automated maintenance tasks
+- Comprehensive reporting
+
+### Database Migration (`db-migrate.sh`)
+
+Advanced migration management system:
+
+```bash
+# Migration status
+./scripts/databases/db-migrate.sh status                   # Show migration status
+./scripts/databases/db-migrate.sh pending                  # List pending migrations
+./scripts/databases/db-migrate.sh applied                  # List applied migrations
+
+# Running migrations
+./scripts/databases/db-migrate.sh run                      # Run all pending
+./scripts/databases/db-migrate.sh run --version 003        # Run to specific version
+./scripts/databases/db-migrate.sh dry-run                  # Preview changes
+
+# Creating migrations
+./scripts/databases/db-migrate.sh create --name add_user_preferences
+./scripts/databases/db-migrate.sh create --name migrate_users --type data
+./scripts/databases/db-migrate.sh create --template create-table
+
+# Rollback operations
+./scripts/databases/db-migrate.sh rollback --steps 1       # Rollback last migration
+./scripts/databases/db-migrate.sh rollback --steps 3       # Rollback 3 migrations
+
+# Validation
+./scripts/databases/db-migrate.sh validate                 # Validate all migrations
+```
+
+**Features:**
+- Migration version control
+- Rollback capabilities
+- Migration templates
+- Dry-run mode
+- Migration validation
+- Automatic rollback script generation
+- Lock-based migration safety
+- Comprehensive migration history
+
+### Database Utilities (`db-utils.sh`)
+
+Comprehensive database utilities and maintenance:
+
+```bash
+# Database information
+./scripts/databases/db-utils.sh size                       # Database size info
+./scripts/databases/db-utils.sh tables                     # Table information
+./scripts/databases/db-utils.sh tables --table users       # Specific table info
+./scripts/databases/db-utils.sh indexes                    # Index information
+./scripts/databases/db-utils.sh constraints                # Table constraints
+
+# User and session management
+./scripts/databases/db-utils.sh users                      # Database users
+./scripts/databases/db-utils.sh sessions                   # Active sessions
+./scripts/databases/db-utils.sh queries                    # Running queries
+./scripts/databases/db-utils.sh kill-query --query-id 12345   # Kill specific query
+
+# Maintenance operations
+./scripts/databases/db-utils.sh optimize                   # Optimize database
+./scripts/databases/db-utils.sh reindex                    # Rebuild indexes
+./scripts/databases/db-utils.sh check-integrity            # Integrity check
+./scripts/databases/db-utils.sh cleanup                    # Clean temporary data
+
+# Data analysis
+./scripts/databases/db-utils.sh duplicate-data --table users   # Find duplicates
+./scripts/databases/db-utils.sh table-stats --table users      # Detailed table stats
+./scripts/databases/db-utils.sh benchmark                  # Performance benchmarks
+```
+
+**Features:**
+- Comprehensive database analysis
+- User and session management
+- Query monitoring and termination
+- Database optimization
+- Integrity checking
+- Duplicate data detection
+- Performance benchmarking
+- Automated cleanup tasks
+
+## Configuration
+
+### Environment Variables
+
+The scripts use the following environment variables from your `.env` file:
+
+```env
+# Database Configuration
+DATABASE_URL=postgresql://user:password@localhost:5432/database_name
+# or
+DATABASE_URL=sqlite://data/database.db
+
+# Environment
+ENVIRONMENT=dev
+```
+
+### Script Configuration
+
+Each script has configurable parameters:
+
+```bash
+# Common options
+--env ENV # Environment (dev/prod)
+--force # Skip confirmations
+--quiet # Suppress verbose output
+--debug # Enable debug output
+--dry-run # Show what would be done
+
+# Backup-specific
+--compress # Compress backup files
+--keep-days N # Retention period for backups
+
+# Monitoring-specific
+--interval N # Monitoring interval in seconds
+--threshold-conn N # Connection alert threshold
+--continuous # Run continuously
+
+# Migration-specific
+--version VERSION # Target migration version
+--steps N # Number of migration steps
+--template NAME # Migration template name
+```
+
+## Database Support
+
+### PostgreSQL
+
+Full support for PostgreSQL features:
+- Connection pooling monitoring
+- Query performance analysis
+- Index usage statistics
+- Lock detection and resolution
+- User and permission management
+- Extension management
+- Advanced backup formats
+
+### SQLite
+
+Optimized support for SQLite:
+- File-based operations
+- Integrity checking
+- Vacuum and analyze operations
+- Backup and restore
+- Schema analysis
+
+## Safety Features
+
+### Confirmation Prompts
+
+Destructive operations require confirmation:
+- Database reset
+- Data truncation
+- Migration rollback
+- Backup restoration
+
+### Dry-Run Mode
+
+Preview changes before execution:
+```bash
+./scripts/databases/db-migrate.sh run --dry-run
+./scripts/databases/db-backup.sh backup --dry-run
+./scripts/databases/db-utils.sh optimize --dry-run
+```
+
+### Locking Mechanism
+
+Migration operations use locks to prevent concurrent execution:
+- Automatic lock acquisition
+- Lock timeout handling
+- Process ID tracking
+- Graceful lock release
+
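+A sketch of the pattern (illustrative only; the file name and layout are assumptions, not the exact implementation in `db-migrate.sh`):
+
+```bash
+LOCK_FILE=".migration.lock"
+# noclobber makes the redirect fail if the lock file already exists,
+# giving an atomic acquire; the file records the owning PID.
+if ( set -o noclobber; echo "$$" > "$LOCK_FILE" ) 2>/dev/null; then
+    trap 'rm -f "$LOCK_FILE"' EXIT INT TERM   # graceful release
+    ./scripts/databases/db-migrate.sh run
+else
+    echo "Migration already running (PID $(cat "$LOCK_FILE"))" >&2
+    exit 1
+fi
+```
+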
+### Backup Safety
+
+Automatic backup creation before destructive operations:
+- Pre-rollback backups
+- Pre-reset backups
+- Backup validation
+- Checksums for integrity
+
+## Error Handling
+
+### Robust Error Detection
+
+Scripts include comprehensive error checking:
+- Database connectivity verification
+- File existence validation
+- Permission checking
+- SQL syntax validation
+
+### Graceful Recovery
+
+Automatic recovery mechanisms:
+- Transaction rollback on failure
+- Lock release on interruption
+- Temporary file cleanup
+- Error state recovery
+
+## Integration
+
+### CI/CD Integration
+
+Scripts are designed for automation:
+```bash
+# In CI/CD pipeline
+./scripts/databases/db.sh setup create --force --quiet
+./scripts/databases/db.sh migrate run --force
+./scripts/databases/db.sh utils check-integrity
+```
+
+### Monitoring Integration
+
+Easy integration with monitoring systems:
+```bash
+# Health check endpoint
+./scripts/databases/db.sh monitor health --format json
+
+# Metrics collection
+./scripts/databases/db.sh monitor performance --format csv
+```
+
+## Advanced Usage
+
+### Custom Migration Templates
+
+Create custom migration templates in `migration_templates/`:
+
+```sql
+-- migration_templates/add-audit-columns.sql
+-- Add audit columns to a table
+ALTER TABLE ${TABLE_NAME}
+ADD COLUMN created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ADD COLUMN updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ADD COLUMN created_by VARCHAR(255),
+ADD COLUMN updated_by VARCHAR(255);
+```
+
+### Scheduled Operations
+
+Set up automated database maintenance:
+```bash
+# Crontab entry for nightly optimization
+0 2 * * * cd /path/to/project && ./scripts/databases/db.sh utils optimize --quiet
+
+# Weekly backup
+0 0 * * 0 cd /path/to/project && ./scripts/databases/db.sh backup create --compress --quiet
+```
+
+### Performance Tuning
+
+Use monitoring data for optimization:
+```bash
+# Identify slow queries
+./scripts/databases/db.sh monitor slow-queries
+
+# Analyze index usage
+./scripts/databases/db.sh utils indexes
+
+# Check table statistics
+./scripts/databases/db.sh utils table-stats --table high_traffic_table
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Connection Errors**
+ ```bash
+ # Test connection
+   ./scripts/databases/db.sh utils connection-test
+
+   # Check database status
+   ./scripts/databases/db.sh status
+ ```
+
+2. **Migration Failures**
+ ```bash
+ # Check migration status
+   ./scripts/databases/db.sh migrate status
+
+   # Validate migrations
+   ./scripts/databases/db.sh migrate validate
+
+   # Rollback if needed
+   ./scripts/databases/db.sh migrate rollback --steps 1
+ ```
+
+3. **Performance Issues**
+ ```bash
+ # Check database health
+   ./scripts/databases/db.sh monitor health
+
+   # Analyze performance
+   ./scripts/databases/db.sh monitor performance
+
+   # Optimize database
+   ./scripts/databases/db.sh utils optimize
+ ```
+
+### Debug Mode
+
+Enable debug output for troubleshooting:
+```bash
+./scripts/databases/db.sh setup migrate --debug
+./scripts/databases/db.sh backup create --debug
+```
+
+### Log Files
+
+Scripts generate logs in the `logs/` directory:
+- `migration.log` - Migration operations
+- `backup.log` - Backup operations
+- `monitoring.log` - Monitoring data
+
+## Best Practices
+
+### Regular Maintenance
+
+1. **Daily**: Health checks and monitoring
+2. **Weekly**: Backups and cleanup
+3. **Monthly**: Full optimization and analysis
+
+### Development Workflow
+
+1. Create feature branch
+2. Generate migration: `./scripts/databases/db.sh migrate create --name feature_name`
+3. Test migration: `./scripts/databases/db.sh migrate dry-run`
+4. Run migration: `./scripts/databases/db.sh migrate run`
+5. Verify changes: `./scripts/databases/db.sh monitor health`
+
+### Production Deployment
+
+1. Backup before deployment: `./scripts/databases/db.sh backup create`
+2. Run migrations: `./scripts/databases/db.sh migrate run --env prod`
+3. Verify deployment: `./scripts/databases/db.sh monitor health --env prod`
+4. Monitor performance: `./scripts/databases/db.sh monitor performance --env prod`
+
+## Security Considerations
+
+### Environment Variables
+
+- Store sensitive data in `.env` files
+- Use different credentials for each environment
+- Regularly rotate database passwords
+- Limit database user privileges
+
+### Script Permissions
+
+```bash
+# Set appropriate permissions
+chmod 750 scripts/databases/db*.sh
+chown app:app scripts/databases/db*.sh
+```
+
+### Access Control
+
+- Limit script execution to authorized users
+- Use sudo for production operations
+- Audit script usage
+- Monitor database access
+
+## Support
+
+For issues or questions:
+1. Check the script help: `./scripts/databases/db.sh --help`
+2. Review the logs in the `logs/` directory
+3. Run diagnostics: `./scripts/databases/db.sh monitor health`
+4. Test connectivity: `./scripts/databases/db.sh utils connection-test`
+
+## Contributing
+
+To add new database management features:
+1. Follow the existing script structure
+2. Add comprehensive error handling
+3. Include help documentation
+4. Add safety checks for destructive operations
+5. Test with both PostgreSQL and SQLite
+6. Update this documentation
\ No newline at end of file
diff --git a/scripts/databases/db-backup.sh b/scripts/databases/db-backup.sh
new file mode 100755
index 0000000..c943c30
--- /dev/null
+++ b/scripts/databases/db-backup.sh
@@ -0,0 +1,538 @@
+#!/bin/bash
+
+# Database Backup and Restore Script
+# Provides convenient commands for database backup and restore operations
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# The script lives in scripts/databases/, so the project root is two levels up
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Default backup directory
+BACKUP_DIR="backups"
+DATE_FORMAT="%Y%m%d_%H%M%S"
+
+# Logging functions
+log() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+print_header() {
+ echo -e "${BLUE}=== $1 ===${NC}"
+}
+
+print_usage() {
+ echo "Database Backup and Restore Script"
+ echo
+ echo "Usage: $0 [options]"
+ echo
+ echo "Commands:"
+ echo " backup Create database backup"
+ echo " restore Restore database from backup"
+ echo " list List available backups"
+ echo " clean Clean old backups"
+ echo " export Export data to JSON/CSV"
+ echo " import Import data from JSON/CSV"
+ echo " clone Clone database to different name"
+ echo " compare Compare two databases"
+ echo
+ echo "Options:"
+ echo " --env ENV Environment (dev/prod) [default: dev]"
+ echo " --backup-dir DIR Backup directory [default: backups]"
+ echo " --file FILE Backup file path"
+ echo " --format FORMAT Backup format (sql/custom/tar) [default: sql]"
+ echo " --compress Compress backup file"
+ echo " --schema-only Backup schema only (no data)"
+ echo " --data-only Backup data only (no schema)"
+ echo " --tables TABLES Comma-separated list of tables to backup"
+ echo " --keep-days DAYS Keep backups for N days [default: 30]"
+ echo " --force Skip confirmations"
+ echo " --quiet Suppress verbose output"
+ echo
+ echo "Examples:"
+ echo " $0 backup # Create full backup"
+ echo " $0 backup --compress # Create compressed backup"
+ echo " $0 backup --schema-only # Backup schema only"
+ echo " $0 backup --tables users,content # Backup specific tables"
+ echo " $0 restore --file backup.sql # Restore from backup"
+ echo " $0 list # List backups"
+ echo " $0 clean --keep-days 7 # Clean old backups"
+ echo " $0 export --format json # Export to JSON"
+ echo " $0 clone --env prod # Clone to prod database"
+}
+
+# Check if .env file exists and load it
+load_env() {
+ if [ ! -f ".env" ]; then
+ log_error ".env file not found"
+ echo "Please run the database setup script first:"
+ echo " ./scripts/db-setup.sh setup"
+ exit 1
+ fi
+
+    # Load environment variables; `set -a` exports everything sourced, which,
+    # unlike `export $(grep ... | xargs)`, survives quoted values and spaces
+    set -a
+    # shellcheck disable=SC1091
+    . ./.env
+    set +a
+}
+
+# Parse database URL
+parse_database_url() {
+ if [[ $DATABASE_URL == postgresql://* ]] || [[ $DATABASE_URL == postgres://* ]]; then
+ DB_TYPE="postgresql"
+        DB_HOST=$(echo "$DATABASE_URL" | sed -n 's/.*@\([^:]*\):.*/\1/p')
+        DB_PORT=$(echo "$DATABASE_URL" | sed -n 's/.*:\([0-9]*\)\/.*/\1/p')
+        DB_NAME=$(echo "$DATABASE_URL" | sed -n 's/.*\/\([^?]*\).*/\1/p')
+        DB_USER=$(echo "$DATABASE_URL" | sed -n 's/.*\/\/\([^:]*\):.*/\1/p')
+        DB_PASS=$(echo "$DATABASE_URL" | sed -n 's/.*:\/\/[^:]*:\([^@]*\)@.*/\1/p')
+    elif [[ $DATABASE_URL == sqlite://* ]]; then
+        DB_TYPE="sqlite"
+        DB_FILE=$(echo "$DATABASE_URL" | sed 's/sqlite:\/\///')
+ else
+ log_error "Unsupported database URL format: $DATABASE_URL"
+ exit 1
+ fi
+}
+
+# Create backup directory
+setup_backup_dir() {
+ if [ ! -d "$BACKUP_DIR" ]; then
+ log "Creating backup directory: $BACKUP_DIR"
+ mkdir -p "$BACKUP_DIR"
+ fi
+}
+
+# Generate backup filename
+generate_backup_filename() {
+ local timestamp=$(date +"$DATE_FORMAT")
+ local env_suffix=""
+
+ if [ "$ENVIRONMENT" != "dev" ]; then
+ env_suffix="_${ENVIRONMENT}"
+ fi
+
+ local format_ext=""
+ case "$FORMAT" in
+ "sql") format_ext=".sql" ;;
+ "custom") format_ext=".dump" ;;
+ "tar") format_ext=".tar" ;;
+ esac
+
+ local compress_ext=""
+ if [ "$COMPRESS" = "true" ]; then
+ compress_ext=".gz"
+ fi
+
+ echo "${BACKUP_DIR}/${DB_NAME}_${timestamp}${env_suffix}${format_ext}${compress_ext}"
+}
+
+# Create PostgreSQL backup
+backup_postgresql() {
+ local backup_file="$1"
+ local pg_dump_args=()
+
+ # Add connection parameters
+ pg_dump_args+=("-h" "$DB_HOST")
+ pg_dump_args+=("-p" "$DB_PORT")
+ pg_dump_args+=("-U" "$DB_USER")
+ pg_dump_args+=("-d" "$DB_NAME")
+
+ # Add format options
+ case "$FORMAT" in
+ "sql")
+ pg_dump_args+=("--format=plain")
+ ;;
+ "custom")
+ pg_dump_args+=("--format=custom")
+ ;;
+ "tar")
+ pg_dump_args+=("--format=tar")
+ ;;
+ esac
+
+ # Add backup type options
+ if [ "$SCHEMA_ONLY" = "true" ]; then
+ pg_dump_args+=("--schema-only")
+ elif [ "$DATA_ONLY" = "true" ]; then
+ pg_dump_args+=("--data-only")
+ fi
+
+ # Add table selection
+ if [ -n "$TABLES" ]; then
+ IFS=',' read -ra TABLE_ARRAY <<< "$TABLES"
+ for table in "${TABLE_ARRAY[@]}"; do
+ pg_dump_args+=("--table=$table")
+ done
+ fi
+
+ # Add other options
+ pg_dump_args+=("--verbose")
+ pg_dump_args+=("--no-password")
+
+ # Set password environment variable
+ export PGPASSWORD="$DB_PASS"
+
+ log "Creating PostgreSQL backup: $backup_file"
+
+ if [ "$COMPRESS" = "true" ]; then
+ pg_dump "${pg_dump_args[@]}" | gzip > "$backup_file"
+ else
+ pg_dump "${pg_dump_args[@]}" > "$backup_file"
+ fi
+
+ unset PGPASSWORD
+}
+
+# Create SQLite backup
+backup_sqlite() {
+ local backup_file="$1"
+
+ if [ ! -f "$DB_FILE" ]; then
+ log_error "SQLite database file not found: $DB_FILE"
+ exit 1
+ fi
+
+ log "Creating SQLite backup: $backup_file"
+
+ if [ "$COMPRESS" = "true" ]; then
+ sqlite3 "$DB_FILE" ".dump" | gzip > "$backup_file"
+ else
+ sqlite3 "$DB_FILE" ".dump" > "$backup_file"
+ fi
+}
+
+# Restore PostgreSQL backup
+restore_postgresql() {
+ local backup_file="$1"
+
+ if [ ! -f "$backup_file" ]; then
+ log_error "Backup file not found: $backup_file"
+ exit 1
+ fi
+
+ if [ "$FORCE" != "true" ]; then
+ echo -n "This will restore the database '$DB_NAME'. Continue? (y/N): "
+ read -r confirm
+ if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
+ log "Restore cancelled"
+ exit 0
+ fi
+ fi
+
+ export PGPASSWORD="$DB_PASS"
+
+ log "Restoring PostgreSQL backup: $backup_file"
+
+ if [[ "$backup_file" == *.gz ]]; then
+ gunzip -c "$backup_file" | psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME"
+ else
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" < "$backup_file"
+ fi
+
+ unset PGPASSWORD
+}
+
+# Restore SQLite backup
+restore_sqlite() {
+ local backup_file="$1"
+
+ if [ ! -f "$backup_file" ]; then
+ log_error "Backup file not found: $backup_file"
+ exit 1
+ fi
+
+ if [ "$FORCE" != "true" ]; then
+ echo -n "This will restore the database '$DB_FILE'. Continue? (y/N): "
+ read -r confirm
+ if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
+ log "Restore cancelled"
+ exit 0
+ fi
+ fi
+
+ log "Restoring SQLite backup: $backup_file"
+
+ # Create backup of existing database
+ if [ -f "$DB_FILE" ]; then
+ local existing_backup="${DB_FILE}.backup.$(date +"$DATE_FORMAT")"
+ cp "$DB_FILE" "$existing_backup"
+ log "Created backup of existing database: $existing_backup"
+ fi
+
+ if [[ "$backup_file" == *.gz ]]; then
+ gunzip -c "$backup_file" | sqlite3 "$DB_FILE"
+ else
+ sqlite3 "$DB_FILE" < "$backup_file"
+ fi
+}
+
+# List available backups
+list_backups() {
+ print_header "Available Backups"
+
+ if [ ! -d "$BACKUP_DIR" ]; then
+ log_warn "No backup directory found: $BACKUP_DIR"
+ return
+ fi
+
+ if [ ! "$(ls -A "$BACKUP_DIR")" ]; then
+ log_warn "No backups found in $BACKUP_DIR"
+ return
+ fi
+
+ echo "Format: filename | size | date"
+ echo "----------------------------------------"
+
+ for backup in "$BACKUP_DIR"/*; do
+ if [ -f "$backup" ]; then
+ local filename=$(basename "$backup")
+ local size=$(du -h "$backup" | cut -f1)
+ local date=$(date -r "$backup" '+%Y-%m-%d %H:%M:%S')
+ echo "$filename | $size | $date"
+ fi
+ done
+}
+
+# Clean old backups
+clean_backups() {
+ print_header "Cleaning Old Backups"
+
+ if [ ! -d "$BACKUP_DIR" ]; then
+ log_warn "No backup directory found: $BACKUP_DIR"
+ return
+ fi
+
+ log "Removing backups older than $KEEP_DAYS days..."
+
+ local deleted=0
+    while IFS= read -r -d '' backup; do
+        if [ -f "$backup" ]; then
+            local filename=$(basename "$backup")
+            rm "$backup"
+            log "Deleted: $filename"
+            # Note: ((deleted++)) would return 1 when deleted is 0 and abort
+            # the script under `set -e`; the arithmetic assignment is safe
+            deleted=$((deleted + 1))
+        fi
+    done < <(find "$BACKUP_DIR" -type f \( -name "*.sql*" -o -name "*.dump*" -o -name "*.tar*" \) -mtime "+$KEEP_DAYS" -print0)
+
+ log "Deleted $deleted old backup files"
+}
+
+# Export data to JSON/CSV
+export_data() {
+ print_header "Exporting Data"
+
+ local export_file="${BACKUP_DIR}/export_$(date +"$DATE_FORMAT").json"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ log "Exporting PostgreSQL data to JSON..."
+ # This would require a custom script or tool
+ log_warn "JSON export for PostgreSQL not yet implemented"
+ log "Consider using pg_dump with --data-only and custom processing"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log "Exporting SQLite data to JSON..."
+ # This would require a custom script or tool
+ log_warn "JSON export for SQLite not yet implemented"
+ log "Consider using sqlite3 with custom queries"
+ fi
+}
+
+# Clone database
+clone_database() {
+ print_header "Cloning Database"
+
+ local timestamp=$(date +"$DATE_FORMAT")
+ local temp_backup="${BACKUP_DIR}/temp_clone_${timestamp}.sql"
+
+ # Create temporary backup
+ log "Creating temporary backup for cloning..."
+ COMPRESS="false"
+ FORMAT="sql"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ backup_postgresql "$temp_backup"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ backup_sqlite "$temp_backup"
+ fi
+
+ # TODO: Implement actual cloning logic
+ # This would involve creating a new database and restoring the backup
+ log_warn "Database cloning not yet fully implemented"
+ log "Temporary backup created: $temp_backup"
+ log "Manual steps required to complete cloning"
+}
+
+# Parse command line arguments
+COMMAND=""
+ENVIRONMENT="dev"
+FORMAT="sql"
+COMPRESS="false"
+SCHEMA_ONLY="false"
+DATA_ONLY="false"
+TABLES=""
+BACKUP_FILE=""
+KEEP_DAYS=30
+FORCE="false"
+QUIET="false"
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --env)
+ ENVIRONMENT="$2"
+ shift 2
+ ;;
+ --backup-dir)
+ BACKUP_DIR="$2"
+ shift 2
+ ;;
+ --file)
+ BACKUP_FILE="$2"
+ shift 2
+ ;;
+ --format)
+ FORMAT="$2"
+ shift 2
+ ;;
+ --compress)
+ COMPRESS="true"
+ shift
+ ;;
+ --schema-only)
+ SCHEMA_ONLY="true"
+ shift
+ ;;
+ --data-only)
+ DATA_ONLY="true"
+ shift
+ ;;
+ --tables)
+ TABLES="$2"
+ shift 2
+ ;;
+ --keep-days)
+ KEEP_DAYS="$2"
+ shift 2
+ ;;
+ --force)
+ FORCE="true"
+ shift
+ ;;
+ --quiet)
+ QUIET="true"
+ shift
+ ;;
+ -h|--help)
+ print_usage
+ exit 0
+ ;;
+ *)
+ if [ -z "$COMMAND" ]; then
+ COMMAND="$1"
+ else
+ log_error "Unknown option: $1"
+ print_usage
+ exit 1
+ fi
+ shift
+ ;;
+ esac
+done
+
+# Set environment variable
+export ENVIRONMENT="$ENVIRONMENT"
+
+# Validate command
+if [ -z "$COMMAND" ]; then
+ print_usage
+ exit 1
+fi
+
+# Check if we're in the right directory
+if [ ! -f "Cargo.toml" ]; then
+ log_error "Please run this script from the project root directory"
+ exit 1
+fi
+
+# Load environment and parse database URL
+load_env
+parse_database_url
+
+# Setup backup directory
+setup_backup_dir
+
+# Execute command
+case "$COMMAND" in
+ "backup")
+ print_header "Creating Database Backup"
+
+ if [ -z "$BACKUP_FILE" ]; then
+ BACKUP_FILE=$(generate_backup_filename)
+ fi
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ backup_postgresql "$BACKUP_FILE"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ backup_sqlite "$BACKUP_FILE"
+ fi
+
+ # "local" is only valid inside functions; this runs at top level
+ file_size=$(du -h "$BACKUP_FILE" | cut -f1)
+ log "Backup created successfully: $BACKUP_FILE ($file_size)"
+ ;;
+ "restore")
+ print_header "Restoring Database"
+
+ if [ -z "$BACKUP_FILE" ]; then
+ log_error "Please specify backup file with --file option"
+ exit 1
+ fi
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ restore_postgresql "$BACKUP_FILE"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ restore_sqlite "$BACKUP_FILE"
+ fi
+
+ log "Database restored successfully"
+ ;;
+ "list")
+ list_backups
+ ;;
+ "clean")
+ clean_backups
+ ;;
+ "export")
+ export_data
+ ;;
+ "import")
+ log_warn "Import functionality not yet implemented"
+ ;;
+ "clone")
+ clone_database
+ ;;
+ "compare")
+ log_warn "Database comparison not yet implemented"
+ ;;
+ *)
+ log_error "Unknown command: $COMMAND"
+ print_usage
+ exit 1
+ ;;
+esac
+
+log "Operation completed successfully"
diff --git a/scripts/databases/db-migrate.sh b/scripts/databases/db-migrate.sh
new file mode 100755
index 0000000..93080a6
--- /dev/null
+++ b/scripts/databases/db-migrate.sh
@@ -0,0 +1,927 @@
+#!/bin/bash
+
+# Database Migration Management Script
+# Advanced migration tools for schema evolution and data management
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+BOLD='\033[1m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# The script lives in scripts/databases/, so the project root is two levels up
+PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Migration configuration
+MIGRATIONS_DIR="migrations"
+MIGRATION_TABLE="__migrations"
+MIGRATION_LOCK_TABLE="__migration_locks"
+MIGRATION_TEMPLATE_DIR="migration_templates"
+ROLLBACK_DIR="rollbacks"
+
+# Logging functions
+log() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+log_success() {
+ echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+log_debug() {
+ if [ "$DEBUG" = "true" ]; then
+ echo -e "${CYAN}[DEBUG]${NC} $1"
+ fi
+}
+
+print_header() {
+ echo -e "${BLUE}${BOLD}=== $1 ===${NC}"
+}
+
+print_subheader() {
+ echo -e "${CYAN}--- $1 ---${NC}"
+}
+
+print_usage() {
+ echo "Database Migration Management Script"
+ echo
+ echo "Usage: $0 [options]"
+ echo
+ echo "Commands:"
+ echo " status Show migration status"
+ echo " pending List pending migrations"
+ echo " applied List applied migrations"
+ echo " migrate Run pending migrations"
+ echo " rollback Rollback migrations"
+ echo " create Create new migration"
+ echo " generate Generate migration from schema diff"
+ echo " validate Validate migration files"
+ echo " dry-run Show what would be migrated"
+ echo " force Force migration state"
+ echo " repair Repair migration table"
+ echo " baseline Set migration baseline"
+ echo " history Show migration history"
+ echo " schema-dump Dump current schema"
+ echo " data-migrate Migrate data between schemas"
+ echo " template Manage migration templates"
+ echo
+ echo "Options:"
+ echo " --env ENV Environment (dev/prod) [default: dev]"
+ echo " --version VERSION Target migration version"
+ echo " --steps N Number of migration steps"
+ echo " --name NAME Migration name (for create command)"
+ echo " --type TYPE Migration type (schema/data/both) [default: schema]"
+ echo " --table TABLE Target table name"
+ echo " --template TEMPLATE Migration template name"
+ echo " --dry-run Show changes without applying"
+ echo " --force Force operation without confirmation"
+ echo " --debug Enable debug output"
+ echo " --quiet Suppress verbose output"
+ echo " --batch-size N Batch size for data migrations [default: 1000]"
+ echo " --timeout N Migration timeout in seconds [default: 300]"
+ echo
+ echo "Examples:"
+ echo " $0 status # Show migration status"
+ echo " $0 migrate # Run all pending migrations"
+ echo " $0 migrate --version 003 # Migrate to specific version"
+ echo " $0 rollback --steps 1 # Rollback last migration"
+ echo " $0 create --name add_user_preferences # Create new migration"
+ echo " $0 create --name migrate_users --type data # Create data migration"
+ echo " $0 dry-run # Preview pending migrations"
+ echo " $0 validate # Validate all migrations"
+ echo " $0 baseline --version 001 # Set baseline version"
+ echo
+ echo "Migration Templates:"
+ echo " create-table Create new table"
+ echo " alter-table Modify existing table"
+ echo " add-column Add column to table"
+ echo " drop-column Drop column from table"
+ echo " add-index Add database index"
+ echo " add-constraint Add table constraint"
+ echo " data-migration Migrate data between schemas"
+ echo " seed-data Insert seed data"
+}
+
+# Check if .env file exists and load it
+load_env() {
+ if [ ! -f ".env" ]; then
+ log_error ".env file not found"
+ echo "Please run the database setup script first:"
+ echo " ./scripts/db-setup.sh setup"
+ exit 1
+ fi
+
+ # Export every variable defined in .env; sourcing with set -a handles
+ # quoted values and spaces that "export $(grep ... | xargs)" would mangle
+ set -a
+ . ./.env
+ set +a
+}
+
+# Parse database URL
+parse_database_url() {
+ if [[ $DATABASE_URL == postgresql://* ]] || [[ $DATABASE_URL == postgres://* ]]; then
+ DB_TYPE="postgresql"
+ DB_HOST=$(echo $DATABASE_URL | sed -n 's/.*@\([^:]*\):.*/\1/p')
+ DB_PORT=$(echo $DATABASE_URL | sed -n 's/.*:\([0-9]*\)\/.*/\1/p')
+ DB_NAME=$(echo $DATABASE_URL | sed -n 's/.*\/\([^?]*\).*/\1/p')
+ DB_USER=$(echo $DATABASE_URL | sed -n 's/.*\/\/\([^:]*\):.*/\1/p')
+ DB_PASS=$(echo $DATABASE_URL | sed -n 's/.*:\/\/[^:]*:\([^@]*\)@.*/\1/p')
+ elif [[ $DATABASE_URL == sqlite://* ]]; then
+ DB_TYPE="sqlite"
+ DB_FILE=$(echo $DATABASE_URL | sed 's/sqlite:\/\///')
+ else
+ log_error "Unsupported database URL format: $DATABASE_URL"
+ exit 1
+ fi
+}
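+
+ # Worked example: DATABASE_URL=postgresql://dev:dev@localhost:5432/rustelo_dev
+ # parses to DB_HOST=localhost, DB_PORT=5432, DB_NAME=rustelo_dev, DB_USER=dev,
+ # DB_PASS=dev. Note the sed patterns assume the user:pass@host:port/dbname
+ # shape; URLs without an explicit password or port will not parse.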
+
+# Execute SQL query
+execute_sql() {
+ local query="$1"
+ local capture_output="${2:-false}"
+
+ log_debug "Executing SQL: $query"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ export PGPASSWORD="$DB_PASS"
+ if [ "$capture_output" = "true" ]; then
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -A -c "$query" 2>/dev/null
+ else
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "$query" 2>/dev/null
+ fi
+ unset PGPASSWORD
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ "$capture_output" = "true" ]; then
+ sqlite3 "$DB_FILE" "$query" 2>/dev/null
+ else
+ sqlite3 "$DB_FILE" "$query" 2>/dev/null
+ fi
+ fi
+}
+
+# Execute SQL file
+execute_sql_file() {
+ local file="$1"
+ local ignore_errors="${2:-false}"
+
+ if [ ! -f "$file" ]; then
+ log_error "SQL file not found: $file"
+ return 1
+ fi
+
+ log_debug "Executing SQL file: $file"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ export PGPASSWORD="$DB_PASS"
+ if [ "$ignore_errors" = "true" ]; then
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -f "$file" 2>/dev/null || true
+ else
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -f "$file"
+ fi
+ unset PGPASSWORD
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ "$ignore_errors" = "true" ]; then
+ sqlite3 "$DB_FILE" ".read $file" 2>/dev/null || true
+ else
+ sqlite3 "$DB_FILE" ".read $file"
+ fi
+ fi
+}
+
+# Initialize migration system
+init_migration_system() {
+ log_debug "Initializing migration system"
+
+ # Create migrations directory
+ mkdir -p "$MIGRATIONS_DIR"
+ mkdir -p "$ROLLBACK_DIR"
+ mkdir -p "$MIGRATION_TEMPLATE_DIR"
+
+ # Create migration tracking table
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ CREATE TABLE IF NOT EXISTS $MIGRATION_TABLE (
+ id SERIAL PRIMARY KEY,
+ version VARCHAR(50) NOT NULL UNIQUE,
+ name VARCHAR(255) NOT NULL,
+ type VARCHAR(20) NOT NULL DEFAULT 'schema',
+ applied_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ applied_by VARCHAR(100) DEFAULT USER,
+ execution_time_ms INTEGER DEFAULT 0,
+ checksum VARCHAR(64),
+ success BOOLEAN DEFAULT TRUE
+ );
+ " >/dev/null 2>&1
+
+ execute_sql "
+ CREATE TABLE IF NOT EXISTS $MIGRATION_LOCK_TABLE (
+ id INTEGER PRIMARY KEY DEFAULT 1,
+ is_locked BOOLEAN DEFAULT FALSE,
+ locked_by VARCHAR(100),
+ locked_at TIMESTAMP,
+ process_id INTEGER,
+ CONSTRAINT single_lock CHECK (id = 1)
+ );
+ " >/dev/null 2>&1
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ execute_sql "
+ CREATE TABLE IF NOT EXISTS $MIGRATION_TABLE (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ version TEXT NOT NULL UNIQUE,
+ name TEXT NOT NULL,
+ type TEXT NOT NULL DEFAULT 'schema',
+ applied_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+ applied_by TEXT DEFAULT 'system',
+ execution_time_ms INTEGER DEFAULT 0,
+ checksum TEXT,
+ success BOOLEAN DEFAULT 1
+ );
+ " >/dev/null 2>&1
+
+ execute_sql "
+ CREATE TABLE IF NOT EXISTS $MIGRATION_LOCK_TABLE (
+ id INTEGER PRIMARY KEY DEFAULT 1,
+ is_locked BOOLEAN DEFAULT 0,
+ locked_by TEXT,
+ locked_at DATETIME,
+ process_id INTEGER
+ );
+ " >/dev/null 2>&1
+ fi
+
+ # Insert the initial lock record (upsert syntax differs between engines)
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "INSERT INTO $MIGRATION_LOCK_TABLE (id, is_locked) VALUES (1, false) ON CONFLICT (id) DO NOTHING;" >/dev/null 2>&1
+ else
+ execute_sql "INSERT OR IGNORE INTO $MIGRATION_LOCK_TABLE (id, is_locked) VALUES (1, 0);" >/dev/null 2>&1
+ fi
+}
+
+# Acquire migration lock
+acquire_migration_lock() {
+ local process_id=$$
+ local lock_holder=$(whoami)
+
+ log_debug "Acquiring migration lock"
+
+ # Check if already locked
+ local is_locked=$(execute_sql "SELECT is_locked FROM $MIGRATION_LOCK_TABLE WHERE id = 1;" true)
+
+ if [ "$is_locked" = "true" ] || [ "$is_locked" = "1" ]; then
+ local locked_by=$(execute_sql "SELECT locked_by FROM $MIGRATION_LOCK_TABLE WHERE id = 1;" true)
+ local locked_at=$(execute_sql "SELECT locked_at FROM $MIGRATION_LOCK_TABLE WHERE id = 1;" true)
+ log_error "Migration system is locked by $locked_by at $locked_at"
+ return 1
+ fi
+
+ # Acquire lock
+ execute_sql "
+ UPDATE $MIGRATION_LOCK_TABLE
+ SET is_locked = true, locked_by = '$lock_holder', locked_at = CURRENT_TIMESTAMP, process_id = $process_id
+ WHERE id = 1;
+ " >/dev/null 2>&1
+
+ log_debug "Migration lock acquired by $lock_holder (PID: $process_id)"
+}
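+
+ # Typical lock lifecycle (mirrors run_migrations/rollback_migrations below):
+ #   acquire_migration_lock || exit 1
+ #   trap 'release_migration_lock; exit 1' INT TERM EXIT
+ #   ... apply or roll back migrations ...
+ #   release_migration_lock
+ #   trap - INT TERM EXIT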
+
+# Release migration lock
+release_migration_lock() {
+ log_debug "Releasing migration lock"
+
+ execute_sql "
+ UPDATE $MIGRATION_LOCK_TABLE
+ SET is_locked = false, locked_by = NULL, locked_at = NULL, process_id = NULL
+ WHERE id = 1;
+ " >/dev/null 2>&1
+}
+
+# Get migration files
+get_migration_files() {
+ find "$MIGRATIONS_DIR" -name "*.sql" -type f | sort
+}
+
+# Get applied migrations
+get_applied_migrations() {
+ execute_sql "SELECT version FROM $MIGRATION_TABLE ORDER BY version;" true
+}
+
+# Get pending migrations
+get_pending_migrations() {
+ local applied_migrations=$(get_applied_migrations)
+ local all_migrations=$(get_migration_files)
+
+ for migration_file in $all_migrations; do
+ local version=$(basename "$migration_file" .sql | cut -d'_' -f1)
+ if ! echo "$applied_migrations" | grep -q "^$version$"; then
+ echo "$migration_file"
+ fi
+ done
+}
+
+# Calculate file checksum
+calculate_checksum() {
+ local file="$1"
+ if command -v sha256sum >/dev/null 2>&1; then
+ sha256sum "$file" | cut -d' ' -f1
+ elif command -v shasum >/dev/null 2>&1; then
+ shasum -a 256 "$file" | cut -d' ' -f1
+ elif command -v md5 >/dev/null 2>&1; then
+ # macOS ships md5 instead of md5sum; -q prints just the digest
+ md5 -q "$file"
+ else
+ # Last resort: md5 is weaker than sha256, but this is only change detection
+ md5sum "$file" | cut -d' ' -f1
+ fi
+}
+
+# Show migration status
+show_migration_status() {
+ print_header "Migration Status"
+
+ local applied_count=$(execute_sql "SELECT COUNT(*) FROM $MIGRATION_TABLE;" true)
+ local pending_migrations=$(get_pending_migrations)
+ local pending_count=$(echo "$pending_migrations" | wc -l)
+
+ if [ -z "$pending_migrations" ]; then
+ pending_count=0
+ fi
+
+ log "Applied migrations: $applied_count"
+ log "Pending migrations: $pending_count"
+
+ if [ "$applied_count" -gt "0" ]; then
+ echo
+ print_subheader "Last Applied Migration"
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ SELECT version, name, applied_at, execution_time_ms
+ FROM $MIGRATION_TABLE
+ ORDER BY applied_at DESC
+ LIMIT 1;
+ "
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ execute_sql "
+ SELECT version, name, applied_at, execution_time_ms
+ FROM $MIGRATION_TABLE
+ ORDER BY applied_at DESC
+ LIMIT 1;
+ "
+ fi
+ fi
+
+ if [ "$pending_count" -gt "0" ]; then
+ echo
+ print_subheader "Pending Migrations"
+ for migration in $pending_migrations; do
+ local version=$(basename "$migration" .sql | cut -d'_' -f1)
+ local name=$(basename "$migration" .sql | cut -d'_' -f2-)
+ echo " $version - $name"
+ done
+ fi
+}
+
+# List applied migrations
+list_applied_migrations() {
+ print_header "Applied Migrations"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ SELECT
+ version,
+ name,
+ type,
+ applied_at,
+ applied_by,
+ execution_time_ms || ' ms' as duration,
+ CASE WHEN success THEN '✓' ELSE '✗' END as status
+ FROM $MIGRATION_TABLE
+ ORDER BY version;
+ "
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ execute_sql "
+ SELECT
+ version,
+ name,
+ type,
+ applied_at,
+ applied_by,
+ execution_time_ms || ' ms' as duration,
+ CASE WHEN success THEN '✓' ELSE '✗' END as status
+ FROM $MIGRATION_TABLE
+ ORDER BY version;
+ "
+ fi
+}
+
+# List pending migrations
+list_pending_migrations() {
+ print_header "Pending Migrations"
+
+ local pending_migrations=$(get_pending_migrations)
+
+ if [ -z "$pending_migrations" ]; then
+ log_success "No pending migrations"
+ return
+ fi
+
+ for migration in $pending_migrations; do
+ local version=$(basename "$migration" .sql | cut -d'_' -f1)
+ local name=$(basename "$migration" .sql | cut -d'_' -f2-)
+ local size=$(du -h "$migration" | cut -f1)
+ echo " $version - $name ($size)"
+ done
+}
+
+# Run migrations
+run_migrations() {
+ print_header "Running Migrations"
+
+ local target_version="$1"
+ local pending_migrations=$(get_pending_migrations)
+
+ if [ -z "$pending_migrations" ]; then
+ log_success "No pending migrations to run"
+ return
+ fi
+
+ # Acquire lock
+ if ! acquire_migration_lock; then
+ exit 1
+ fi
+
+ # Set up cleanup trap
+ trap 'release_migration_lock; exit 1' INT TERM EXIT
+
+ local migration_count=0
+ local success_count=0
+
+ for migration_file in $pending_migrations; do
+ local version=$(basename "$migration_file" .sql | cut -d'_' -f1)
+ local name=$(basename "$migration_file" .sql | cut -d'_' -f2-)
+
+ # Check if we should stop at target version
+ if [ -n "$target_version" ] && [ "$version" \> "$target_version" ]; then
+ log "Stopping at target version $target_version"
+ break
+ fi
+
+ # $((...)) instead of ((var++)) so a zero starting value does not trip set -e
+ migration_count=$((migration_count + 1))
+
+ log "Running migration $version: $name"
+
+ if [ "$DRY_RUN" = "true" ]; then
+ echo "Would execute: $migration_file"
+ continue
+ fi
+
+ local start_time=$(date +%s%3N) # millisecond precision needs GNU date; BSD date prints "3N" literally
+ local checksum=$(calculate_checksum "$migration_file")
+
+ # Execute migration
+ if execute_sql_file "$migration_file"; then
+ local end_time=$(date +%s%3N)
+ local execution_time=$((end_time - start_time))
+
+ # Record successful migration
+ execute_sql "
+ INSERT INTO $MIGRATION_TABLE (version, name, type, execution_time_ms, checksum, success)
+ VALUES ('$version', '$name', 'schema', $execution_time, '$checksum', true);
+ " >/dev/null 2>&1
+
+ log_success "Migration $version completed in ${execution_time}ms"
+ success_count=$((success_count + 1))
+ else
+ local end_time=$(date +%s%3N)
+ local execution_time=$((end_time - start_time))
+
+ # Record failed migration
+ execute_sql "
+ INSERT INTO $MIGRATION_TABLE (version, name, type, execution_time_ms, checksum, success)
+ VALUES ('$version', '$name', 'schema', $execution_time, '$checksum', false);
+ " >/dev/null 2>&1
+
+ log_error "Migration $version failed"
+ success=false
+ break
+ fi
+ done
+
+ # Release lock
+ release_migration_lock
+ trap - INT TERM EXIT
+
+ if [ "$DRY_RUN" = "true" ]; then
+ log "Dry run completed. Would execute $migration_count migrations."
+ else
+ log "Migration run completed. $success_count/$migration_count migrations successful."
+ fi
+}
+
+# Rollback migrations
+rollback_migrations() {
+ print_header "Rolling Back Migrations"
+
+ local steps="${1:-1}"
+
+ if [ "$steps" -le 0 ]; then
+ log_error "Invalid number of steps: $steps"
+ return 1
+ fi
+
+ # Get last N applied migrations
+ local migrations_to_rollback
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ migrations_to_rollback=$(execute_sql "
+ SELECT version FROM $MIGRATION_TABLE
+ WHERE success = true
+ ORDER BY applied_at DESC
+ LIMIT $steps;
+ " true)
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ migrations_to_rollback=$(execute_sql "
+ SELECT version FROM $MIGRATION_TABLE
+ WHERE success = 1
+ ORDER BY applied_at DESC
+ LIMIT $steps;
+ " true)
+ fi
+
+ if [ -z "$migrations_to_rollback" ]; then
+ log_warn "No migrations to rollback"
+ return
+ fi
+
+ if [ "$FORCE" != "true" ]; then
+ echo -n "This will rollback $steps migration(s). Continue? (y/N): "
+ read -r confirm
+ if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
+ log "Rollback cancelled"
+ return
+ fi
+ fi
+
+ # Acquire lock
+ if ! acquire_migration_lock; then
+ exit 1
+ fi
+
+ # Set up cleanup trap
+ trap 'release_migration_lock; exit 1' INT TERM EXIT
+
+ local rollback_count=0
+
+ for version in $migrations_to_rollback; do
+ local rollback_file="$ROLLBACK_DIR/rollback_${version}.sql"
+
+ if [ -f "$rollback_file" ]; then
+ log "Rolling back migration $version"
+
+ if [ "$DRY_RUN" = "true" ]; then
+ echo "Would execute rollback: $rollback_file"
+ else
+ if execute_sql_file "$rollback_file"; then
+ # Remove from migration table
+ execute_sql "DELETE FROM $MIGRATION_TABLE WHERE version = '$version';" >/dev/null 2>&1
+ log_success "Rollback $version completed"
+ rollback_count=$((rollback_count + 1))
+ else
+ log_error "Rollback $version failed"
+ break
+ fi
+ fi
+ else
+ log_warn "Rollback file not found for migration $version: $rollback_file"
+ log_warn "Manual rollback required"
+ fi
+ done
+
+ # Release lock
+ release_migration_lock
+ trap - INT TERM EXIT
+
+ if [ "$DRY_RUN" = "true" ]; then
+ log "Dry run completed. Would rollback $rollback_count migrations."
+ else
+ log "Rollback completed. $rollback_count migrations rolled back."
+ fi
+}
+
+# Create new migration
+create_migration() {
+ local migration_name="$1"
+ local migration_type="${2:-schema}"
+ local template_name="$3"
+
+ if [ -z "$migration_name" ]; then
+ log_error "Migration name is required"
+ return 1
+ fi
+
+ # Generate version number
+ local version=$(date +%Y%m%d%H%M%S)
+ local migration_file="$MIGRATIONS_DIR/${version}_${migration_name}.sql"
+ local rollback_file="$ROLLBACK_DIR/rollback_${version}.sql"
+
+ log "Creating migration: $migration_file"
+
+ # Create migration file from template
+ if [ -n "$template_name" ] && [ -f "$MIGRATION_TEMPLATE_DIR/$template_name.sql" ]; then
+ cp "$MIGRATION_TEMPLATE_DIR/$template_name.sql" "$migration_file"
+ log "Created migration from template: $template_name"
+ else
+ # Create basic migration template
+ cat > "$migration_file" << EOF
+-- Migration: $migration_name
+-- Type: $migration_type
+-- Created: $(date)
+-- Description: Add your migration description here
+
+-- Add your migration SQL here
+-- Example:
+-- CREATE TABLE example_table (
+-- id SERIAL PRIMARY KEY,
+-- name VARCHAR(255) NOT NULL,
+-- created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+-- );
+
+EOF
+ fi
+
+ # Create rollback file
+ cat > "$rollback_file" << EOF
+-- Rollback: $migration_name
+-- Version: $version
+-- Created: $(date)
+-- Description: Add your rollback description here
+
+-- Add your rollback SQL here
+-- Example:
+-- DROP TABLE IF EXISTS example_table;
+
+EOF
+
+ log_success "Migration files created:"
+ log " Migration: $migration_file"
+ log " Rollback: $rollback_file"
+ log ""
+ log "Next steps:"
+ log " 1. Edit the migration file with your changes"
+ log " 2. Edit the rollback file with reverse operations"
+ log " 3. Run: $0 validate"
+ log " 4. Run: $0 migrate"
+}
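+
+ # For example, "create --name add_user_preferences" run at 2024-03-15 14:30:00
+ # would produce (timestamp illustrative):
+ #   migrations/20240315143000_add_user_preferences.sql
+ #   rollbacks/rollback_20240315143000.sql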
+
+# Validate migration files
+validate_migrations() {
+ print_header "Validating Migrations"
+
+ local migration_files=$(get_migration_files)
+ local validation_errors=0
+
+ for migration_file in $migration_files; do
+ local version=$(basename "$migration_file" .sql | cut -d'_' -f1)
+ local name=$(basename "$migration_file" .sql | cut -d'_' -f2-)
+
+ log_debug "Validating migration: $version - $name"
+
+ # Check file exists and is readable
+ if [ ! -r "$migration_file" ]; then
+ log_error "Migration file not readable: $migration_file"
+ # $((...)) instead of ((var++)): the first increment from 0 would abort under set -e
+ validation_errors=$((validation_errors + 1))
+ continue
+ fi
+
+ # Check file is not empty
+ if [ ! -s "$migration_file" ]; then
+ log_warn "Migration file is empty: $migration_file"
+ fi
+
+ # Check for rollback file
+ local rollback_file="$ROLLBACK_DIR/rollback_${version}.sql"
+ if [ ! -f "$rollback_file" ]; then
+ log_warn "Rollback file missing: $rollback_file"
+ fi
+
+ # Basic SQL syntax check: psql has no --dry-run flag, so wrap the file in a
+ # transaction that is always rolled back. Note the statements do execute
+ # before the rollback, so only use this against a disposable database.
+ if [ "$DB_TYPE" = "postgresql" ] && command -v psql >/dev/null 2>&1; then
+ export PGPASSWORD="$DB_PASS"
+ if ! (echo "BEGIN;"; cat "$migration_file"; echo "ROLLBACK;") | psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -v ON_ERROR_STOP=1 >/dev/null 2>&1; then
+ log_warn "Potential SQL syntax issues in: $migration_file"
+ fi
+ unset PGPASSWORD
+ fi
+ done
+
+ if [ $validation_errors -eq 0 ]; then
+ log_success "All migrations validated successfully"
+ else
+ log_error "Found $validation_errors validation errors"
+ return 1
+ fi
+}
+
+# Show what would be migrated (dry run)
+show_migration_preview() {
+ print_header "Migration Preview (Dry Run)"
+
+ local pending_migrations=$(get_pending_migrations)
+
+ if [ -z "$pending_migrations" ]; then
+ log_success "No pending migrations"
+ return
+ fi
+
+ log "The following migrations would be executed:"
+ echo
+
+ for migration_file in $pending_migrations; do
+ local version=$(basename "$migration_file" .sql | cut -d'_' -f1)
+ local name=$(basename "$migration_file" .sql | cut -d'_' -f2-)
+
+ print_subheader "Migration $version: $name"
+
+ # Show first few lines of migration
+ head -20 "$migration_file" | grep -v "^--" | grep -v "^$" | head -10
+
+ local total_lines=$(wc -l < "$migration_file")
+ if [ "$total_lines" -gt 20 ]; then
+ echo " ... (truncated, $total_lines total lines)"
+ fi
+ echo
+ done
+}
+
+# Parse command line arguments
+COMMAND=""
+ENVIRONMENT="dev"
+VERSION=""
+STEPS=""
+MIGRATION_NAME=""
+MIGRATION_TYPE="schema"
+TABLE_NAME=""
+TEMPLATE_NAME=""
+DRY_RUN="false"
+FORCE="false"
+DEBUG="false"
+QUIET="false"
+BATCH_SIZE=1000
+TIMEOUT=300
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --env)
+ ENVIRONMENT="$2"
+ shift 2
+ ;;
+ --version)
+ VERSION="$2"
+ shift 2
+ ;;
+ --steps)
+ STEPS="$2"
+ shift 2
+ ;;
+ --name)
+ MIGRATION_NAME="$2"
+ shift 2
+ ;;
+ --type)
+ MIGRATION_TYPE="$2"
+ shift 2
+ ;;
+ --table)
+ TABLE_NAME="$2"
+ shift 2
+ ;;
+ --template)
+ TEMPLATE_NAME="$2"
+ shift 2
+ ;;
+ --dry-run)
+ DRY_RUN="true"
+ shift
+ ;;
+ --force)
+ FORCE="true"
+ shift
+ ;;
+ --debug)
+ DEBUG="true"
+ shift
+ ;;
+ --quiet)
+ QUIET="true"
+ shift
+ ;;
+ --batch-size)
+ BATCH_SIZE="$2"
+ shift 2
+ ;;
+ --timeout)
+ TIMEOUT="$2"
+ shift 2
+ ;;
+ -h|--help)
+ print_usage
+ exit 0
+ ;;
+ *)
+ if [ -z "$COMMAND" ]; then
+ COMMAND="$1"
+ else
+ log_error "Unknown option: $1"
+ print_usage
+ exit 1
+ fi
+ shift
+ ;;
+ esac
+done
+
+# Set environment variable
+export ENVIRONMENT="$ENVIRONMENT"
+
+# Validate command
+if [ -z "$COMMAND" ]; then
+ print_usage
+ exit 1
+fi
+
+# Check if we're in the right directory
+if [ ! -f "Cargo.toml" ]; then
+ log_error "Please run this script from the project root directory"
+ exit 1
+fi
+
+# Load environment and parse database URL
+load_env
+parse_database_url
+
+# Initialize migration system
+init_migration_system
+
+# Execute command
+case "$COMMAND" in
+ "status")
+ show_migration_status
+ ;;
+ "pending")
+ list_pending_migrations
+ ;;
+ "applied")
+ list_applied_migrations
+ ;;
+ "migrate")
+ run_migrations "$VERSION"
+ ;;
+ "rollback")
+ rollback_migrations "${STEPS:-1}"
+ ;;
+ "create")
+ create_migration "$MIGRATION_NAME" "$MIGRATION_TYPE" "$TEMPLATE_NAME"
+ ;;
+ "generate")
+ log_warn "Schema diff generation not yet implemented"
+ ;;
+ "validate")
+ validate_migrations
+ ;;
+ "dry-run")
+ show_migration_preview
+ ;;
+ "force")
+ log_warn "Force migration state not yet implemented"
+ ;;
+ "repair")
+ log_warn "Migration table repair not yet implemented"
+ ;;
+ "baseline")
+ log_warn "Migration baseline not yet implemented"
+ ;;
+ "history")
+ list_applied_migrations
+ ;;
+ "schema-dump")
+ log_warn "Schema dump not yet implemented"
+ ;;
+ "data-migrate")
+ log_warn "Data migration not yet implemented"
+ ;;
+ "template")
+ log_warn "Migration template management not yet implemented"
+ ;;
+ *)
+ log_error "Unknown command: $COMMAND"
+ print_usage
+ exit 1
+ ;;
+esac
diff --git a/scripts/databases/db-monitor.sh b/scripts/databases/db-monitor.sh
new file mode 100755
index 0000000..ed36639
--- /dev/null
+++ b/scripts/databases/db-monitor.sh
@@ -0,0 +1,720 @@
+#!/bin/bash
+
+# Database Monitoring and Health Check Script
+# Provides comprehensive database monitoring, performance metrics, and health checks
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+BOLD='\033[1m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# The script lives in scripts/databases/, so the project root is two levels up
+PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Default monitoring configuration
+MONITOR_INTERVAL=60
+ALERT_THRESHOLD_CONNECTIONS=80
+ALERT_THRESHOLD_DISK_USAGE=85
+ALERT_THRESHOLD_MEMORY_USAGE=90
+ALERT_THRESHOLD_QUERY_TIME=5000
+LOG_FILE="monitoring.log"
+
+# Logging functions
+log() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+log_success() {
+ echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+log_metric() {
+ echo -e "${CYAN}[METRIC]${NC} $1"
+}
+
+print_header() {
+ echo -e "${BLUE}${BOLD}=== $1 ===${NC}"
+}
+
+print_subheader() {
+ echo -e "${CYAN}--- $1 ---${NC}"
+}
+
+print_usage() {
+ echo "Database Monitoring and Health Check Script"
+ echo
+ echo "Usage: $0 [options]"
+ echo
+ echo "Commands:"
+ echo " health Complete health check"
+ echo " status Quick status check"
+ echo " connections Show active connections"
+ echo " performance Show performance metrics"
+ echo " slow-queries Show slow queries"
+ echo " locks Show database locks"
+ echo " disk-usage Show disk usage"
+ echo " memory-usage Show memory usage"
+ echo " backup-status Check backup status"
+ echo " replication Check replication status"
+ echo " monitor Start continuous monitoring"
+ echo " alerts Check for alerts"
+ echo " vacuum Perform database maintenance"
+ echo " analyze Update database statistics"
+ echo " report Generate comprehensive report"
+ echo
+ echo "Options:"
+ echo " --env ENV Environment (dev/prod) [default: dev]"
+ echo " --interval SECS Monitoring interval in seconds [default: 60]"
+ echo " --log-file FILE Log file path [default: monitoring.log]"
+ echo " --threshold-conn N Connection alert threshold [default: 80]"
+ echo " --threshold-disk N Disk usage alert threshold [default: 85]"
+ echo " --threshold-mem N Memory usage alert threshold [default: 90]"
+ echo " --threshold-query N Query time alert threshold in ms [default: 5000]"
+ echo " --format FORMAT Output format (table/json/csv) [default: table]"
+ echo " --quiet Suppress verbose output"
+ echo " --continuous Run continuously (for monitor command)"
+ echo
+ echo "Examples:"
+ echo " $0 health # Complete health check"
+ echo " $0 status # Quick status"
+ echo " $0 performance # Performance metrics"
+ echo " $0 monitor --interval 30 # Monitor every 30 seconds"
+ echo " $0 slow-queries # Show slow queries"
+ echo " $0 report --format json # JSON report"
+ echo " $0 vacuum # Perform maintenance"
+}
+
+# Check if .env file exists and load it
+load_env() {
+ if [ ! -f ".env" ]; then
+ log_error ".env file not found"
+ echo "Please run the database setup script first:"
+ echo " ./scripts/db-setup.sh setup"
+ exit 1
+ fi
+
+ # Export every variable defined in .env; sourcing with set -a handles
+ # quoted values and spaces that "export $(grep ... | xargs)" would mangle
+ set -a
+ . ./.env
+ set +a
+}
+
+# Parse database URL
+parse_database_url() {
+ if [[ $DATABASE_URL == postgresql://* ]] || [[ $DATABASE_URL == postgres://* ]]; then
+ DB_TYPE="postgresql"
+ DB_HOST=$(echo $DATABASE_URL | sed -n 's/.*@\([^:]*\):.*/\1/p')
+ DB_PORT=$(echo $DATABASE_URL | sed -n 's/.*:\([0-9]*\)\/.*/\1/p')
+ DB_NAME=$(echo $DATABASE_URL | sed -n 's/.*\/\([^?]*\).*/\1/p')
+ DB_USER=$(echo $DATABASE_URL | sed -n 's/.*\/\/\([^:]*\):.*/\1/p')
+ DB_PASS=$(echo $DATABASE_URL | sed -n 's/.*:\/\/[^:]*:\([^@]*\)@.*/\1/p')
+ elif [[ $DATABASE_URL == sqlite://* ]]; then
+ DB_TYPE="sqlite"
+ DB_FILE=$(echo $DATABASE_URL | sed 's/sqlite:\/\///')
+ else
+ log_error "Unsupported database URL format: $DATABASE_URL"
+ exit 1
+ fi
+}
+
+# Execute SQL query
+execute_sql() {
+ local query="$1"
+ local format="${2:-tuples-only}"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ export PGPASSWORD="$DB_PASS"
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -A -c "$query" 2>/dev/null
+ unset PGPASSWORD
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ sqlite3 "$DB_FILE" "$query" 2>/dev/null
+ fi
+}
+
+# Check database connectivity
+check_connectivity() {
+ print_subheader "Database Connectivity"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ export PGPASSWORD="$DB_PASS"
+ if pg_isready -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" >/dev/null 2>&1; then
+ log_success "PostgreSQL server is accepting connections"
+
+ # Test actual connection
+ if psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1;" >/dev/null 2>&1; then
+ log_success "Database connection successful"
+ return 0
+ else
+ log_error "Database connection failed"
+ return 1
+ fi
+ else
+ log_error "PostgreSQL server is not accepting connections"
+ return 1
+ fi
+ unset PGPASSWORD
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ -f "$DB_FILE" ]; then
+ if sqlite3 "$DB_FILE" "SELECT 1;" >/dev/null 2>&1; then
+ log_success "SQLite database accessible"
+ return 0
+ else
+ log_error "SQLite database access failed"
+ return 1
+ fi
+ else
+ log_error "SQLite database file not found: $DB_FILE"
+ return 1
+ fi
+ fi
+}
+
+# Check database version
+check_version() {
+ print_subheader "Database Version"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ local version=$(execute_sql "SELECT version();")
+ log_metric "PostgreSQL Version: $version"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ local version=$(sqlite3 --version | cut -d' ' -f1)
+ log_metric "SQLite Version: $version"
+ fi
+}
+
+# Check database size
+check_database_size() {
+ print_subheader "Database Size"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ local size=$(execute_sql "SELECT pg_size_pretty(pg_database_size('$DB_NAME'));")
+ log_metric "Database Size: $size"
+
+ # Table sizes
+ echo "Top 10 largest tables:"
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size
+ FROM pg_tables
+ WHERE schemaname NOT IN ('information_schema', 'pg_catalog')
+ ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC
+ LIMIT 10;
+ " | while read line; do
+ log_metric " $line"
+ done
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ -f "$DB_FILE" ]; then
+ local size=$(du -h "$DB_FILE" | cut -f1)
+ log_metric "Database Size: $size"
+ fi
+ fi
+}
+
+# Check active connections
+check_connections() {
+ print_subheader "Database Connections"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ local active_connections=$(execute_sql "SELECT count(*) FROM pg_stat_activity WHERE state = 'active';")
+ local total_connections=$(execute_sql "SELECT count(*) FROM pg_stat_activity;")
+ local max_connections=$(execute_sql "SELECT setting FROM pg_settings WHERE name = 'max_connections';")
+
+ log_metric "Active Connections: $active_connections"
+ log_metric "Total Connections: $total_connections"
+ log_metric "Max Connections: $max_connections"
+
+ local connection_percentage=$((total_connections * 100 / max_connections))
+ log_metric "Connection Usage: ${connection_percentage}%"
+
+ if [ $connection_percentage -gt $ALERT_THRESHOLD_CONNECTIONS ]; then
+ log_warn "Connection usage is above ${ALERT_THRESHOLD_CONNECTIONS}%"
+ fi
+
+ # Show connection details
+ echo "Active connections by user:"
+ execute_sql "
+ SELECT
+ usename,
+ count(*) as connections,
+ state
+ FROM pg_stat_activity
+ GROUP BY usename, state
+ ORDER BY connections DESC;
+ " | while read line; do
+ log_metric " $line"
+ done
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log_metric "SQLite connections: Single connection (file-based)"
+ fi
+}
+
+# Check performance metrics
+check_performance() {
+ print_subheader "Performance Metrics"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ # Cache hit ratio
+ local cache_hit_ratio=$(execute_sql "
+ SELECT
+ round(
+ (sum(heap_blks_hit) / (sum(heap_blks_hit) + sum(heap_blks_read))) * 100, 2
+ ) as cache_hit_ratio
+ FROM pg_statio_user_tables;
+ ")
+ log_metric "Cache Hit Ratio: ${cache_hit_ratio}%"
+
+ # Index usage
+ local index_usage=$(execute_sql "
+ SELECT
+ round(
+ (sum(idx_blks_hit) / (sum(idx_blks_hit) + sum(idx_blks_read))) * 100, 2
+ ) as index_hit_ratio
+ FROM pg_statio_user_indexes;
+ ")
+ log_metric "Index Hit Ratio: ${index_usage}%"
+
+ # Transaction stats
+ local commits=$(execute_sql "SELECT xact_commit FROM pg_stat_database WHERE datname = '$DB_NAME';")
+ local rollbacks=$(execute_sql "SELECT xact_rollback FROM pg_stat_database WHERE datname = '$DB_NAME';")
+ log_metric "Commits: $commits"
+ log_metric "Rollbacks: $rollbacks"
+
+ # Deadlocks
+ local deadlocks=$(execute_sql "SELECT deadlocks FROM pg_stat_database WHERE datname = '$DB_NAME';")
+ log_metric "Deadlocks: $deadlocks"
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ # SQLite-specific metrics
+ local page_count=$(execute_sql "PRAGMA page_count;")
+ local page_size=$(execute_sql "PRAGMA page_size;")
+ local cache_size=$(execute_sql "PRAGMA cache_size;")
+
+ log_metric "Page Count: $page_count"
+ log_metric "Page Size: $page_size bytes"
+ log_metric "Cache Size: $cache_size pages"
+ fi
+}
+
+# Check slow queries
+check_slow_queries() {
+ print_subheader "Slow Queries"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ # Check if pg_stat_statements is installed in this database
+ # (pg_available_extensions only lists extensions that *could* be installed)
+ local extension_exists=$(execute_sql "SELECT count(*) FROM pg_extension WHERE extname = 'pg_stat_statements';")
+
+ if [ "$extension_exists" -eq "1" ]; then
+ echo "Top 10 slowest queries:"
+ execute_sql "
+ SELECT
+ round(mean_exec_time::numeric, 2) as avg_time_ms,
+ calls,
+ round(total_exec_time::numeric, 2) as total_time_ms,
+ left(query, 100) as query_preview
+ FROM pg_stat_statements
+ ORDER BY mean_exec_time DESC
+ LIMIT 10;
+ " | while read line; do
+ log_metric " $line"
+ done
+ else
+ log_warn "pg_stat_statements extension not available"
+ fi
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log_metric "SQLite slow query monitoring requires application-level logging"
+ fi
+}
+
+# Check database locks
+check_locks() {
+ print_subheader "Database Locks"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ local lock_count=$(execute_sql "SELECT count(*) FROM pg_locks;")
+ log_metric "Active Locks: $lock_count"
+
+ # Check for blocking queries
+ local blocking_queries=$(execute_sql "
+ SELECT count(*)
+ FROM pg_stat_activity
+ WHERE wait_event_type = 'Lock';
+ ")
+
+ if [ "$blocking_queries" -gt "0" ]; then
+ log_warn "Found $blocking_queries queries waiting for locks"
+
+ execute_sql "
+ SELECT
+ blocked_locks.pid AS blocked_pid,
+ blocked_activity.usename AS blocked_user,
+ blocking_locks.pid AS blocking_pid,
+ blocking_activity.usename AS blocking_user,
+ blocked_activity.query AS blocked_statement,
+ blocking_activity.query AS current_statement_in_blocking_process
+ FROM pg_catalog.pg_locks blocked_locks
+ JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
+ JOIN pg_catalog.pg_locks blocking_locks ON blocking_locks.locktype = blocked_locks.locktype
+ AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
+ AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
+ AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
+ AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
+ AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
+ AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
+ AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
+ AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
+ AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
+ AND blocking_locks.pid != blocked_locks.pid
+ JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
+ WHERE NOT blocked_locks.granted;
+ " | while read line; do
+ log_warn " $line"
+ done
+ else
+ log_success "No blocking queries found"
+ fi
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log_metric "SQLite uses file-level locking"
+ fi
+}
+
+# Check disk usage
+check_disk_usage() {
+ print_subheader "Disk Usage"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ # Get PostgreSQL data directory
+ local data_dir=$(execute_sql "SELECT setting FROM pg_settings WHERE name = 'data_directory';")
+
+ if [ -n "$data_dir" ] && [ -d "$data_dir" ]; then
+ local disk_usage=$(df -h "$data_dir" | awk 'NR==2 {print $5}' | sed 's/%//')
+ log_metric "Data Directory Disk Usage: ${disk_usage}%"
+
+ if [ "$disk_usage" -gt "$ALERT_THRESHOLD_DISK_USAGE" ]; then
+ log_warn "Disk usage is above ${ALERT_THRESHOLD_DISK_USAGE}%"
+ fi
+ else
+ log_warn "Could not determine PostgreSQL data directory"
+ fi
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ local db_dir=$(dirname "$DB_FILE")
+ local disk_usage=$(df -h "$db_dir" | awk 'NR==2 {print $5}' | sed 's/%//')
+ log_metric "Database Directory Disk Usage: ${disk_usage}%"
+
+ if [ "$disk_usage" -gt "$ALERT_THRESHOLD_DISK_USAGE" ]; then
+ log_warn "Disk usage is above ${ALERT_THRESHOLD_DISK_USAGE}%"
+ fi
+ fi
+}
+
+# Check memory usage
+check_memory_usage() {
+ print_subheader "Memory Usage"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ # Check shared buffers and other memory settings
+ local shared_buffers=$(execute_sql "SELECT setting FROM pg_settings WHERE name = 'shared_buffers';")
+ local work_mem=$(execute_sql "SELECT setting FROM pg_settings WHERE name = 'work_mem';")
+ local maintenance_work_mem=$(execute_sql "SELECT setting FROM pg_settings WHERE name = 'maintenance_work_mem';")
+
+ log_metric "Shared Buffers: $shared_buffers"
+ log_metric "Work Mem: $work_mem"
+ log_metric "Maintenance Work Mem: $maintenance_work_mem"
+
+ # Check actual memory usage if available
+ # ps -C is procps (Linux) only; skip silently elsewhere
+ if command -v ps >/dev/null 2>&1 && ps -C postgres >/dev/null 2>&1; then
+ local postgres_memory=$(ps -o pid,vsz,rss,comm -C postgres --no-headers | awk '{rss_total += $3} END {print rss_total/1024 " MB"}')
+ if [ -n "$postgres_memory" ]; then
+ log_metric "PostgreSQL Memory Usage: $postgres_memory"
+ fi
+ fi
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ local cache_size=$(execute_sql "PRAGMA cache_size;")
+ local page_size=$(execute_sql "PRAGMA page_size;")
+ local memory_usage_kb=$((cache_size * page_size / 1024))
+ log_metric "SQLite Cache Memory: ${memory_usage_kb} KB"
+ fi
+}
+
+# Check backup status
+check_backup_status() {
+ print_subheader "Backup Status"
+
+ local backup_dir="backups"
+ if [ -d "$backup_dir" ]; then
+ local backup_count=$(find "$backup_dir" -name "*.sql*" -o -name "*.dump*" -o -name "*.tar*" 2>/dev/null | wc -l)
+ log_metric "Available Backups: $backup_count"
+
+ if [ "$backup_count" -gt "0" ]; then
+ local latest_backup=$(find "$backup_dir" -name "*.sql*" -o -name "*.dump*" -o -name "*.tar*" 2>/dev/null | sort | tail -1)
+ if [ -n "$latest_backup" ]; then
+ # -mtime +0 matches files modified more than 24 hours ago (+1 would mean >48h)
+ local backup_age=$(find "$latest_backup" -mtime +0 2>/dev/null | wc -l)
+ local backup_date=$(date -r "$latest_backup" '+%Y-%m-%d %H:%M:%S' 2>/dev/null || echo "Unknown")
+ log_metric "Latest Backup: $(basename "$latest_backup") ($backup_date)"
+
+ if [ "$backup_age" -gt "0" ]; then
+ log_warn "Latest backup is older than 24 hours"
+ fi
+ fi
+ else
+ log_warn "No backups found"
+ fi
+ else
+ log_warn "Backup directory not found: $backup_dir"
+ fi
+}
+
+# Perform vacuum operation
+perform_vacuum() {
+ print_subheader "Database Maintenance (VACUUM)"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ log "Running VACUUM ANALYZE on all tables..."
+ execute_sql "VACUUM ANALYZE;" >/dev/null 2>&1
+ log_success "VACUUM ANALYZE completed"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log "Running VACUUM on SQLite database..."
+ execute_sql "VACUUM;" >/dev/null 2>&1
+ log_success "VACUUM completed"
+ fi
+}
+
+# Update database statistics
+update_statistics() {
+ print_subheader "Update Database Statistics"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ log "Running ANALYZE on all tables..."
+ execute_sql "ANALYZE;" >/dev/null 2>&1
+ log_success "ANALYZE completed"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log "Running ANALYZE on SQLite database..."
+ execute_sql "ANALYZE;" >/dev/null 2>&1
+ log_success "ANALYZE completed"
+ fi
+}
+
+# Generate comprehensive report
+generate_report() {
+ print_header "Database Health Report"
+
+ echo "Report generated on: $(date)"
+ echo "Database Type: $DB_TYPE"
+ echo "Database Name: $DB_NAME"
+ echo "Environment: $ENVIRONMENT"
+ echo
+
+ # Run all checks
+ check_connectivity
+ echo
+ check_version
+ echo
+ check_database_size
+ echo
+ check_connections
+ echo
+ check_performance
+ echo
+ check_slow_queries
+ echo
+ check_locks
+ echo
+ check_disk_usage
+ echo
+ check_memory_usage
+ echo
+ check_backup_status
+ echo
+
+ print_header "Report Complete"
+}
+
+# Continuous monitoring
+start_monitoring() {
+ print_header "Starting Database Monitoring"
+ log "Monitoring interval: ${MONITOR_INTERVAL} seconds"
+ log "Press Ctrl+C to stop monitoring"
+
+ while true; do
+ clear
+ echo "=== Database Monitor - $(date) ==="
+ echo
+
+ # Quick health checks
+ if check_connectivity >/dev/null 2>&1; then
+ echo "✅ Database connectivity: OK"
+ else
+ echo "❌ Database connectivity: FAILED"
+ fi
+
+ check_connections
+ echo
+ check_performance
+ echo
+
+ if [ "$CONTINUOUS" = "true" ]; then
+ sleep "$MONITOR_INTERVAL"
+ else
+ break
+ fi
+ done
+}
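+
+ # For unattended checks, the report command can be scheduled externally, e.g.
+ # via cron (illustrative schedule and path; adjust to your checkout):
+ #   */15 * * * * cd /path/to/project && ./scripts/databases/db-monitor.sh report >> monitoring.log 2>&1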
+
+# Parse command line arguments
+COMMAND=""
+ENVIRONMENT="dev"
+FORMAT="table"
+CONTINUOUS="false"
+QUIET="false"
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --env)
+ ENVIRONMENT="$2"
+ shift 2
+ ;;
+ --interval)
+ MONITOR_INTERVAL="$2"
+ shift 2
+ ;;
+ --log-file)
+ LOG_FILE="$2"
+ shift 2
+ ;;
+ --threshold-conn)
+ ALERT_THRESHOLD_CONNECTIONS="$2"
+ shift 2
+ ;;
+ --threshold-disk)
+ ALERT_THRESHOLD_DISK_USAGE="$2"
+ shift 2
+ ;;
+ --threshold-mem)
+ ALERT_THRESHOLD_MEMORY_USAGE="$2"
+ shift 2
+ ;;
+ --threshold-query)
+ ALERT_THRESHOLD_QUERY_TIME="$2"
+ shift 2
+ ;;
+ --format)
+ FORMAT="$2"
+ shift 2
+ ;;
+ --continuous)
+ CONTINUOUS="true"
+ shift
+ ;;
+ --quiet)
+ QUIET="true"
+ shift
+ ;;
+ -h|--help)
+ print_usage
+ exit 0
+ ;;
+ *)
+ if [ -z "$COMMAND" ]; then
+ COMMAND="$1"
+ else
+ log_error "Unknown option: $1"
+ print_usage
+ exit 1
+ fi
+ shift
+ ;;
+ esac
+done
+
+# Set environment variable
+export ENVIRONMENT="$ENVIRONMENT"
+
+# Validate command
+if [ -z "$COMMAND" ]; then
+ print_usage
+ exit 1
+fi
+
+# Check if we're in the right directory
+if [ ! -f "Cargo.toml" ]; then
+ log_error "Please run this script from the project root directory"
+ exit 1
+fi
+
+# Load environment and parse database URL
+load_env
+parse_database_url
+
+# Execute command
+case "$COMMAND" in
+ "health")
+ print_header "Complete Health Check"
+ generate_report
+ ;;
+ "status")
+ print_header "Quick Status Check"
+ check_connectivity
+ check_connections
+ ;;
+ "connections")
+ check_connections
+ ;;
+ "performance")
+ check_performance
+ ;;
+ "slow-queries")
+ check_slow_queries
+ ;;
+ "locks")
+ check_locks
+ ;;
+ "disk-usage")
+ check_disk_usage
+ ;;
+ "memory-usage")
+ check_memory_usage
+ ;;
+ "backup-status")
+ check_backup_status
+ ;;
+ "replication")
+ log_warn "Replication monitoring not yet implemented"
+ ;;
+ "monitor")
+ start_monitoring
+ ;;
+ "alerts")
+ log_warn "Alert system not yet implemented"
+ ;;
+ "vacuum")
+ perform_vacuum
+ ;;
+ "analyze")
+ update_statistics
+ ;;
+ "report")
+ generate_report
+ ;;
+ *)
+ log_error "Unknown command: $COMMAND"
+ print_usage
+ exit 1
+ ;;
+esac
diff --git a/scripts/databases/db-setup.sh b/scripts/databases/db-setup.sh
new file mode 100755
index 0000000..6f0f985
--- /dev/null
+++ b/scripts/databases/db-setup.sh
@@ -0,0 +1,388 @@
+#!/bin/bash
+
+# Database Setup Script
+# Provides convenient commands for database management
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# The script lives in scripts/databases/, so the project root is two levels up
+PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Logging functions
+log() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+print_header() {
+ echo -e "${BLUE}=== $1 ===${NC}"
+}
+
+print_usage() {
+ echo "Database Setup Script"
+ echo
+ echo "Usage: $0 [options]"
+ echo
+ echo "Commands:"
+ echo " setup Full database setup (create + migrate + seed)"
+ echo " create Create the database"
+ echo " migrate Run migrations"
+ echo " seed Seed database with test data"
+ echo " reset Reset database (drop + create + migrate)"
+ echo " status Show migration status"
+ echo " drop Drop the database"
+ echo " postgres Setup PostgreSQL database"
+ echo " sqlite Setup SQLite database"
+ echo
+ echo "Options:"
+ echo " --env ENV Environment (dev/prod) [default: dev]"
+ echo " --force Skip confirmations"
+ echo " --quiet Suppress verbose output"
+ echo
+ echo "Examples:"
+ echo " $0 setup # Full setup with default settings"
+ echo " $0 migrate # Run pending migrations"
+ echo " $0 reset --force # Reset database without confirmation"
+ echo " $0 postgres # Setup PostgreSQL specifically"
+ echo " $0 sqlite # Setup SQLite specifically"
+}
+
+# Check if .env file exists
+check_env_file() {
+ if [ ! -f ".env" ]; then
+ log_warn ".env file not found"
+ log "Creating .env file from template..."
+
+ if [ -f ".env.example" ]; then
+ cp ".env.example" ".env"
+ log "Created .env from .env.example"
+ else
+ create_default_env
+ fi
+ fi
+}
+
+# Create default .env file
+create_default_env() {
+ cat > ".env" << EOF
+# Environment Configuration
+ENVIRONMENT=dev
+
+# Database Configuration
+DATABASE_URL=postgresql://dev:dev@localhost:5432/rustelo_dev
+
+# Server Configuration
+SERVER_HOST=127.0.0.1
+SERVER_PORT=3030
+SERVER_PROTOCOL=http
+
+# Session Configuration
+SESSION_SECRET=dev-secret-not-for-production
+
+# Features
+ENABLE_AUTH=true
+ENABLE_CONTENT_DB=true
+ENABLE_TLS=false
+
+# Logging
+LOG_LEVEL=debug
+RUST_LOG=debug
+EOF
+ log "Created default .env file"
+}
+
+# Check dependencies
+check_dependencies() {
+ local missing=()
+
+ if ! command -v cargo >/dev/null 2>&1; then
+ missing+=("cargo (Rust)")
+ fi
+
+ if ! command -v psql >/dev/null 2>&1 && ! command -v sqlite3 >/dev/null 2>&1; then
+ missing+=("psql (PostgreSQL) or sqlite3")
+ fi
+
+ if [ ${#missing[@]} -gt 0 ]; then
+ log_error "Missing dependencies: ${missing[*]}"
+ echo
+ echo "Please install the missing dependencies:"
+ echo "- Rust: https://rustup.rs/"
+ echo "- PostgreSQL: https://postgresql.org/download/"
+ echo "- SQLite: Usually pre-installed or via package manager"
+ exit 1
+ fi
+}
+
+# Setup PostgreSQL database
+setup_postgresql() {
+ print_header "Setting up PostgreSQL Database"
+
+ # Check if PostgreSQL is running
+ if ! pg_isready >/dev/null 2>&1; then
+ log_warn "PostgreSQL is not running"
+ echo "Please start PostgreSQL service:"
+ echo " macOS (Homebrew): brew services start postgresql"
+ echo " Linux (systemd): sudo systemctl start postgresql"
+ echo " Windows: Start PostgreSQL service from Services panel"
+ exit 1
+ fi
+
+ # Create development user if it doesn't exist
+ # (assumes the local "postgres" superuser is reachable without a password prompt)
+ if ! psql -U postgres -tc "SELECT 1 FROM pg_user WHERE usename = 'dev'" | grep -q 1; then
+ log "Creating development user..."
+ psql -U postgres -c "CREATE USER dev WITH PASSWORD 'dev' CREATEDB;"
+ fi
+
+ # Update DATABASE_URL in .env
+ if grep -q "sqlite://" .env; then
+ log "Updating .env to use PostgreSQL..."
+ sed -i.bak 's|DATABASE_URL=.*|DATABASE_URL=postgresql://dev:dev@localhost:5432/rustelo_dev|' .env
+ rm -f .env.bak
+ fi
+
+ log "PostgreSQL setup complete"
+}
+
+# Setup SQLite database
+setup_sqlite() {
+ print_header "Setting up SQLite Database"
+
+ # Create data directory
+ mkdir -p data
+
+ # Update DATABASE_URL in .env
+ if grep -q "postgresql://" .env; then
+ log "Updating .env to use SQLite..."
+ sed -i.bak 's|DATABASE_URL=.*|DATABASE_URL=sqlite://data/rustelo.db|' .env
+ rm -f .env.bak
+ fi
+
+ log "SQLite setup complete"
+}
+
+# Run database tool command
+run_db_tool() {
+ local command="$1"
+ log "Running: cargo run --bin db_tool -- $command"
+
+ if [ "$QUIET" = "true" ]; then
+ cargo run --bin db_tool -- "$command" >/dev/null 2>&1
+ else
+ cargo run --bin db_tool -- "$command"
+ fi
+}
+
+# Create seed directory and files if they don't exist
+setup_seeds() {
+ if [ ! -d "seeds" ]; then
+ log "Creating seeds directory..."
+ mkdir -p seeds
+
+ # Create sample seed files
+ cat > "seeds/001_sample_users.sql" << EOF
+-- Sample users for development
+-- This file works for both PostgreSQL and SQLite (3.24+ for ON CONFLICT)
+
+INSERT INTO users (username, email, password_hash, is_active, is_verified) VALUES
+('admin', 'admin@example.com', '\$argon2id\$v=19\$m=65536,t=3,p=4\$Ym9vZm9v\$2RmTUplMXB3YUNGeFczL1NyTlJFWERsZVdrbUVuNHhDNlk5K1ZZWVorUT0', true, true),
+('user', 'user@example.com', '\$argon2id\$v=19\$m=65536,t=3,p=4\$Ym9vZm9v\$2RmTUplMXB3YUNGeFczL1NyTlJFWERsZVdrbUVuNHhDNlk5K1ZZWVorUT0', true, true),
+('editor', 'editor@example.com', '\$argon2id\$v=19\$m=65536,t=3,p=4\$Ym9vZm9v\$2RmTUplMXB3YUNGeFczL1NyTlJFWERsZVdrbUVuNHhDNlk5K1ZZWVorUT0', true, true)
+ON CONFLICT (email) DO NOTHING;
+EOF
+
+ cat > "seeds/002_sample_content.sql" << EOF
+-- Sample content for development
+-- This file works for both PostgreSQL and SQLite (3.24+ for ON CONFLICT)
+
+INSERT INTO content (title, slug, content_type, body, is_published, published_at) VALUES
+('Welcome to Rustelo', 'welcome', 'markdown', '# Welcome to Rustelo
+
+This is a sample content page created by the seed data.
+
+## Features
+
+- Fast and secure
+- Built with Rust
+- Modern web framework
+- Easy to use
+
+Enjoy building with Rustelo!', true, CURRENT_TIMESTAMP),
+
+('About Us', 'about', 'markdown', '# About Us
+
+This is the about page for your Rustelo application.
+
+You can edit this content through the admin interface or by modifying the seed files.', true, CURRENT_TIMESTAMP),
+
+('Getting Started', 'getting-started', 'markdown', '# Getting Started
+
+Here are some tips to get you started with your new Rustelo application:
+
+1. Check out the admin interface
+2. Create your first content
+3. Customize the design
+4. Deploy to production
+
+Good luck!', false, NULL)
+ON CONFLICT (slug) DO NOTHING;
+EOF
+
+ log "Created sample seed files"
+ fi
+}
+
+# Main setup function
+full_setup() {
+ print_header "Full Database Setup"
+
+ check_env_file
+ setup_seeds
+
+ log "Creating database..."
+ run_db_tool "create"
+
+ log "Running migrations..."
+ run_db_tool "migrate"
+
+ log "Seeding database..."
+ run_db_tool "seed"
+
+ log "Checking status..."
+ run_db_tool "status"
+
+ print_header "Setup Complete!"
+ log "Database is ready for development"
+ echo
+ log "Next steps:"
+ echo " 1. Start the server: cargo leptos watch"
+ echo " 2. Open http://localhost:3030 in your browser"
+ echo " 3. Check the database status: $0 status"
+}
+
+# Parse command line arguments
+COMMAND=""
+ENVIRONMENT="dev"
+FORCE=false
+QUIET=false
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --env)
+ ENVIRONMENT="$2"
+ shift 2
+ ;;
+ --force)
+ FORCE=true
+ shift
+ ;;
+ --quiet)
+ QUIET=true
+ shift
+ ;;
+ -h|--help)
+ print_usage
+ exit 0
+ ;;
+ *)
+ if [ -z "$COMMAND" ]; then
+ COMMAND="$1"
+ else
+ log_error "Unknown option: $1"
+ print_usage
+ exit 1
+ fi
+ shift
+ ;;
+ esac
+done
+
+# Set environment variable
+export ENVIRONMENT="$ENVIRONMENT"
+
+# Validate command
+if [ -z "$COMMAND" ]; then
+ print_usage
+ exit 1
+fi
+
+# Check dependencies
+check_dependencies
+
+# Check if we're in the right directory
+if [ ! -f "Cargo.toml" ]; then
+ log_error "Please run this script from the project root directory"
+ exit 1
+fi
+
+# Execute command
+case "$COMMAND" in
+ "setup")
+ full_setup
+ ;;
+ "create")
+ print_header "Creating Database"
+ check_env_file
+ run_db_tool "create"
+ ;;
+ "migrate")
+ print_header "Running Migrations"
+ run_db_tool "migrate"
+ ;;
+ "seed")
+ print_header "Seeding Database"
+ setup_seeds
+ run_db_tool "seed"
+ ;;
+ "reset")
+ print_header "Resetting Database"
+ if [ "$FORCE" != "true" ]; then
+ echo -n "This will destroy all data. Are you sure? (y/N): "
+ read -r confirm
+ if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
+ log "Reset cancelled"
+ exit 0
+ fi
+ fi
+ run_db_tool "reset"
+ ;;
+ "status")
+ print_header "Database Status"
+ run_db_tool "status"
+ ;;
+ "drop")
+ print_header "Dropping Database"
+ run_db_tool "drop"
+ ;;
+ "postgres")
+ setup_postgresql
+ full_setup
+ ;;
+ "sqlite")
+ setup_sqlite
+ full_setup
+ ;;
+ *)
+ log_error "Unknown command: $COMMAND"
+ print_usage
+ exit 1
+ ;;
+esac
diff --git a/scripts/databases/db-utils.sh b/scripts/databases/db-utils.sh
new file mode 100755
index 0000000..20c2cd9
--- /dev/null
+++ b/scripts/databases/db-utils.sh
@@ -0,0 +1,1070 @@
+#!/bin/bash
+
+# Database Utilities and Maintenance Script
+# Provides various database utility functions and maintenance tasks
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+BOLD='\033[1m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# The script lives in scripts/databases/, so the project root is two levels up
+PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Utility configuration
+TEMP_DIR="temp"
+DUMP_DIR="dumps"
+LOGS_DIR="logs"
+MAX_LOG_SIZE="100M"
+LOG_RETENTION_DAYS=30
+
+# Logging functions
+log() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+log_success() {
+ echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+log_debug() {
+ if [ "$DEBUG" = "true" ]; then
+ echo -e "${CYAN}[DEBUG]${NC} $1"
+ fi
+}
+
+print_header() {
+ echo -e "${BLUE}${BOLD}=== $1 ===${NC}"
+}
+
+print_subheader() {
+ echo -e "${CYAN}--- $1 ---${NC}"
+}
+
+print_usage() {
+ echo "Database Utilities and Maintenance Script"
+ echo
+ echo "Usage: $0 [options]"
+ echo
+ echo "Commands:"
+ echo " size Show database size information"
+ echo " tables List all tables with row counts"
+ echo " indexes Show index information"
+ echo " constraints Show table constraints"
+ echo " users Show database users (PostgreSQL only)"
+ echo " permissions Show user permissions"
+ echo " sessions Show active sessions"
+ echo " locks Show current locks"
+ echo " queries Show running queries"
+ echo " kill-query Kill a specific query"
+ echo " optimize Optimize database (VACUUM, ANALYZE)"
+ echo " reindex Rebuild indexes"
+ echo " check-integrity Check database integrity"
+ echo " repair Repair database issues"
+ echo " cleanup Clean up temporary data"
+ echo " logs Show database logs"
+ echo " config Show database configuration"
+ echo " extensions List database extensions (PostgreSQL)"
+ echo " sequences Show sequence information"
+ echo " triggers Show table triggers"
+ echo " functions Show user-defined functions"
+ echo " views Show database views"
+ echo " schema-info Show comprehensive schema information"
+ echo " duplicate-data Find duplicate records"
+ echo " orphaned-data Find orphaned records"
+ echo " table-stats Show detailed table statistics"
+ echo " connection-test Test database connection"
+ echo " benchmark Run database benchmarks"
+ echo " export-schema Export database schema"
+ echo " import-schema Import database schema"
+ echo " copy-table Copy table data"
+ echo " truncate-table Truncate table data"
+ echo " reset-sequence Reset sequence values"
+ echo
+ echo "Options:"
+ echo " --env ENV Environment (dev/prod) [default: dev]"
+ echo " --table TABLE Target table name"
+ echo " --schema SCHEMA Target schema name"
+ echo " --query-id ID Query ID to kill"
+ echo " --limit N Limit results [default: 100]"
+ echo " --output FORMAT Output format (table/json/csv) [default: table]"
+ echo " --file FILE Output file path"
+ echo " --force Force operation without confirmation"
+ echo " --debug Enable debug output"
+ echo " --quiet Suppress verbose output"
+ echo " --dry-run Show what would be done without executing"
+ echo
+ echo "Examples:"
+ echo " $0 size # Show database size"
+ echo " $0 tables # List all tables"
+ echo " $0 tables --table users # Show info for users table"
+ echo " $0 indexes --table users # Show indexes for users table"
+ echo " $0 optimize # Optimize database"
+ echo " $0 cleanup # Clean up temporary data"
+ echo " $0 duplicate-data --table users # Find duplicate users"
+ echo " $0 copy-table --table users # Copy users table"
+ echo " $0 export-schema --file schema.sql # Export schema to file"
+ echo " $0 benchmark # Run performance benchmarks"
+}
+
+# Check if .env file exists and load it
+load_env() {
+ if [ ! -f ".env" ]; then
+ log_error ".env file not found"
+ echo "Please run the database setup script first:"
+ echo " ./scripts/db-setup.sh setup"
+ exit 1
+ fi
+
+    # Load environment variables; set -a exports every assignment the file
+    # makes and, unlike grep | xargs, survives values that contain spaces
+    set -a
+    # shellcheck disable=SC1091
+    . ./.env
+    set +a
+}
+
+# Parse database URL
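+# Example: postgresql://user:pass@localhost:5432/mydb yields DB_TYPE=postgresql,
+# DB_HOST=localhost, DB_PORT=5432, DB_NAME=mydb, DB_USER=user, DB_PASS=pass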
+parse_database_url() {
+    if [[ $DATABASE_URL == postgresql://* ]] || [[ $DATABASE_URL == postgres://* ]]; then
+        DB_TYPE="postgresql"
+        DB_HOST=$(echo "$DATABASE_URL" | sed -n 's/.*@\([^:]*\):.*/\1/p')
+        DB_PORT=$(echo "$DATABASE_URL" | sed -n 's/.*:\([0-9]*\)\/.*/\1/p')
+        DB_NAME=$(echo "$DATABASE_URL" | sed -n 's/.*\/\([^?]*\).*/\1/p')
+        DB_USER=$(echo "$DATABASE_URL" | sed -n 's/.*\/\/\([^:]*\):.*/\1/p')
+        DB_PASS=$(echo "$DATABASE_URL" | sed -n 's/.*:\/\/[^:]*:\([^@]*\)@.*/\1/p')
+    elif [[ $DATABASE_URL == sqlite://* ]]; then
+        DB_TYPE="sqlite"
+        DB_FILE=$(echo "$DATABASE_URL" | sed 's/sqlite:\/\///')
+ else
+ log_error "Unsupported database URL format: $DATABASE_URL"
+ exit 1
+ fi
+}
+
+# Execute SQL query
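+# Args: $1 = SQL text, $2 = capture output for scripting (true/false),
+#       $3 = output format when capturing (table/csv).
+# Errors are suppressed with 2>/dev/null; run with --debug to log each statement.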
+execute_sql() {
+ local query="$1"
+ local capture_output="${2:-false}"
+ local format="${3:-table}"
+
+ log_debug "Executing SQL: $query"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ export PGPASSWORD="$DB_PASS"
+ if [ "$capture_output" = "true" ]; then
+ if [ "$format" = "csv" ]; then
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "$query" --csv 2>/dev/null
+ else
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -A -c "$query" 2>/dev/null
+ fi
+ else
+ psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "$query" 2>/dev/null
+ fi
+ unset PGPASSWORD
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ "$capture_output" = "true" ]; then
+ if [ "$format" = "csv" ]; then
+ sqlite3 -header -csv "$DB_FILE" "$query" 2>/dev/null
+ else
+ sqlite3 "$DB_FILE" "$query" 2>/dev/null
+ fi
+ else
+ sqlite3 "$DB_FILE" "$query" 2>/dev/null
+ fi
+ fi
+}
+
+# Setup utility directories
+setup_directories() {
+ mkdir -p "$TEMP_DIR" "$DUMP_DIR" "$LOGS_DIR"
+}
+
+# Show database size information
+show_database_size() {
+ print_header "Database Size Information"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ # Total database size
+ local total_size=$(execute_sql "SELECT pg_size_pretty(pg_database_size('$DB_NAME'));" true)
+ log "Total Database Size: $total_size"
+
+ # Table sizes
+ print_subheader "Table Sizes (Top 20)"
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size,
+ pg_size_pretty(pg_relation_size(schemaname||'.'||tablename)) as table_size,
+ pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename) - pg_relation_size(schemaname||'.'||tablename)) as index_size
+ FROM pg_tables
+ WHERE schemaname NOT IN ('information_schema', 'pg_catalog')
+ ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC
+ LIMIT 20;
+ "
+
+ # Index sizes
+ print_subheader "Index Sizes (Top 10)"
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ indexname,
+ pg_size_pretty(pg_relation_size(indexrelid)) as size
+ FROM pg_stat_user_indexes
+ ORDER BY pg_relation_size(indexrelid) DESC
+ LIMIT 10;
+ "
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ -f "$DB_FILE" ]; then
+ local size=$(du -h "$DB_FILE" | cut -f1)
+ log "Database File Size: $size"
+
+ # Table info
+ print_subheader "Table Information"
+ execute_sql "
+ SELECT
+ name as table_name,
+ type
+ FROM sqlite_master
+ WHERE type IN ('table', 'view')
+ ORDER BY name;
+ "
+
+ # Page count and size
+            local page_count=$(execute_sql "PRAGMA page_count;" true)
+            local page_size=$(execute_sql "PRAGMA page_size;" true)
+            local total_bytes=$((page_count * page_size))
+            log "Total Pages: $page_count"
+            log "Page Size: $page_size bytes"
+            log "Total Size: $total_bytes bytes"
+ fi
+ fi
+}
+
+# List tables with row counts
+show_tables() {
+ print_header "Database Tables"
+
+ if [ -n "$TABLE_NAME" ]; then
+ print_subheader "Table: $TABLE_NAME"
+ show_table_details "$TABLE_NAME"
+ return
+ fi
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ n_tup_ins as inserts,
+ n_tup_upd as updates,
+ n_tup_del as deletes,
+ n_live_tup as live_rows,
+ n_dead_tup as dead_rows,
+ last_vacuum,
+ last_analyze
+ FROM pg_stat_user_tables
+ ORDER BY schemaname, tablename;
+ "
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ execute_sql "
+ SELECT
+ name as table_name,
+ type,
+ sql
+ FROM sqlite_master
+ WHERE type = 'table'
+ AND name NOT LIKE 'sqlite_%'
+ ORDER BY name;
+ "
+ fi
+}
+
+# Show table details
+show_table_details() {
+ local table_name="$1"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ print_subheader "Table Structure"
+ execute_sql "
+ SELECT
+ column_name,
+ data_type,
+ is_nullable,
+ column_default,
+ character_maximum_length
+ FROM information_schema.columns
+ WHERE table_name = '$table_name'
+ ORDER BY ordinal_position;
+ "
+
+ print_subheader "Table Statistics"
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ n_live_tup as live_rows,
+ n_dead_tup as dead_rows,
+ n_tup_ins as total_inserts,
+ n_tup_upd as total_updates,
+ n_tup_del as total_deletes,
+ last_vacuum,
+ last_autovacuum,
+ last_analyze,
+ last_autoanalyze
+ FROM pg_stat_user_tables
+ WHERE tablename = '$table_name';
+ "
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ print_subheader "Table Structure"
+ execute_sql "PRAGMA table_info($table_name);"
+
+ print_subheader "Row Count"
+ local row_count=$(execute_sql "SELECT COUNT(*) FROM $table_name;" true)
+ log "Total Rows: $row_count"
+ fi
+}
+
+# Show index information
+show_indexes() {
+ print_header "Database Indexes"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ local where_clause=""
+ if [ -n "$TABLE_NAME" ]; then
+ where_clause="WHERE tablename = '$TABLE_NAME'"
+ fi
+
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ indexname,
+ indexdef,
+ pg_size_pretty(pg_relation_size(indexrelid)) as size
+ FROM pg_indexes
+ $where_clause
+ ORDER BY schemaname, tablename, indexname;
+ "
+
+ print_subheader "Index Usage Statistics"
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ indexname,
+ idx_scan as scans,
+ idx_tup_read as tuples_read,
+ idx_tup_fetch as tuples_fetched
+ FROM pg_stat_user_indexes
+ $where_clause
+ ORDER BY idx_scan DESC;
+ "
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ local where_clause=""
+ if [ -n "$TABLE_NAME" ]; then
+ where_clause="WHERE tbl_name = '$TABLE_NAME'"
+ fi
+
+ execute_sql "
+ SELECT
+ name as index_name,
+ tbl_name as table_name,
+ sql
+ FROM sqlite_master
+ WHERE type = 'index'
+ AND name NOT LIKE 'sqlite_%'
+ $where_clause
+ ORDER BY tbl_name, name;
+ "
+ fi
+}
+
+# Show constraints
+show_constraints() {
+ print_header "Database Constraints"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ local where_clause=""
+ if [ -n "$TABLE_NAME" ]; then
+ where_clause="AND tc.table_name = '$TABLE_NAME'"
+ fi
+
+ execute_sql "
+ SELECT
+ tc.constraint_name,
+ tc.table_name,
+ tc.constraint_type,
+ kcu.column_name,
+ ccu.table_name AS foreign_table_name,
+ ccu.column_name AS foreign_column_name
+ FROM information_schema.table_constraints AS tc
+ JOIN information_schema.key_column_usage AS kcu
+ ON tc.constraint_name = kcu.constraint_name
+ LEFT JOIN information_schema.constraint_column_usage AS ccu
+ ON ccu.constraint_name = tc.constraint_name
+ WHERE tc.table_schema = 'public'
+ $where_clause
+ ORDER BY tc.table_name, tc.constraint_type, tc.constraint_name;
+ "
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ -n "$TABLE_NAME" ]; then
+ execute_sql "PRAGMA foreign_key_list($TABLE_NAME);"
+ else
+ log_warn "SQLite constraint information requires table name"
+ fi
+ fi
+}
+
+# Show database users (PostgreSQL only)
+show_users() {
+ print_header "Database Users"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ SELECT
+ usename as username,
+ usesysid as user_id,
+ usecreatedb as can_create_db,
+ usesuper as is_superuser,
+ userepl as can_replicate,
+ passwd as password_set,
+ valuntil as valid_until
+ FROM pg_user
+ ORDER BY usename;
+ "
+
+ print_subheader "User Privileges"
+ execute_sql "
+ SELECT
+ grantee,
+ table_catalog,
+ table_schema,
+ table_name,
+ privilege_type,
+ is_grantable
+ FROM information_schema.role_table_grants
+ WHERE table_schema = 'public'
+ ORDER BY grantee, table_name;
+ "
+ else
+ log_warn "User information only available for PostgreSQL"
+ fi
+}
+
+# Show active sessions
+show_sessions() {
+ print_header "Active Database Sessions"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ SELECT
+ pid,
+ usename,
+ application_name,
+ client_addr,
+ client_port,
+ backend_start,
+ query_start,
+ state,
+ LEFT(query, 100) as current_query
+ FROM pg_stat_activity
+ WHERE pid <> pg_backend_pid()
+ ORDER BY backend_start;
+ "
+ else
+ log_warn "Session information only available for PostgreSQL"
+ fi
+}
+
+# Show current locks
+show_locks() {
+ print_header "Current Database Locks"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ SELECT
+ l.locktype,
+ l.database,
+ l.relation,
+ l.page,
+ l.tuple,
+ l.virtualxid,
+ l.transactionid,
+ l.mode,
+ l.granted,
+ a.usename,
+ a.query,
+ a.query_start,
+ a.pid
+ FROM pg_locks l
+ LEFT JOIN pg_stat_activity a ON l.pid = a.pid
+ ORDER BY l.granted, l.pid;
+ "
+ else
+ log_warn "Lock information only available for PostgreSQL"
+ fi
+}
+
+# Show running queries
+show_queries() {
+ print_header "Running Queries"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ execute_sql "
+ SELECT
+ pid,
+ usename,
+ application_name,
+ client_addr,
+ now() - query_start as duration,
+ state,
+ query
+ FROM pg_stat_activity
+ WHERE state = 'active'
+ AND pid <> pg_backend_pid()
+ ORDER BY query_start;
+ "
+ else
+ log_warn "Query information only available for PostgreSQL"
+ fi
+}
+
+# Kill a specific query
+kill_query() {
+ local query_id="$1"
+
+ if [ -z "$query_id" ]; then
+ log_error "Query ID is required"
+ return 1
+ fi
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ if [ "$FORCE" != "true" ]; then
+ echo -n "Kill query with PID $query_id? (y/N): "
+ read -r confirm
+ if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
+ log "Query kill cancelled"
+ return 0
+ fi
+ fi
+
+ local result=$(execute_sql "SELECT pg_terminate_backend($query_id);" true)
+ if [ "$result" = "t" ]; then
+ log_success "Query $query_id terminated"
+ else
+ log_error "Failed to terminate query $query_id"
+ fi
+ else
+ log_warn "Query termination only available for PostgreSQL"
+ fi
+}
+
+# Optimize database
+optimize_database() {
+ print_header "Database Optimization"
+
+ if [ "$DRY_RUN" = "true" ]; then
+ log "Would perform database optimization (VACUUM, ANALYZE)"
+ return
+ fi
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ log "Running VACUUM ANALYZE..."
+ execute_sql "VACUUM ANALYZE;"
+ log_success "Database optimization completed"
+
+ # Show updated statistics
+ log "Updated table statistics:"
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ last_vacuum,
+ last_analyze
+ FROM pg_stat_user_tables
+ WHERE last_vacuum IS NOT NULL OR last_analyze IS NOT NULL
+ ORDER BY GREATEST(last_vacuum, last_analyze) DESC
+ LIMIT 10;
+ "
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log "Running VACUUM..."
+ execute_sql "VACUUM;"
+ log "Running ANALYZE..."
+ execute_sql "ANALYZE;"
+ log_success "Database optimization completed"
+ fi
+}
+
+# Rebuild indexes
+rebuild_indexes() {
+ print_header "Rebuilding Database Indexes"
+
+ if [ "$DRY_RUN" = "true" ]; then
+ log "Would rebuild all database indexes"
+ return
+ fi
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ log "Running REINDEX DATABASE..."
+ execute_sql "REINDEX DATABASE $DB_NAME;"
+ log_success "Index rebuild completed"
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log "Running REINDEX..."
+ execute_sql "REINDEX;"
+ log_success "Index rebuild completed"
+ fi
+}
+
+# Check database integrity
+check_integrity() {
+ print_header "Database Integrity Check"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ # Check for corruption
+ log "Checking for table corruption..."
+ execute_sql "
+ SELECT
+ schemaname,
+ tablename,
+ n_dead_tup,
+ n_live_tup,
+ CASE
+ WHEN n_live_tup = 0 THEN 0
+ ELSE round((n_dead_tup::float / n_live_tup::float) * 100, 2)
+ END as bloat_ratio
+ FROM pg_stat_user_tables
+ WHERE n_dead_tup > 0
+ ORDER BY bloat_ratio DESC;
+ "
+
+ # Check for missing indexes on foreign keys
+ log "Checking for missing indexes on foreign keys..."
+ execute_sql "
+ SELECT
+ c.conrelid::regclass as table_name,
+ string_agg(a.attname, ', ') as columns,
+ 'Missing index on foreign key' as issue
+ FROM pg_constraint c
+ JOIN pg_attribute a ON a.attnum = ANY(c.conkey) AND a.attrelid = c.conrelid
+ WHERE c.contype = 'f'
+ AND NOT EXISTS (
+ SELECT 1 FROM pg_index i
+ WHERE i.indrelid = c.conrelid
+ AND c.conkey[1:array_length(c.conkey,1)] <@ i.indkey[0:array_length(i.indkey,1)]
+ )
+ GROUP BY c.conrelid, c.conname;
+ "
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ log "Running integrity check..."
+ local result=$(execute_sql "PRAGMA integrity_check;" true)
+ if [ "$result" = "ok" ]; then
+ log_success "Database integrity check passed"
+ else
+ log_error "Database integrity issues found: $result"
+ fi
+ fi
+}
+
+# Clean up temporary data
+cleanup_database() {
+ print_header "Database Cleanup"
+
+ if [ "$DRY_RUN" = "true" ]; then
+ log "Would clean up temporary database data"
+ return
+ fi
+
+ # Clean up temporary directories
+ if [ -d "$TEMP_DIR" ]; then
+ log "Cleaning temporary directory..."
+ rm -rf "$TEMP_DIR"/*
+ log_success "Temporary files cleaned"
+ fi
+
+ # Clean up old log files
+ if [ -d "$LOGS_DIR" ]; then
+ log "Cleaning old log files..."
+ find "$LOGS_DIR" -name "*.log" -mtime +$LOG_RETENTION_DAYS -delete
+ log_success "Old log files cleaned"
+ fi
+
+ # Database-specific cleanup
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ log "Cleaning expired sessions..."
+ execute_sql "
+ SELECT pg_terminate_backend(pid)
+ FROM pg_stat_activity
+ WHERE state = 'idle'
+ AND query_start < now() - interval '1 hour';
+ " >/dev/null 2>&1 || true
+ log_success "Expired sessions cleaned"
+ fi
+}
+
+# Test database connection
+test_connection() {
+ print_header "Database Connection Test"
+
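+    # Millisecond timestamps via %3N require GNU date; on macOS/BSD install
+    # coreutils, otherwise the arithmetic below may fail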
+ local start_time=$(date +%s%3N)
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ export PGPASSWORD="$DB_PASS"
+ if pg_isready -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" >/dev/null 2>&1; then
+ log_success "PostgreSQL server is accepting connections"
+
+ # Test actual query
+ if execute_sql "SELECT 1;" >/dev/null 2>&1; then
+ local end_time=$(date +%s%3N)
+ local response_time=$((end_time - start_time))
+ log_success "Database connection successful (${response_time}ms)"
+ else
+ log_error "Database connection failed"
+ fi
+ else
+ log_error "PostgreSQL server is not accepting connections"
+ fi
+ unset PGPASSWORD
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ if [ -f "$DB_FILE" ]; then
+ if execute_sql "SELECT 1;" >/dev/null 2>&1; then
+ local end_time=$(date +%s%3N)
+ local response_time=$((end_time - start_time))
+ log_success "SQLite database accessible (${response_time}ms)"
+ else
+ log_error "SQLite database access failed"
+ fi
+ else
+ log_error "SQLite database file not found: $DB_FILE"
+ fi
+ fi
+}
+
+# Find duplicate data
+find_duplicates() {
+ local table_name="$1"
+
+ if [ -z "$table_name" ]; then
+ log_error "Table name is required for duplicate detection"
+ return 1
+ fi
+
+ print_header "Finding Duplicate Data in $table_name"
+
+ if [ "$DB_TYPE" = "postgresql" ]; then
+ # Get table columns
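+        # id/created_at/updated_at are excluded so rows differing only in
+        # surrogate keys or timestamps still count as duplicates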
+ local columns=$(execute_sql "
+ SELECT string_agg(column_name, ', ')
+ FROM information_schema.columns
+ WHERE table_name = '$table_name'
+ AND column_name NOT IN ('id', 'created_at', 'updated_at');
+ " true)
+
+ if [ -n "$columns" ]; then
+ execute_sql "
+ SELECT $columns, COUNT(*) as duplicate_count
+ FROM $table_name
+ GROUP BY $columns
+ HAVING COUNT(*) > 1
+ ORDER BY duplicate_count DESC
+ LIMIT $LIMIT;
+ "
+ else
+ log_warn "No suitable columns found for duplicate detection"
+ fi
+
+ elif [ "$DB_TYPE" = "sqlite" ]; then
+ # Basic duplicate detection for SQLite
+ execute_sql "
+ SELECT *, COUNT(*) as duplicate_count
+ FROM $table_name
+ GROUP BY *
+ HAVING COUNT(*) > 1
+ LIMIT $LIMIT;
+ "
+ fi
+}
+
+# Run database benchmarks
+run_benchmarks() {
+ print_header "Database Benchmarks"
+
+ log "Running basic performance tests..."
+
+    # Each execute_sql call opens a fresh connection, so a TEMP table would
+    # vanish between statements; use a regular throwaway table instead.
+    # Timings therefore include per-statement connection overhead.
+    local start_time=$(date +%s%3N)
+    execute_sql "DROP TABLE IF EXISTS benchmark_test;" >/dev/null 2>&1
+    if [ "$DB_TYPE" = "postgresql" ]; then
+        execute_sql "
+            CREATE TABLE benchmark_test (
+                id SERIAL PRIMARY KEY,
+                data TEXT,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+            );
+        " >/dev/null 2>&1
+    else
+        execute_sql "
+            CREATE TABLE benchmark_test (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                data TEXT,
+                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+            );
+        " >/dev/null 2>&1
+    fi
+
+    # Insert test data
+    for i in {1..1000}; do
+        execute_sql "INSERT INTO benchmark_test (data) VALUES ('test_data_$i');" >/dev/null 2>&1
+    done
+
+ local end_time=$(date +%s%3N)
+ local insert_time=$((end_time - start_time))
+ log "1000 INSERTs completed in ${insert_time}ms"
+
+ # SELECT benchmark
+ start_time=$(date +%s%3N)
+ execute_sql "SELECT COUNT(*) FROM benchmark_test;" >/dev/null 2>&1
+ end_time=$(date +%s%3N)
+ local select_time=$((end_time - start_time))
+ log "COUNT query completed in ${select_time}ms"
+
+    # Cleanup
+    execute_sql "DROP TABLE IF EXISTS benchmark_test;" >/dev/null 2>&1
+
+ log_success "Benchmark completed"
+}
+
+# Parse command line arguments
+COMMAND=""
+ENVIRONMENT="dev"
+TABLE_NAME=""
+SCHEMA_NAME=""
+QUERY_ID=""
+LIMIT=100
+OUTPUT_FORMAT="table"
+OUTPUT_FILE=""
+FORCE="false"
+DEBUG="false"
+QUIET="false"
+DRY_RUN="false"
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --env)
+ ENVIRONMENT="$2"
+ shift 2
+ ;;
+ --table)
+ TABLE_NAME="$2"
+ shift 2
+ ;;
+ --schema)
+ SCHEMA_NAME="$2"
+ shift 2
+ ;;
+ --query-id)
+ QUERY_ID="$2"
+ shift 2
+ ;;
+ --limit)
+ LIMIT="$2"
+ shift 2
+ ;;
+ --output)
+ OUTPUT_FORMAT="$2"
+ shift 2
+ ;;
+ --file)
+ OUTPUT_FILE="$2"
+ shift 2
+ ;;
+ --force)
+ FORCE="true"
+ shift
+ ;;
+ --debug)
+ DEBUG="true"
+ shift
+ ;;
+ --quiet)
+ QUIET="true"
+ shift
+ ;;
+ --dry-run)
+ DRY_RUN="true"
+ shift
+ ;;
+ -h|--help)
+ print_usage
+ exit 0
+ ;;
+ *)
+ if [ -z "$COMMAND" ]; then
+ COMMAND="$1"
+ else
+ log_error "Unknown option: $1"
+ print_usage
+ exit 1
+ fi
+ shift
+ ;;
+ esac
+done
+
+# Set environment variable
+export ENVIRONMENT="$ENVIRONMENT"
+
+# Validate command
+if [ -z "$COMMAND" ]; then
+ print_usage
+ exit 1
+fi
+
+# Check if we're in the right directory
+if [ ! -f "Cargo.toml" ]; then
+ log_error "Please run this script from the project root directory"
+ exit 1
+fi
+
+# Load environment and parse database URL
+load_env
+parse_database_url
+
+# Setup directories
+setup_directories
+
+# Execute command
+case "$COMMAND" in
+ "size")
+ show_database_size
+ ;;
+ "tables")
+ show_tables
+ ;;
+ "indexes")
+ show_indexes
+ ;;
+ "constraints")
+ show_constraints
+ ;;
+ "users")
+ show_users
+ ;;
+ "permissions")
+ show_users
+ ;;
+ "sessions")
+ show_sessions
+ ;;
+ "locks")
+ show_locks
+ ;;
+ "queries")
+ show_queries
+ ;;
+ "kill-query")
+ kill_query "$QUERY_ID"
+ ;;
+ "optimize")
+ optimize_database
+ ;;
+ "reindex")
+ rebuild_indexes
+ ;;
+ "check-integrity")
+ check_integrity
+ ;;
+ "repair")
+ log_warn "Database repair not yet implemented"
+ ;;
+ "cleanup")
+ cleanup_database
+ ;;
+ "logs")
+ log_warn "Database log viewing not yet implemented"
+ ;;
+ "config")
+ log_warn "Database configuration display not yet implemented"
+ ;;
+ "extensions")
+ log_warn "Extension listing not yet implemented"
+ ;;
+ "sequences")
+ log_warn "Sequence information not yet implemented"
+ ;;
+ "triggers")
+ log_warn "Trigger information not yet implemented"
+ ;;
+ "functions")
+ log_warn "Function information not yet implemented"
+ ;;
+ "views")
+ log_warn "View information not yet implemented"
+ ;;
+ "schema-info")
+ show_database_size
+ show_tables
+ show_indexes
+ show_constraints
+ ;;
+ "duplicate-data")
+ find_duplicates "$TABLE_NAME"
+ ;;
+ "orphaned-data")
+ log_warn "Orphaned data detection not yet implemented"
+ ;;
+ "table-stats")
+ show_table_details "$TABLE_NAME"
+ ;;
+ "connection-test")
+ test_connection
+ ;;
+ "benchmark")
+ run_benchmarks
+ ;;
+ "export-schema")
+ log_warn "Schema export not yet implemented"
+ ;;
+ "import-schema")
+ log_warn "Schema import not yet implemented"
+ ;;
+ "copy-table")
+ log_warn "Table copy not yet implemented"
+ ;;
+ "truncate-table")
+ if [ -n "$TABLE_NAME" ]; then
+ if [ "$FORCE" != "true" ]; then
+ echo -n "This will delete all data in table '$TABLE_NAME'. Continue? (y/N): "
+ read -r confirm
+ if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
+ log "Truncate cancelled"
+ exit 0
+ fi
+ fi
+ execute_sql "TRUNCATE TABLE $TABLE_NAME;"
+ log_success "Table $TABLE_NAME truncated"
+ else
+ log_error "Table name is required"
+ fi
+ ;;
+ "reset-sequence")
+ log_warn "Sequence reset not yet implemented"
+ ;;
+ *)
+ log_error "Unknown command: $COMMAND"
+ print_usage
+ exit 1
+ ;;
+esac
diff --git a/scripts/databases/db.sh b/scripts/databases/db.sh
new file mode 100755
index 0000000..85329d6
--- /dev/null
+++ b/scripts/databases/db.sh
@@ -0,0 +1,420 @@
+#!/bin/bash
+
+# Database Management Master Script
+# Central hub for all database operations and tools
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+CYAN='\033[0;36m'
+BOLD='\033[1m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+# Two levels up: scripts/databases -> scripts -> project root
+PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Logging functions
+log() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+log_warn() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+log_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+log_success() {
+ echo -e "${GREEN}[SUCCESS]${NC} $1"
+}
+
+print_header() {
+ echo -e "${BLUE}${BOLD}=== $1 ===${NC}"
+}
+
+print_subheader() {
+ echo -e "${CYAN}--- $1 ---${NC}"
+}
+
+print_usage() {
+ echo -e "${BOLD}Database Management Hub${NC}"
+ echo
+ echo "Usage: $0 [options]"
+ echo
+ echo -e "${BOLD}Categories:${NC}"
+ echo
+ echo -e "${CYAN}setup${NC} Database setup and initialization"
+ echo " setup Full database setup (create + migrate + seed)"
+ echo " create Create the database"
+ echo " migrate Run migrations"
+ echo " seed Seed database with test data"
+ echo " reset Reset database (drop + create + migrate)"
+ echo " status Show migration status"
+ echo " drop Drop the database"
+ echo " postgres Setup PostgreSQL database"
+ echo " sqlite Setup SQLite database"
+ echo
+ echo -e "${CYAN}backup${NC} Backup and restore operations"
+ echo " backup Create database backup"
+ echo " restore Restore database from backup"
+ echo " list List available backups"
+ echo " clean Clean old backups"
+ echo " export Export data to JSON/CSV"
+ echo " import Import data from JSON/CSV"
+ echo " clone Clone database to different name"
+ echo " compare Compare two databases"
+ echo
+ echo -e "${CYAN}monitor${NC} Monitoring and health checks"
+ echo " health Complete health check"
+ echo " status Quick status check"
+ echo " connections Show active connections"
+ echo " performance Show performance metrics"
+ echo " slow-queries Show slow queries"
+ echo " locks Show database locks"
+ echo " disk-usage Show disk usage"
+ echo " memory-usage Show memory usage"
+ echo " backup-status Check backup status"
+ echo " monitor Start continuous monitoring"
+ echo " alerts Check for alerts"
+ echo " vacuum Perform database maintenance"
+ echo " analyze Update database statistics"
+ echo " report Generate comprehensive report"
+ echo
+ echo -e "${CYAN}migrate${NC} Migration management"
+ echo " status Show migration status"
+ echo " pending List pending migrations"
+ echo " applied List applied migrations"
+ echo " run Run pending migrations"
+ echo " rollback Rollback migrations"
+ echo " create Create new migration"
+ echo " generate Generate migration from schema diff"
+ echo " validate Validate migration files"
+ echo " dry-run Show what would be migrated"
+ echo " force Force migration state"
+ echo " repair Repair migration table"
+ echo " baseline Set migration baseline"
+ echo " history Show migration history"
+ echo " schema-dump Dump current schema"
+ echo " data-migrate Migrate data between schemas"
+ echo " template Manage migration templates"
+ echo
+ echo -e "${CYAN}utils${NC} Database utilities and maintenance"
+ echo " size Show database size information"
+ echo " tables List all tables with row counts"
+ echo " indexes Show index information"
+ echo " constraints Show table constraints"
+ echo " users Show database users (PostgreSQL only)"
+ echo " permissions Show user permissions"
+ echo " sessions Show active sessions"
+ echo " locks Show current locks"
+ echo " queries Show running queries"
+ echo " kill-query Kill a specific query"
+ echo " optimize Optimize database (VACUUM, ANALYZE)"
+ echo " reindex Rebuild indexes"
+ echo " check-integrity Check database integrity"
+ echo " repair Repair database issues"
+ echo " cleanup Clean up temporary data"
+ echo " logs Show database logs"
+ echo " config Show database configuration"
+ echo " extensions List database extensions (PostgreSQL)"
+ echo " sequences Show sequence information"
+ echo " triggers Show table triggers"
+ echo " functions Show user-defined functions"
+ echo " views Show database views"
+ echo " schema-info Show comprehensive schema information"
+ echo " duplicate-data Find duplicate records"
+ echo " orphaned-data Find orphaned records"
+ echo " table-stats Show detailed table statistics"
+ echo " connection-test Test database connection"
+ echo " benchmark Run database benchmarks"
+ echo " export-schema Export database schema"
+ echo " import-schema Import database schema"
+ echo " copy-table Copy table data"
+ echo " truncate-table Truncate table data"
+ echo " reset-sequence Reset sequence values"
+ echo
+ echo -e "${BOLD}Common Options:${NC}"
+ echo " --env ENV Environment (dev/prod) [default: dev]"
+ echo " --force Skip confirmations"
+ echo " --quiet Suppress verbose output"
+ echo " --debug Enable debug output"
+ echo " --dry-run Show what would be done without executing"
+ echo " --help Show category-specific help"
+ echo
+ echo -e "${BOLD}Quick Commands:${NC}"
+ echo " $0 status Quick database status"
+ echo " $0 health Complete health check"
+ echo " $0 backup Create backup"
+ echo " $0 migrate Run migrations"
+ echo " $0 optimize Optimize database"
+ echo
+ echo -e "${BOLD}Examples:${NC}"
+ echo " $0 setup create # Create database"
+ echo " $0 setup migrate # Run migrations"
+ echo " $0 backup create # Create backup"
+ echo " $0 backup restore --file backup.sql # Restore from backup"
+ echo " $0 monitor health # Health check"
+ echo " $0 monitor connections # Show connections"
+ echo " $0 migrate create --name add_users # Create migration"
+ echo " $0 migrate run # Run pending migrations"
+ echo " $0 utils size # Show database size"
+ echo " $0 utils optimize # Optimize database"
+ echo
+ echo -e "${BOLD}For detailed help on a specific category:${NC}"
+ echo " $0 setup --help"
+ echo " $0 backup --help"
+ echo " $0 monitor --help"
+ echo " $0 migrate --help"
+ echo " $0 utils --help"
+}
+
+# Check if required scripts exist
+check_scripts() {
+ local missing_scripts=()
+
+ if [ ! -f "$SCRIPT_DIR/db-setup.sh" ]; then
+ missing_scripts+=("db-setup.sh")
+ fi
+
+ if [ ! -f "$SCRIPT_DIR/db-backup.sh" ]; then
+ missing_scripts+=("db-backup.sh")
+ fi
+
+ if [ ! -f "$SCRIPT_DIR/db-monitor.sh" ]; then
+ missing_scripts+=("db-monitor.sh")
+ fi
+
+ if [ ! -f "$SCRIPT_DIR/db-migrate.sh" ]; then
+ missing_scripts+=("db-migrate.sh")
+ fi
+
+ if [ ! -f "$SCRIPT_DIR/db-utils.sh" ]; then
+ missing_scripts+=("db-utils.sh")
+ fi
+
+ if [ ${#missing_scripts[@]} -gt 0 ]; then
+ log_error "Missing required scripts: ${missing_scripts[*]}"
+ echo "Please ensure all database management scripts are present in the scripts directory."
+ exit 1
+ fi
+}
+
+# Make scripts executable
+make_scripts_executable() {
+ chmod +x "$SCRIPT_DIR"/db-*.sh 2>/dev/null || true
+}
+
+# Show quick status
+show_quick_status() {
+ print_header "Quick Database Status"
+
+ # Check if .env exists
+ if [ ! -f ".env" ]; then
+ log_error ".env file not found"
+ echo "Run: $0 setup create"
+ return 1
+ fi
+
+    # Load environment variables; set -a exports every assignment the file makes
+    set -a
+    # shellcheck disable=SC1091
+    . ./.env 2>/dev/null || true
+    set +a
+
+ # Show basic info
+ log "Environment: ${ENVIRONMENT:-dev}"
+ log "Database URL: ${DATABASE_URL:-not set}"
+
+ # Test connection
+ if command -v "$SCRIPT_DIR/db-utils.sh" >/dev/null 2>&1; then
+ "$SCRIPT_DIR/db-utils.sh" connection-test --quiet 2>/dev/null || log_warn "Database connection failed"
+ fi
+
+ # Show migration status
+ if command -v "$SCRIPT_DIR/db-migrate.sh" >/dev/null 2>&1; then
+ "$SCRIPT_DIR/db-migrate.sh" status --quiet 2>/dev/null || log_warn "Could not check migration status"
+ fi
+}
+
+# Show comprehensive health check
+show_health_check() {
+ print_header "Comprehensive Database Health Check"
+
+ if [ -f "$SCRIPT_DIR/db-monitor.sh" ]; then
+ "$SCRIPT_DIR/db-monitor.sh" health "$@"
+ else
+ log_error "db-monitor.sh not found"
+ exit 1
+ fi
+}
+
+# Create quick backup
+create_quick_backup() {
+ print_header "Quick Database Backup"
+
+ if [ -f "$SCRIPT_DIR/db-backup.sh" ]; then
+ "$SCRIPT_DIR/db-backup.sh" backup --compress "$@"
+ else
+ log_error "db-backup.sh not found"
+ exit 1
+ fi
+}
+
+# Run migrations
+run_migrations() {
+ print_header "Running Database Migrations"
+
+ if [ -f "$SCRIPT_DIR/db-migrate.sh" ]; then
+ "$SCRIPT_DIR/db-migrate.sh" run "$@"
+ else
+ log_error "db-migrate.sh not found"
+ exit 1
+ fi
+}
+
+# Optimize database
+optimize_database() {
+ print_header "Database Optimization"
+
+ if [ -f "$SCRIPT_DIR/db-utils.sh" ]; then
+ "$SCRIPT_DIR/db-utils.sh" optimize "$@"
+ else
+ log_error "db-utils.sh not found"
+ exit 1
+ fi
+}
+
+# Parse command line arguments
+CATEGORY=""
+COMMAND=""
+REMAINING_ARGS=()
+
+# Handle special single commands
+if [[ $# -eq 1 ]]; then
+ case $1 in
+ "status")
+ show_quick_status
+ exit 0
+ ;;
+ "health")
+ show_health_check
+ exit 0
+ ;;
+ "backup")
+ create_quick_backup
+ exit 0
+ ;;
+ "migrate")
+ run_migrations
+ exit 0
+ ;;
+ "optimize")
+ optimize_database
+ exit 0
+ ;;
+ "-h"|"--help")
+ print_usage
+ exit 0
+ ;;
+ esac
+fi
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ -h|--help)
+ if [ -n "$CATEGORY" ]; then
+ REMAINING_ARGS+=("$1")
+ else
+ print_usage
+ exit 0
+ fi
+ shift
+ ;;
+ *)
+ if [ -z "$CATEGORY" ]; then
+ CATEGORY="$1"
+ elif [ -z "$COMMAND" ]; then
+ COMMAND="$1"
+ else
+ REMAINING_ARGS+=("$1")
+ fi
+ shift
+ ;;
+ esac
+done
+
+# Check if we're in the right directory
+if [ ! -f "Cargo.toml" ]; then
+ log_error "Please run this script from the project root directory"
+ exit 1
+fi
+
+# Check that all required scripts exist
+check_scripts
+
+# Make scripts executable
+make_scripts_executable
+
+# Validate category and command
+if [ -z "$CATEGORY" ]; then
+ print_usage
+ exit 1
+fi
+
+# Route to appropriate script
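+# Each category execs into its dedicated script, replacing this shell so the
+# child's exit code propagates to the caller unchanged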
+case "$CATEGORY" in
+ "setup")
+ if [ -z "$COMMAND" ]; then
+ log_error "Command required for setup category"
+ echo "Use: $0 setup --help for available commands"
+ exit 1
+ fi
+ exec "$SCRIPT_DIR/db-setup.sh" "$COMMAND" "${REMAINING_ARGS[@]}"
+ ;;
+ "backup")
+ if [ -z "$COMMAND" ]; then
+ log_error "Command required for backup category"
+ echo "Use: $0 backup --help for available commands"
+ exit 1
+ fi
+ exec "$SCRIPT_DIR/db-backup.sh" "$COMMAND" "${REMAINING_ARGS[@]}"
+ ;;
+ "monitor")
+ if [ -z "$COMMAND" ]; then
+ log_error "Command required for monitor category"
+ echo "Use: $0 monitor --help for available commands"
+ exit 1
+ fi
+ exec "$SCRIPT_DIR/db-monitor.sh" "$COMMAND" "${REMAINING_ARGS[@]}"
+ ;;
+ "migrate")
+ if [ -z "$COMMAND" ]; then
+ log_error "Command required for migrate category"
+ echo "Use: $0 migrate --help for available commands"
+ exit 1
+ fi
+ exec "$SCRIPT_DIR/db-migrate.sh" "$COMMAND" "${REMAINING_ARGS[@]}"
+ ;;
+ "utils")
+ if [ -z "$COMMAND" ]; then
+ log_error "Command required for utils category"
+ echo "Use: $0 utils --help for available commands"
+ exit 1
+ fi
+ exec "$SCRIPT_DIR/db-utils.sh" "$COMMAND" "${REMAINING_ARGS[@]}"
+ ;;
+ *)
+ log_error "Unknown category: $CATEGORY"
+ echo
+ print_usage
+ exit 1
+ ;;
+esac
diff --git a/scripts/deploy.sh b/scripts/deploy.sh
new file mode 100755
index 0000000..6689391
--- /dev/null
+++ b/scripts/deploy.sh
@@ -0,0 +1,563 @@
+#!/bin/bash
+
+# Rustelo Application Deployment Script
+# This script handles deployment of the Rustelo application in various environments
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Default values
+ENVIRONMENT="production"
+COMPOSE_FILE="docker-compose.yml"
+BUILD_ARGS=""
+MIGRATE_DB=false
+BACKUP_DB=false
+HEALTH_CHECK=true
+TIMEOUT=300
+PROJECT_NAME="rustelo"
+DOCKER_REGISTRY=""
+IMAGE_TAG="latest"
+FORCE_RECREATE=false
+SCALE_REPLICAS=1
+FEATURES="production"
+USE_DEFAULT_FEATURES=false
+FOLLOW_LOGS=false
+
+# Function to print colored output
+print_status() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+print_warning() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+print_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+print_debug() {
+ if [[ "$DEBUG" == "true" ]]; then
+ echo -e "${BLUE}[DEBUG]${NC} $1"
+ fi
+}
+
+# Function to show usage
+show_usage() {
+ cat << EOF
+Usage: $0 [OPTIONS] COMMAND
+
+Commands:
+ deploy Deploy the application
+ stop Stop the application
+ restart Restart the application
+ status Show deployment status
+ logs Show application logs
+ scale Scale application replicas
+ backup Create database backup
+ migrate Run database migrations
+ rollback Rollback to previous version
+ health Check application health
+ update Update application to latest version
+ clean Clean up unused containers and images
+
+Options:
+ -e, --env ENV Environment (dev|staging|production) [default: production]
+ -f, --file FILE Docker compose file [default: docker-compose.yml]
+ -p, --project PROJECT Project name [default: rustelo]
+ -t, --tag TAG Docker image tag [default: latest]
+ -r, --registry REGISTRY Docker registry URL
+ -s, --scale REPLICAS Number of replicas [default: 1]
+ --migrate Run database migrations before deployment
+ --backup Create database backup before deployment
+    --no-health-check       Skip health check after deployment
+    --follow                Follow log output (used with the logs command)
+ --force-recreate Force recreation of containers
+ --timeout SECONDS Deployment timeout [default: 300]
+ --build-arg ARG Docker build arguments
+ --features FEATURES Cargo features to enable [default: production]
+ --default-features Use default features instead of custom
+ --debug Enable debug output
+ -h, --help Show this help message
+
+Examples:
+ $0 deploy # Deploy production
+ $0 deploy -e staging # Deploy staging
+ $0 deploy --migrate --backup # Deploy with migration and backup
+ $0 scale -s 3 # Scale to 3 replicas
+    $0 logs --follow                     # Follow logs
+ $0 health # Check health status
+ $0 deploy --features "auth,metrics" # Deploy with specific features
+ $0 deploy --default-features # Deploy with all default features
+
+Environment Variables:
+ DOCKER_REGISTRY Docker registry URL
+ RUSTELO_ENV Environment override
+ COMPOSE_PROJECT_NAME Docker compose project name
+ DATABASE_URL Database connection string
+ DEBUG Enable debug mode
+EOF
+}
+
+# Function to parse command line arguments
+parse_args() {
+ while [[ $# -gt 0 ]]; do
+ case $1 in
+ -e|--env)
+ ENVIRONMENT="$2"
+ shift 2
+ ;;
+ -f|--file)
+ COMPOSE_FILE="$2"
+ shift 2
+ ;;
+ -p|--project)
+ PROJECT_NAME="$2"
+ shift 2
+ ;;
+ -t|--tag)
+ IMAGE_TAG="$2"
+ shift 2
+ ;;
+ -r|--registry)
+ DOCKER_REGISTRY="$2"
+ shift 2
+ ;;
+ -s|--scale)
+ SCALE_REPLICAS="$2"
+ shift 2
+ ;;
+ --migrate)
+ MIGRATE_DB=true
+ shift
+ ;;
+ --backup)
+ BACKUP_DB=true
+ shift
+ ;;
+            --no-health-check)
+                HEALTH_CHECK=false
+                shift
+                ;;
+            --follow)
+                FOLLOW_LOGS=true
+                shift
+                ;;
+ --force-recreate)
+ FORCE_RECREATE=true
+ shift
+ ;;
+ --timeout)
+ TIMEOUT="$2"
+ shift 2
+ ;;
+ --build-arg)
+ BUILD_ARGS="$BUILD_ARGS --build-arg $2"
+ shift 2
+ ;;
+ --features)
+ FEATURES="$2"
+ shift 2
+ ;;
+ --default-features)
+ USE_DEFAULT_FEATURES=true
+ shift
+ ;;
+ --debug)
+ DEBUG=true
+ shift
+ ;;
+ -h|--help)
+ show_usage
+ exit 0
+ ;;
+ -*)
+ print_error "Unknown option: $1"
+ show_usage
+ exit 1
+ ;;
+ *)
+ COMMAND="$1"
+ shift
+ ;;
+ esac
+ done
+}
+
+# Function to validate environment
+validate_environment() {
+    case $ENVIRONMENT in
+        dev|development)
+            ENVIRONMENT="development"
+            ;;
+        staging)
+            ENVIRONMENT="staging"
+            # Respect an explicit -f/--file; only remap the default compose file
+            if [[ "$COMPOSE_FILE" == "docker-compose.yml" ]]; then
+                COMPOSE_FILE="docker-compose.staging.yml"
+            fi
+            ;;
+        prod|production)
+            ENVIRONMENT="production"
+            ;;
+        *)
+            print_error "Invalid environment: $ENVIRONMENT"
+            print_error "Valid environments: dev, staging, production"
+            exit 1
+            ;;
+}
+
+# Function to check prerequisites
+check_prerequisites() {
+ print_status "Checking prerequisites..."
+
+ # Check if Docker is installed and running
+ if ! command -v docker &> /dev/null; then
+ print_error "Docker is not installed or not in PATH"
+ exit 1
+ fi
+
+ if ! docker info &> /dev/null; then
+ print_error "Docker daemon is not running"
+ exit 1
+ fi
+
+    # Check if Docker Compose is installed (this script drives the standalone
+    # v1 "docker-compose" binary, not the "docker compose" plugin)
+    if ! command -v docker-compose &> /dev/null; then
+ print_error "Docker Compose is not installed or not in PATH"
+ exit 1
+ fi
+
+ # Check if compose file exists
+ if [[ ! -f "$COMPOSE_FILE" ]]; then
+ print_error "Compose file not found: $COMPOSE_FILE"
+ exit 1
+ fi
+
+ print_status "Prerequisites check passed"
+}
+
+# Function to set environment variables
+set_environment_vars() {
+ export COMPOSE_PROJECT_NAME="${PROJECT_NAME}"
+ export DOCKER_REGISTRY="${DOCKER_REGISTRY}"
+ export IMAGE_TAG="${IMAGE_TAG}"
+ export ENVIRONMENT="${ENVIRONMENT}"
+
+    # Source environment-specific variables; set -a exports plain KEY=VALUE
+    # assignments so child processes (e.g. docker-compose) can see them
+    if [[ -f ".env.${ENVIRONMENT}" ]]; then
+        print_status "Loading environment variables from .env.${ENVIRONMENT}"
+        set -a
+        source ".env.${ENVIRONMENT}"
+        set +a
+    elif [[ -f ".env" ]]; then
+        print_status "Loading environment variables from .env"
+        set -a
+        source ".env"
+        set +a
+    fi
+
+ print_debug "Environment variables set:"
+ print_debug " COMPOSE_PROJECT_NAME=${COMPOSE_PROJECT_NAME}"
+ print_debug " DOCKER_REGISTRY=${DOCKER_REGISTRY}"
+ print_debug " IMAGE_TAG=${IMAGE_TAG}"
+ print_debug " ENVIRONMENT=${ENVIRONMENT}"
+ print_debug " FEATURES=${FEATURES}"
+ print_debug " USE_DEFAULT_FEATURES=${USE_DEFAULT_FEATURES}"
+}
+
+# Function to build Docker images
+build_images() {
+    print_status "Building Docker images..."
+
+    # Build the command as an array; embedding escaped quotes in a string and
+    # expanding it unquoted would pass literal quote characters to docker
+    local build_cmd=(docker-compose -f "$COMPOSE_FILE" build)
+
+    if [[ -n "$BUILD_ARGS" ]]; then
+        # BUILD_ARGS is a space-separated list of "--build-arg KEY=VALUE" pairs
+        # shellcheck disable=SC2206
+        build_cmd+=($BUILD_ARGS)
+    fi
+
+    # Add feature arguments to build args
+    if [[ "$USE_DEFAULT_FEATURES" == "false" ]]; then
+        build_cmd+=(--build-arg "CARGO_FEATURES=$FEATURES" --build-arg "NO_DEFAULT_FEATURES=true")
+    else
+        build_cmd+=(--build-arg "CARGO_FEATURES=" --build-arg "NO_DEFAULT_FEATURES=false")
+    fi
+
+    print_debug "Build command: ${build_cmd[*]}"
+
+    if ! "${build_cmd[@]}"; then
+        print_error "Failed to build Docker images"
+        exit 1
+    fi
+
+    print_status "Docker images built successfully"
+}
+
+# Function to create database backup
+create_backup() {
+ if [[ "$BACKUP_DB" == "true" ]]; then
+ print_status "Creating database backup..."
+
+ local backup_file="backup_$(date +%Y%m%d_%H%M%S).sql"
+
+ if docker-compose -f "$COMPOSE_FILE" exec -T db pg_dump -U postgres rustelo_prod > "$backup_file"; then
+ print_status "Database backup created: $backup_file"
+ else
+ print_error "Failed to create database backup"
+ exit 1
+ fi
+ fi
+}
+
+# Function to run database migrations
+run_migrations() {
+ if [[ "$MIGRATE_DB" == "true" ]]; then
+ print_status "Running database migrations..."
+
+ if docker-compose -f "$COMPOSE_FILE" run --rm migrate; then
+ print_status "Database migrations completed successfully"
+ else
+ print_error "Database migrations failed"
+ exit 1
+ fi
+ fi
+}
+
+# Function to deploy application
+deploy_application() {
+ print_status "Deploying application..."
+
+ local compose_cmd="docker-compose -f $COMPOSE_FILE up -d"
+
+ if [[ "$FORCE_RECREATE" == "true" ]]; then
+ compose_cmd="$compose_cmd --force-recreate"
+ fi
+
+ if [[ "$SCALE_REPLICAS" -gt 1 ]]; then
+ compose_cmd="$compose_cmd --scale app=$SCALE_REPLICAS"
+ fi
+
+ if [[ "$DEBUG" == "true" ]]; then
+ print_debug "Deploy command: $compose_cmd"
+ fi
+
+ if ! $compose_cmd; then
+ print_error "Failed to deploy application"
+ exit 1
+ fi
+
+ print_status "Application deployed successfully"
+}
+
+# Function to wait for application to be ready
+wait_for_health() {
+ if [[ "$HEALTH_CHECK" == "true" ]]; then
+ print_status "Waiting for application to be healthy..."
+
+ local start_time=$(date +%s)
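+        # Assumes the app serves /health on host port 3030 (compose port mapping)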
+ local health_url="http://localhost:3030/health"
+
+ while true; do
+ local current_time=$(date +%s)
+ local elapsed=$((current_time - start_time))
+
+ if [[ $elapsed -gt $TIMEOUT ]]; then
+ print_error "Health check timeout after ${TIMEOUT} seconds"
+ exit 1
+ fi
+
+ if curl -f -s "$health_url" > /dev/null 2>&1; then
+ print_status "Application is healthy"
+ break
+ fi
+
+ print_debug "Health check failed, retrying in 5 seconds... (${elapsed}s elapsed)"
+ sleep 5
+ done
+ fi
+}
+
+# Function to show deployment status
+show_status() {
+ print_status "Deployment status:"
+ docker-compose -f "$COMPOSE_FILE" ps
+
+ print_status "Container resource usage:"
+ docker stats --no-stream --format "table {{.Container}}\t{{.CPUPerc}}\t{{.MemUsage}}\t{{.NetIO}}\t{{.BlockIO}}"
+}
+
+# Function to show logs
+show_logs() {
+ local follow_flag=""
+ if [[ "$1" == "-f" ]]; then
+ follow_flag="-f"
+ fi
+
+ docker-compose -f "$COMPOSE_FILE" logs $follow_flag
+}
+
+# Function to scale application
+scale_application() {
+ print_status "Scaling application to $SCALE_REPLICAS replicas..."
+
+ if docker-compose -f "$COMPOSE_FILE" up -d --scale app="$SCALE_REPLICAS"; then
+ print_status "Application scaled successfully"
+ else
+ print_error "Failed to scale application"
+ exit 1
+ fi
+}
+
+# Function to stop application
+stop_application() {
+ print_status "Stopping application..."
+
+ if docker-compose -f "$COMPOSE_FILE" down; then
+ print_status "Application stopped successfully"
+ else
+ print_error "Failed to stop application"
+ exit 1
+ fi
+}
+
+# Function to restart application
+restart_application() {
+ print_status "Restarting application..."
+
+ if docker-compose -f "$COMPOSE_FILE" restart; then
+ print_status "Application restarted successfully"
+ else
+ print_error "Failed to restart application"
+ exit 1
+ fi
+}
+
+# Function to check application health
+check_health() {
+ print_status "Checking application health..."
+
+ local health_url="http://localhost:3030/health"
+
+ if curl -f -s "$health_url" | jq '.status' | grep -q "healthy"; then
+ print_status "Application is healthy"
+
+ # Show detailed health information
+ curl -s "$health_url" | jq .
+ else
+ print_error "Application is not healthy"
+ exit 1
+ fi
+}
+
+# Function to update application
+update_application() {
+ print_status "Updating application..."
+
+ # Pull latest images
+ docker-compose -f "$COMPOSE_FILE" pull
+
+ # Restart with new images
+ docker-compose -f "$COMPOSE_FILE" up -d --force-recreate
+
+ print_status "Application updated successfully"
+}
+
+# Function to rollback application
+rollback_application() {
+ print_warning "Rollback functionality not implemented yet"
+ print_warning "Please manually specify the desired image tag and redeploy"
+}
+
+# Function to clean up
+cleanup() {
+ print_status "Cleaning up unused containers and images..."
+
+ # Remove stopped containers
+ docker container prune -f
+
+ # Remove unused images
+ docker image prune -f
+
+    # Remove unused volumes (destructive: data in unreferenced volumes is lost)
+ docker volume prune -f
+
+ # Remove unused networks
+ docker network prune -f
+
+ print_status "Cleanup completed"
+}
+
+# Main function
+main() {
+ # Parse command line arguments
+ parse_args "$@"
+
+ # Validate command
+ if [[ -z "$COMMAND" ]]; then
+ print_error "No command specified"
+ show_usage
+ exit 1
+ fi
+
+ # Validate environment
+ validate_environment
+
+ # Check prerequisites
+ check_prerequisites
+
+ # Set environment variables
+ set_environment_vars
+
+ # Execute command
+ case $COMMAND in
+ deploy)
+ build_images
+ create_backup
+ run_migrations
+ deploy_application
+ wait_for_health
+ show_status
+ ;;
+ stop)
+ stop_application
+ ;;
+ restart)
+ restart_application
+ wait_for_health
+ ;;
+ status)
+ show_status
+ ;;
+ logs)
+ show_logs "$@"
+ ;;
+ scale)
+ scale_application
+ ;;
+ backup)
+ create_backup
+ ;;
+ migrate)
+ run_migrations
+ ;;
+ rollback)
+ rollback_application
+ ;;
+ health)
+ check_health
+ ;;
+ update)
+ update_application
+ wait_for_health
+ ;;
+ clean)
+ cleanup
+ ;;
+ *)
+ print_error "Unknown command: $COMMAND"
+ show_usage
+ exit 1
+ ;;
+ esac
+}
+
+# Run main function
+main "$@"
diff --git a/scripts/docs/QUICK_REFERENCE.md b/scripts/docs/QUICK_REFERENCE.md
new file mode 100644
index 0000000..95ca4cd
--- /dev/null
+++ b/scripts/docs/QUICK_REFERENCE.md
@@ -0,0 +1,233 @@
+# Documentation Scripts - Quick Reference
+
+## 📁 Script Organization
+
+All documentation-related scripts are now organized in `scripts/docs/`:
+
+```
+scripts/docs/
+├── README.md # Comprehensive documentation
+├── QUICK_REFERENCE.md # This file
+├── build-docs.sh # Main build system
+├── enhance-docs.sh # Cargo doc logo enhancement
+├── docs-dev.sh # Development server
+├── setup-docs.sh # Initial setup
+├── deploy-docs.sh # Deployment automation
+└── generate-content.sh # Content generation
+```
+
+## ⚡ Common Commands
+
+### Quick Start
+```bash
+# Build all documentation with logos
+./scripts/docs/build-docs.sh --all
+
+# Start development server
+./scripts/docs/docs-dev.sh
+
+# Enhance cargo docs with logos
+cargo doc --no-deps && ./scripts/docs/enhance-docs.sh
+```
+
+### Development Workflow
+```bash
+# 1. Setup (first time only)
+./scripts/docs/setup-docs.sh --full
+
+# 2. Start dev server with live reload
+./scripts/docs/docs-dev.sh --open
+
+# 3. Build and test
+./scripts/docs/build-docs.sh --watch
+```
+
+### Production Deployment
+```bash
+# Build everything
+./scripts/docs/build-docs.sh --all
+
+# Deploy to GitHub Pages
+./scripts/docs/deploy-docs.sh github-pages
+
+# Deploy to Netlify
+./scripts/docs/deploy-docs.sh netlify
+```
+
+## 🔧 Individual Scripts
+
+### `build-docs.sh` - Main Build System
+```bash
+./scripts/docs/build-docs.sh [OPTIONS]
+
+OPTIONS:
+ (none) Build mdBook only
+ --cargo Build cargo doc with logo enhancement
+ --all Build both mdBook and cargo doc
+ --serve Serve documentation locally
+ --watch Watch for changes and rebuild
+ --sync Sync existing docs into mdBook
+```
+
+### `enhance-docs.sh` - Logo Enhancement
+```bash
+./scripts/docs/enhance-docs.sh [OPTIONS]
+
+OPTIONS:
+ (none) Enhance cargo doc with logos
+ --clean Remove backup files
+ --restore Restore original files
+```
+
+### `docs-dev.sh` - Development Server
+```bash
+./scripts/docs/docs-dev.sh [OPTIONS]
+
+OPTIONS:
+ (none) Start on default port (3000)
+ --port N Use custom port
+ --open Auto-open browser
+```
+
+### `setup-docs.sh` - Initial Setup
+```bash
+./scripts/docs/setup-docs.sh [OPTIONS]
+
+OPTIONS:
+ (none) Basic setup
+ --full Complete setup with all features
+ --ci Setup for CI/CD environments
+```
+
+### `deploy-docs.sh` - Deployment
+```bash
+./scripts/docs/deploy-docs.sh PLATFORM [OPTIONS]
+
+PLATFORMS:
+ github-pages Deploy to GitHub Pages
+ netlify Deploy to Netlify
+ vercel Deploy to Vercel
+ custom Deploy to custom server
+
+OPTIONS:
+ --domain D Custom domain
+ --token T Authentication token
+```
+
+## 🎯 Common Use Cases
+
+### Logo Integration
+```bash
+# Add logos to cargo documentation
+cargo doc --no-deps
+./scripts/docs/enhance-docs.sh
+
+# Build everything with logos
+./scripts/docs/build-docs.sh --all
+```
+
+### Content Development
+```bash
+# Start development with live reload
+./scripts/docs/docs-dev.sh --open
+
+# Generate content from existing docs
+./scripts/docs/generate-content.sh --sync
+
+# Watch and rebuild on changes
+./scripts/docs/build-docs.sh --watch
+```
+
+### CI/CD Integration
+```bash
+# Setup for continuous integration
+./scripts/docs/setup-docs.sh --ci
+
+# Build and deploy automatically
+./scripts/docs/build-docs.sh --all
+./scripts/docs/deploy-docs.sh github-pages --token $GITHUB_TOKEN
+```
+
+## 🚨 Troubleshooting
+
+### Script Not Found
+```bash
+# Old path (DEPRECATED)
+./scripts/build-docs.sh
+
+# New path (CORRECT)
+./scripts/docs/build-docs.sh
+```
+
+### Permission Denied
+```bash
+# Make scripts executable
+chmod +x scripts/docs/*.sh
+```
+
+### Missing Dependencies
+```bash
+# Install required tools
+./scripts/docs/setup-docs.sh --full
+```
+
+### Logo Enhancement Fails
+```bash
+# Ensure cargo doc was built first
+cargo doc --no-deps
+
+# Then enhance
+./scripts/docs/enhance-docs.sh
+```
+
+## 📊 Output Locations
+
+```
+template/
+├── book-output/ # mdBook output
+│ └── html/ # Generated HTML files
+├── target/doc/ # Cargo doc output
+│ ├── server/ # Enhanced with logos
+│ ├── client/ # Enhanced with logos
+│ └── logos/ # Logo assets
+└── dist/ # Combined for deployment
+ ├── book/ # mdBook content
+ └── api/ # API documentation
+```
+
+## 🔗 Related Files
+
+- **Main Config:** `book.toml` - mdBook configuration
+- **Logo Assets:** `logos/` - Source logo files
+- **Public Assets:** `public/logos/` - Web-accessible logos
+- **Components:** `client/src/components/Logo.rs` - React logo components
+- **Templates:** `docs/LOGO_TEMPLATE.md` - Logo usage templates
+
+## 📞 Getting Help
+
+```bash
+# Show help for any script
+./scripts/docs/SCRIPT_NAME.sh --help
+
+# View comprehensive documentation
+cat scripts/docs/README.md
+
+# Check script status
+./scripts/docs/build-docs.sh --version
+```
+
+## 🔄 Migration from Old Paths
+
+If you have bookmarks or CI/CD scripts using old paths, update them per this table (a scripted rewrite sketch follows):
+
+| Old Path | New Path |
+|----------|----------|
+| `./scripts/build-docs.sh` | `./scripts/docs/build-docs.sh` |
+| `./scripts/enhance-docs.sh` | `./scripts/docs/enhance-docs.sh` |
+| `./scripts/docs-dev.sh` | `./scripts/docs/docs-dev.sh` |
+| `./scripts/setup-docs.sh` | `./scripts/docs/setup-docs.sh` |
+| `./scripts/deploy-docs.sh` | `./scripts/docs/deploy-docs.sh` |
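+
+A loop like the following can rewrite old paths in bulk (a sketch — the `.github/` and `Makefile` targets are assumptions; point it at wherever your CI config lives and review the diff before committing):
+
+```bash
+# Rewrite old doc-script paths to the new scripts/docs/ location (GNU sed)
+for s in build-docs enhance-docs docs-dev setup-docs deploy-docs; do
+    grep -rl "scripts/$s.sh" .github/ Makefile 2>/dev/null \
+        | xargs -r sed -i "s|scripts/$s\.sh|scripts/docs/$s.sh|g"
+done
+```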
+
+---
+
+**Quick Tip:** Bookmark this file for fast access to documentation commands! 🔖
\ No newline at end of file
diff --git a/scripts/docs/README.md b/scripts/docs/README.md
new file mode 100644
index 0000000..cb22ada
--- /dev/null
+++ b/scripts/docs/README.md
@@ -0,0 +1,382 @@
+# Documentation Scripts
+
+This directory contains all scripts related to building, managing, and deploying documentation for the Rustelo project.
+
+## 📁 Scripts Overview
+
+### 🔨 Build Scripts
+
+#### `build-docs.sh`
+**Purpose:** Comprehensive documentation build system
+**Description:** Builds both mdBook and cargo documentation with logo integration
+
+**Usage:**
+```bash
+# Build mdBook documentation only
+./build-docs.sh
+
+# Build cargo documentation with logos
+./build-docs.sh --cargo
+
+# Build all documentation (mdBook + cargo doc)
+./build-docs.sh --all
+
+# Serve documentation locally
+./build-docs.sh --serve
+
+# Watch for changes and rebuild
+./build-docs.sh --watch
+
+# Sync existing docs into mdBook format
+./build-docs.sh --sync
+```
+
+**Features:**
+- Builds mdBook documentation
+- Generates cargo doc with logo enhancement
+- Serves documentation locally
+- Watches for file changes
+- Syncs existing documentation
+- Provides build metrics
+
+#### `enhance-docs.sh`
+**Purpose:** Add Rustelo branding to cargo doc output
+**Description:** Post-processes cargo doc HTML files to add logos and custom styling
+
+**Usage:**
+```bash
+# Enhance cargo doc with logos
+./enhance-docs.sh
+
+# Clean up backup files
+./enhance-docs.sh --clean
+
+# Restore original documentation
+./enhance-docs.sh --restore
+```
+
+**Features:**
+- Adds logos to all crate documentation pages
+- Injects custom CSS for branding
+- Creates backup files for safety
+- Adds footer with project links
+- Supports restoration of original files
+
+### 🌐 Development Scripts
+
+#### `docs-dev.sh`
+**Purpose:** Start development server for documentation
+**Description:** Launches mdBook development server with live reload
+
+**Usage:**
+```bash
+# Start development server
+./docs-dev.sh
+
+# Start with specific port
+./docs-dev.sh --port 3001
+
+# Start and open browser
+./docs-dev.sh --open
+```
+
+**Features:**
+- Live reload on file changes
+- Automatic browser opening
+- Custom port configuration
+- Hot reloading for rapid development
+
+### ⚙️ Setup Scripts
+
+#### `setup-docs.sh`
+**Purpose:** Initialize documentation system
+**Description:** Sets up the complete documentation infrastructure
+
+**Usage:**
+```bash
+# Basic setup
+./setup-docs.sh
+
+# Full setup with all features
+./setup-docs.sh --full
+
+# Setup with content generation
+./setup-docs.sh --generate
+
+# Setup for specific platform
+./setup-docs.sh --platform github-pages
+```
+
+**Features:**
+- Installs required tools (mdBook, etc.)
+- Creates directory structure
+- Generates initial content
+- Configures theme and styling
+- Platform-specific optimization
+
+#### `generate-content.sh`
+**Purpose:** Generate documentation content
+**Description:** Creates documentation pages from templates and existing content
+
+**Usage:**
+```bash
+# Generate all content
+./generate-content.sh
+
+# Generate specific section
+./generate-content.sh --section features
+
+# Generate from existing docs
+./generate-content.sh --sync
+
+# Force regeneration
+./generate-content.sh --force
+```
+
+**Features:**
+- Converts existing documentation
+- Generates API documentation
+- Creates navigation structure
+- Processes templates
+- Validates content structure
+
+### 🚀 Deployment Scripts
+
+#### `deploy-docs.sh`
+**Purpose:** Deploy documentation to various platforms
+**Description:** Automated deployment of built documentation
+
+**Usage:**
+```bash
+# Deploy to GitHub Pages
+./deploy-docs.sh github-pages
+
+# Deploy to Netlify
+./deploy-docs.sh netlify
+
+# Deploy to custom server
+./deploy-docs.sh custom --server example.com
+
+# Deploy with custom domain
+./deploy-docs.sh github-pages --domain docs.rustelo.dev
+```
+
+**Supported Platforms:**
+- GitHub Pages
+- Netlify
+- Vercel
+- AWS S3
+- Custom servers via SSH
+
+**Features:**
+- Platform-specific optimization
+- Custom domain configuration
+- SSL certificate handling
+- Automated builds
+- Rollback capabilities
+
+## 🔄 Workflow Examples
+
+### Complete Documentation Build
+```bash
+# 1. Setup documentation system
+./setup-docs.sh --full
+
+# 2. Generate content from existing docs
+./generate-content.sh --sync
+
+# 3. Build all documentation
+./build-docs.sh --all
+
+# 4. Deploy to GitHub Pages
+./deploy-docs.sh github-pages
+```
+
+### Development Workflow
+```bash
+# 1. Start development server
+./docs-dev.sh --open
+
+# 2. In another terminal, watch for cargo doc changes
+cargo watch -x "doc --no-deps" -s "./enhance-docs.sh"
+
+# 3. Make changes and see live updates
+```
+
+### CI/CD Integration
+```bash
+# Automated build and deploy (for CI/CD)
+./setup-docs.sh --ci
+./build-docs.sh --all
+./deploy-docs.sh github-pages --token $GITHUB_TOKEN
+```
+
+## 📋 Prerequisites
+
+### Required Tools
+- **mdBook** - `cargo install mdbook`
+- **Rust/Cargo** - For cargo doc generation
+- **Git** - For deployment to GitHub Pages
+
+### Optional Tools
+- **mdbook-linkcheck** - `cargo install mdbook-linkcheck`
+- **mdbook-toc** - `cargo install mdbook-toc`
+- **mdbook-mermaid** - `cargo install mdbook-mermaid`
+- **cargo-watch** - `cargo install cargo-watch`
+
+### Environment Variables
+```bash
+# For deployment
+export GITHUB_TOKEN="your-github-token"
+export NETLIFY_AUTH_TOKEN="your-netlify-token"
+export VERCEL_TOKEN="your-vercel-token"
+
+# For custom domains
+export DOCS_DOMAIN="docs.rustelo.dev"
+export CNAME_RECORD="rustelo.github.io"
+```
+
+## 📁 Output Structure
+
+```
+template/
+├── book-output/ # mdBook output
+│ ├── html/ # Generated HTML
+│ └── index.html # Main documentation entry
+├── target/doc/ # Cargo doc output
+│ ├── server/ # Server crate docs
+│ ├── client/ # Client crate docs
+│ ├── shared/ # Shared crate docs
+│ └── logos/ # Logo assets
+└── docs-dist/ # Combined distribution
+ ├── book/ # mdBook content
+ ├── api/ # API documentation
+ └── assets/ # Static assets
+```
+
+## 🔧 Configuration
+
+### mdBook Configuration
+**File:** `book.toml`
+- Theme customization
+- Logo integration
+- Plugin configuration
+- Build settings
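+
+A minimal `book.toml` covering these settings might look like the sketch below (the theme paths match the files the build script generates; adjust for your layout):
+
+```bash
+# Sketch: create a minimal book.toml (heredoc style, as the scripts do)
+cat > book.toml << 'EOF'
+[book]
+title = "Rustelo Documentation"
+src = "book"
+
+[build]
+build-dir = "book-output"
+
+[output.html]
+additional-css = ["book/theme/custom.css"]
+additional-js = ["book/theme/custom.js"]
+EOF
+```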
+
+### Script Configuration
+**File:** `scripts/docs/config.sh` (optional, if present)
+- Default deployment platform
+- Custom domain settings
+- Build optimization flags
+- Platform-specific options
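+
+If you add this file, plain variable assignments keep it easy for the other scripts to source (the names below are illustrative, not a fixed contract):
+
+```bash
+# scripts/docs/config.sh (hypothetical) - sourced by the docs scripts if present
+DEFAULT_PLATFORM="github-pages"   # default deployment target
+DOCS_DOMAIN="docs.rustelo.dev"    # custom domain, if any
+BUILD_FLAGS="--all"               # default flags for build-docs.sh
+```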
+
+## 🐛 Troubleshooting
+
+### Common Issues
+
+1. **mdBook build fails**
+ ```bash
+ # Check mdBook installation
+ mdbook --version
+
+ # Reinstall if needed
+ cargo install mdbook --force
+ ```
+
+2. **Cargo doc enhancement fails**
+ ```bash
+ # Ensure cargo doc was built first
+ cargo doc --no-deps
+
+ # Check script permissions
+ chmod +x ./enhance-docs.sh
+ ```
+
+3. **Deployment fails**
+ ```bash
+ # Check environment variables
+ echo $GITHUB_TOKEN
+
+ # Verify repository permissions
+ git remote -v
+ ```
+
+4. **Logo files missing**
+ ```bash
+ # Ensure logos are in the correct location
+ ls -la logos/
+ ls -la public/logos/
+ ```
+
+### Debug Mode
+Most scripts support debug mode for troubleshooting:
+```bash
+# Enable debug output
+DEBUG=1 ./build-docs.sh --all
+
+# Verbose logging
+VERBOSE=1 ./deploy-docs.sh github-pages
+```
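+
+Internally, a script can honor these variables with a small guard near the top; a minimal pattern (an assumption about the implementation, not a guarantee for every script) is:
+
+```bash
+# Turn on shell tracing when DEBUG=1 is exported
+if [ "${DEBUG:-0}" = "1" ]; then
+    set -x   # print each command before it runs
+fi
+```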
+
+## 📊 Metrics and Analytics
+
+### Build Metrics
+- Total pages generated
+- Build time
+- File sizes
+- Link validation results
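+
+The build script computes the page count and size with standard tools, so the numbers are easy to reproduce by hand:
+
+```bash
+# Count generated pages and measure the output size
+find book-output/html -name "*.html" | wc -l
+du -sh book-output/html
+```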
+
+### Deployment Metrics
+- Deployment time
+- File transfer size
+- CDN cache status
+- Performance scores
+
+## 🔒 Security
+
+### Best Practices
+- Use environment variables for sensitive data
+- Validate all input parameters
+- Create backups before destructive operations
+- Use secure protocols for deployments
+
+### Token Management
+- Store tokens in secure environment variables
+- Use minimal required permissions
+- Rotate tokens regularly
+- Monitor token usage
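+
+One way to keep tokens out of the repository is a git-ignored env file that you source before deploying (the filename is just a convention):
+
+```bash
+# .env.docs (add to .gitignore) - source before running deploy-docs.sh
+export GITHUB_TOKEN="..."
+export NETLIFY_AUTH_TOKEN="..."
+
+# Usage: source .env.docs && ./deploy-docs.sh github-pages
+```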
+
+## 🤝 Contributing
+
+### Adding New Scripts
+1. Follow naming convention: `action-target.sh`
+2. Include help text and usage examples
+3. Add error handling and validation
+4. Update this README
+5. Test with different configurations
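+
+A minimal skeleton following these conventions might look like this (hypothetical; flesh out the options and validation for your use case):
+
+```bash
+#!/bin/bash
+# action-target.sh - one-line description of the script's purpose
+set -e
+
+show_usage() {
+    echo "Usage: $0 [OPTIONS]"
+    echo "  --help    Show this help message"
+}
+
+if [ "$1" = "--help" ]; then
+    show_usage
+    exit 0
+fi
+
+# Validate inputs, then do the work...
+```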
+
+### Modifying Existing Scripts
+1. Maintain backward compatibility
+2. Update documentation
+3. Test all use cases
+4. Verify CI/CD integration
+
+## 📚 Related Documentation
+
+- **[Logo Usage Guide](../../book/developers/brand/logo-usage.md)** - How to use logos in documentation
+- **[mdBook Configuration](../../book.toml)** - mdBook setup and configuration
+- **[Deployment Guide](../../book/deployment/)** - Platform-specific deployment guides
+- **[Contributing Guidelines](../../CONTRIBUTING.md)** - How to contribute to documentation
+
+## 📞 Support
+
+For issues with documentation scripts:
+1. Check this README for common solutions
+2. Review script help text: `./script-name.sh --help`
+3. Enable debug mode for detailed output
+4. Open an issue on GitHub with logs and configuration
+
+---
+
+*Generated by Rustelo Documentation System*
+*Last updated: see the git history for this file*
\ No newline at end of file
diff --git a/scripts/docs/build-docs.sh b/scripts/docs/build-docs.sh
new file mode 100755
index 0000000..8105985
--- /dev/null
+++ b/scripts/docs/build-docs.sh
@@ -0,0 +1,493 @@
+#!/bin/bash
+
+# Rustelo Documentation Build Script
+# This script builds the documentation using mdBook, cargo doc, and organizes the output
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
+
+echo -e "${BLUE}🚀 Rustelo Documentation Build Script${NC}"
+echo "================================="
+
+# Check if mdbook is installed
+if ! command -v mdbook &> /dev/null; then
+ echo -e "${RED}❌ mdbook is not installed${NC}"
+ echo "Please install mdbook:"
+ echo " cargo install mdbook"
+ echo " # Optional plugins:"
+ echo " cargo install mdbook-linkcheck"
+ echo " cargo install mdbook-toc"
+ echo " cargo install mdbook-mermaid"
+ exit 1
+fi
+
+# Check mdbook version
+MDBOOK_VERSION=$(mdbook --version | cut -d' ' -f2)
+echo -e "${GREEN}✅ mdbook version: $MDBOOK_VERSION${NC}"
+
+# Create necessary directories
+echo -e "${BLUE}📁 Creating directories...${NC}"
+mkdir -p "$PROJECT_ROOT/book-output"
+mkdir -p "$PROJECT_ROOT/book/theme"
+
+# Copy custom theme files if they don't exist
+if [ ! -f "$PROJECT_ROOT/book/theme/custom.css" ]; then
+ echo -e "${YELLOW}📝 Creating custom CSS...${NC}"
+ cat > "$PROJECT_ROOT/book/theme/custom.css" << 'EOF'
+/* Rustelo Documentation Custom Styles */
+
+:root {
+ --rustelo-primary: #e53e3e;
+ --rustelo-secondary: #3182ce;
+ --rustelo-accent: #38a169;
+ --rustelo-dark: #2d3748;
+ --rustelo-light: #f7fafc;
+}
+
+/* Custom header styling */
+.menu-title {
+ color: var(--rustelo-primary);
+ font-weight: bold;
+}
+
+/* Code block improvements */
+pre {
+ border-radius: 8px;
+ box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+}
+
+/* Improved table styling */
+table {
+ border-collapse: collapse;
+ width: 100%;
+ margin: 1rem 0;
+}
+
+table th,
+table td {
+ border: 1px solid #e2e8f0;
+ padding: 0.75rem;
+ text-align: left;
+}
+
+table th {
+ background-color: var(--rustelo-light);
+ font-weight: 600;
+}
+
+table tr:nth-child(even) {
+ background-color: #f8f9fa;
+}
+
+/* Feature badge styling */
+.feature-badge {
+ display: inline-block;
+ padding: 0.25rem 0.5rem;
+ border-radius: 0.25rem;
+ font-size: 0.875rem;
+ font-weight: 500;
+ margin: 0.125rem;
+}
+
+.feature-badge.enabled {
+ background-color: #c6f6d5;
+ color: #22543d;
+}
+
+.feature-badge.disabled {
+ background-color: #fed7d7;
+ color: #742a2a;
+}
+
+.feature-badge.optional {
+ background-color: #fef5e7;
+ color: #744210;
+}
+
+/* Callout boxes */
+.callout {
+ padding: 1rem;
+ margin: 1rem 0;
+ border-left: 4px solid;
+ border-radius: 0 4px 4px 0;
+}
+
+.callout.note {
+ border-left-color: var(--rustelo-secondary);
+ background-color: #ebf8ff;
+}
+
+.callout.warning {
+ border-left-color: #ed8936;
+ background-color: #fffaf0;
+}
+
+.callout.tip {
+ border-left-color: var(--rustelo-accent);
+ background-color: #f0fff4;
+}
+
+.callout.danger {
+ border-left-color: var(--rustelo-primary);
+ background-color: #fff5f5;
+}
+
+/* Command line styling */
+.command-line {
+ background-color: #1a202c;
+ color: #e2e8f0;
+ padding: 1rem;
+ border-radius: 8px;
+ font-family: 'JetBrains Mono', 'Fira Code', monospace;
+ margin: 1rem 0;
+}
+
+.command-line::before {
+ content: "$ ";
+ color: #48bb78;
+ font-weight: bold;
+}
+
+/* Navigation improvements */
+.chapter li.part-title {
+ color: var(--rustelo-primary);
+ font-weight: bold;
+ margin-top: 1rem;
+}
+
+/* Search improvements */
+#searchresults mark {
+ background-color: #fef5e7;
+ color: #744210;
+}
+
+/* Mobile improvements */
+@media (max-width: 768px) {
+ .content {
+ padding: 1rem;
+ }
+
+ table {
+ font-size: 0.875rem;
+ }
+
+ .command-line {
+ font-size: 0.8rem;
+ padding: 0.75rem;
+ }
+}
+
+/* Dark theme overrides */
+.navy .callout.note {
+ background-color: #1e3a8a;
+}
+
+.navy .callout.warning {
+ background-color: #92400e;
+}
+
+.navy .callout.tip {
+ background-color: #14532d;
+}
+
+.navy .callout.danger {
+ background-color: #991b1b;
+}
+
+/* Print styles */
+@media print {
+ .nav-wrapper,
+ .page-wrapper > .page > .menu,
+ .mobile-nav-chapters,
+ .nav-chapters,
+ .sidebar-scrollbox {
+ display: none !important;
+ }
+
+ .page-wrapper > .page {
+ left: 0 !important;
+ }
+
+ .content {
+ margin-left: 0 !important;
+ max-width: none !important;
+ }
+}
+EOF
+fi
+
+if [ ! -f "$PROJECT_ROOT/book/theme/custom.js" ]; then
+ echo -e "${YELLOW}📝 Creating custom JavaScript...${NC}"
+ cat > "$PROJECT_ROOT/book/theme/custom.js" << 'EOF'
+// Rustelo Documentation Custom JavaScript
+
+// Add copy buttons to code blocks
+document.addEventListener('DOMContentLoaded', function() {
+ // Add copy buttons to code blocks
+ const codeBlocks = document.querySelectorAll('pre > code');
+ codeBlocks.forEach(function(codeBlock) {
+ const pre = codeBlock.parentElement;
+ const button = document.createElement('button');
+ button.className = 'copy-button';
+ button.textContent = 'Copy';
+ button.style.cssText = `
+ position: absolute;
+ top: 8px;
+ right: 8px;
+ background: #4a5568;
+ color: white;
+ border: none;
+ padding: 4px 8px;
+ border-radius: 4px;
+ font-size: 12px;
+ cursor: pointer;
+ opacity: 0;
+ transition: opacity 0.2s;
+ `;
+
+ pre.style.position = 'relative';
+ pre.appendChild(button);
+
+ pre.addEventListener('mouseenter', function() {
+ button.style.opacity = '1';
+ });
+
+ pre.addEventListener('mouseleave', function() {
+ button.style.opacity = '0';
+ });
+
+ button.addEventListener('click', function() {
+ const text = codeBlock.textContent;
+ navigator.clipboard.writeText(text).then(function() {
+ button.textContent = 'Copied!';
+ button.style.background = '#48bb78';
+ setTimeout(function() {
+ button.textContent = 'Copy';
+ button.style.background = '#4a5568';
+ }, 2000);
+ });
+ });
+ });
+
+ // Add feature badges
+ const content = document.querySelector('.content');
+ if (content) {
+ let html = content.innerHTML;
+
+ // Replace feature indicators
+        html = html.replace(/\[FEATURE:([^\]]+)\]/g, '<span class="feature-badge enabled">$1</span>');
+        html = html.replace(/\[OPTIONAL:([^\]]+)\]/g, '<span class="feature-badge optional">$1</span>');
+        html = html.replace(/\[DISABLED:([^\]]+)\]/g, '<span class="feature-badge disabled">$1</span>');
+
+        // Add callout boxes
+        html = html.replace(/\[NOTE\]([\s\S]*?)\[\/NOTE\]/g, '<div class="callout note">$1</div>');
+        html = html.replace(/\[WARNING\]([\s\S]*?)\[\/WARNING\]/g, '<div class="callout warning">$1</div>');
+        html = html.replace(/\[TIP\]([\s\S]*?)\[\/TIP\]/g, '<div class="callout tip">$1</div>');
+        html = html.replace(/\[DANGER\]([\s\S]*?)\[\/DANGER\]/g, '<div class="callout danger">$1</div>');
+
+ content.innerHTML = html;
+ }
+
+ // Add smooth scrolling
+ document.querySelectorAll('a[href^="#"]').forEach(anchor => {
+ anchor.addEventListener('click', function (e) {
+ e.preventDefault();
+ const target = document.querySelector(this.getAttribute('href'));
+ if (target) {
+ target.scrollIntoView({
+ behavior: 'smooth'
+ });
+ }
+ });
+ });
+});
+
+// Add keyboard shortcuts
+document.addEventListener('keydown', function(e) {
+ // Ctrl/Cmd + K to focus search
+ if ((e.ctrlKey || e.metaKey) && e.key === 'k') {
+ e.preventDefault();
+ const searchInput = document.querySelector('#searchbar');
+ if (searchInput) {
+ searchInput.focus();
+ }
+ }
+});
+
+// Add version info to footer
+document.addEventListener('DOMContentLoaded', function() {
+ const content = document.querySelector('.content');
+ if (content) {
+ const footer = document.createElement('div');
+ footer.style.cssText = `
+ margin-top: 3rem;
+ padding: 2rem 0;
+ border-top: 1px solid #e2e8f0;
+ text-align: center;
+ font-size: 0.875rem;
+ color: #718096;
+ `;
+ footer.innerHTML = `
+            <p>Built with ❤️ using mdBook</p>
+            <p>Rustelo Documentation • Last updated: ${new Date().toLocaleDateString()}</p>
+ `;
+ content.appendChild(footer);
+ }
+});
+EOF
+fi
+
+# Check if we should sync content from existing docs
+if [ "$1" = "--sync" ]; then
+ echo -e "${BLUE}🔄 Syncing content from existing documentation...${NC}"
+
+ # Create directories for existing content
+ mkdir -p "$PROJECT_ROOT/book/database"
+ mkdir -p "$PROJECT_ROOT/book/features/auth"
+ mkdir -p "$PROJECT_ROOT/book/features/content"
+
+ # Copy and adapt existing documentation
+ if [ -f "$PROJECT_ROOT/docs/database_configuration.md" ]; then
+ cp "$PROJECT_ROOT/docs/database_configuration.md" "$PROJECT_ROOT/book/database/configuration.md"
+ echo -e "${GREEN}✅ Synced database configuration${NC}"
+ fi
+
+ if [ -f "$PROJECT_ROOT/docs/2fa_implementation.md" ]; then
+ cp "$PROJECT_ROOT/docs/2fa_implementation.md" "$PROJECT_ROOT/book/features/auth/2fa.md"
+ echo -e "${GREEN}✅ Synced 2FA documentation${NC}"
+ fi
+
+ if [ -f "$PROJECT_ROOT/docs/email.md" ]; then
+ cp "$PROJECT_ROOT/docs/email.md" "$PROJECT_ROOT/book/features/email.md"
+ echo -e "${GREEN}✅ Synced email documentation${NC}"
+ fi
+
+ # Copy from info directory
+ if [ -f "$PROJECT_ROOT/info/features.md" ]; then
+ cp "$PROJECT_ROOT/info/features.md" "$PROJECT_ROOT/book/features/detailed.md"
+ echo -e "${GREEN}✅ Synced detailed features${NC}"
+ fi
+
+ echo -e "${GREEN}✅ Content sync complete${NC}"
+fi
+
+# Change to project root
+cd "$PROJECT_ROOT"
+
+# Build the documentation
+echo -e "${BLUE}🔨 Building documentation...${NC}"
+if mdbook build; then
+ echo -e "${GREEN}✅ Documentation built successfully${NC}"
+else
+ echo -e "${RED}❌ Documentation build failed${NC}"
+ exit 1
+fi
+
+# Check if we should serve the documentation
+if [ "$1" = "--serve" ] || [ "$2" = "--serve" ] || [ "$3" = "--serve" ]; then
+ echo -e "${BLUE}🌐 Starting development server...${NC}"
+ echo "Documentation will be available at: http://localhost:3000"
+ echo "Press Ctrl+C to stop the server"
+ mdbook serve --open
+elif [ "$1" = "--watch" ] || [ "$2" = "--watch" ] || [ "$3" = "--watch" ]; then
+ echo -e "${BLUE}👀 Starting file watcher...${NC}"
+ echo "Documentation will be rebuilt automatically on file changes"
+ echo "Press Ctrl+C to stop watching"
+ mdbook watch
+else
+ # Display build information
+ echo ""
+ echo -e "${GREEN}📚 Documentation built successfully!${NC}"
+ echo "Output directory: $PROJECT_ROOT/book-output"
+ echo "HTML files: $PROJECT_ROOT/book-output/html"
+ echo ""
+ echo "To serve the documentation locally:"
+ echo " $0 --serve"
+ echo ""
+ echo "To watch for changes:"
+ echo " $0 --watch"
+ echo ""
+ echo "To sync existing documentation:"
+ echo " $0 --sync"
+ echo ""
+ echo "To build cargo documentation:"
+ echo " $0 --cargo"
+ echo ""
+ echo "To build all documentation:"
+ echo " $0 --all"
+fi
+
+# Generate documentation metrics
+echo -e "${BLUE}📊 Documentation metrics:${NC}"
+TOTAL_PAGES=$(find "$PROJECT_ROOT/book-output/html" -name "*.html" | wc -l)
+TOTAL_SIZE=$(du -sh "$PROJECT_ROOT/book-output/html" | cut -f1)
+echo " Total pages: $TOTAL_PAGES"
+echo " Total size: $TOTAL_SIZE"
+
+# Check for broken links if linkcheck is available
+if command -v mdbook-linkcheck &> /dev/null; then
+ echo -e "${BLUE}🔗 Checking for broken links...${NC}"
+ if mdbook-linkcheck; then
+ echo -e "${GREEN}✅ No broken links found${NC}"
+ else
+ echo -e "${YELLOW}⚠️ Some links may be broken${NC}"
+ fi
+fi
+
+# Build cargo documentation if requested
+if [ "$1" = "--cargo" ] || [ "$2" = "--cargo" ] || [ "$3" = "--cargo" ]; then
+ echo -e "${BLUE}🦀 Building cargo documentation...${NC}"
+
+ # Build cargo doc
+ if cargo doc --no-deps --document-private-items; then
+ echo -e "${GREEN}✅ Cargo documentation built successfully${NC}"
+
+ # Enhance with logos
+ if [ -f "$PROJECT_ROOT/scripts/docs/enhance-docs.sh" ]; then
+ echo -e "${BLUE}🎨 Enhancing cargo docs with logos...${NC}"
+ "$PROJECT_ROOT/scripts/docs/enhance-docs.sh"
+ fi
+
+ echo -e "${GREEN}✅ Cargo documentation enhanced with logos${NC}"
+ else
+ echo -e "${RED}❌ Cargo documentation build failed${NC}"
+ fi
+fi
+
+# Build all documentation if requested
+if [ "$1" = "--all" ] || [ "$2" = "--all" ] || [ "$3" = "--all" ]; then
+ echo -e "${BLUE}📚 Building all documentation...${NC}"
+
+ # Build mdBook
+ if mdbook build; then
+ echo -e "${GREEN}✅ mdBook documentation built${NC}"
+ else
+ echo -e "${RED}❌ mdBook build failed${NC}"
+ fi
+
+ # Build cargo doc
+ if cargo doc --no-deps --document-private-items; then
+ echo -e "${GREEN}✅ Cargo documentation built${NC}"
+
+ # Enhance with logos
+ if [ -f "$PROJECT_ROOT/scripts/docs/enhance-docs.sh" ]; then
+ echo -e "${BLUE}🎨 Enhancing cargo docs with logos...${NC}"
+ "$PROJECT_ROOT/scripts/docs/enhance-docs.sh"
+ fi
+ else
+ echo -e "${RED}❌ Cargo documentation build failed${NC}"
+ fi
+fi
+
+echo ""
+echo -e "${GREEN}✨ Documentation build complete!${NC}"
diff --git a/scripts/docs/deploy-docs.sh b/scripts/docs/deploy-docs.sh
new file mode 100755
index 0000000..b2a41ef
--- /dev/null
+++ b/scripts/docs/deploy-docs.sh
@@ -0,0 +1,545 @@
+#!/bin/bash
+
+# Rustelo Documentation Deployment Script
+# This script deploys the documentation to various platforms
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[0;34m'
+NC='\033[0m' # No Color
+
+# Script directory
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
+
+echo -e "${BLUE}🚀 Rustelo Documentation Deployment Script${NC}"
+echo "==========================================="
+
+# Function to show usage
+show_usage() {
+ echo "Usage: $0 [PLATFORM] [OPTIONS]"
+ echo ""
+ echo "Platforms:"
+ echo " github-pages Deploy to GitHub Pages"
+ echo " netlify Deploy to Netlify"
+ echo " vercel Deploy to Vercel"
+ echo " aws-s3 Deploy to AWS S3"
+ echo " docker Build Docker image"
+ echo " local Serve locally (development)"
+ echo ""
+ echo "Options:"
+ echo " --dry-run Show what would be deployed without actually deploying"
+ echo " --force Force deployment even if no changes detected"
+ echo " --branch NAME Deploy from specific branch (default: main)"
+ echo " --help Show this help message"
+ echo ""
+ echo "Examples:"
+ echo " $0 github-pages"
+ echo " $0 netlify --dry-run"
+ echo " $0 local --force"
+ echo " $0 docker"
+}
+
+# Parse command line arguments
+PLATFORM=""
+DRY_RUN=false
+FORCE=false
+BRANCH="main"
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ github-pages|netlify|vercel|aws-s3|docker|local)
+ PLATFORM="$1"
+ shift
+ ;;
+ --dry-run)
+ DRY_RUN=true
+ shift
+ ;;
+ --force)
+ FORCE=true
+ shift
+ ;;
+ --branch)
+ BRANCH="$2"
+ shift 2
+ ;;
+ --help)
+ show_usage
+ exit 0
+ ;;
+ *)
+ echo -e "${RED}❌ Unknown option: $1${NC}"
+ show_usage
+ exit 1
+ ;;
+ esac
+done
+
+if [ -z "$PLATFORM" ]; then
+ echo -e "${RED}❌ Please specify a platform${NC}"
+ show_usage
+ exit 1
+fi
+
+# Check dependencies
+check_dependencies() {
+ echo -e "${BLUE}🔍 Checking dependencies...${NC}"
+
+ if ! command -v mdbook &> /dev/null; then
+ echo -e "${RED}❌ mdbook is not installed${NC}"
+ echo "Please install mdbook: cargo install mdbook"
+ exit 1
+ fi
+
+ if ! command -v git &> /dev/null; then
+ echo -e "${RED}❌ git is not installed${NC}"
+ exit 1
+ fi
+
+ echo -e "${GREEN}✅ Dependencies check passed${NC}"
+}
+
+# Build documentation
+build_docs() {
+ echo -e "${BLUE}🔨 Building documentation...${NC}"
+
+ cd "$PROJECT_ROOT"
+
+ # Clean previous build
+ rm -rf book-output
+
+ # Build with mdbook
+ if mdbook build; then
+ echo -e "${GREEN}✅ Documentation built successfully${NC}"
+ else
+ echo -e "${RED}❌ Documentation build failed${NC}"
+ exit 1
+ fi
+}
+
+# Deploy to GitHub Pages
+deploy_github_pages() {
+ echo -e "${BLUE}🐙 Deploying to GitHub Pages...${NC}"
+
+ # Check if we're in a git repository
+ if [ ! -d ".git" ]; then
+ echo -e "${RED}❌ Not in a git repository${NC}"
+ exit 1
+ fi
+
+ # Check if gh-pages branch exists
+ if ! git rev-parse --verify gh-pages >/dev/null 2>&1; then
+ echo -e "${YELLOW}📝 Creating gh-pages branch...${NC}"
+ git checkout --orphan gh-pages
+ git rm -rf .
+ git commit --allow-empty -m "Initial gh-pages commit"
+ git checkout "$BRANCH"
+ fi
+
+ if [ "$DRY_RUN" = true ]; then
+ echo -e "${YELLOW}🔍 DRY RUN: Would deploy to GitHub Pages${NC}"
+ return 0
+ fi
+
+ # Deploy to gh-pages branch
+ echo -e "${BLUE}📤 Pushing to gh-pages branch...${NC}"
+
+ # Create temporary directory
+ TEMP_DIR=$(mktemp -d)
+ cp -r book-output/html/* "$TEMP_DIR/"
+
+ # Add .nojekyll file to prevent Jekyll processing
+ touch "$TEMP_DIR/.nojekyll"
+
+ # Add CNAME file if it exists
+ if [ -f "CNAME" ]; then
+ cp CNAME "$TEMP_DIR/"
+ fi
+
+ # Switch to gh-pages branch
+ git checkout gh-pages
+
+ # Remove old files
+ git rm -rf . || true
+
+ # Copy new files
+ cp -r "$TEMP_DIR/"* .
+ cp "$TEMP_DIR/.nojekyll" .
+
+ # Add and commit
+ git add .
+ git commit -m "Deploy documentation - $(date '+%Y-%m-%d %H:%M:%S')"
+
+ # Push to GitHub
+ git push origin gh-pages
+
+ # Switch back to original branch
+ git checkout "$BRANCH"
+
+ # Clean up
+ rm -rf "$TEMP_DIR"
+
+ echo -e "${GREEN}✅ Deployed to GitHub Pages${NC}"
+ echo "Documentation will be available at: https://yourusername.github.io/rustelo"
+}
+
+# Deploy to Netlify
+deploy_netlify() {
+ echo -e "${BLUE}🌐 Deploying to Netlify...${NC}"
+
+ # Check if netlify CLI is installed
+ if ! command -v netlify &> /dev/null; then
+ echo -e "${RED}❌ Netlify CLI is not installed${NC}"
+ echo "Please install: npm install -g netlify-cli"
+ exit 1
+ fi
+
+ # Create netlify.toml if it doesn't exist
+ if [ ! -f "netlify.toml" ]; then
+ echo -e "${YELLOW}📝 Creating netlify.toml...${NC}"
+ cat > netlify.toml << 'EOF'
+[build]
+ publish = "book-output/html"
+ command = "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y && source ~/.cargo/env && cargo install mdbook && mdbook build"
+
+[build.environment]
+ RUST_VERSION = "1.75"
+
+[[redirects]]
+ from = "/docs/*"
+ to = "/:splat"
+ status = 200
+
+[[headers]]
+ for = "/*"
+ [headers.values]
+ X-Frame-Options = "DENY"
+ X-XSS-Protection = "1; mode=block"
+ X-Content-Type-Options = "nosniff"
+ Referrer-Policy = "strict-origin-when-cross-origin"
+ Content-Security-Policy = "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval'; style-src 'self' 'unsafe-inline'; img-src 'self' data: https:; font-src 'self' data:;"
+EOF
+ fi
+
+ if [ "$DRY_RUN" = true ]; then
+ echo -e "${YELLOW}🔍 DRY RUN: Would deploy to Netlify${NC}"
+ return 0
+ fi
+
+ # Deploy to Netlify
+ netlify deploy --prod --dir=book-output/html
+
+ echo -e "${GREEN}✅ Deployed to Netlify${NC}"
+}
+
+# Deploy to Vercel
+deploy_vercel() {
+ echo -e "${BLUE}▲ Deploying to Vercel...${NC}"
+
+ # Check if vercel CLI is installed
+ if ! command -v vercel &> /dev/null; then
+ echo -e "${RED}❌ Vercel CLI is not installed${NC}"
+ echo "Please install: npm install -g vercel"
+ exit 1
+ fi
+
+ # Create vercel.json if it doesn't exist
+ if [ ! -f "vercel.json" ]; then
+ echo -e "${YELLOW}📝 Creating vercel.json...${NC}"
+ cat > vercel.json << 'EOF'
+{
+ "version": 2,
+ "builds": [
+ {
+ "src": "book.toml",
+ "use": "@vercel/static-build",
+ "config": {
+ "distDir": "book-output/html"
+ }
+ }
+ ],
+ "routes": [
+ {
+ "src": "/docs/(.*)",
+ "dest": "/$1"
+ }
+ ],
+ "headers": [
+ {
+ "source": "/(.*)",
+ "headers": [
+ {
+ "key": "X-Frame-Options",
+ "value": "DENY"
+ },
+ {
+ "key": "X-Content-Type-Options",
+ "value": "nosniff"
+ },
+ {
+ "key": "X-XSS-Protection",
+ "value": "1; mode=block"
+ }
+ ]
+ }
+ ]
+}
+EOF
+ fi
+
+ # Create package.json for build script
+ if [ ! -f "package.json" ]; then
+ echo -e "${YELLOW}📝 Creating package.json...${NC}"
+ cat > package.json << 'EOF'
+{
+ "name": "rustelo-docs",
+ "version": "1.0.0",
+ "scripts": {
+ "build": "curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y && source ~/.cargo/env && cargo install mdbook && mdbook build"
+ }
+}
+EOF
+ fi
+
+ if [ "$DRY_RUN" = true ]; then
+ echo -e "${YELLOW}🔍 DRY RUN: Would deploy to Vercel${NC}"
+ return 0
+ fi
+
+ # Deploy to Vercel
+ vercel --prod
+
+ echo -e "${GREEN}✅ Deployed to Vercel${NC}"
+}
+
+# Deploy to AWS S3
+deploy_aws_s3() {
+ echo -e "${BLUE}☁️ Deploying to AWS S3...${NC}"
+
+ # Check if AWS CLI is installed
+ if ! command -v aws &> /dev/null; then
+ echo -e "${RED}❌ AWS CLI is not installed${NC}"
+ echo "Please install AWS CLI and configure credentials"
+ exit 1
+ fi
+
+ # Check for required environment variables
+ if [ -z "$AWS_S3_BUCKET" ]; then
+ echo -e "${RED}❌ AWS_S3_BUCKET environment variable is not set${NC}"
+ exit 1
+ fi
+
+ if [ "$DRY_RUN" = true ]; then
+ echo -e "${YELLOW}🔍 DRY RUN: Would deploy to AWS S3 bucket: $AWS_S3_BUCKET${NC}"
+ return 0
+ fi
+
+ # Sync to S3
+ echo -e "${BLUE}📤 Syncing to S3...${NC}"
+ aws s3 sync book-output/html/ "s3://$AWS_S3_BUCKET/" --delete
+
+ # Set up CloudFront invalidation if configured
+ if [ -n "$AWS_CLOUDFRONT_DISTRIBUTION_ID" ]; then
+ echo -e "${BLUE}🔄 Creating CloudFront invalidation...${NC}"
+ aws cloudfront create-invalidation \
+ --distribution-id "$AWS_CLOUDFRONT_DISTRIBUTION_ID" \
+ --paths "/*"
+ fi
+
+ echo -e "${GREEN}✅ Deployed to AWS S3${NC}"
+ echo "Documentation available at: https://$AWS_S3_BUCKET.s3-website-us-east-1.amazonaws.com"
+}
+
+# Build Docker image
+build_docker() {
+ echo -e "${BLUE}🐳 Building Docker image...${NC}"
+
+ # Create Dockerfile if it doesn't exist
+ if [ ! -f "Dockerfile.docs" ]; then
+ echo -e "${YELLOW}📝 Creating Dockerfile.docs...${NC}"
+ cat > Dockerfile.docs << 'EOF'
+# Multi-stage Docker build for Rustelo documentation
+FROM rust:1.75-alpine AS builder
+
+# Install dependencies
+RUN apk add --no-cache musl-dev
+
+# Install mdbook
+RUN cargo install mdbook
+
+# Set working directory
+WORKDIR /app
+
+# Copy book configuration and source
+COPY book.toml .
+COPY book/ ./book/
+
+# Build documentation
+RUN mdbook build
+
+# Production stage
+FROM nginx:alpine
+
+# Copy built documentation
+COPY --from=builder /app/book-output/html /usr/share/nginx/html
+
+# Copy nginx configuration
+COPY nginx.conf /etc/nginx/nginx.conf
+
+# Add labels
+LABEL org.opencontainers.image.title="Rustelo Documentation"
+LABEL org.opencontainers.image.description="Rustelo web application template documentation"
+LABEL org.opencontainers.image.source="https://github.com/yourusername/rustelo"
+
+# Expose port
+EXPOSE 80
+
+# Health check (use busybox wget; curl is not included in nginx:alpine)
+HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
+    CMD wget -q --spider http://localhost/ || exit 1
+
+# Start nginx
+CMD ["nginx", "-g", "daemon off;"]
+EOF
+ fi
+
+ # Create nginx configuration
+ if [ ! -f "nginx.conf" ]; then
+ echo -e "${YELLOW}📝 Creating nginx.conf...${NC}"
+ cat > nginx.conf << 'EOF'
+events {
+ worker_connections 1024;
+}
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ # Gzip compression
+ gzip on;
+ gzip_vary on;
+ gzip_min_length 1024;
+ gzip_types
+ text/plain
+ text/css
+ text/xml
+ text/javascript
+ application/javascript
+ application/xml+rss
+ application/json;
+
+ server {
+ listen 80;
+ server_name localhost;
+
+ root /usr/share/nginx/html;
+ index index.html;
+
+ # Security headers
+ add_header X-Frame-Options "DENY" always;
+ add_header X-Content-Type-Options "nosniff" always;
+ add_header X-XSS-Protection "1; mode=block" always;
+ add_header Referrer-Policy "strict-origin-when-cross-origin" always;
+
+ # Cache static assets
+ location ~* \.(css|js|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
+ expires 1y;
+ add_header Cache-Control "public, immutable";
+ }
+
+ # Main location block
+ location / {
+ try_files $uri $uri/ $uri.html =404;
+ }
+
+ # Redirect /docs to root
+ location /docs {
+ return 301 /;
+ }
+
+ # Error pages
+ error_page 404 /404.html;
+ error_page 500 502 503 504 /50x.html;
+
+ location = /50x.html {
+ root /usr/share/nginx/html;
+ }
+ }
+}
+EOF
+ fi
+
+ if [ "$DRY_RUN" = true ]; then
+ echo -e "${YELLOW}🔍 DRY RUN: Would build Docker image${NC}"
+ return 0
+ fi
+
+ # Build Docker image
+ docker build -f Dockerfile.docs -t rustelo-docs:latest .
+
+ echo -e "${GREEN}✅ Docker image built successfully${NC}"
+ echo "To run the documentation server:"
+ echo " docker run -p 8080:80 rustelo-docs:latest"
+}
+
+# Serve locally
+serve_local() {
+ echo -e "${BLUE}🌐 Serving documentation locally...${NC}"
+
+ if [ "$DRY_RUN" = true ]; then
+ echo -e "${YELLOW}🔍 DRY RUN: Would serve locally${NC}"
+ return 0
+ fi
+
+ cd "$PROJECT_ROOT"
+ echo "Documentation will be available at: http://localhost:3000"
+ echo "Press Ctrl+C to stop the server"
+ mdbook serve --open
+}
+
+# Main deployment logic
+main() {
+ check_dependencies
+
+ # Build documentation unless serving locally
+ if [ "$PLATFORM" != "local" ]; then
+ build_docs
+ fi
+
+ case $PLATFORM in
+ github-pages)
+ deploy_github_pages
+ ;;
+ netlify)
+ deploy_netlify
+ ;;
+ vercel)
+ deploy_vercel
+ ;;
+ aws-s3)
+ deploy_aws_s3
+ ;;
+ docker)
+ build_docker
+ ;;
+ local)
+ serve_local
+ ;;
+ *)
+ echo -e "${RED}❌ Unknown platform: $PLATFORM${NC}"
+ show_usage
+ exit 1
+ ;;
+ esac
+}
+
+# Run main function
+main
+
+echo ""
+echo -e "${GREEN}🎉 Deployment complete!${NC}"
diff --git a/scripts/docs/docs-dev.sh b/scripts/docs/docs-dev.sh
new file mode 100755
index 0000000..7e0b193
--- /dev/null
+++ b/scripts/docs/docs-dev.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Quick development script for documentation
+
+set -e
+
+echo "🚀 Starting documentation development server..."
+echo "Documentation will be available at: http://localhost:3000"
+echo "Press Ctrl+C to stop"
+
+# Change to project root
+cd "$(dirname "$0")/.."
+
+# Start mdBook serve with live reload
+mdbook serve --open --port 3000
diff --git a/scripts/docs/enhance-docs.sh b/scripts/docs/enhance-docs.sh
new file mode 100755
index 0000000..a238312
--- /dev/null
+++ b/scripts/docs/enhance-docs.sh
@@ -0,0 +1,432 @@
+#!/bin/bash
+
+# Documentation Enhancement Script for Rustelo
+# This script adds logos and branding to cargo doc output
+
+# TODO: Fix logo positioning in pages and ensure proper alignment,
+# then remove this early exit to re-enable the script.
+exit 0
+
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Configuration
+LOGO_DIR="logos"
+DOC_DIR="target/doc"
+LOGO_FILE="rustelo-imag.svg"
+LOGO_HORIZONTAL="rustelo_dev-logo-h.svg"
+
+# Function to print colored output
+print_status() {
+ echo -e "${GREEN}[INFO]${NC} $1"
+}
+
+print_warning() {
+ echo -e "${YELLOW}[WARN]${NC} $1"
+}
+
+print_error() {
+ echo -e "${RED}[ERROR]${NC} $1"
+}
+
+# Check if cargo doc has been run
+check_doc_exists() {
+ if [ ! -d "$DOC_DIR" ]; then
+ print_error "Documentation directory not found. Run 'cargo doc' first."
+ exit 1
+ fi
+}
+
+# Check if logos exist
+check_logos_exist() {
+ if [ ! -f "$LOGO_DIR/$LOGO_FILE" ]; then
+ print_error "Logo file not found: $LOGO_DIR/$LOGO_FILE"
+ exit 1
+ fi
+
+ if [ ! -f "$LOGO_DIR/$LOGO_HORIZONTAL" ]; then
+ print_error "Horizontal logo file not found: $LOGO_DIR/$LOGO_HORIZONTAL"
+ exit 1
+ fi
+}
+
+# Copy logos to doc directory
+copy_logos_to_doc() {
+ print_status "Copying logos to documentation directory..."
+
+ # Create logos directory in doc
+ mkdir -p "$DOC_DIR/logos"
+
+ # Copy all logo files
+ cp "$LOGO_DIR"/*.svg "$DOC_DIR/logos/"
+
+ print_status "Logos copied successfully"
+}
+
+# Add logo to main crate page
+enhance_main_page() {
+ local crate_name="$1"
+ local index_file="$DOC_DIR/$crate_name/index.html"
+
+ if [ ! -f "$index_file" ]; then
+ print_warning "Index file not found for crate: $crate_name"
+ return
+ fi
+
+ print_status "Enhancing main page for crate: $crate_name"
+
+ # Create a backup
+ cp "$index_file" "$index_file.backup"
+
+ # Add logo to the main heading
+    sed -i.tmp 's|Crate '"$crate_name"'|<img src="../logos/'"$LOGO_FILE"'" alt="Rustelo logo" class="rustelo-logo"> Crate '"$crate_name"'|g' "$index_file"
+
+ # Create temporary CSS file
+ cat > "/tmp/rustelo-css.tmp" << 'EOF'
+
+EOF
+
+ # Add custom CSS for logo styling
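+    # sed idiom: 'r' queues the temp file's contents for output and 'd' deletes
+    # the matched </head> line, so the file's contents effectively replace it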
+ sed -i.tmp -e '/^[[:space:]]*<\/head>/{
+ r /tmp/rustelo-css.tmp
+ d
+ }' "$index_file"
+
+ # Create temporary footer file
+ cat > "/tmp/rustelo-footer.tmp" << 'EOF'
+
+EOF
+
+ # Add footer with branding
+ sed -i.tmp -e '/^[[:space:]]*<\/main>/{
+ r /tmp/rustelo-footer.tmp
+ d
+ }' "$index_file"
+
+ # Clean up temporary files
+ rm -f "/tmp/rustelo-css.tmp" "/tmp/rustelo-footer.tmp"
+
+    # Remove sed's intermediate backup file
+    rm -f "$index_file.tmp"
+
+ print_status "Enhanced main page for: $crate_name"
+}
+
+# Add logo to all module pages
+enhance_module_pages() {
+ local crate_name="$1"
+ local crate_dir="$DOC_DIR/$crate_name"
+
+ if [ ! -d "$crate_dir" ]; then
+ print_warning "Crate directory not found: $crate_name"
+ return
+ fi
+
+ print_status "Enhancing module pages for crate: $crate_name"
+
+ # Find all HTML files in the crate directory
+ find "$crate_dir" -name "*.html" -type f | while read -r html_file; do
+ # Skip if it's the main index file (already processed)
+ if [[ "$html_file" == "$crate_dir/index.html" ]]; then
+ continue
+ fi
+
+ # Create backup
+ cp "$html_file" "$html_file.backup"
+
+ # Add logo to sidebar
+ sed -i.tmp 's|