1071 lines
30 KiB
Bash
1071 lines
30 KiB
Bash
![]() |
#!/bin/bash

# Database Utilities and Maintenance Script
# Provides various database utility functions and maintenance tasks
# for PostgreSQL and SQLite targets (selected via DATABASE_URL in .env).

# Abort on the first unhandled command failure.
set -e

# Colors for output (ANSI escape sequences; rendered via 'echo -e').
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
NC='\033[0m' # No Color

# Script directory (resolved even when invoked via a relative path).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"

# Change to project root so relative paths (.env, temp/, dumps/) resolve.
cd "$PROJECT_ROOT"

# Utility configuration
TEMP_DIR="temp"
DUMP_DIR="dumps"
LOGS_DIR="logs"
# NOTE(review): MAX_LOG_SIZE is declared but not referenced anywhere visible
# in this script -- confirm whether it is consumed by a sourced file.
MAX_LOG_SIZE="100M"
LOG_RETENTION_DAYS=30
|
||
|
|
||
|
# Logging functions
|
||
|
# Print an informational message prefixed with a green [INFO] tag.
log() {
    local msg="$1"
    echo -e "${GREEN}[INFO]${NC} ${msg}"
}
|
||
|
|
||
|
# Print a warning message prefixed with a yellow [WARN] tag.
log_warn() {
    local msg="$1"
    echo -e "${YELLOW}[WARN]${NC} ${msg}"
}
|
||
|
|
||
|
# Print an error message prefixed with a red [ERROR] tag.
log_error() {
    local msg="$1"
    echo -e "${RED}[ERROR]${NC} ${msg}"
}
|
||
|
|
||
|
# Print a success message prefixed with a green [SUCCESS] tag.
log_success() {
    local msg="$1"
    echo -e "${GREEN}[SUCCESS]${NC} ${msg}"
}
|
||
|
|
||
|
# Print a debug message, but only when DEBUG is exactly "true".
# Uses an if (not &&) so the function returns 0 even when silenced,
# which matters under 'set -e'.
log_debug() {
    local msg="$1"
    if [ "$DEBUG" = "true" ]; then
        echo -e "${CYAN}[DEBUG]${NC} ${msg}"
    fi
}
|
||
|
|
||
|
# Render a bold, blue "=== title ===" section banner.
print_header() {
    local title="$1"
    echo -e "${BLUE}${BOLD}=== ${title} ===${NC}"
}
|
||
|
|
||
|
# Render a cyan "--- title ---" sub-section banner.
print_subheader() {
    local title="$1"
    echo -e "${CYAN}--- ${title} ---${NC}"
}
|
||
|
|
||
|
# Print the full CLI help text.  A single here-document replaces the long
# run of echo statements; $0 is expanded exactly as before.
print_usage() {
    cat <<EOF
Database Utilities and Maintenance Script

Usage: $0 <command> [options]

Commands:
 size Show database size information
 tables List all tables with row counts
 indexes Show index information
 constraints Show table constraints
 users Show database users (PostgreSQL only)
 permissions Show user permissions
 sessions Show active sessions
 locks Show current locks
 queries Show running queries
 kill-query Kill a specific query
 optimize Optimize database (VACUUM, ANALYZE)
 reindex Rebuild indexes
 check-integrity Check database integrity
 repair Repair database issues
 cleanup Clean up temporary data
 logs Show database logs
 config Show database configuration
 extensions List database extensions (PostgreSQL)
 sequences Show sequence information
 triggers Show table triggers
 functions Show user-defined functions
 views Show database views
 schema-info Show comprehensive schema information
 duplicate-data Find duplicate records
 orphaned-data Find orphaned records
 table-stats Show detailed table statistics
 connection-test Test database connection
 benchmark Run database benchmarks
 export-schema Export database schema
 import-schema Import database schema
 copy-table Copy table data
 truncate-table Truncate table data
 reset-sequence Reset sequence values

Options:
 --env ENV Environment (dev/prod) [default: dev]
 --table TABLE Target table name
 --schema SCHEMA Target schema name
 --query-id ID Query ID to kill
 --limit N Limit results [default: 100]
 --output FORMAT Output format (table/json/csv) [default: table]
 --file FILE Output file path
 --force Force operation without confirmation
 --debug Enable debug output
 --quiet Suppress verbose output
 --dry-run Show what would be done without executing

Examples:
 $0 size # Show database size
 $0 tables # List all tables
 $0 tables --table users # Show info for users table
 $0 indexes --table users # Show indexes for users table
 $0 optimize # Optimize database
 $0 cleanup # Clean up temporary data
 $0 duplicate-data --table users # Find duplicate users
 $0 copy-table --table users # Copy users table
 $0 export-schema --file schema.sql # Export schema to file
 $0 benchmark # Run performance benchmarks
EOF
}
|
||
|
|
||
|
# Check if .env file exists and load it
|
||
|
# Load environment variables from ./.env (relative to the project root we
# already cd'd into).  Exits with guidance if the file is missing.
#
# Fix: the previous 'export $(grep -v "^#" .env | xargs)' word-split every
# value, corrupting any setting containing spaces, quotes, or '=' signs.
# Sourcing the file under 'set -a' (allexport) exports each assignment
# verbatim and still honors '#' comment lines.
load_env() {
    if [ ! -f ".env" ]; then
        log_error ".env file not found"
        echo "Please run the database setup script first:"
        echo " ./scripts/db-setup.sh setup"
        exit 1
    fi

    # Load environment variables: every variable assigned while 'set -a'
    # is active is automatically exported.
    set -a
    # shellcheck disable=SC1091
    . ./.env
    set +a
}
|
||
|
|
||
|
# Parse database URL
|
||
|
# Parse $DATABASE_URL into DB_TYPE plus connection fields.
#
# postgresql://USER:PASS@HOST:PORT/NAME  -> DB_HOST/PORT/NAME/USER/PASS
# sqlite://PATH                          -> DB_FILE
#
# Fixes: the URL was previously piped through unquoted 'echo $DATABASE_URL',
# which word-splits and glob-expands passwords containing spaces or '*',
# and echo may mangle backslashes.  printf '%s' with a quoted expansion is
# safe; the sqlite prefix strip uses parameter expansion instead of sed.
# NOTE(review): the sed patterns still assume the full user:pass@host:port
# form -- URLs without an explicit password or port yield empty fields,
# exactly as before.
parse_database_url() {
    if [[ "$DATABASE_URL" == postgresql://* ]] || [[ "$DATABASE_URL" == postgres://* ]]; then
        DB_TYPE="postgresql"
        DB_HOST=$(printf '%s' "$DATABASE_URL" | sed -n 's/.*@\([^:]*\):.*/\1/p')
        DB_PORT=$(printf '%s' "$DATABASE_URL" | sed -n 's/.*:\([0-9]*\)\/.*/\1/p')
        DB_NAME=$(printf '%s' "$DATABASE_URL" | sed -n 's/.*\/\([^?]*\).*/\1/p')
        DB_USER=$(printf '%s' "$DATABASE_URL" | sed -n 's/.*\/\/\([^:]*\):.*/\1/p')
        DB_PASS=$(printf '%s' "$DATABASE_URL" | sed -n 's/.*:\/\/[^:]*:\([^@]*\)@.*/\1/p')
    elif [[ "$DATABASE_URL" == sqlite://* ]]; then
        DB_TYPE="sqlite"
        # Strip the scheme with parameter expansion; no subshell needed.
        DB_FILE="${DATABASE_URL#sqlite://}"
    else
        log_error "Unsupported database URL format: $DATABASE_URL"
        exit 1
    fi
}
|
||
|
|
||
|
# Execute SQL query
|
||
|
# Run one SQL statement against the configured database.
#   $1 - SQL text
#   $2 - "true" to produce machine-readable output for $(...) capture
#        (default "false": human-readable table output)
#   $3 - capture format: "csv" for CSV with header, anything else for
#        bare unaligned rows (default "table")
# NOTE(review): every invocation discards stderr (2>/dev/null), so SQL
# errors are silent; callers must not assume the statement succeeded.
# Each call also opens a fresh psql/sqlite3 connection, so session state
# (TEMP tables, SET variables) does not survive between calls.
execute_sql() {
    local query="$1"
    local capture_output="${2:-false}"
    local format="${3:-table}"

    log_debug "Executing SQL: $query"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # psql reads credentials from PGPASSWORD; cleared again below.
        export PGPASSWORD="$DB_PASS"
        if [ "$capture_output" = "true" ]; then
            if [ "$format" = "csv" ]; then
                psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "$query" --csv 2>/dev/null
            else
                # -t -A: tuples only, unaligned -- suitable for capture.
                psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -t -A -c "$query" 2>/dev/null
            fi
        else
            psql -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" -c "$query" 2>/dev/null
        fi
        unset PGPASSWORD
    elif [ "$DB_TYPE" = "sqlite" ]; then
        if [ "$capture_output" = "true" ]; then
            if [ "$format" = "csv" ]; then
                sqlite3 -header -csv "$DB_FILE" "$query" 2>/dev/null
            else
                sqlite3 "$DB_FILE" "$query" 2>/dev/null
            fi
        else
            sqlite3 "$DB_FILE" "$query" 2>/dev/null
        fi
    fi
}
|
||
|
|
||
|
# Setup utility directories
|
||
|
# Ensure the working directories (temp, dumps, logs) exist.
setup_directories() {
    local dir
    for dir in "$TEMP_DIR" "$DUMP_DIR" "$LOGS_DIR"; do
        mkdir -p "$dir"
    done
}
|
||
|
|
||
|
# Show database size information
|
||
|
# Report total database size plus the largest tables and indexes.
# PostgreSQL: catalog queries; SQLite: file size + page accounting.
#
# Fixes: 'local v=$(cmd)' masked the command's exit status (declaration and
# assignment split); the SQLite byte total was stored in a variable named
# 'total_pages' even though it holds bytes (renamed to total_bytes).
show_database_size() {
    print_header "Database Size Information"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # Total database size
        local total_size
        total_size=$(execute_sql "SELECT pg_size_pretty(pg_database_size('$DB_NAME'));" true)
        log "Total Database Size: $total_size"

        # Table sizes (heap + indexes, heap only, and index-only delta).
        print_subheader "Table Sizes (Top 20)"
        execute_sql "
        SELECT
        schemaname,
        tablename,
        pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size,
        pg_size_pretty(pg_relation_size(schemaname||'.'||tablename)) as table_size,
        pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename) - pg_relation_size(schemaname||'.'||tablename)) as index_size
        FROM pg_tables
        WHERE schemaname NOT IN ('information_schema', 'pg_catalog')
        ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC
        LIMIT 20;
        "

        # Index sizes
        print_subheader "Index Sizes (Top 10)"
        execute_sql "
        SELECT
        schemaname,
        tablename,
        indexname,
        pg_size_pretty(pg_relation_size(indexrelid)) as size
        FROM pg_stat_user_indexes
        ORDER BY pg_relation_size(indexrelid) DESC
        LIMIT 10;
        "

    elif [ "$DB_TYPE" = "sqlite" ]; then
        if [ -f "$DB_FILE" ]; then
            local size
            size=$(du -h "$DB_FILE" | cut -f1)
            log "Database File Size: $size"

            # Table info
            print_subheader "Table Information"
            execute_sql "
            SELECT
            name as table_name,
            type
            FROM sqlite_master
            WHERE type IN ('table', 'view')
            ORDER BY name;
            "

            # Page accounting: total bytes = page_count * page_size.
            local page_count page_size
            page_count=$(execute_sql "PRAGMA page_count;" true)
            page_size=$(execute_sql "PRAGMA page_size;" true)
            local total_bytes=$((page_count * page_size))
            log "Total Pages: $page_count"
            log "Page Size: $page_size bytes"
            log "Total Size: $total_bytes bytes"
        fi
    fi
}
|
||
|
|
||
|
# List tables with row counts
|
||
|
# List user tables with activity statistics.  With --table set, delegates
# to show_table_details for that single table instead.
show_tables() {
    print_header "Database Tables"

    # Single-table mode: detailed view, then stop.
    if [ -n "$TABLE_NAME" ]; then
        print_subheader "Table: $TABLE_NAME"
        show_table_details "$TABLE_NAME"
        return
    fi

    if [ "$DB_TYPE" = "postgresql" ]; then
        # Per-table tuple activity and maintenance timestamps.
        execute_sql "
        SELECT
        schemaname,
        tablename,
        n_tup_ins as inserts,
        n_tup_upd as updates,
        n_tup_del as deletes,
        n_live_tup as live_rows,
        n_dead_tup as dead_rows,
        last_vacuum,
        last_analyze
        FROM pg_stat_user_tables
        ORDER BY schemaname, tablename;
        "
    elif [ "$DB_TYPE" = "sqlite" ]; then
        # SQLite has no per-table stats view; show the schema entries,
        # excluding sqlite's internal bookkeeping tables.
        execute_sql "
        SELECT
        name as table_name,
        type,
        sql
        FROM sqlite_master
        WHERE type = 'table'
        AND name NOT LIKE 'sqlite_%'
        ORDER BY name;
        "
    fi
}
|
||
|
|
||
|
# Show table details
|
||
|
# Show column structure and statistics for a single table.
#   $1 - table name (interpolated into SQL; assumed trusted operator input)
show_table_details() {
    local table_name="$1"

    if [ "$DB_TYPE" = "postgresql" ]; then
        print_subheader "Table Structure"
        execute_sql "
        SELECT
        column_name,
        data_type,
        is_nullable,
        column_default,
        character_maximum_length
        FROM information_schema.columns
        WHERE table_name = '$table_name'
        ORDER BY ordinal_position;
        "

        print_subheader "Table Statistics"
        execute_sql "
        SELECT
        schemaname,
        tablename,
        n_live_tup as live_rows,
        n_dead_tup as dead_rows,
        n_tup_ins as total_inserts,
        n_tup_upd as total_updates,
        n_tup_del as total_deletes,
        last_vacuum,
        last_autovacuum,
        last_analyze,
        last_autoanalyze
        FROM pg_stat_user_tables
        WHERE tablename = '$table_name';
        "

    elif [ "$DB_TYPE" = "sqlite" ]; then
        print_subheader "Table Structure"
        execute_sql "PRAGMA table_info($table_name);"

        print_subheader "Row Count"
        local row_count=$(execute_sql "SELECT COUNT(*) FROM $table_name;" true)
        log "Total Rows: $row_count"
    fi
}
|
||
|
|
||
|
# Show index information
|
||
|
# Display index definitions and (PostgreSQL) usage statistics, optionally
# filtered to --table.
#
# Fixes three broken queries that previously failed silently (execute_sql
# discards stderr):
#  - pg_indexes has no 'indexrelid' column; the index OID is now resolved
#    via a regclass cast on the quoted, schema-qualified index name.
#  - pg_stat_user_indexes exposes 'relname'/'indexrelname', not
#    'tablename'/'indexname'; the stats query and its filter use the
#    correct columns.
#  - the SQLite filter injected a second WHERE after the existing AND
#    clauses; it is now an AND clause.
show_indexes() {
    print_header "Database Indexes"

    if [ "$DB_TYPE" = "postgresql" ]; then
        local where_clause=""
        local stats_where=""
        if [ -n "$TABLE_NAME" ]; then
            where_clause="WHERE tablename = '$TABLE_NAME'"
            stats_where="WHERE relname = '$TABLE_NAME'"
        fi

        execute_sql "
        SELECT
        schemaname,
        tablename,
        indexname,
        indexdef,
        pg_size_pretty(pg_relation_size((quote_ident(schemaname)||'.'||quote_ident(indexname))::regclass)) as size
        FROM pg_indexes
        $where_clause
        ORDER BY schemaname, tablename, indexname;
        "

        print_subheader "Index Usage Statistics"
        execute_sql "
        SELECT
        schemaname,
        relname as tablename,
        indexrelname as indexname,
        idx_scan as scans,
        idx_tup_read as tuples_read,
        idx_tup_fetch as tuples_fetched
        FROM pg_stat_user_indexes
        $stats_where
        ORDER BY idx_scan DESC;
        "

    elif [ "$DB_TYPE" = "sqlite" ]; then
        local where_clause=""
        if [ -n "$TABLE_NAME" ]; then
            # Appended after existing AND clauses, so this must be AND.
            where_clause="AND tbl_name = '$TABLE_NAME'"
        fi

        execute_sql "
        SELECT
        name as index_name,
        tbl_name as table_name,
        sql
        FROM sqlite_master
        WHERE type = 'index'
        AND name NOT LIKE 'sqlite_%'
        $where_clause
        ORDER BY tbl_name, name;
        "
    fi
}
|
||
|
|
||
|
# Show constraints
|
||
|
# List constraints (PK/FK/unique/check) for the public schema, optionally
# filtered to --table.  SQLite only supports per-table foreign-key listing.
show_constraints() {
    print_header "Database Constraints"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # Extra filter is ANDed onto the fixed WHERE below.
        local where_clause=""
        if [ -n "$TABLE_NAME" ]; then
            where_clause="AND tc.table_name = '$TABLE_NAME'"
        fi

        # ccu (constraint_column_usage) supplies the referenced table and
        # column for foreign keys; LEFT JOIN so non-FK constraints survive.
        execute_sql "
        SELECT
        tc.constraint_name,
        tc.table_name,
        tc.constraint_type,
        kcu.column_name,
        ccu.table_name AS foreign_table_name,
        ccu.column_name AS foreign_column_name
        FROM information_schema.table_constraints AS tc
        JOIN information_schema.key_column_usage AS kcu
        ON tc.constraint_name = kcu.constraint_name
        LEFT JOIN information_schema.constraint_column_usage AS ccu
        ON ccu.constraint_name = tc.constraint_name
        WHERE tc.table_schema = 'public'
        $where_clause
        ORDER BY tc.table_name, tc.constraint_type, tc.constraint_name;
        "

    elif [ "$DB_TYPE" = "sqlite" ]; then
        if [ -n "$TABLE_NAME" ]; then
            execute_sql "PRAGMA foreign_key_list($TABLE_NAME);"
        else
            log_warn "SQLite constraint information requires table name"
        fi
    fi
}
|
||
|
|
||
|
# Show database users (PostgreSQL only)
|
||
|
# List database roles and their table-level grants (PostgreSQL only).
# Also serves the 'permissions' command in the dispatcher.
show_users() {
    print_header "Database Users"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # pg_user masks passwd as '********' for non-superusers; the column
        # only indicates whether a password exists.
        execute_sql "
        SELECT
        usename as username,
        usesysid as user_id,
        usecreatedb as can_create_db,
        usesuper as is_superuser,
        userepl as can_replicate,
        passwd as password_set,
        valuntil as valid_until
        FROM pg_user
        ORDER BY usename;
        "

        print_subheader "User Privileges"
        execute_sql "
        SELECT
        grantee,
        table_catalog,
        table_schema,
        table_name,
        privilege_type,
        is_grantable
        FROM information_schema.role_table_grants
        WHERE table_schema = 'public'
        ORDER BY grantee, table_name;
        "
    else
        log_warn "User information only available for PostgreSQL"
    fi
}
|
||
|
|
||
|
# Show active sessions
|
||
|
# List connected backends and what they are doing (PostgreSQL only).
show_sessions() {
    print_header "Active Database Sessions"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # Excludes this script's own backend; query text truncated to 100
        # chars to keep table output readable.
        execute_sql "
        SELECT
        pid,
        usename,
        application_name,
        client_addr,
        client_port,
        backend_start,
        query_start,
        state,
        LEFT(query, 100) as current_query
        FROM pg_stat_activity
        WHERE pid <> pg_backend_pid()
        ORDER BY backend_start;
        "
    else
        log_warn "Session information only available for PostgreSQL"
    fi
}
|
||
|
|
||
|
# Show current locks
|
||
|
# Dump pg_locks joined to the owning session's query (PostgreSQL only).
# Ungranted (waiting) locks sort first.
show_locks() {
    print_header "Current Database Locks"

    if [ "$DB_TYPE" = "postgresql" ]; then
        execute_sql "
        SELECT
        l.locktype,
        l.database,
        l.relation,
        l.page,
        l.tuple,
        l.virtualxid,
        l.transactionid,
        l.mode,
        l.granted,
        a.usename,
        a.query,
        a.query_start,
        a.pid
        FROM pg_locks l
        LEFT JOIN pg_stat_activity a ON l.pid = a.pid
        ORDER BY l.granted, l.pid;
        "
    else
        log_warn "Lock information only available for PostgreSQL"
    fi
}
|
||
|
|
||
|
# Show running queries
|
||
|
# List currently-executing queries with elapsed time (PostgreSQL only).
show_queries() {
    print_header "Running Queries"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # Active backends only, excluding this script's own session;
        # oldest queries first.
        execute_sql "
        SELECT
        pid,
        usename,
        application_name,
        client_addr,
        now() - query_start as duration,
        state,
        query
        FROM pg_stat_activity
        WHERE state = 'active'
        AND pid <> pg_backend_pid()
        ORDER BY query_start;
        "
    else
        log_warn "Query information only available for PostgreSQL"
    fi
}
|
||
|
|
||
|
# Kill a specific query
|
||
|
# Terminate a backend by PID (PostgreSQL only).
#   $1 - backend PID to terminate (from 'queries'/'sessions' output)
# Prompts for confirmation unless --force was given.
kill_query() {
    local query_id="$1"

    if [ -z "$query_id" ]; then
        log_error "Query ID is required"
        return 1
    fi

    if [ "$DB_TYPE" = "postgresql" ]; then
        if [ "$FORCE" != "true" ]; then
            echo -n "Kill query with PID $query_id? (y/N): "
            read -r confirm
            if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
                log "Query kill cancelled"
                return 0
            fi
        fi

        # pg_terminate_backend returns 't' on success, 'f' otherwise.
        local result=$(execute_sql "SELECT pg_terminate_backend($query_id);" true)
        if [ "$result" = "t" ]; then
            log_success "Query $query_id terminated"
        else
            log_error "Failed to terminate query $query_id"
        fi
    else
        log_warn "Query termination only available for PostgreSQL"
    fi
}
|
||
|
|
||
|
# Optimize database
|
||
|
# Run VACUUM/ANALYZE maintenance.  Honors --dry-run.
optimize_database() {
    print_header "Database Optimization"

    if [ "$DRY_RUN" = "true" ]; then
        log "Would perform database optimization (VACUUM, ANALYZE)"
        return
    fi

    if [ "$DB_TYPE" = "postgresql" ]; then
        log "Running VACUUM ANALYZE..."
        execute_sql "VACUUM ANALYZE;"
        log_success "Database optimization completed"

        # Show the most recently maintained tables as confirmation.
        log "Updated table statistics:"
        execute_sql "
        SELECT
        schemaname,
        tablename,
        last_vacuum,
        last_analyze
        FROM pg_stat_user_tables
        WHERE last_vacuum IS NOT NULL OR last_analyze IS NOT NULL
        ORDER BY GREATEST(last_vacuum, last_analyze) DESC
        LIMIT 10;
        "

    elif [ "$DB_TYPE" = "sqlite" ]; then
        # SQLite has no combined statement; run the two passes separately.
        log "Running VACUUM..."
        execute_sql "VACUUM;"
        log "Running ANALYZE..."
        execute_sql "ANALYZE;"
        log_success "Database optimization completed"
    fi
}
|
||
|
|
||
|
# Rebuild indexes
|
||
|
# Rebuild every index in the database.  Honors --dry-run.
# NOTE(review): PostgreSQL's plain REINDEX DATABASE takes exclusive locks;
# confirm this is acceptable before running against a live system.
rebuild_indexes() {
    print_header "Rebuilding Database Indexes"

    if [ "$DRY_RUN" = "true" ]; then
        log "Would rebuild all database indexes"
        return
    fi

    if [ "$DB_TYPE" = "postgresql" ]; then
        log "Running REINDEX DATABASE..."
        execute_sql "REINDEX DATABASE $DB_NAME;"
        log_success "Index rebuild completed"
    elif [ "$DB_TYPE" = "sqlite" ]; then
        log "Running REINDEX..."
        execute_sql "REINDEX;"
        log_success "Index rebuild completed"
    fi
}
|
||
|
|
||
|
# Check database integrity
|
||
|
# Surface integrity/health issues: table bloat and unindexed foreign keys
# (PostgreSQL) or PRAGMA integrity_check (SQLite).
check_integrity() {
    print_header "Database Integrity Check"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # Dead-tuple bloat ratio per table (dead/live * 100).
        log "Checking for table corruption..."
        execute_sql "
        SELECT
        schemaname,
        tablename,
        n_dead_tup,
        n_live_tup,
        CASE
        WHEN n_live_tup = 0 THEN 0
        ELSE round((n_dead_tup::float / n_live_tup::float) * 100, 2)
        END as bloat_ratio
        FROM pg_stat_user_tables
        WHERE n_dead_tup > 0
        ORDER BY bloat_ratio DESC;
        "

        # FK constraints whose columns are not a leading subset of any
        # index on the referencing table (array containment check).
        log "Checking for missing indexes on foreign keys..."
        execute_sql "
        SELECT
        c.conrelid::regclass as table_name,
        string_agg(a.attname, ', ') as columns,
        'Missing index on foreign key' as issue
        FROM pg_constraint c
        JOIN pg_attribute a ON a.attnum = ANY(c.conkey) AND a.attrelid = c.conrelid
        WHERE c.contype = 'f'
        AND NOT EXISTS (
        SELECT 1 FROM pg_index i
        WHERE i.indrelid = c.conrelid
        AND c.conkey[1:array_length(c.conkey,1)] <@ i.indkey[0:array_length(i.indkey,1)]
        )
        GROUP BY c.conrelid, c.conname;
        "

    elif [ "$DB_TYPE" = "sqlite" ]; then
        # integrity_check prints exactly 'ok' when the file is healthy.
        log "Running integrity check..."
        local result=$(execute_sql "PRAGMA integrity_check;" true)
        if [ "$result" = "ok" ]; then
            log_success "Database integrity check passed"
        else
            log_error "Database integrity issues found: $result"
        fi
    fi
}
|
||
|
|
||
|
# Clean up temporary data
|
||
|
# Remove temporary files, prune old logs, and terminate long-idle
# PostgreSQL sessions.  Honors --dry-run.
#
# Fixes: 'rm -rf "$TEMP_DIR"/*' is now guarded with ${TEMP_DIR:?} so an
# empty/unset variable aborts instead of expanding to 'rm -rf /*', plus
# '--' to stop option parsing; the find -mtime argument is quoted.
cleanup_database() {
    print_header "Database Cleanup"

    if [ "$DRY_RUN" = "true" ]; then
        log "Would clean up temporary database data"
        return
    fi

    # Clean up temporary directories
    if [ -d "$TEMP_DIR" ]; then
        log "Cleaning temporary directory..."
        rm -rf -- "${TEMP_DIR:?}"/*
        log_success "Temporary files cleaned"
    fi

    # Clean up log files older than the retention window.
    if [ -d "$LOGS_DIR" ]; then
        log "Cleaning old log files..."
        find "$LOGS_DIR" -name "*.log" -mtime +"$LOG_RETENTION_DAYS" -delete
        log_success "Old log files cleaned"
    fi

    # Database-specific cleanup
    if [ "$DB_TYPE" = "postgresql" ]; then
        log "Cleaning expired sessions..."
        # NOTE(review): idleness is measured from query_start (when the
        # last query began), not state_change -- confirm intended cutoff.
        execute_sql "
        SELECT pg_terminate_backend(pid)
        FROM pg_stat_activity
        WHERE state = 'idle'
        AND query_start < now() - interval '1 hour';
        " >/dev/null 2>&1 || true
        log_success "Expired sessions cleaned"
    fi
}
|
||
|
|
||
|
# Test database connection
|
||
|
# Verify the database is reachable and report round-trip time in ms.
# NOTE(review): 'date +%s%3N' is a GNU coreutils extension; on BSD/macOS
# date the '%3N' is printed literally and the arithmetic below would fail
# -- confirm this script only targets GNU systems.
test_connection() {
    print_header "Database Connection Test"

    local start_time=$(date +%s%3N)

    if [ "$DB_TYPE" = "postgresql" ]; then
        export PGPASSWORD="$DB_PASS"
        # pg_isready checks the server accepts connections at all...
        if pg_isready -h "$DB_HOST" -p "$DB_PORT" -U "$DB_USER" >/dev/null 2>&1; then
            log_success "PostgreSQL server is accepting connections"

            # ...then a trivial query validates credentials and database.
            if execute_sql "SELECT 1;" >/dev/null 2>&1; then
                local end_time=$(date +%s%3N)
                local response_time=$((end_time - start_time))
                log_success "Database connection successful (${response_time}ms)"
            else
                log_error "Database connection failed"
            fi
        else
            log_error "PostgreSQL server is not accepting connections"
        fi
        unset PGPASSWORD

    elif [ "$DB_TYPE" = "sqlite" ]; then
        if [ -f "$DB_FILE" ]; then
            if execute_sql "SELECT 1;" >/dev/null 2>&1; then
                local end_time=$(date +%s%3N)
                local response_time=$((end_time - start_time))
                log_success "SQLite database accessible (${response_time}ms)"
            else
                log_error "SQLite database access failed"
            fi
        else
            log_error "SQLite database file not found: $DB_FILE"
        fi
    fi
}
|
||
|
|
||
|
# Find duplicate data
|
||
|
# Report rows that are duplicated across all non-bookkeeping columns
# (everything except id/created_at/updated_at).
#   $1 - table name (required)
#
# Fix: the SQLite branch previously ran 'GROUP BY *', which is not valid
# SQL in SQLite and always failed silently (stderr is discarded by
# execute_sql).  An explicit column list is now built from the
# pragma_table_info() table-valued function, mirroring the PostgreSQL path.
find_duplicates() {
    local table_name="$1"

    if [ -z "$table_name" ]; then
        log_error "Table name is required for duplicate detection"
        return 1
    fi

    print_header "Finding Duplicate Data in $table_name"

    if [ "$DB_TYPE" = "postgresql" ]; then
        # Comparable columns, excluding surrogate key and timestamps.
        local columns
        columns=$(execute_sql "
        SELECT string_agg(column_name, ', ')
        FROM information_schema.columns
        WHERE table_name = '$table_name'
        AND column_name NOT IN ('id', 'created_at', 'updated_at');
        " true)

        if [ -n "$columns" ]; then
            execute_sql "
            SELECT $columns, COUNT(*) as duplicate_count
            FROM $table_name
            GROUP BY $columns
            HAVING COUNT(*) > 1
            ORDER BY duplicate_count DESC
            LIMIT $LIMIT;
            "
        else
            log_warn "No suitable columns found for duplicate detection"
        fi

    elif [ "$DB_TYPE" = "sqlite" ]; then
        # pragma_table_info() (SQLite 3.16+) lets us build the column list
        # in SQL, just like the PostgreSQL branch above.
        local columns
        columns=$(execute_sql "
        SELECT group_concat(name, ', ')
        FROM pragma_table_info('$table_name')
        WHERE name NOT IN ('id', 'created_at', 'updated_at');
        " true)

        if [ -n "$columns" ]; then
            execute_sql "
            SELECT $columns, COUNT(*) as duplicate_count
            FROM $table_name
            GROUP BY $columns
            HAVING COUNT(*) > 1
            ORDER BY duplicate_count DESC
            LIMIT $LIMIT;
            "
        else
            log_warn "No suitable columns found for duplicate detection"
        fi
    fi
}
|
||
|
|
||
|
# Run database benchmarks
|
||
|
# Run crude INSERT/SELECT timing benchmarks against a scratch table.
#
# Fix: the original created a TEMP table, but every execute_sql call opens
# a brand-new psql/sqlite3 connection, so the temp table was gone before
# the first INSERT ran -- all 1000 INSERTs and the COUNT failed silently
# (stderr is discarded) and the reported times measured only connection
# overhead.  A regular table (portable DDL, dropped afterwards) fixes this.
run_benchmarks() {
    print_header "Database Benchmarks"

    log "Running basic performance tests..."

    # Scratch table; DDL kept portable between PostgreSQL and SQLite.
    execute_sql "DROP TABLE IF EXISTS benchmark_test;" >/dev/null 2>&1
    execute_sql "
    CREATE TABLE benchmark_test (
    data TEXT,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    " >/dev/null 2>&1

    # INSERT benchmark: 1000 single-row statements.  Each one is its own
    # connection, so this deliberately measures per-statement round-trips.
    local start_time
    start_time=$(date +%s%3N)
    local i
    for i in {1..1000}; do
        execute_sql "INSERT INTO benchmark_test (data) VALUES ('test_data_$i');" >/dev/null 2>&1
    done
    local end_time
    end_time=$(date +%s%3N)
    local insert_time=$((end_time - start_time))
    log "1000 INSERTs completed in ${insert_time}ms"

    # SELECT benchmark
    start_time=$(date +%s%3N)
    execute_sql "SELECT COUNT(*) FROM benchmark_test;" >/dev/null 2>&1
    end_time=$(date +%s%3N)
    local select_time=$((end_time - start_time))
    log "COUNT query completed in ${select_time}ms"

    # Cleanup
    execute_sql "DROP TABLE benchmark_test;" >/dev/null 2>&1

    log_success "Benchmark completed"
}
|
||
|
|
||
|
# Parse command line arguments
|
||
|
# ---------------------------------------------------------------------------
# Command-line parsing.  The first non-option token becomes COMMAND; a
# second non-option token is rejected as an unknown option.
# ---------------------------------------------------------------------------
COMMAND=""
ENVIRONMENT="dev"
TABLE_NAME=""
SCHEMA_NAME=""
QUERY_ID=""
LIMIT=100
OUTPUT_FORMAT="table"
OUTPUT_FILE=""
FORCE="false"
DEBUG="false"
QUIET="false"
DRY_RUN="false"

while [[ $# -gt 0 ]]; do
    case $1 in
        --env)
            ENVIRONMENT="$2"
            shift 2
            ;;
        --table)
            TABLE_NAME="$2"
            shift 2
            ;;
        --schema)
            SCHEMA_NAME="$2"
            shift 2
            ;;
        --query-id)
            QUERY_ID="$2"
            shift 2
            ;;
        --limit)
            LIMIT="$2"
            shift 2
            ;;
        --output)
            OUTPUT_FORMAT="$2"
            shift 2
            ;;
        --file)
            OUTPUT_FILE="$2"
            shift 2
            ;;
        --force)
            FORCE="true"
            shift
            ;;
        --debug)
            DEBUG="true"
            shift
            ;;
        --quiet)
            QUIET="true"
            shift
            ;;
        --dry-run)
            DRY_RUN="true"
            shift
            ;;
        -h|--help)
            print_usage
            exit 0
            ;;
        *)
            # First bare word is the command; any further bare word (or an
            # unrecognized flag once COMMAND is set) is an error.
            if [ -z "$COMMAND" ]; then
                COMMAND="$1"
            else
                log_error "Unknown option: $1"
                print_usage
                exit 1
            fi
            shift
            ;;
    esac
done
|
||
|
|
||
|
# Set environment variable
|
||
|
# Export the selected environment for child processes (psql, helpers).
export ENVIRONMENT="$ENVIRONMENT"

# A command is mandatory; bail out with usage otherwise.
if [ -z "$COMMAND" ]; then
    print_usage
    exit 1
fi

# Sanity check: Cargo.toml marks the project root this script expects
# (we already cd'd to PROJECT_ROOT above).
if [ ! -f "Cargo.toml" ]; then
    log_error "Please run this script from the project root directory"
    exit 1
fi

# Load .env and derive DB_TYPE / connection fields from DATABASE_URL.
load_env
parse_database_url

# Ensure temp/dumps/logs directories exist.
setup_directories
|
||
|
|
||
|
# Execute command
|
||
|
# ---------------------------------------------------------------------------
# Command dispatch.  Commands that log "not yet implemented" are declared
# in the usage text as placeholders.
# ---------------------------------------------------------------------------
case "$COMMAND" in
    "size")
        show_database_size
        ;;
    "tables")
        show_tables
        ;;
    "indexes")
        show_indexes
        ;;
    "constraints")
        show_constraints
        ;;
    "users")
        show_users
        ;;
    # 'permissions' is an alias: show_users also prints grants.
    "permissions")
        show_users
        ;;
    "sessions")
        show_sessions
        ;;
    "locks")
        show_locks
        ;;
    "queries")
        show_queries
        ;;
    "kill-query")
        kill_query "$QUERY_ID"
        ;;
    "optimize")
        optimize_database
        ;;
    "reindex")
        rebuild_indexes
        ;;
    "check-integrity")
        check_integrity
        ;;
    "repair")
        log_warn "Database repair not yet implemented"
        ;;
    "cleanup")
        cleanup_database
        ;;
    "logs")
        log_warn "Database log viewing not yet implemented"
        ;;
    "config")
        log_warn "Database configuration display not yet implemented"
        ;;
    "extensions")
        log_warn "Extension listing not yet implemented"
        ;;
    "sequences")
        log_warn "Sequence information not yet implemented"
        ;;
    "triggers")
        log_warn "Trigger information not yet implemented"
        ;;
    "functions")
        log_warn "Function information not yet implemented"
        ;;
    "views")
        log_warn "View information not yet implemented"
        ;;
    # Composite report: size, tables, indexes, constraints in one pass.
    "schema-info")
        show_database_size
        show_tables
        show_indexes
        show_constraints
        ;;
    "duplicate-data")
        find_duplicates "$TABLE_NAME"
        ;;
    "orphaned-data")
        log_warn "Orphaned data detection not yet implemented"
        ;;
    "table-stats")
        show_table_details "$TABLE_NAME"
        ;;
    "connection-test")
        test_connection
        ;;
    "benchmark")
        run_benchmarks
        ;;
    "export-schema")
        log_warn "Schema export not yet implemented"
        ;;
    "import-schema")
        log_warn "Schema import not yet implemented"
        ;;
    "copy-table")
        log_warn "Table copy not yet implemented"
        ;;
    # Destructive: requires --table and, unless --force, interactive
    # confirmation before truncating.
    "truncate-table")
        if [ -n "$TABLE_NAME" ]; then
            if [ "$FORCE" != "true" ]; then
                echo -n "This will delete all data in table '$TABLE_NAME'. Continue? (y/N): "
                read -r confirm
                if [[ ! "$confirm" =~ ^[Yy]$ ]]; then
                    log "Truncate cancelled"
                    exit 0
                fi
            fi
            execute_sql "TRUNCATE TABLE $TABLE_NAME;"
            log_success "Table $TABLE_NAME truncated"
        else
            log_error "Table name is required"
        fi
        ;;
    "reset-sequence")
        log_warn "Sequence reset not yet implemented"
        ;;
    *)
        log_error "Unknown command: $COMMAND"
        print_usage
        exit 1
        ;;
esac
|