# Integration Test Runner
# Orchestrates execution of all integration tests with parallel support and reporting
use std log
use test_helpers.nu *
use orbstack_helpers.nu *
# Main test runner
#
# Discovers integration test files, runs them for each requested deployment
# mode (optionally in parallel), writes JUnit/HTML reports, prints a summary,
# and exits non-zero when any test failed.
export def main [
    --filter: string = ""          # Filter pattern (regex) to run subset of tests
    --parallel: int = 1            # Number of parallel test workers
    --mode: string = ""            # Test specific mode (solo, multiuser, cicd, enterprise)
    --verbose: bool = false        # Detailed output
    --report: string = ""          # Generate HTML report at path
    --skip-setup: bool = false     # Skip environment setup
    --skip-teardown: bool = false  # Skip environment teardown
] {
    log info "Integration Test Runner"
    log info "======================="

    let test_config = (load-test-config)

    # Discover all test files
    let test_files = discover-test-files $filter

    log info $"Found ($test_files | length) test files"

    if $verbose {
        log info "Test files:"
        $test_files | each { |f| log info $"  - ($f)" }
    }

    mut all_results = []

    # Determine which modes to test: a single explicit mode, or all of them
    let modes_to_test = if ($mode | is-not-empty) {
        [$mode]
    } else {
        ["solo", "multiuser", "cicd", "enterprise"]
    }

    # NOTE: loop variable renamed so it does not shadow the --mode flag above
    for current_mode in $modes_to_test {
        log info $"Testing mode: ($current_mode)"

        # Setup environment for this mode
        if not $skip_setup {
            setup-test-environment $current_mode $test_config
        }

        # Run tests for this mode
        let mode_results = run-tests-for-mode $current_mode $test_files $parallel $verbose

        $all_results = ($all_results | append $mode_results)

        # Teardown environment
        if not $skip_teardown {
            teardown-test-environment $test_config
        }
    }

    # Generate reports (JUnit always; HTML only when a path was given)
    generate-junit-report $all_results $test_config

    if ($report | is-not-empty) {
        generate-html-report $all_results $report
    }

    # Print summary
    print-test-summary $all_results

    # Exit with appropriate code: 1 if any test failed, 0 otherwise
    let failed_count = ($all_results | where status == "failed" | length)

    if $failed_count > 0 {
        exit 1
    } else {
        exit 0
    }
}
# Discover all integration test files, optionally narrowed by a regex filter.
#
# Returns the full paths of files under the integration test root whose
# basename starts with "test_".
def discover-test-files [filter: string]: nothing -> list<string> {
    let test_root = $"($env.PWD)/provisioning/tests/integration"

    # `ls` treats a quoted interpolated string as a literal path, so the
    # pattern must be converted to a glob explicitly for `**` to expand.
    let all_tests = (
        ls ($"($test_root)/**/*test*.nu" | into glob)
        | get name
        | where { |f| $f | path basename | str starts-with "test_" }
    )

    if ($filter | is-empty) {
        $all_tests
    } else {
        $all_tests | where { |f| $f =~ $filter }
    }
}
# Prepare the integration-test environment for a deployment mode by
# delegating to the standalone setup script.
def setup-test-environment [mode: string, test_config: record] {
    log info $"Setting up test environment for mode: ($mode)"
    nu provisioning/tests/integration/setup_test_environment.nu --mode $mode
    log info "Test environment ready"
}
# Tear the integration-test environment back down by delegating to the
# standalone teardown script (forced, so it never prompts).
def teardown-test-environment [test_config: record] {
    log info "Tearing down test environment..."
    nu provisioning/tests/integration/teardown_test_environment.nu --force
    log info "Test environment cleaned up"
}
# Run tests for a specific mode.
#
# Keeps mode-specific tests matching this mode plus every test that lives
# outside the modes/ directory, then dispatches to the parallel or
# sequential runner. Returns one result record per executed test file.
def run-tests-for-mode [
    mode: string
    test_files: list<string>
    parallel: int
    verbose: bool
]: nothing -> list<record> {
    log info $"Running tests for mode: ($mode)"

    # Filter test files relevant to this mode
    let mode_tests = $test_files | where { |f|
        ($f =~ $"modes/test_($mode)_mode.nu") or (not ($f =~ "modes/"))
    }

    if $parallel > 1 {
        run-tests-parallel $mode_tests $parallel $verbose
    } else {
        run-tests-sequential $mode_tests $verbose
    }
}
# Run tests sequentially, one file at a time.
#
# Returns the accumulated list of result records in execution order.
def run-tests-sequential [test_files: list<string>, verbose: bool]: nothing -> list<record> {
    mut results = []

    for test_file in $test_files {
        log info $"Running test file: ($test_file | path basename)"

        let test_result = execute-test-file $test_file $verbose

        $results = ($results | append $test_result)
    }

    $results
}
# Run tests in parallel.
#
# Splits the test files into roughly equal chunks (one per worker) and runs
# the chunks concurrently with par-each; files within a chunk still run
# sequentially. Returns the flattened list of result records (order is not
# guaranteed to match the input order).
def run-tests-parallel [
    test_files: list<string>
    workers: int
    verbose: bool
]: nothing -> list<record> {
    log info $"Running tests in parallel with ($workers) workers"

    # Ceiling-ish chunk size so every file lands in some chunk
    let chunk_size = (($test_files | length) / $workers | into int) + 1

    # Bucket files by index / chunk_size, then strip the enumerate wrapper
    let chunks = (
        $test_files
        | enumerate
        | group-by { |x| ($x.index / $chunk_size | into int) }
        | values
        | each { |chunk| $chunk | get item }
    )

    # Run each chunk in parallel and flatten the per-chunk result lists
    let results = (
        $chunks
        | par-each { |chunk|
            $chunk | each { |test_file|
                execute-test-file $test_file $verbose
            }
        }
        | flatten
    )

    $results
}
# Execute a single test file in a fresh nushell subprocess.
#
# Returns a result record with: test_file, test_name, status
# ("passed"/"failed"), duration_ms, error_message, stdout, stderr, timestamp.
# A non-zero exit code or a crash while launching both yield status "failed".
def execute-test-file [test_file: string, verbose: bool]: nothing -> record {
    let start_time = (date now)
    # Anchor the extension so a ".nu" elsewhere in the name is untouched
    let test_name = ($test_file | path basename | str replace -r '\.nu$' '')

    try {
        # Run the test file, capturing exit code / stdout / stderr
        let output = (nu $test_file | complete)

        # Duration -> int gives nanoseconds; divide down to milliseconds
        let duration = ((date now) - $start_time | into int) / 1000000

        if $output.exit_code == 0 {
            if $verbose {
                log info $"✓ ($test_file | path basename) passed \(($duration)ms\)"
            }

            {
                test_file: $test_file
                test_name: $test_name
                status: "passed"
                duration_ms: $duration
                error_message: ""
                stdout: $output.stdout
                stderr: $output.stderr
                timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S%.3fZ")
            }
        } else {
            log error $"✗ ($test_file | path basename) failed \(($duration)ms\)"

            {
                test_file: $test_file
                test_name: $test_name
                status: "failed"
                duration_ms: $duration
                error_message: $output.stderr
                stdout: $output.stdout
                stderr: $output.stderr
                timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S%.3fZ")
            }
        }
    } catch { |err|
        # The subprocess could not be run at all (or `complete` itself failed)
        let duration = ((date now) - $start_time | into int) / 1000000

        log error $"✗ ($test_file | path basename) crashed \(($duration)ms\): ($err.msg)"

        {
            test_file: $test_file
            test_name: $test_name
            status: "failed"
            duration_ms: $duration
            error_message: $err.msg
            stdout: ""
            stderr: $err.msg
            timestamp: (date now | format date "%Y-%m-%dT%H:%M:%S%.3fZ")
        }
    }
}
# Generate JUnit XML report into the configured reporting directory.
#
# Expects test_config.reporting.output_dir and
# test_config.reporting.junit.filename to exist. Escapes failure messages so
# arbitrary test output cannot produce malformed XML.
def generate-junit-report [results: list<record>, test_config: record] {
    log info "Generating JUnit report..."

    let report_dir = $test_config.reporting.output_dir
    mkdir $report_dir

    let junit_file = $"($report_dir)/($test_config.reporting.junit.filename)"

    let total = ($results | length)
    let failures = ($results | where status == "failed" | length)
    let total_time = ($results | get duration_ms | math sum) / 1000.0

    let xml = $"<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<testsuites tests=\"($total)\" failures=\"($failures)\" time=\"($total_time)\">
  <testsuite name=\"Integration Tests\" tests=\"($total)\" failures=\"($failures)\" time=\"($total_time)\">
($results | each { |test|
        # Escape XML-significant characters so the message attribute stays valid
        let safe_message = ($test.error_message
            | str replace -a "&" "&amp;"
            | str replace -a "<" "&lt;"
            | str replace -a ">" "&gt;"
            | str replace -a "\"" "&quot;")
        # A literal "]]>" inside CDATA would terminate it early; split it apart
        let safe_stderr = ($test.stderr | str replace -a "]]>" "]]]]><![CDATA[>")

        let status_tag = if $test.status == "failed" {
            $"      <failure message=\"($safe_message)\">
        <![CDATA[($safe_stderr)]]>
      </failure>"
        } else {
            ""
        }

        $"    <testcase name=\"($test.test_name)\" time=\"(($test.duration_ms / 1000.0))\">
($status_tag)
    </testcase>"
    } | str join "\n")
  </testsuite>
</testsuites>"

    $xml | save -f $junit_file

    log info $"JUnit report saved: ($junit_file)"
}
# Generate a self-contained HTML report at output_path.
#
# Guards against division by zero when no tests ran and HTML-escapes error
# messages so test output cannot inject markup into the report.
def generate-html-report [results: list<record>, output_path: string] {
    log info "Generating HTML report..."

    let total = ($results | length)
    let passed = ($results | where status == "passed" | length)
    let failed = ($results | where status == "failed" | length)
    # Avoid division by zero when there are no results
    let pass_rate = if $total == 0 { 0 } else { (($passed / $total) * 100 | into int) }

    let html = $"<!DOCTYPE html>
<html>
<head>
<meta charset=\"UTF-8\">
<title>Integration Test Report</title>
<style>
body { font-family: Arial, sans-serif; margin: 20px; }
h1 { color: #333; }
.summary { background: #f5f5f5; padding: 20px; border-radius: 5px; margin-bottom: 20px; }
.stats { display: flex; gap: 20px; }
.stat { padding: 10px; border-radius: 3px; }
.passed { background: #d4edda; color: #155724; }
.failed { background: #f8d7da; color: #721c24; }
table { width: 100%; border-collapse: collapse; }
th, td { padding: 10px; text-align: left; border-bottom: 1px solid #ddd; }
th { background: #333; color: white; }
.status-passed { color: green; font-weight: bold; }
.status-failed { color: red; font-weight: bold; }
</style>
</head>
<body>
<h1>Integration Test Report</h1>

<div class=\"summary\">
<h2>Summary</h2>
<div class=\"stats\">
<div class=\"stat\">Total Tests: ($total)</div>
<div class=\"stat passed\">Passed: ($passed)</div>
<div class=\"stat failed\">Failed: ($failed)</div>
<div class=\"stat\">Pass Rate: ($pass_rate)%</div>
</div>
</div>

<h2>Test Results</h2>
<table>
<thead>
<tr>
<th>Test Name</th>
<th>Status</th>
<th>Duration \(ms\)</th>
<th>Error Message</th>
</tr>
</thead>
<tbody>
($results | each { |test|
        let status_class = if $test.status == "passed" { "status-passed" } else { "status-failed" }
        # HTML-escape the message so markup in test output can't break the table
        let safe_error = ($test.error_message
            | str replace -a "&" "&amp;"
            | str replace -a "<" "&lt;"
            | str replace -a ">" "&gt;")

        $"    <tr>
      <td>($test.test_name)</td>
      <td class=\"($status_class)\">($test.status | str upcase)</td>
      <td>($test.duration_ms)</td>
      <td>($safe_error)</td>
    </tr>"
    } | str join "\n")
</tbody>
</table>

<p><small>Generated: (date now | format date "%Y-%m-%d %H:%M:%S")</small></p>
</body>
</html>"

    $html | save -f $output_path

    log info $"HTML report saved: ($output_path)"
}
# Print a human-readable summary of all test results to stdout, including a
# per-test error listing when anything failed.
def print-test-summary [results: list<record>] {
    let total = ($results | length)
    let passed = ($results | where status == "passed" | length)
    let failed = ($results | where status == "failed" | length)
    let total_time = ($results | get duration_ms | math sum)

    print ""
    print "========================================="
    print "Integration Test Summary"
    print "========================================="
    print $"Total Tests: ($total)"
    print $"Passed: ($passed)"
    print $"Failed: ($failed)"
    print $"Total Time: ($total_time)ms"
    print "========================================="

    if $failed > 0 {
        print ""
        print "Failed Tests:"
        # Plain for-loop: this is pure side effect, no collected output needed
        for test in ($results | where status == "failed") {
            print $"  ✗ ($test.test_name)"
            print $"    Error: ($test.error_message)"
        }
    }
}