- Extract shared functionality into test-stability-common.sh
- Refactor test-stability-runner.sh from 421 to 40 lines
- Refactor test-stability-runner-simple.sh from 423 to 117 lines
- Refactor test-stability-runner.zsh from 607 to 93 lines
- Net reduction: 1,336 deletions, 485 additions (-851 lines)
- Maintain all existing functionality while eliminating code duplication
- Improve maintainability with single source of truth for common functions
4 changed files with 485 additions and 1336 deletions
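The refactored runner scripts themselves are not reproduced below; as a rough sketch only (the source path, loop variable, and error handling are assumptions, not code from this commit), a runner built on these shared helpers would look something like:

    #!/bin/bash
    set -euo pipefail

    # Load the shared helpers (relative path is an assumption; adjust to the repo layout)
    source "$(dirname "$0")/test-stability-common.sh"

    # TOTAL_RUNS, track_test_progress, run_single_test, etc. come from the sourced file
    for ((i = 1; i <= TOTAL_RUNS; i++)); do
        track_test_progress "$i" "test-playwright"
        run_single_test "$i" || true   # keep looping even if a run fails
    done

    generate_summary_report
    display_final_results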
test-stability-common.sh
@@ -0,0 +1,347 @@
#!/bin/bash

# Test Stability Runner Common Functions for TimeSafari
# Shared functionality for all test stability runners
# Author: Matthew Raymer

set -euo pipefail

# Configuration
TOTAL_RUNS=10
RESULTS_DIR="test-stability-results"
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
LOG_FILE="${RESULTS_DIR}/stability-run-${TIMESTAMP}.log"
SUMMARY_FILE="${RESULTS_DIR}/stability-summary-${TIMESTAMP}.json"
FAILURE_LOG="${RESULTS_DIR}/failure-details-${TIMESTAMP}.log"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
NC='\033[0m' # No Color

# Progress bar characters
PROGRESS_CHAR="█"
EMPTY_CHAR="░"

# Initialize results tracking (bash associative arrays)
declare -A test_results
declare -A test_failures
declare -A test_successes
declare -A run_times
declare -A test_names

# Create results directory
mkdir -p "${RESULTS_DIR}"

# Logging functions
log_info() {
    echo -e "${BLUE}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}

log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}

# Function to extract test names from Playwright output
extract_test_names() {
    local output_file="$1"
    # Extract test names from lines like "✓ 13 [chromium] › test-playwright/30-record-gift.spec.ts:84:5 › Record something given"
    grep -E "✓.*test-playwright" "$output_file" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//' | sort | uniq
}
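# For the example line above, this yields "30-record-gift.spec.ts"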

# Function to check if test passed in a run
test_passed_in_run() {
    local test_name="$1"
    local run_output="$2"
    grep -q "✓.*test-playwright/$test_name" "$run_output" 2>/dev/null
}

# Function to check if test failed in a run
test_failed_in_run() {
    local test_name="$1"
    local run_output="$2"
    grep -q "✗.*test-playwright/$test_name" "$run_output" 2>/dev/null
}

# Function to get test duration
get_test_duration() {
    local test_name="$1"
    local run_output="$2"
    # Locate the test's result line (✓ or ✗) by its test-playwright/ path, matching the helpers above
    local duration=$(grep -A 1 "✓.*test-playwright/$test_name\|✗.*test-playwright/$test_name" "$run_output" | grep -o "[0-9]\+ms" | head -1)
    echo "${duration:-unknown}"
}

# Function to calculate percentage
calculate_percentage() {
    local passes="$1"
    local total="$2"
    if [ "$total" -eq 0 ]; then
        echo "0"
    else
        echo "$((passes * 100 / total))"
    fi
}
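# Example (illustrative): calculate_percentage 2 3 -> 66 (integer arithmetic truncates)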

# Function to display progress bar
show_progress() {
    local current="$1"
    local total="$2"
    local width="${3:-50}"
    local label="${4:-Progress}"

    # Validate inputs
    if [[ ! "$current" =~ ^[0-9]+$ ]] || [[ ! "$total" =~ ^[0-9]+$ ]] || [[ ! "$width" =~ ^[0-9]+$ ]]; then
        return
    fi

    # Ensure we don't divide by zero
    if [ "$total" -eq 0 ]; then
        total=1
    fi

    local percentage=$((current * 100 / total))
    local filled=$((current * width / total))
    local empty=$((width - filled))

    # Create progress bar string
    local progress_bar=""
    for ((i=0; i<filled; i++)); do
        progress_bar+="$PROGRESS_CHAR"
    done
    for ((i=0; i<empty; i++)); do
        progress_bar+="$EMPTY_CHAR"
    done

    # Print progress bar with carriage return to overwrite
    printf "\r${CYAN}[%s]${NC} [%s] %d%% (%d/%d)" \
        "$label" "$progress_bar" "$percentage" "$current" "$total"
}
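# Example (illustrative): show_progress 3 10 20 "Test Run" prints roughly
#   [Test Run] [██████░░░░░░░░░░░░░░] 30% (3/10)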

# Function to clear progress bar line
clear_progress() {
    printf "\r%*s\r" "$(tput cols 2>/dev/null || echo 80)" ""
}

# Function to track test execution progress
track_test_progress() {
    local run_number="$1"
    local test_file="$2"

    log_info "Run $run_number/$TOTAL_RUNS: Executing $test_file"
    show_progress "$run_number" "$TOTAL_RUNS" 50 "Test Run"
}

# Function to run a single test execution
run_single_test() {
    local run_number="$1"
    local run_output="${RESULTS_DIR}/run-${run_number}.txt"
    local start_time=$(date +%s)

    log_info "Starting test run $run_number/$TOTAL_RUNS"

    # Run the test suite
    if npm run test:playwright > "$run_output" 2>&1; then
        local end_time=$(date +%s)
        local duration=$((end_time - start_time))
        run_times[$run_number]=$duration

        log_success "Test run $run_number completed successfully in ${duration}s"

        # Extract and analyze test results
        local test_names_list=$(extract_test_names "$run_output")
        for test_name in $test_names_list; do
            if test_passed_in_run "$test_name" "$run_output"; then
                test_successes[$test_name]=$((${test_successes[$test_name]:-0} + 1))
                test_results[$test_name]="pass"
            elif test_failed_in_run "$test_name" "$run_output"; then
                test_failures[$test_name]=$((${test_failures[$test_name]:-0} + 1))
                test_results[$test_name]="fail"
            fi
            test_names[$test_name]=1
        done

        return 0
    else
        local end_time=$(date +%s)
        local duration=$((end_time - start_time))
        run_times[$run_number]=$duration

        log_error "Test run $run_number failed after ${duration}s"

        # Extract test names even from failed runs
        local test_names_list=$(extract_test_names "$run_output" 2>/dev/null || true)
        for test_name in $test_names_list; do
            test_names[$test_name]=1
            if test_failed_in_run "$test_name" "$run_output"; then
                test_failures[$test_name]=$((${test_failures[$test_name]:-0} + 1))
                test_results[$test_name]="fail"
            fi
        done

        return 1
    fi
}
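# Typical caller pattern (illustrative; the runner scripts are not part of this file):
#   run_single_test "$i" || failed_runs=$((failed_runs + 1))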

# Function to generate summary report
generate_summary_report() {
    log_info "Generating summary report..."

    local total_tests=0
    local always_passing=0
    local always_failing=0
    local intermittent=0

    # Count test statistics
    for test_name in "${!test_names[@]}"; do
        total_tests=$((total_tests + 1))
        local passes=${test_successes[$test_name]:-0}
        local fails=${test_failures[$test_name]:-0}
        local total=$((passes + fails))

        if [ "$fails" -eq 0 ]; then
            always_passing=$((always_passing + 1))
        elif [ "$passes" -eq 0 ]; then
            always_failing=$((always_failing + 1))
        else
            intermittent=$((intermittent + 1))
        fi
    done

    # Calculate overall success rate
    local total_runs=$((TOTAL_RUNS * total_tests))
    local total_successes=0
    for passes in "${test_successes[@]}"; do
        total_successes=$((total_successes + passes))
    done
    local overall_success_rate=0
    if [ "$total_runs" -gt 0 ]; then
        overall_success_rate=$((total_successes * 100 / total_runs))
    fi

    # Generate summary data
    cat > "$SUMMARY_FILE" << EOF
{
  "timestamp": "$(date -Iseconds)",
  "total_runs": $TOTAL_RUNS,
  "test_results": {
EOF

    # Add individual test results
    local first=true
    for test_name in "${!test_names[@]}"; do
        local passes=${test_successes[$test_name]:-0}
        local fails=${test_failures[$test_name]:-0}
        local total=$((passes + fails))
        local success_rate=$(calculate_percentage "$passes" "$total")

        if [ "$first" = true ]; then
            first=false
        else
            echo "," >> "$SUMMARY_FILE"
        fi

        cat >> "$SUMMARY_FILE" << EOF
    "$test_name": {
      "passes": $passes,
      "failures": $fails,
      "total": $total,
      "success_rate": $success_rate,
      "status": "${test_results[$test_name]:-unknown}"
    }
EOF
    done

    # Close summary
    cat >> "$SUMMARY_FILE" << EOF
  },
  "summary_stats": {
    "total_tests": $total_tests,
    "always_passing": $always_passing,
    "always_failing": $always_failing,
    "intermittent": $intermittent,
    "overall_success_rate": $overall_success_rate
  }
}
EOF

    log_success "Summary report generated: $SUMMARY_FILE"
}
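# The resulting JSON has the following shape (values below are illustrative, not real results):
# {
#   "timestamp": "2025-01-01T00:00:00+00:00",
#   "total_runs": 10,
#   "test_results": {
#     "30-record-gift.spec.ts": { "passes": 9, "failures": 1, "total": 10, "success_rate": 90, "status": "fail" }
#   },
#   "summary_stats": { "total_tests": 1, "always_passing": 0, "always_failing": 0, "intermittent": 1, "overall_success_rate": 90 }
# }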

# Function to display final results
display_final_results() {
    clear_progress
    echo
    log_info "=== TEST STABILITY ANALYSIS COMPLETE ==="
    echo

    # Display summary statistics
    local total_tests=${#test_names[@]}
    local always_passing=0
    local always_failing=0
    local intermittent=0

    for test_name in "${!test_names[@]}"; do
        local passes=${test_successes[$test_name]:-0}
        local fails=${test_failures[$test_name]:-0}

        if [ "$fails" -eq 0 ]; then
            always_passing=$((always_passing + 1))
        elif [ "$passes" -eq 0 ]; then
            always_failing=$((always_failing + 1))
        else
            intermittent=$((intermittent + 1))
        fi
    done

    echo -e "${GREEN}✅ Always Passing: $always_passing tests${NC}"
    echo -e "${RED}❌ Always Failing: $always_failing tests${NC}"
    echo -e "${YELLOW}⚠️ Intermittent: $intermittent tests${NC}"
    echo -e "${BLUE}📊 Total Tests: $total_tests${NC}"
    echo

    # Display intermittent tests
    if [ "$intermittent" -gt 0 ]; then
        log_warning "Intermittent tests (require investigation):"
        for test_name in "${!test_names[@]}"; do
            local passes=${test_successes[$test_name]:-0}
            local fails=${test_failures[$test_name]:-0}

            if [ "$passes" -gt 0 ] && [ "$fails" -gt 0 ]; then
                local success_rate=$(calculate_percentage "$passes" "$((passes + fails))")
                echo -e "  ${YELLOW}$test_name: $success_rate% success rate${NC}"
            fi
        done
        echo
    fi

    # Display always failing tests
    if [ "$always_failing" -gt 0 ]; then
        log_error "Always failing tests (require immediate attention):"
        for test_name in "${!test_names[@]}"; do
            local passes=${test_successes[$test_name]:-0}
            local fails=${test_failures[$test_name]:-0}

            if [ "$passes" -eq 0 ] && [ "$fails" -gt 0 ]; then
                echo -e "  ${RED}$test_name: 0% success rate${NC}"
            fi
        done
        echo
    fi

    log_info "Detailed results saved to:"
    echo -e "  ${BLUE}Summary: $SUMMARY_FILE${NC}"
    echo -e "  ${BLUE}Log: $LOG_FILE${NC}"
    echo -e "  ${BLUE}Results directory: $RESULTS_DIR${NC}"
}