
Fix zsh test stability runner script dependencies and npm script reference

- Create zsh-compatible common functions script (test-stability-common-zsh.sh)
- Fix script directory detection in zsh runner to use $(dirname "$0")
- Update zsh runner to source zsh-compatible common file instead of bash version
- Change npm script from test:playwright to test:web to match package.json
- Remove duplicate array declarations from zsh runner
- Make both scripts executable

Resolves "no such file or directory" and "command not found" errors when running zsh scripts.
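
For context, the key portability problem is that ${BASH_SOURCE[0]} is a bash-only variable, so the bash idiom for locating the script directory breaks when the runner is executed with zsh. A minimal, illustrative contrast of the two forms (both taken from the runner diff further below):

# bash idiom: zsh does not populate BASH_SOURCE, so this resolves to the caller's
# working directory and the subsequent source fails with "no such file or directory"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# zsh-compatible form used by this commit: $0 holds the script path when invoked as a script
SCRIPT_DIR="$(dirname "$0")"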
Branch: pull/159/head
Author: Matthew Raymer, 4 days ago
Commit: ececbd3cc2
2 changed files:
  1. scripts/test-stability-common-zsh.sh (247 lines changed)
  2. scripts/test-stability-runner.zsh (11 lines changed)

scripts/test-stability-common-zsh.sh (new file, 247 lines)

@@ -0,0 +1,247 @@
#!/bin/zsh
# Test Stability Runner Common Functions for TimeSafari (Zsh Version)
# Shared functionality for zsh test stability runners
# Author: Matthew Raymer
set -euo pipefail
# Configuration
TOTAL_RUNS=10
RESULTS_DIR="test-stability-results"
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
LOG_FILE="${RESULTS_DIR}/stability-run-${TIMESTAMP}.log"
SUMMARY_FILE="${RESULTS_DIR}/stability-summary-${TIMESTAMP}.json"
FAILURE_LOG="${RESULTS_DIR}/failure-details-${TIMESTAMP}.log"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
MAGENTA='\033[0;35m'
NC='\033[0m' # No Color
# Progress bar characters
PROGRESS_CHAR="█"
EMPTY_CHAR="░"
# Initialize results tracking (zsh associative arrays)
typeset -A test_results
typeset -A test_failures
typeset -A test_successes
typeset -A run_times
typeset -A test_names
# Create results directory
mkdir -p "${RESULTS_DIR}"
# Logging functions
log_info() {
  echo -e "${BLUE}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_success() {
  echo -e "${GREEN}[SUCCESS]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_warning() {
  echo -e "${YELLOW}[WARNING]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_error() {
  echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
# Function to extract test names from Playwright output
extract_test_names() {
  local output_file="$1"
  # Extract test names from lines like "✓ 13 [chromium] › test-playwright/30-record-gift.spec.ts:84:5 › Record something given"
  grep -E "✓.*test-playwright" "$output_file" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//' | sort | uniq
}
# Function to check if test passed in a run
test_passed_in_run() {
  local test_name="$1"
  local run_output="$2"
  grep -q "✓.*test-playwright/$test_name" "$run_output" 2>/dev/null
}
# Function to check if test failed in a run
test_failed_in_run() {
  local test_name="$1"
  local run_output="$2"
  grep -q "✗.*test-playwright/$test_name" "$run_output" 2>/dev/null
}
# Function to get test duration
get_test_duration() {
  local test_name="$1"
  local run_output="$2"
  # Match the test line whether it passed (✓) or failed (✗) and take the first duration in ms
  local duration=$(grep -A 1 "✓ $test_name\|✗ $test_name" "$run_output" | grep -o "[0-9]\+ms" | head -1)
  echo "${duration:-unknown}"
}
# Function to calculate percentage
calculate_percentage() {
  local passes="$1"
  local total="$2"
  if [ "$total" -eq 0 ]; then
    echo "0"
  else
    echo "$((passes * 100 / total))"
  fi
}
# Function to display progress bar
show_progress() {
  local current="$1"
  local total="$2"
  local percentage=$((current * 100 / total))
  local filled=$((current * 50 / total))
  local empty=$((50 - filled))
  local progress_bar=""
  for ((i=0; i<filled; i++)); do
    progress_bar+="$PROGRESS_CHAR"
  done
  for ((i=0; i<empty; i++)); do
    progress_bar+="$EMPTY_CHAR"
  done
  printf "\r%s [%d%%] (%d/%d)" "$progress_bar" "$percentage" "$current" "$total"
}
# Function to run a single test execution
run_single_test() {
  local run_number="$1"
  local run_output="${RESULTS_DIR}/run-${run_number}-output.txt"
  local start_time=$(date +%s)
  log_info "Starting run $run_number/$TOTAL_RUNS"
  # Run the test suite and capture output
  if npm run test:web > "$run_output" 2>&1; then
    local end_time=$(date +%s)
    local duration=$((end_time - start_time))
    test_results[$run_number]="PASS"
    test_successes[$run_number]="true"
    run_times[$run_number]="$duration"
    log_success "Run $run_number completed successfully in ${duration}s"
    return 0
  else
    local end_time=$(date +%s)
    local duration=$((end_time - start_time))
    test_results[$run_number]="FAIL"
    test_failures[$run_number]="true"
    run_times[$run_number]="$duration"
    log_error "Run $run_number failed after ${duration}s"
    return 1
  fi
}
# Function to generate summary report
generate_summary_report() {
  log_info "Generating summary report..."
  local total_passes=0
  local total_failures=0
  local total_time=0
  for run_number in $(seq 1 $TOTAL_RUNS); do
    if [[ "${test_results[$run_number]:-}" == "PASS" ]]; then
      # Pre-increment so the arithmetic result is never 0, which would abort under set -e
      ((++total_passes))
    else
      ((++total_failures))
    fi
    if [[ -n "${run_times[$run_number]:-}" ]]; then
      ((total_time += run_times[$run_number]))
    fi
  done
  local success_rate=$(calculate_percentage $total_passes $TOTAL_RUNS)
  local avg_time=$((total_time / TOTAL_RUNS))
  # Create summary JSON
  cat > "$SUMMARY_FILE" << EOF
{
"timestamp": "$TIMESTAMP",
"total_runs": $TOTAL_RUNS,
"successful_runs": $total_passes,
"failed_runs": $total_failures,
"success_rate": $success_rate,
"average_time_seconds": $avg_time,
"total_time_seconds": $total_time,
"run_details": {
EOF
  for run_number in $(seq 1 $TOTAL_RUNS); do
    local comma=""
    if [ "$run_number" -lt $TOTAL_RUNS ]; then
      comma=","
    fi
    cat >> "$SUMMARY_FILE" << EOF
"run_$run_number": {
"result": "${test_results[$run_number]:-unknown}",
"duration_seconds": "${run_times[$run_number]:-unknown}",
"timestamp": "$(date -d @${run_times[$run_number]:-0} +%Y-%m-%d_%H-%M-%S 2>/dev/null || echo 'unknown')"
}$comma
EOF
  done
  cat >> "$SUMMARY_FILE" << EOF
}
}
EOF
  log_success "Summary report generated: $SUMMARY_FILE"
}
# Function to display final results
display_final_results() {
  echo
  echo "=========================================="
  echo "          TEST STABILITY RESULTS          "
  echo "=========================================="
  echo "Timestamp: $TIMESTAMP"
  echo "Total Runs: $TOTAL_RUNS"
  local total_passes=0
  local total_failures=0
  local total_time=0
  for run_number in $(seq 1 $TOTAL_RUNS); do
    if [[ "${test_results[$run_number]:-}" == "PASS" ]]; then
      # Pre-increment to avoid a zero-valued arithmetic result tripping set -e
      ((++total_passes))
    else
      ((++total_failures))
    fi
    if [[ -n "${run_times[$run_number]:-}" ]]; then
      ((total_time += run_times[$run_number]))
    fi
  done
  local success_rate=$(calculate_percentage $total_passes $TOTAL_RUNS)
  local avg_time=$((total_time / TOTAL_RUNS))
  echo "Successful Runs: $total_passes"
  echo "Failed Runs: $total_failures"
  echo "Success Rate: ${success_rate}%"
  echo "Average Time: ${avg_time}s"
  echo "Total Time: ${total_time}s"
  echo "=========================================="
  echo
  echo "Detailed results saved to:"
  echo "  - Log: $LOG_FILE"
  echo "  - Summary: $SUMMARY_FILE"
  echo "  - Results directory: $RESULTS_DIR"
  echo
}
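
Taken together, these helpers form a small library: a zsh runner is expected to source the file and drive the run loop itself. The sketch below is illustrative only (the loop, the || true guard, and the trailing echo are assumptions, not part of this commit) and shows roughly how the functions compose:

#!/bin/zsh
# Illustrative runner skeleton built on the common helpers above
SCRIPT_DIR="$(dirname "$0")"
source "${SCRIPT_DIR}/test-stability-common-zsh.sh"

for run_number in $(seq 1 $TOTAL_RUNS); do
  run_single_test "$run_number" || true   # keep iterating even when a run fails (set -e is active)
  show_progress "$run_number" "$TOTAL_RUNS"
done
echo   # terminate the \r-based progress line
generate_summary_report
display_final_results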

scripts/test-stability-runner.zsh (11 lines changed)

@@ -5,16 +5,11 @@
 # Author: Matthew Raymer
 # Source common functions
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-source "${SCRIPT_DIR}/test-stability-common.sh"
+SCRIPT_DIR="$(dirname "$0")"
+source "${SCRIPT_DIR}/test-stability-common-zsh.sh"
 # Zsh-specific overrides and enhancements
-# Override associative array declarations for zsh compatibility
-typeset -A test_results
-typeset -A test_failures
-typeset -A test_successes
-typeset -A run_times
-typeset -A test_names
+# Note: Associative arrays are now defined in the common file
 # Enhanced progress tracking for zsh
 track_test_progress_enhanced() {
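
With both files committed under scripts/ and marked executable (per the commit message), a typical invocation from the repository root looks roughly like this; the checkout path is illustrative. Each run shells out to npm run test:web, so that script must exist in package.json:

cd ~/projects/timesafari          # illustrative checkout location
./scripts/test-stability-runner.zsh

Results land in test-stability-results/: a timestamped log, a JSON summary, a failure-details log, and one captured output file per run.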
