feat: enhance gift recording test with performance tracking and comprehensive documentation

- Replace importUser with importUserFromAccount for improved test reliability
- Add performance monitoring with createPerformanceCollector and step-by-step timing
- Implement comprehensive test documentation with detailed sections for maintenance, debugging, and integration
- Add test-stability-results/ to .gitignore to prevent committing generated test analysis files
- Port test structure to match 60-new-activity.spec.ts style with performance tracking integration
- Add browser-specific timeout handling and error recovery mechanisms
- Include detailed test flow documentation with 11 distinct phases and performance metrics collection
pull/159/head
Matthew Raymer, 3 weeks ago
commit e5e0647fcf
Changed files:
1. .cursor/rules/building.mdc (+31)
2. .gitignore (+3)
3. scripts/test-stability-runner-simple.sh (+423)
4. scripts/test-stability-runner.sh (+421)
5. test-playwright/30-record-gift.spec.ts (487 changes)

.cursor/rules/building.mdc (+31)

@@ -0,0 +1,31 @@
---
alwaysApply: true
---
# Building Guidelines
## Configurations
- The project supports builds using **Vite** for web and **Capacitor** for hybrid apps.
- Capacitor is used for **iOS**, **Android**, and **Electron** targets.
- All builds support three modes: **development**, **testing**, and **production**.
## Build Scripts
- `build-web.sh`
  - Builds a **web-only application**.
  - Defaults to **development mode** unless overridden.
- `build-ios.sh`
  - Builds an **iOS hybrid native application** using Capacitor.
- `build-android.sh`
  - Builds an **Android hybrid native application** using Capacitor.
- `build-electron.sh`
  - Builds an **Electron hybrid desktop application** using Capacitor.
## npm Scripts
- npm scripts delegate to the `build-*` shell scripts.
- Parameter flags determine the **build mode** (`development`, `testing`, `production`).

.gitignore (+3)

@@ -45,6 +45,9 @@ dist-electron-packages
# Test files generated by scripts test-ios.js & test-android.js
.generated/
# Test stability analysis results
test-stability-results/
.env.default
vendor/

scripts/test-stability-runner-simple.sh (+423)

@@ -0,0 +1,423 @@
#!/bin/bash
# Test Stability Runner for TimeSafari (Simple Version)
# Executes the full test suite 10 times and analyzes failure patterns
# Author: Matthew Raymer
set -euo pipefail
# Configuration
TOTAL_RUNS=10
RESULTS_DIR="test-stability-results"
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
LOG_FILE="${RESULTS_DIR}/stability-run-${TIMESTAMP}.log"
SUMMARY_FILE="${RESULTS_DIR}/stability-summary-${TIMESTAMP}.txt"
FAILURE_LOG="${RESULTS_DIR}/failure-details-${TIMESTAMP}.log"
REPORT_FILE="${RESULTS_DIR}/stability-report-${TIMESTAMP}.md"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Initialize results tracking
declare -A test_successes
declare -A test_failures
declare -A test_names
# Create results directory
mkdir -p "${RESULTS_DIR}"
# Logging functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
# Function to extract test names from Playwright output
extract_test_names() {
local output_file="$1"
# Extract test names from result lines like "✓ 13 [chromium] › test-playwright/30-record-gift.spec.ts:84:5 › Record something given"
# Match both passing (✓) and failing (✗) lines so always-failing tests are tracked too
grep -E "(✓|✗).*test-playwright" "$output_file" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//' | sort | uniq
}
# Function to check if test passed in a run
test_passed_in_run() {
local test_name="$1"
local run_output="$2"
grep -q "✓.*test-playwright/$test_name" "$run_output" 2>/dev/null
}
# Function to check if test failed in a run
test_failed_in_run() {
local test_name="$1"
local run_output="$2"
grep -q "✗.*test-playwright/$test_name" "$run_output" 2>/dev/null
}
# Function to calculate percentage
calculate_percentage() {
local passes="$1"
local total="$2"
if [ "$total" -eq 0 ]; then
echo "0"
else
echo "$((passes * 100 / total))"
fi
}
# Function to analyze test results
analyze_results() {
log_info "Analyzing test results..."
# Count total tests
local total_tests=0
local always_passing=0
local always_failing=0
local intermittent=0
# Analyze each test
for test_name in "${!test_names[@]}"; do
total_tests=$((total_tests + 1))
local passes=${test_successes[$test_name]:-0}
local fails=${test_failures[$test_name]:-0}
local total=$((passes + fails))
local success_rate=$(calculate_percentage "$passes" "$total")
# Determine test stability
if [ "$fails" -eq 0 ]; then
always_passing=$((always_passing + 1))
elif [ "$passes" -eq 0 ]; then
always_failing=$((always_failing + 1))
else
intermittent=$((intermittent + 1))
fi
# Save to summary file
echo "$test_name|$passes|$fails|$total|$success_rate" >> "${SUMMARY_FILE}"
done
# Save summary statistics
echo "SUMMARY_STATS|$total_tests|$always_passing|$always_failing|$intermittent" >> "${SUMMARY_FILE}"
log_success "Analysis complete. Results saved to ${SUMMARY_FILE}"
}
# Function to generate detailed report
generate_report() {
log_info "Generating detailed stability report..."
{
echo "# TimeSafari Test Stability Report"
echo ""
echo "**Generated:** $(date)"
echo "**Total Runs:** $TOTAL_RUNS"
# Calculate duration with proper error handling
local current_time=$(date +%s)
local duration=0
if [ -n "$START_TIME" ] && [ "$START_TIME" -gt 0 ]; then
duration=$((current_time - START_TIME))
fi
echo "**Duration:** ${duration} seconds"
echo ""
# Summary statistics
echo "## Summary Statistics"
echo ""
local summary_line=$(grep "SUMMARY_STATS" "${SUMMARY_FILE}")
local total_tests=$(echo "$summary_line" | cut -d'|' -f2)
local always_passing=$(echo "$summary_line" | cut -d'|' -f3)
local always_failing=$(echo "$summary_line" | cut -d'|' -f4)
local intermittent=$(echo "$summary_line" | cut -d'|' -f5)
echo "- **Total Tests:** $total_tests"
echo "- **Always Passing:** $always_passing"
echo "- **Always Failing:** $always_failing"
echo "- **Intermittent:** $intermittent"
echo ""
# Always failing tests
echo "## Always Failing Tests"
echo ""
local failing_found=false
while IFS='|' read -r test_name passes fails total rate; do
if [ "$test_name" != "SUMMARY_STATS" ] && [ "$fails" -eq "$TOTAL_RUNS" ]; then
echo "- $test_name ($fails/$total fails)"
failing_found=true
fi
done < "${SUMMARY_FILE}"
if [ "$failing_found" = false ]; then
echo "No always failing tests found."
fi
echo ""
# Intermittent tests (sorted by success rate)
echo "## Intermittent Tests (Most Unstable First)"
echo ""
local intermittent_found=false
# Create temporary file for sorting
local temp_file=$(mktemp)
while IFS='|' read -r test_name passes fails total rate; do
if [ "$test_name" != "SUMMARY_STATS" ] && [ "$passes" -gt 0 ] && [ "$fails" -gt 0 ]; then
echo "$rate|$test_name|$passes|$fails|$total" >> "$temp_file"
intermittent_found=true
fi
done < "${SUMMARY_FILE}"
if [ "$intermittent_found" = true ]; then
sort -n "$temp_file" | while IFS='|' read -r rate test_name passes fails total; do
echo "- $test_name ($rate% success rate)"
done
else
echo "No intermittent tests found."
fi
rm -f "$temp_file"
echo ""
# Always passing tests
echo "## Always Passing Tests"
echo ""
local passing_found=false
while IFS='|' read -r test_name passes fails total rate; do
if [ "$test_name" != "SUMMARY_STATS" ] && [ "$passes" -eq "$TOTAL_RUNS" ]; then
echo "- $test_name"
passing_found=true
fi
done < "${SUMMARY_FILE}"
if [ "$passing_found" = false ]; then
echo "No always passing tests found."
fi
echo ""
# Detailed test results
echo "## Detailed Test Results"
echo ""
echo "| Test Name | Stability | Passes | Fails | Success Rate |"
echo "|-----------|-----------|--------|-------|--------------|"
while IFS='|' read -r test_name passes fails total rate; do
if [ "$test_name" != "SUMMARY_STATS" ]; then
local stability=""
if [ "$fails" -eq 0 ]; then
stability="always_passing"
elif [ "$passes" -eq 0 ]; then
stability="always_failing"
else
stability="intermittent"
fi
echo "| $test_name | $stability | $passes | $fails | ${rate}% |"
fi
done < "${SUMMARY_FILE}"
echo ""
# Run-by-run summary
echo "## Run-by-Run Summary"
echo ""
for ((i=1; i<=TOTAL_RUNS; i++)); do
local run_file="${RESULTS_DIR}/run-${i}.txt"
if [ -f "$run_file" ]; then
# Extract passed and failed counts using the same method as the main script
local passed=0
local failed=0
local passed_line=$(grep -E "[0-9]+ passed" "$run_file" | tail -1)
if [ -n "$passed_line" ]; then
passed=$(echo "$passed_line" | grep -o "[0-9]\+ passed" | grep -o "[0-9]\+")
fi
local failed_line=$(grep -E "[0-9]+ failed" "$run_file" | tail -1)
if [ -n "$failed_line" ]; then
failed=$(echo "$failed_line" | grep -o "[0-9]\+ failed" | grep -o "[0-9]\+")
fi
local total=$((passed + failed))
echo "**Run $i:** $passed passed, $failed failed ($total total)"
fi
done
} > "$REPORT_FILE"
log_success "Detailed report generated: $REPORT_FILE"
}
# Main execution
main() {
START_TIME=$(date +%s)
log_info "Starting TimeSafari Test Stability Runner (Simple Version)"
log_info "Configuration: $TOTAL_RUNS runs, results in ${RESULTS_DIR}"
log_info "Log file: ${LOG_FILE}"
# Check prerequisites
log_info "Checking prerequisites..."
# Check if Playwright is available
if ! npx playwright --version &> /dev/null; then
log_error "Playwright is not available. Please install dependencies."
exit 1
fi
log_success "Prerequisites check passed"
# Run tests multiple times
for ((run=1; run<=TOTAL_RUNS; run++)); do
log_info "Starting run $run/$TOTAL_RUNS"
local run_start=$(date +%s)
local run_output="${RESULTS_DIR}/run-${run}.txt"
# Run the test suite
if npx playwright test -c playwright.config-local.ts --reporter=list > "$run_output" 2>&1; then
log_success "Run $run completed successfully"
else
log_warning "Run $run completed with failures"
fi
local run_end=$(date +%s)
local run_duration=$((run_end - run_start))
log_info "Run $run completed in ${run_duration}s"
# Extract and track test results
local test_names_list=$(extract_test_names "$run_output")
for test_name in $test_names_list; do
test_names[$test_name]=1
if test_passed_in_run "$test_name" "$run_output"; then
test_successes[$test_name]=$((${test_successes[$test_name]:-0} + 1))
elif test_failed_in_run "$test_name" "$run_output"; then
test_failures[$test_name]=$((${test_failures[$test_name]:-0} + 1))
# Log failure details
echo "=== Run $run - $test_name ===" >> "$FAILURE_LOG"
grep -A 10 -B 5 "$test_name" "$run_output" >> "$FAILURE_LOG" 2>/dev/null || true
echo "" >> "$FAILURE_LOG"
fi
done
# Brief summary for this run - extract from Playwright summary lines
local passed=0
local failed=0
# Extract passed count from the last line containing "passed"
local passed_line=$(grep -E "[0-9]+ passed" "$run_output" | tail -1)
if [ -n "$passed_line" ]; then
passed=$(echo "$passed_line" | grep -o "[0-9]\+ passed" | grep -o "[0-9]\+")
fi
# Extract failed count from the last line containing "failed"
local failed_line=$(grep -E "[0-9]+ failed" "$run_output" | tail -1)
if [ -n "$failed_line" ]; then
failed=$(echo "$failed_line" | grep -o "[0-9]\+ failed" | grep -o "[0-9]\+")
fi
log_info "Run $run summary: $passed passed, $failed failed"
# Show failed tests for this run
if [ "$failed" -gt 0 ]; then
log_warning "Failed tests in run $run:"
# Extract failed test names from the summary section
sed -n '/^ *[0-9][0-9]* failed$/,/^ *[0-9][0-9]* passed/p' "$run_output" | grep "test-playwright" | while read -r line; do
local test_name=$(echo "$line" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//')
log_warning " - $test_name"
done
else
log_success "All tests passed in run $run"
fi
done
# Analyze results
analyze_results
# Generate detailed report
generate_report
# Final summary
local total_duration=$(($(date +%s) - START_TIME))
log_success "Test stability analysis complete!"
log_info "Total duration: ${total_duration}s"
log_info "Results saved to: ${RESULTS_DIR}"
log_info "Summary: ${SUMMARY_FILE}"
log_info "Detailed report: ${REPORT_FILE}"
log_info "Failure details: ${FAILURE_LOG}"
# Display quick summary
echo ""
echo "=== QUICK SUMMARY ==="
local summary_line=$(grep "SUMMARY_STATS" "${SUMMARY_FILE}")
local total_tests=$(echo "$summary_line" | cut -d'|' -f2)
local always_passing=$(echo "$summary_line" | cut -d'|' -f3)
local always_failing=$(echo "$summary_line" | cut -d'|' -f4)
local intermittent=$(echo "$summary_line" | cut -d'|' -f5)
echo "Total Tests: $total_tests"
echo "Always Passing: $always_passing"
echo "Always Failing: $always_failing"
echo "Intermittent: $intermittent"
# Show run-by-run failure summary
echo ""
echo "=== RUN-BY-RUN FAILURE SUMMARY ==="
for ((i=1; i<=TOTAL_RUNS; i++)); do
local run_file="${RESULTS_DIR}/run-${i}.txt"
if [ -f "$run_file" ]; then
local failed_line=$(grep -E "[0-9]+ failed" "$run_file" | tail -1)
local failed_count=0
if [ -n "$failed_line" ]; then
failed_count=$(echo "$failed_line" | grep -o "[0-9]\+ failed" | grep -o "[0-9]\+")
fi
if [ "$failed_count" -gt 0 ]; then
echo "Run $i: $failed_count failed"
# Extract failed test names from the summary section
sed -n '/^ *[0-9][0-9]* failed$/,/^ *[0-9][0-9]* passed/p' "$run_file" | grep "test-playwright" | while read -r line; do
local test_name=$(echo "$line" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//')
echo " - $test_name"
done
else
echo "Run $i: All tests passed"
fi
fi
done
if [ "$always_failing" -gt 0 ]; then
echo ""
echo "🚨 ALWAYS FAILING TESTS:"
while IFS='|' read -r test_name passes fails total rate; do
if [ "$test_name" != "SUMMARY_STATS" ] && [ "$fails" -eq "$TOTAL_RUNS" ]; then
echo " - $test_name"
fi
done < "${SUMMARY_FILE}"
fi
if [ "$intermittent" -gt 0 ]; then
echo ""
echo "⚠️ INTERMITTENT TESTS (most unstable first):"
local temp_file=$(mktemp)
while IFS='|' read -r test_name passes fails total rate; do
if [ "$test_name" != "SUMMARY_STATS" ] && [ "$passes" -gt 0 ] && [ "$fails" -gt 0 ]; then
echo "$rate|$test_name" >> "$temp_file"
fi
done < "${SUMMARY_FILE}"
sort -n "$temp_file" | while IFS='|' read -r rate test_name; do
echo " - $test_name ($rate% success)"
done
rm -f "$temp_file"
fi
}
# Run the main function
main "$@"

scripts/test-stability-runner.sh (+421)

@@ -0,0 +1,421 @@
#!/bin/bash
# Test Stability Runner for TimeSafari
# Executes the full test suite 10 times and analyzes failure patterns
# Author: Matthew Raymer
set -euo pipefail
# Configuration
TOTAL_RUNS=10
RESULTS_DIR="test-stability-results"
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
LOG_FILE="${RESULTS_DIR}/stability-run-${TIMESTAMP}.log"
SUMMARY_FILE="${RESULTS_DIR}/stability-summary-${TIMESTAMP}.json"
FAILURE_LOG="${RESULTS_DIR}/failure-details-${TIMESTAMP}.log"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Initialize results tracking
declare -A test_results
declare -A test_failures
declare -A test_successes
declare -A run_times
# Create results directory
mkdir -p "${RESULTS_DIR}"
# Logging functions
log_info() {
echo -e "${BLUE}[INFO]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_success() {
echo -e "${GREEN}[SUCCESS]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_warning() {
echo -e "${YELLOW}[WARNING]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "${LOG_FILE}"
}
# Function to extract test names from Playwright output
extract_test_names() {
local output_file="$1"
# Extract test names from result lines like "✓ 13 [chromium] › test-playwright/30-record-gift.spec.ts:84:5 › Record something given"
# Match both passing (✓) and failing (✗) lines so always-failing tests are tracked too
grep -E "(✓|✗).*test-playwright" "$output_file" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//' | sort | uniq
}
# Function to check if test passed in a run
test_passed_in_run() {
local test_name="$1"
local run_output="$2"
grep -q "✓.*test-playwright/$test_name" "$run_output" 2>/dev/null
}
# Function to check if test failed in a run
test_failed_in_run() {
local test_name="$1"
local run_output="$2"
grep -q "✗.*test-playwright/$test_name" "$run_output" 2>/dev/null
}
# Function to get test duration
get_test_duration() {
local test_name="$1"
local run_output="$2"
local duration=$(grep -A 1 "✓ $test_name\|✗ $test_name" "$run_output" | grep -o "[0-9]\+ms" | head -1)
echo "${duration:-unknown}"
}
# Function to analyze test results
analyze_results() {
log_info "Analyzing test results..."
# Initialize summary data
local summary_data="{
\"timestamp\": \"$(date -Iseconds)\",
\"total_runs\": $TOTAL_RUNS,
\"test_results\": {},
\"summary_stats\": {
\"total_tests\": 0,
\"always_passing\": 0,
\"always_failing\": 0,
\"intermittent\": 0,
\"success_rate\": 0.0
}
}"
# Analyze each test
for test_name in "${!test_results[@]}"; do
local passes=${test_successes[$test_name]:-0}
local fails=${test_failures[$test_name]:-0}
local total=$((passes + fails))
local success_rate=$(echo "scale=2; $passes * 100 / $total" | bc -l 2>/dev/null || echo "0")
# Determine test stability
local stability=""
if [ "$fails" -eq 0 ]; then
stability="always_passing"
elif [ "$passes" -eq 0 ]; then
stability="always_failing"
else
stability="intermittent"
fi
# Add to summary
summary_data=$(echo "$summary_data" | jq --arg test "$test_name" \
--arg stability "$stability" \
--arg passes "$passes" \
--arg fails "$fails" \
--arg total "$total" \
--arg rate "$success_rate" \
'.test_results[$test] = {
"stability": $stability,
"passes": ($passes | tonumber),
"fails": ($fails | tonumber),
"total": ($total | tonumber),
"success_rate": ($rate | tonumber)
}')
done
# Calculate summary statistics
local total_tests=$(echo "$summary_data" | jq '.test_results | length')
local always_passing=$(echo "$summary_data" | jq '.test_results | to_entries | map(select(.value.stability == "always_passing")) | length')
local always_failing=$(echo "$summary_data" | jq '.test_results | to_entries | map(select(.value.stability == "always_failing")) | length')
local intermittent=$(echo "$summary_data" | jq '.test_results | to_entries | map(select(.value.stability == "intermittent")) | length')
summary_data=$(echo "$summary_data" | jq --arg total "$total_tests" \
--arg passing "$always_passing" \
--arg failing "$always_failing" \
--arg intermittent "$intermittent" \
'.summary_stats.total_tests = ($total | tonumber) |
.summary_stats.always_passing = ($passing | tonumber) |
.summary_stats.always_failing = ($failing | tonumber) |
.summary_stats.intermittent = ($intermittent | tonumber)')
# Save summary
echo "$summary_data" | jq '.' > "${SUMMARY_FILE}"
log_success "Analysis complete. Results saved to ${SUMMARY_FILE}"
}
# Function to generate detailed report
generate_report() {
log_info "Generating detailed stability report..."
local report_file="${RESULTS_DIR}/stability-report-${TIMESTAMP}.md"
{
echo "# TimeSafari Test Stability Report"
echo ""
echo "**Generated:** $(date)"
echo "**Total Runs:** $TOTAL_RUNS"
# Calculate duration with proper error handling
local current_time=$(date +%s)
local duration=0
if [ -n "$START_TIME" ] && [ "$START_TIME" -gt 0 ]; then
duration=$((current_time - START_TIME))
fi
echo "**Duration:** ${duration} seconds"
echo ""
# Summary statistics
echo "## Summary Statistics"
echo ""
local summary_data=$(cat "${SUMMARY_FILE}")
local total_tests=$(echo "$summary_data" | jq '.summary_stats.total_tests')
local always_passing=$(echo "$summary_data" | jq '.summary_stats.always_passing')
local always_failing=$(echo "$summary_data" | jq '.summary_stats.always_failing')
local intermittent=$(echo "$summary_data" | jq '.summary_stats.intermittent')
echo "- **Total Tests:** $total_tests"
echo "- **Always Passing:** $always_passing"
echo "- **Always Failing:** $always_failing"
echo "- **Intermittent:** $intermittent"
echo ""
# Always failing tests
echo "## Always Failing Tests"
echo ""
local failing_tests=$(echo "$summary_data" | jq -r '.test_results | to_entries | map(select(.value.stability == "always_failing")) | .[] | "- " + .key + " (" + (.value.fails | tostring) + "/" + (.value.total | tostring) + " fails)"')
if [ -n "$failing_tests" ]; then
echo "$failing_tests"
else
echo "No always failing tests found."
fi
echo ""
# Intermittent tests
echo "## Intermittent Tests (Most Unstable First)"
echo ""
local intermittent_tests=$(echo "$summary_data" | jq -r '.test_results | to_entries | map(select(.value.stability == "intermittent")) | sort_by(.value.success_rate) | .[] | "- " + .key + " (" + (.value.success_rate | tostring) + "% success rate)"')
if [ -n "$intermittent_tests" ]; then
echo "$intermittent_tests"
else
echo "No intermittent tests found."
fi
echo ""
# Always passing tests
echo "## Always Passing Tests"
echo ""
local passing_tests=$(echo "$summary_data" | jq -r '.test_results | to_entries | map(select(.value.stability == "always_passing")) | .[] | "- " + .key')
if [ -n "$passing_tests" ]; then
echo "$passing_tests"
else
echo "No always passing tests found."
fi
echo ""
# Detailed test results
echo "## Detailed Test Results"
echo ""
echo "| Test Name | Stability | Passes | Fails | Success Rate |"
echo "|-----------|-----------|--------|-------|--------------|"
echo "$summary_data" | jq -r '.test_results | to_entries | sort_by(.key) | .[] | "| " + .key + " | " + .value.stability + " | " + (.value.passes | tostring) + " | " + (.value.fails | tostring) + " | " + (.value.success_rate | tostring) + "% |"'
echo ""
# Run-by-run summary
echo "## Run-by-Run Summary"
echo ""
for ((i=1; i<=TOTAL_RUNS; i++)); do
local run_file="${RESULTS_DIR}/run-${i}.txt"
if [ -f "$run_file" ]; then
# Extract passed and failed counts using the same method as the main script
local passed=0
local failed=0
local passed_line=$(grep -E "[0-9]+ passed" "$run_file" | tail -1)
if [ -n "$passed_line" ]; then
passed=$(echo "$passed_line" | grep -o "[0-9]\+ passed" | grep -o "[0-9]\+")
fi
local failed_line=$(grep -E "[0-9]+ failed" "$run_file" | tail -1)
if [ -n "$failed_line" ]; then
failed=$(echo "$failed_line" | grep -o "[0-9]\+ failed" | grep -o "[0-9]\+")
fi
local total=$((passed + failed))
echo "**Run $i:** $passed passed, $failed failed ($total total)"
fi
done
} > "$report_file"
log_success "Detailed report generated: $report_file"
}
# Main execution
main() {
START_TIME=$(date +%s)
log_info "Starting TimeSafari Test Stability Runner"
log_info "Configuration: $TOTAL_RUNS runs, results in ${RESULTS_DIR}"
log_info "Log file: ${LOG_FILE}"
# Check prerequisites
log_info "Checking prerequisites..."
if ! command -v jq &> /dev/null; then
log_error "jq is required but not installed. Please install jq."
exit 1
fi
if ! command -v bc &> /dev/null; then
log_error "bc is required but not installed. Please install bc."
exit 1
fi
# Check if Playwright is available
if ! npx playwright --version &> /dev/null; then
log_error "Playwright is not available. Please install dependencies."
exit 1
fi
log_success "Prerequisites check passed"
# Run tests multiple times
for ((run=1; run<=TOTAL_RUNS; run++)); do
log_info "Starting run $run/$TOTAL_RUNS"
local run_start=$(date +%s)
local run_output="${RESULTS_DIR}/run-${run}.txt"
# Run the test suite
if npx playwright test -c playwright.config-local.ts --reporter=list > "$run_output" 2>&1; then
log_success "Run $run completed successfully"
else
log_warning "Run $run completed with failures"
fi
local run_end=$(date +%s)
local run_duration=$((run_end - run_start))
run_times[$run]=$run_duration
log_info "Run $run completed in ${run_duration}s"
# Extract and track test results
local test_names=$(extract_test_names "$run_output")
for test_name in $test_names; do
if test_passed_in_run "$test_name" "$run_output"; then
test_successes[$test_name]=$((${test_successes[$test_name]:-0} + 1))
test_results[$test_name]="pass"
elif test_failed_in_run "$test_name" "$run_output"; then
test_failures[$test_name]=$((${test_failures[$test_name]:-0} + 1))
test_results[$test_name]="fail"
# Log failure details
echo "=== Run $run - $test_name ===" >> "$FAILURE_LOG"
grep -A 10 -B 5 "$test_name" "$run_output" >> "$FAILURE_LOG" 2>/dev/null || true
echo "" >> "$FAILURE_LOG"
fi
done
# Brief summary for this run - extract from Playwright summary lines
local passed=0
local failed=0
# Extract passed count from the last line containing "passed"
local passed_line=$(grep -E "[0-9]+ passed" "$run_output" | tail -1)
if [ -n "$passed_line" ]; then
passed=$(echo "$passed_line" | grep -o "[0-9]\+ passed" | grep -o "[0-9]\+")
fi
# Extract failed count from the last line containing "failed"
local failed_line=$(grep -E "[0-9]+ failed" "$run_output" | tail -1)
if [ -n "$failed_line" ]; then
failed=$(echo "$failed_line" | grep -o "[0-9]\+ failed" | grep -o "[0-9]\+")
fi
log_info "Run $run summary: $passed passed, $failed failed"
# Show failed tests for this run
if [ "$failed" -gt 0 ]; then
log_warning "Failed tests in run $run:"
# Extract failed test names from the summary section
sed -n '/^ *[0-9][0-9]* failed$/,/^ *[0-9][0-9]* passed/p' "$run_output" | grep "test-playwright" | while read -r line; do
local test_name=$(echo "$line" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//')
log_warning " - $test_name"
done
else
log_success "All tests passed in run $run"
fi
done
# Analyze results
analyze_results
# Generate detailed report
generate_report
# Final summary
local total_duration=$(($(date +%s) - START_TIME))
log_success "Test stability analysis complete!"
log_info "Total duration: ${total_duration}s"
log_info "Results saved to: ${RESULTS_DIR}"
log_info "Summary: ${SUMMARY_FILE}"
log_info "Detailed report: ${RESULTS_DIR}/stability-report-${TIMESTAMP}.md"
log_info "Failure details: ${FAILURE_LOG}"
# Display quick summary
echo ""
echo "=== QUICK SUMMARY ==="
local summary_data=$(cat "${SUMMARY_FILE}")
local total_tests=$(echo "$summary_data" | jq '.summary_stats.total_tests')
local always_passing=$(echo "$summary_data" | jq '.summary_stats.always_passing')
local always_failing=$(echo "$summary_data" | jq '.summary_stats.always_failing')
local intermittent=$(echo "$summary_data" | jq '.summary_stats.intermittent')
echo "Total Tests: $total_tests"
echo "Always Passing: $always_passing"
echo "Always Failing: $always_failing"
echo "Intermittent: $intermittent"
# Show run-by-run failure summary
echo ""
echo "=== RUN-BY-RUN FAILURE SUMMARY ==="
for ((i=1; i<=TOTAL_RUNS; i++)); do
local run_file="${RESULTS_DIR}/run-${i}.txt"
if [ -f "$run_file" ]; then
local failed_line=$(grep -E "[0-9]+ failed" "$run_file" | tail -1)
local failed_count=0
if [ -n "$failed_line" ]; then
failed_count=$(echo "$failed_line" | grep -o "[0-9]\+ failed" | grep -o "[0-9]\+")
fi
if [ "$failed_count" -gt 0 ]; then
echo "Run $i: $failed_count failed"
# Extract failed test names from the summary section
sed -n '/^ *[0-9][0-9]* failed$/,/^ *[0-9][0-9]* passed/p' "$run_file" | grep "test-playwright" | while read -r line; do
local test_name=$(echo "$line" | sed 's/.*test-playwright\///' | sed 's/:[0-9]*:[0-9]*.*$//')
echo " - $test_name"
done
else
echo "Run $i: All tests passed"
fi
fi
done
if [ "$always_failing" -gt 0 ]; then
echo ""
echo "🚨 ALWAYS FAILING TESTS:"
echo "$summary_data" | jq -r '.test_results | to_entries | map(select(.value.stability == "always_failing")) | .[] | " - " + .key'
fi
if [ "$intermittent" -gt 0 ]; then
echo ""
echo "⚠️ INTERMITTENT TESTS (most unstable first):"
echo "$summary_data" | jq -r '.test_results | to_entries | map(select(.value.stability == "intermittent")) | sort_by(.value.success_rate) | .[] | " - " + .key + " (" + (.value.success_rate | tostring) + "% success)"'
fi
}
# Run the main function
main "$@"

test-playwright/30-record-gift.spec.ts (487 changes)

@@ -1,122 +1,403 @@
/**
* @file Gift Recording Test Suite
* @description Tests TimeSafari's core gift recording functionality with integrated performance tracking
*
* This test covers the complete gift recording flow in TimeSafari.
*
* Focus areas:
* - Performance monitoring for every major user step
* - Gift creation, recording, and verification
* - Public server integration and validation
* - Validation of both behavior and responsiveness
*
* @version 1.0.0
* @author Matthew Raymer
* @lastModified 2025-08-02
*
* ================================================================================
* TEST OVERVIEW
* ================================================================================
*
* This test verifies the complete gift recording workflow from data generation to
* public verification, ensuring end-to-end functionality works correctly with
* comprehensive performance monitoring.
*
* Core Test Objectives:
* 1. Gift Creation & Recording
* - Random gift title generation with uniqueness
* - Random non-zero amount assignment (1-99 range)
* - Proper form filling and validation
* - JWT signing and submission with performance tracking
*
* 2. Gift Verification & Display
* - Gift appears in home view after recording
* - Details match input data exactly
* - Verifiable claim details are accessible
* - UI elements display correctly
*
* 3. Public Verification & Integration
* - Gift viewable on public endorser server
* - Claim details properly exposed via API
* - Cross-platform compatibility (Chromium/Firefox)
*
* ================================================================================
* TEST FLOW & PROCESS
* ================================================================================
*
* Phase 1: Data Generation & Preparation
*
* 1. Generate unique test data:
* - Random 4-character string for gift ID uniqueness
* - Random amount between 1-99 (non-zero validation)
* - Combine with "Gift " prefix for standard format
*
* 2. User preparation:
* - Import User 00 (test account with known state)
* - Navigate to home page
* - Handle onboarding dialog closure
*
* Phase 2: Gift Recording Process
*
* 3. Recipient selection:
* - Click "Person" button to open recipient picker
* - Select "Unnamed/Unknown" recipient
* - Verify selection is applied
*
* 4. Gift details entry:
* - Fill gift title with generated unique string
* - Enter random amount in number field
* - Validate form state before submission
*
* 5. Submission and signing:
* - Click "Sign & Send" button
* - Wait for JWT signing process
* - Verify success notification appears
* - Dismiss any info alerts
*
* Phase 3: Verification & Validation
*
* 6. Home view verification:
* - Refresh home page to load new gift
* - Locate gift in activity list by title
* - Click info link to view details
*
* 7. Details verification:
* - Verify "Verifiable Claim Details" heading
* - Confirm gift title matches exactly
* - Expand Details section for extended info
*
* 8. Public server integration:
* - Click "View on Public Server" link
* - Verify popup opens with correct URL
* - Validate public server accessibility
*
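* The sketch below shows the wrapping pattern applied to each phase above. It is
* a minimal illustration drawn from the test body that follows, not a standalone
* version of the full test:
*
* ```typescript
* import { test, expect } from '@playwright/test';
* import { createPerformanceCollector } from './performanceUtils';
*
* test('phase wrapping sketch', async ({ page }) => {
*   const perfCollector = await createPerformanceCollector(page);
*   // Each user-visible step is wrapped so its duration is recorded by name.
*   await perfCollector.measureUserAction('submit-gift', async () => {
*     await page.getByRole('button', { name: 'Sign & Send' }).click();
*     await expect(page.getByText('That gift was recorded.')).toBeVisible();
*   });
* });
* ```
*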
* ================================================================================
* TEST DATA SPECIFICATIONS
* ================================================================================
*
* Gift Title Format: "Gift [4-char-random]"
* - Prefix: "Gift " (with space)
* - Random component: 4-character alphanumeric string
* - Example: "Gift a7b3", "Gift x9y2"
*
* Amount Range: 1-99 (inclusive)
* - Minimum: 1 (non-zero validation)
* - Maximum: 99 (reasonable upper bound)
* - Type: Integer only
* - Example: 42, 7, 99
*
* Recipient: "Unnamed/Unknown"
* - Standard test recipient
* - No specific DID or contact info
* - Used for all test gifts
*
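* A minimal sketch of the data generation described above (it mirrors the
* generation code in the test body below):
*
* ```typescript
* // 4-character alphanumeric suffix, e.g. "a7b3" -> title "Gift a7b3"
* const randomString = Math.random().toString(36).substring(2, 6);
* // Integer in the 1-99 range (non-zero by construction)
* const randomNonZeroNumber = Math.floor(Math.random() * 99) + 1;
* const finalTitle = 'Gift ' + randomString;
* ```
*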
* ================================================================================
* SELECTOR REFERENCE
* ================================================================================
*
* Form Elements:
* - Gift title input: '[data-testid="giftTitle"]' or 'input[placeholder="What was given"]'
* - Amount input: 'input[type="number"]' or 'input[role="spinbutton"]'
* - Submit button: 'button[name="Sign & Send"]'
* - Success alert: 'div[role="alert"]'
* - Details section: 'h2[name="Details"]'
* - Person button: 'button[name="Person"]'
* - Recipient list: 'ul[role="listbox"]'
*
* Navigation & UI:
* - Onboarding close: '[data-testid="closeOnboardingAndFinish"]'
* - Home page: './' (relative URL)
* - Alert dismissal: 'div[role="alert"] button > svg.fa-xmark'
* - Success message: 'text="That gift was recorded."'
*
* Verification Elements:
* - Gift list item: 'li:first-child' (filtered by title)
* - Info link: '[data-testid="circle-info-link"]'
* - Details heading: 'h2[name="Verifiable Claim Details"]'
* - Details section: 'h2[name="Details", exact="true"]'
* - Public server link: 'a[name="View on the Public Server"]'
*
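* How the selectors above are exercised in practice, as an excerpt-style sketch
* (the locator calls match the test body below; the literal values are samples):
*
* ```typescript
* await page.getByTestId('closeOnboardingAndFinish').click();      // onboarding close
* await page.getByPlaceholder('What was given').fill('Gift a7b3'); // gift title
* await page.getByRole('spinbutton').fill('42');                   // amount input
* await page.getByRole('button', { name: 'Sign & Send' }).click(); // submit
* ```
*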
* ================================================================================
* ERROR HANDLING & DEBUGGING
* ================================================================================
*
* Common Failure Points:
* 1. Onboarding Dialog
* - Issue: Dialog doesn't close properly
* - Debug: Check if closeOnboardingAndFinish button exists
* - Fix: Add wait for dialog to be visible before clicking
*
* 2. Recipient Selection
* - Issue: "Unnamed" recipient not found
* - Debug: Check if recipient list is populated
* - Fix: Add wait for list to load before filtering
*
* 3. Form Submission
* - Issue: "Sign & Send" button not clickable
* - Debug: Check if form is valid and all fields filled
* - Fix: Add validation before submission
*
* 4. Success Verification
* - Issue: Success message doesn't appear
* - Debug: Check network requests and JWT signing
* - Fix: Add longer timeout for signing process
*
* 5. Home View Refresh
* - Issue: Gift doesn't appear in list
* - Debug: Check if gift was actually recorded
* - Fix: Add wait for home view to reload
*
* 6. Public Server Integration
* - Issue: Popup doesn't open or wrong URL
* - Debug: Check if public server is accessible
* - Fix: Verify endorser server configuration
*
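* A defensive-wait sketch for failure points 1 and 2 (a suggested pattern using
* standard Playwright assertions, not code currently in the test body):
*
* ```typescript
* // Failure point 1: wait for the onboarding control before clicking.
* const closeButton = page.getByTestId('closeOnboardingAndFinish');
* await expect(closeButton).toBeVisible();
* await closeButton.click();
*
* // Failure point 2: wait for the recipient entry to render before selecting it.
* const recipient = page.getByRole('listitem').filter({ hasText: 'Unnamed' });
* await expect(recipient).toBeVisible();
* await recipient.locator('svg').click();
* ```
*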
* Debugging Commands:
* ```bash
* # Run with trace for detailed debugging
* npx playwright test 30-record-gift.spec.ts --trace on
*
* # Run with headed browser for visual debugging
* npx playwright test 30-record-gift.spec.ts --headed
*
* # Run with slow motion for step-by-step debugging
* npx playwright test 30-record-gift.spec.ts --debug
* ```
*
* ================================================================================
* BROWSER COMPATIBILITY
* ================================================================================
*
* Tested Browsers:
* - Chromium: Primary target, full functionality
* - Firefox: Secondary target, may have timing differences
*
* Browser-Specific Considerations:
* - Firefox: May require longer timeouts for form interactions
* - Chromium: Generally faster, more reliable
* - Both: Popup handling may differ slightly
*
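* A hedged sketch of per-browser timeout handling (the Firefox value below is an
* illustrative assumption, not taken from this test or its config):
*
* ```typescript
* import { test } from '@playwright/test';
*
* test('gift recording', async ({ page, browserName }) => {
*   // Assumed: give Firefox extra headroom for slower form interactions.
*   if (browserName === 'firefox') test.setTimeout(90_000);
*   // ... test body ...
* });
* ```
*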
* ================================================================================
* PERFORMANCE CONSIDERATIONS
* ================================================================================
*
* Expected Timings:
* - Data generation: < 1ms
* - User import: 2-5 seconds
* - Form filling: 1-2 seconds
* - JWT signing: 3-8 seconds
* - Home refresh: 2-4 seconds
* - Public server: 1-3 seconds
*
* Total expected runtime: 10-20 seconds
*
* Performance Monitoring:
* - Monitor JWT signing time (most variable)
* - Track home view refresh time
* - Watch for memory leaks in popup handling
*
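* The validation hook for these timings mirrors the final step of the test body
* (threshold logic lives in performanceUtils and is not reproduced here):
*
* ```typescript
* const { webVitals } = await attachPerformanceData(testInfo, perfCollector);
* const avgNavigationTime = perfCollector.navigationMetrics.reduce(
*   (sum, nav) => sum + nav.metrics.loadComplete, 0,
* ) / perfCollector.navigationMetrics.length;
* assertPerformanceMetrics(webVitals, initialMetrics, avgNavigationTime);
* ```
*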
* ================================================================================
* MAINTENANCE GUIDELINES
* ================================================================================
*
* When Modifying This Test:
* 1. Update version number and lastModified date
* 2. Test on both Chromium and Firefox
* 3. Verify with different random data sets
* 4. Check that public server integration still works
* 5. Update selector references if UI changes
*
* Related Files to Monitor:
* - src/views/RecordGiftView.vue (gift recording UI)
* - src/views/HomeView.vue (gift display)
* - sw_scripts/safari-notifications.js (JWT signing)
* - src/libs/endorserServer.ts (API integration)
* - test-playwright/testUtils.ts (user management)
*
* ================================================================================
* INTEGRATION POINTS
* ================================================================================
*
* Dependencies:
* - User 00 must be available in test data
* - Endorser server must be running and accessible
* - Public server must be configured correctly
* - JWT signing must be functional
*
* API Endpoints Used:
* - POST /api/claims (gift recording)
* - GET /api/claims (public verification)
* - WebSocket connections for real-time updates
*
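* A read-only sketch of checking the public endpoint listed above with
* Playwright's request fixture (the base URL and response handling are
* assumptions; the test itself only opens the public-server link in a popup):
*
* ```typescript
* import { test, expect } from '@playwright/test';
*
* test('public claims endpoint is reachable', async ({ request }) => {
*   // Assumed endorser base URL; configure via environment in practice.
*   const res = await request.get('https://test-api.endorser.ch/api/claims');
*   expect(res.ok()).toBeTruthy();
* });
* ```
*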
* ================================================================================
* SECURITY CONSIDERATIONS
* ================================================================================
*
* Test Data Security:
* - Random data prevents test interference
* - No sensitive information in test gifts
* - Public server verification is read-only
*
* JWT Handling:
* - Test uses test user credentials
* - Signing process is isolated
* - No production keys used
*
* ================================================================================
* RELATED DOCUMENTATION
* ================================================================================
*
* @see test-playwright/testUtils.ts - User management utilities
* @see test-playwright/README.md - General testing guidelines
* @see docs/user-guides/gift-recording.md - User workflow documentation
* @see src/views/RecordGiftView.vue - Implementation details
* @see sw_scripts/safari-notifications.js - JWT signing implementation
*
* @example Complete test execution
* ```bash
* # Run this specific test
* npx playwright test 30-record-gift.spec.ts
*
* # Run with detailed output
* npx playwright test 30-record-gift.spec.ts --reporter=list
*
* # Run in headed mode for debugging
* npx playwright test 30-record-gift.spec.ts --headed
* ```
*/
import { test, expect } from '@playwright/test';
import { importUserFromAccount } from './testUtils';
import {
createPerformanceCollector,
attachPerformanceData,
assertPerformanceMetrics
} from './performanceUtils';
/**
* @test Record something given
* @description End-to-end test of gift recording functionality with performance tracking
* @tags gift-recording, e2e, user-workflow, performance
* @timeout 45000ms (45 seconds for JWT signing and API calls)
*
* @process
* 1. Generate unique test data
* 2. Import test user and navigate to home
* 3. Record gift with random title and amount
* 4. Verify gift appears in home view
* 5. Check public server integration
*
* @data
* - Gift title: "Gift [random-4-chars]"
* - Amount: Random 1-99
* - Recipient: "Unnamed/Unknown"
*
* @verification
* - Success notification appears
* - Gift visible in home view
* - Details match input data
* - Public server accessible
*
* @browsers chromium, firefox
* @retries 2 (for flaky network conditions)
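*
* A hedged sketch of where the timeout and retries above could be configured
* (these are standard Playwright options; the exact contents of
* playwright.config-local.ts are an assumption):
*
* ```typescript
* import { defineConfig } from '@playwright/test';
*
* export default defineConfig({ timeout: 45_000, retries: 2 });
* ```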
*/
test('Record something given', async ({ page }, testInfo) => {
// STEP 1: Initialize the performance collector
const perfCollector = await createPerformanceCollector(page);
// STEP 2: Generate unique test data
const randomString = Math.random().toString(36).substring(2, 6);
const randomNonZeroNumber = Math.floor(Math.random() * 99) + 1;
// Standard title prefix
const standardTitle = 'Gift ';
// Combine title prefix with the random string
const finalTitle = standardTitle + randomString;
// STEP 3: Import user 00 and navigate to home page
await perfCollector.measureUserAction('import-user-account', async () => {
await importUserFromAccount(page, '00');
});
await perfCollector.measureUserAction('initial-navigation', async () => {
await page.goto('./');
});
const initialMetrics = await perfCollector.collectNavigationMetrics('home-page-load');
await testInfo.attach('initial-page-load-metrics', {
contentType: 'application/json',
body: JSON.stringify(initialMetrics, null, 2)
});
// STEP 4: Close onboarding dialog
await perfCollector.measureUserAction('close-onboarding', async () => {
await page.getByTestId('closeOnboardingAndFinish').click();
});
// STEP 5: Select recipient
await perfCollector.measureUserAction('select-recipient', async () => {
await page.getByRole('button', { name: 'Person' }).click();
await page.getByRole('listitem').filter({ hasText: 'Unnamed' }).locator('svg').click();
});
// STEP 6: Fill gift details
await perfCollector.measureUserAction('fill-gift-details', async () => {
await page.getByPlaceholder('What was given').fill(finalTitle);
await page.getByRole('spinbutton').fill(randomNonZeroNumber.toString());
});
// STEP 7: Submit gift and verify success
await perfCollector.measureUserAction('submit-gift', async () => {
await page.getByRole('button', { name: 'Sign & Send' }).click();
await expect(page.getByText('That gift was recorded.')).toBeVisible();
await page.locator('div[role="alert"] button > svg.fa-xmark').click();
});
// STEP 8: Refresh home view and locate gift
await perfCollector.measureUserAction('refresh-home-view', async () => {
await page.goto('./');
});
await perfCollector.collectNavigationMetrics('home-refresh-load');
const item = page.locator('li:first-child').filter({ hasText: finalTitle });
// STEP 9: View gift details
await perfCollector.measureUserAction('view-gift-details', async () => {
await item.locator('[data-testid="circle-info-link"]').click();
});
await expect(page.getByRole('heading', { name: 'Verifiable Claim Details' })).toBeVisible();
await expect(page.getByText(finalTitle, { exact: true })).toBeVisible();
// STEP 10: Expand details and open public server
const page1Promise = page.waitForEvent('popup');
await perfCollector.measureUserAction('expand-details', async () => {
await page.getByRole('heading', { name: 'Details', exact: true }).click();
});
await perfCollector.measureUserAction('open-public-server', async () => {
await page.getByRole('link', { name: 'View on the Public Server' }).click();
});
await page1Promise; // ensure the public-server popup actually opened
// STEP 11: Attach and validate performance data
const { webVitals } = await attachPerformanceData(testInfo, perfCollector);
const avgNavigationTime = perfCollector.navigationMetrics.reduce((sum, nav) =>
sum + nav.metrics.loadComplete, 0) / perfCollector.navigationMetrics.length;
assertPerformanceMetrics(webVitals, initialMetrics, avgNavigationTime);
});