performance-optimizations-testing #171
Open
anomalist
wants to merge 27 commits from performance-optimizations-testing
into master
20 changed files with 5506 additions and 482 deletions
@ -0,0 +1,31 @@ |
|||||
|
--- |
||||
|
alwaysApply: true |
||||
|
--- |
||||
|
# Building Guidelines |
||||
|
|
||||
|
## Configurations |
||||
|
|
||||
|
- The project supports builds using **Vite** for web and **Capacitor** for hybrid |
||||
|
apps. |
||||
|
- Capacitor is used for **iOS**, **Android**, and **Electron** targets. |
||||
|
- All builds support three modes: **development**, **testing**, and **production**. |
||||
|
|
||||
|
## Build Scripts |
||||
|
|
||||
|
- `build-web.sh` |
||||
|
- Builds a **web-only application**. |
||||
|
- Defaults to **development mode** unless overridden. |
||||
|
|
||||
|
- `build-ios.sh` |
||||
|
- Builds an **iOS hybrid native application** using Capacitor. |
||||
|
|
||||
|
- `build-android.sh` |
||||
|
- Builds an **Android hybrid native application** using Capacitor. |
||||
|
|
||||
|
- `build-electron.sh` |
||||
|
- Builds an **Electron hybrid desktop application** using Capacitor. |
||||
|
|
||||
|
## npm Scripts |
||||
|
|
||||
|
- npm scripts delegate to the `build-*` shell scripts. |
||||
|
- Parameter flags determine the **build mode** (`development`, `testing`, `production`). |
@ -0,0 +1,247 @@ |
|||||
|
#!/bin/zsh |
||||
|
|
||||
|
# Test Stability Runner Common Functions for TimeSafari (Zsh Version) |
||||
|
# Shared functionality for zsh test stability runners |
||||
|
# Author: Matthew Raymer |
||||
|
|
||||
|
set -euo pipefail |
||||
|
|
||||
|
# Configuration |
||||
|
TOTAL_RUNS=10 |
||||
|
RESULTS_DIR="test-stability-results" |
||||
|
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S") |
||||
|
LOG_FILE="${RESULTS_DIR}/stability-run-${TIMESTAMP}.log" |
||||
|
SUMMARY_FILE="${RESULTS_DIR}/stability-summary-${TIMESTAMP}.json" |
||||
|
FAILURE_LOG="${RESULTS_DIR}/failure-details-${TIMESTAMP}.log" |
||||
|
|
||||
|
# Colors for output |
||||
|
RED='\033[0;31m' |
||||
|
GREEN='\033[0;32m' |
||||
|
YELLOW='\033[1;33m' |
||||
|
BLUE='\033[0;34m' |
||||
|
CYAN='\033[0;36m' |
||||
|
MAGENTA='\033[0;35m' |
||||
|
NC='\033[0m' # No Color |
||||
|
|
||||
|
# Progress bar characters |
||||
|
PROGRESS_CHAR="█" |
||||
|
EMPTY_CHAR="░" |
||||
|
|
||||
|
# Initialize results tracking (zsh associative arrays) |
||||
|
typeset -A test_results |
||||
|
typeset -A test_failures |
||||
|
typeset -A test_successes |
||||
|
typeset -A run_times |
||||
|
typeset -A test_names |
||||
|
|
||||
|
# Create results directory |
||||
|
mkdir -p "${RESULTS_DIR}" |
||||
|
|
||||
|
# Logging functions |
||||
|
# --- Logging helpers -------------------------------------------------------
# Each helper prints a colour-coded, timestamped line to stdout and appends
# the identical line to ${LOG_FILE} via tee.

# Current wall-clock time in the log's timestamp format.
_ts_now() { date '+%Y-%m-%d %H:%M:%S'; }

log_info() {
    echo -e "${BLUE}[INFO]${NC} $(_ts_now) - $1" | tee -a "${LOG_FILE}"
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $(_ts_now) - $1" | tee -a "${LOG_FILE}"
}

log_warning() {
    echo -e "${YELLOW}[WARNING]${NC} $(_ts_now) - $1" | tee -a "${LOG_FILE}"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $(_ts_now) - $1" | tee -a "${LOG_FILE}"
}
||||
|
|
||||
|
# Function to extract test names from Playwright output |
||||
|
# List the unique spec-file names found in a Playwright run log.
# Matches passing lines such as:
#   "✓ 13 [chromium] › test-playwright/30-record-gift.spec.ts:84:5 › ..."
# and strips each down to e.g. "30-record-gift.spec.ts".
extract_test_names() {
    local log_file="$1"
    grep -E "✓.*test-playwright" "$log_file" \
        | sed -e 's/.*test-playwright\///' -e 's/:[0-9]*:[0-9]*.*$//' \
        | sort -u
}
||||
|
|
||||
|
# Function to check if test passed in a run |
||||
|
# Succeed (exit 0) iff the given spec has a passing "✓" entry in the
# captured run output; grep's own errors (e.g. missing file) are silenced.
test_passed_in_run() {
    local spec="$1"
    local log="$2"
    grep -q "✓.*test-playwright/$spec" "$log" 2>/dev/null
}
||||
|
|
||||
|
# Function to check if test failed in a run |
||||
|
# Succeed (exit 0) iff the given spec has a failing "✗" entry in the
# captured run output; grep's own errors (e.g. missing file) are silenced.
test_failed_in_run() {
    local spec="$1"
    local log="$2"
    grep -q "✗.*test-playwright/$spec" "$log" 2>/dev/null
}
||||
|
|
||||
|
# Function to get test duration |
||||
|
# Echo the first "<N>ms" duration found on or just after the ✓/✗ line for
# a spec, or "unknown" when none can be located.
get_test_duration() {
    local spec="$1"
    local log="$2"
    local ms
    ms=$(grep -A 1 "✓ $spec\|✗ $spec" "$log" | grep -o "[0-9]\+ms" | head -1)
    echo "${ms:-unknown}"
}
||||
|
|
||||
|
# Function to calculate percentage |
||||
|
# Integer percentage: $1 successes out of $2 attempts.
# Emits 0 when the denominator is 0, so callers never divide by zero.
calculate_percentage() {
    local hits="$1"
    local attempts="$2"
    if [ "$attempts" -eq 0 ]; then
        echo "0"
    else
        echo "$((hits * 100 / attempts))"
    fi
}
||||
|
|
||||
|
# Function to display progress bar |
||||
|
# Render a 50-column progress bar for run $1 of $2, overwriting the
# current terminal line.
#
# Fix: clamp a zero total to 1 before the arithmetic below — the previous
# version divided by zero when called with total=0.  This mirrors the
# guard already present in the bash variant of this helper.
show_progress() {
    local current="$1"
    local total="$2"

    # Avoid division by zero if invoked before any runs are configured.
    if [ "$total" -eq 0 ]; then
        total=1
    fi

    local percentage=$((current * 100 / total))
    local filled=$((current * 50 / total))
    local empty=$((50 - filled))

    local progress_bar=""
    for ((i=0; i<filled; i++)); do
        progress_bar+="$PROGRESS_CHAR"
    done
    for ((i=0; i<empty; i++)); do
        progress_bar+="$EMPTY_CHAR"
    done

    printf "\r%s [%d%%] (%d/%d)" "$progress_bar" "$percentage" "$current" "$total"
}
||||
|
|
||||
|
# Function to run a single test execution |
||||
|
# Execute one full "npm run test:web" pass, capture its output under
# ${RESULTS_DIR}, and record PASS/FAIL plus wall-clock duration in the
# shared associative arrays.  Returns 0 on pass, 1 on failure.
run_single_test() {
    local run_number="$1"
    local capture="${RESULTS_DIR}/run-${run_number}-output.txt"
    local started finished elapsed rc

    log_info "Starting run $run_number/$TOTAL_RUNS"

    started=$(date +%s)
    if npm run test:web > "$capture" 2>&1; then
        rc=0
    else
        rc=1
    fi
    finished=$(date +%s)
    elapsed=$((finished - started))
    run_times[$run_number]="$elapsed"

    if [ "$rc" -eq 0 ]; then
        test_results[$run_number]="PASS"
        test_successes[$run_number]="true"
        log_success "Run $run_number completed successfully in ${elapsed}s"
    else
        test_results[$run_number]="FAIL"
        test_failures[$run_number]="true"
        log_error "Run $run_number failed after ${elapsed}s"
    fi

    return "$rc"
}
||||
|
|
||||
|
# Function to generate summary report |
||||
|
# Build the JSON summary (${SUMMARY_FILE}) covering all $TOTAL_RUNS runs.
#
# Fixes over the previous version:
#  * Counters are bumped with `var=$((var + 1))` rather than `((var++))`.
#    A post-increment whose expression value is 0 (i.e. the very first
#    increment) exits with status 1, and under `set -euo pipefail` that
#    aborted the whole script on the first counted run.
#  * The per-run "timestamp" field previously passed the run *duration*
#    to `date -d @<seconds>`, treating a duration as an epoch time and
#    producing 1970-era dates (or 'unknown' where `date -d` is absent).
#    Per-run start times are not recorded anywhere, so the suite-level
#    $TIMESTAMP is emitted instead.
generate_summary_report() {
    log_info "Generating summary report..."

    local total_passes=0
    local total_failures=0
    local total_time=0

    # Tally pass/fail and accumulated wall-clock time across all runs.
    for run_number in $(seq 1 $TOTAL_RUNS); do
        if [[ "${test_results[$run_number]:-}" == "PASS" ]]; then
            total_passes=$((total_passes + 1))
        else
            total_failures=$((total_failures + 1))
        fi

        if [[ -n "${run_times[$run_number]:-}" ]]; then
            total_time=$((total_time + run_times[$run_number]))
        fi
    done

    local success_rate=$(calculate_percentage $total_passes $TOTAL_RUNS)
    local avg_time=$((total_time / TOTAL_RUNS))

    # Emit the summary header and per-run details as JSON.
    cat > "$SUMMARY_FILE" << EOF
{
  "timestamp": "$TIMESTAMP",
  "total_runs": $TOTAL_RUNS,
  "successful_runs": $total_passes,
  "failed_runs": $total_failures,
  "success_rate": $success_rate,
  "average_time_seconds": $avg_time,
  "total_time_seconds": $total_time,
  "run_details": {
EOF

    for run_number in $(seq 1 $TOTAL_RUNS); do
        # Trailing comma on every entry except the last keeps the JSON valid.
        local comma=""
        if [ "$run_number" -lt $TOTAL_RUNS ]; then
            comma=","
        fi

        cat >> "$SUMMARY_FILE" << EOF
    "run_$run_number": {
      "result": "${test_results[$run_number]:-unknown}",
      "duration_seconds": "${run_times[$run_number]:-unknown}",
      "timestamp": "$TIMESTAMP"
    }$comma
EOF
    done

    cat >> "$SUMMARY_FILE" << EOF
  }
}
EOF

    log_success "Summary report generated: $SUMMARY_FILE"
}
||||
|
|
||||
|
# Function to display final results |
||||
|
# Print the human-readable summary table to stdout and point the user at
# the generated artifacts.
#
# Fix: counters are bumped with `var=$((var + 1))` rather than `((var++))`.
# Under `set -euo pipefail`, `((var++))` returns status 1 when the
# pre-increment value is 0, aborting the script on the first iteration.
display_final_results() {
    echo
    echo "=========================================="
    echo "          TEST STABILITY RESULTS         "
    echo "=========================================="
    echo "Timestamp: $TIMESTAMP"
    echo "Total Runs: $TOTAL_RUNS"

    local total_passes=0
    local total_failures=0
    local total_time=0

    for run_number in $(seq 1 $TOTAL_RUNS); do
        if [[ "${test_results[$run_number]:-}" == "PASS" ]]; then
            total_passes=$((total_passes + 1))
        else
            total_failures=$((total_failures + 1))
        fi

        if [[ -n "${run_times[$run_number]:-}" ]]; then
            total_time=$((total_time + run_times[$run_number]))
        fi
    done

    local success_rate=$(calculate_percentage $total_passes $TOTAL_RUNS)
    local avg_time=$((total_time / TOTAL_RUNS))

    echo "Successful Runs: $total_passes"
    echo "Failed Runs: $total_failures"
    echo "Success Rate: ${success_rate}%"
    echo "Average Time: ${avg_time}s"
    echo "Total Time: ${total_time}s"
    echo "=========================================="
    echo
    echo "Detailed results saved to:"
    echo "  - Log: $LOG_FILE"
    echo "  - Summary: $SUMMARY_FILE"
    echo "  - Results directory: $RESULTS_DIR"
    echo
}
@ -0,0 +1,347 @@ |
|||||
|
#!/bin/bash |
||||
|
|
||||
|
# Test Stability Runner Common Functions for TimeSafari |
||||
|
# Shared functionality for all test stability runners |
||||
|
# Author: Matthew Raymer |
||||
|
|
||||
|
set -euo pipefail |
||||
|
|
||||
|
# Configuration |
||||
|
TOTAL_RUNS=10 |
||||
|
RESULTS_DIR="test-stability-results" |
||||
|
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S") |
||||
|
LOG_FILE="${RESULTS_DIR}/stability-run-${TIMESTAMP}.log" |
||||
|
SUMMARY_FILE="${RESULTS_DIR}/stability-summary-${TIMESTAMP}.json" |
||||
|
FAILURE_LOG="${RESULTS_DIR}/failure-details-${TIMESTAMP}.log" |
||||
|
|
||||
|
# Colors for output |
||||
|
RED='\033[0;31m' |
||||
|
GREEN='\033[0;32m' |
||||
|
YELLOW='\033[1;33m' |
||||
|
BLUE='\033[0;34m' |
||||
|
CYAN='\033[0;36m' |
||||
|
MAGENTA='\033[0;35m' |
||||
|
NC='\033[0m' # No Color |
||||
|
|
||||
|
# Progress bar characters |
||||
|
PROGRESS_CHAR="█" |
||||
|
EMPTY_CHAR="░" |
||||
|
|
||||
|
# Initialize results tracking (bash associative arrays) |
||||
|
declare -A test_results |
||||
|
declare -A test_failures |
||||
|
declare -A test_successes |
||||
|
declare -A run_times |
||||
|
declare -A test_names |
||||
|
|
||||
|
# Create results directory |
||||
|
mkdir -p "${RESULTS_DIR}" |
||||
|
|
||||
|
# Logging functions |
||||
|
# --- Logging ---------------------------------------------------------------
# All log helpers write "[LEVEL] <timestamp> - <message>" to stdout and
# append the identical line to ${LOG_FILE}.

# $1 = coloured level tag, $2 = message text.
_emit_log() {
    echo -e "$1 $(date '+%Y-%m-%d %H:%M:%S') - $2" | tee -a "${LOG_FILE}"
}

log_info()    { _emit_log "${BLUE}[INFO]${NC}" "$1"; }
log_success() { _emit_log "${GREEN}[SUCCESS]${NC}" "$1"; }
log_warning() { _emit_log "${YELLOW}[WARNING]${NC}" "$1"; }
log_error()   { _emit_log "${RED}[ERROR]${NC}" "$1"; }
||||
|
|
||||
|
# Function to extract test names from Playwright output |
||||
|
# Print the unique spec-file names appearing as passing ("✓") entries in a
# Playwright run log, e.g. "30-record-gift.spec.ts".
extract_test_names() {
    local log_file="$1"
    grep -E "✓.*test-playwright" "$log_file" \
        | sed -e 's/.*test-playwright\///' -e 's/:[0-9]*:[0-9]*.*$//' \
        | sort -u
}
||||
|
|
||||
|
# Function to check if test passed in a run |
||||
|
# Exit 0 iff the spec named in $1 has a passing "✓" line in the run
# output file $2; grep's own errors are suppressed.
test_passed_in_run() {
    local spec="$1"
    local log="$2"
    grep -q "✓.*test-playwright/$spec" "$log" 2>/dev/null
}
||||
|
|
||||
|
# Function to check if test failed in a run |
||||
|
# Exit 0 iff the spec named in $1 has a failing "✗" line in the run
# output file $2; grep's own errors are suppressed.
test_failed_in_run() {
    local spec="$1"
    local log="$2"
    grep -q "✗.*test-playwright/$spec" "$log" 2>/dev/null
}
||||
|
|
||||
|
# Function to get test duration |
||||
|
# Echo the first "<N>ms" duration found on or just after the ✓/✗ line for
# a spec, or "unknown" when none is present.
get_test_duration() {
    local spec="$1"
    local log="$2"
    local ms
    ms=$(grep -A 1 "✓ $spec\|✗ $spec" "$log" | grep -o "[0-9]\+ms" | head -1)
    echo "${ms:-unknown}"
}
||||
|
|
||||
|
# Function to calculate percentage |
||||
|
# Integer percentage: $1 successes out of $2 attempts.
# Emits 0 when the denominator is 0, so callers never divide by zero.
calculate_percentage() {
    local hits="$1"
    local attempts="$2"
    if [ "$attempts" -eq 0 ]; then
        echo "0"
    else
        echo "$((hits * 100 / attempts))"
    fi
}
||||
|
|
||||
|
# Function to display progress bar |
||||
|
# Draw a configurable progress bar, overwriting the current terminal line.
#   $1 current count      $2 total count
#   $3 bar width in columns (default 50)
#   $4 label shown before the bar (default "Progress")
# Non-numeric arguments are ignored silently; a zero total is clamped to 1
# so the percentage arithmetic can never divide by zero.
show_progress() {
    local current="$1"
    local total="$2"
    local width="${3:-50}"
    local label="${4:-Progress}"

    # Bail out silently on malformed (non-numeric) input.
    local numeric='^[0-9]+$'
    if [[ ! "$current" =~ $numeric ]] || [[ ! "$total" =~ $numeric ]] || [[ ! "$width" =~ $numeric ]]; then
        return
    fi

    if [ "$total" -eq 0 ]; then
        total=1
    fi

    local percentage=$((current * 100 / total))
    local filled=$((current * width / total))

    local bar=""
    local i
    for ((i = 0; i < filled; i++)); do bar+="$PROGRESS_CHAR"; done
    for ((i = filled; i < width; i++)); do bar+="$EMPTY_CHAR"; done

    # Carriage return (no newline) so successive calls overwrite in place.
    printf "\r${CYAN}[%s]${NC} %s [%s] %d%% (%d/%d)" \
        "$label" "$bar" "$percentage" "$current" "$total"
}
||||
|
|
||||
|
# Function to clear progress bar line |
||||
|
# Blank the current terminal line (call before printing normal output on
# top of a progress bar).
clear_progress() {
    local cols
    cols=$(tput cols)
    printf "\r%*s\r" "$cols" ""
}
||||
|
|
||||
|
# Function to track test execution progress |
||||
|
# Log the start of a run and refresh the suite-level progress bar.
#   $1 = run number, $2 = description of what is being executed.
track_test_progress() {
    local run_number="$1"
    local test_file="$2"

    log_info "Run $run_number/$TOTAL_RUNS: Executing $test_file"
    show_progress "$run_number" "$TOTAL_RUNS" 50 "Test Run"
}
||||
|
|
||||
|
# Function to run a single test execution |
||||
|
# Run one complete "npm run test:playwright" pass and fold its per-test
# results into the shared associative arrays (test_names, test_successes,
# test_failures, test_results, run_times).  Returns 0 on pass, 1 on fail.
#
# Improvements:
#  * `local name=$(cmd)` split into declaration + assignment (ShellCheck
#    SC2155): the combined form makes `local`'s exit status mask the
#    substitution's, silently defeating `set -euo pipefail`.
#  * extract_test_names is allowed to fail (`|| true`) on the success
#    path as well, matching the failure path: a passing run whose log has
#    no matching "✓" lines would otherwise trip pipefail once the
#    assignment no longer masks it.
#
# NOTE(review): test_results[<spec>] records only the *latest* run's
# status for that spec; cumulative counts live in test_successes/
# test_failures.
run_single_test() {
    local run_number="$1"
    local run_output="${RESULTS_DIR}/run-${run_number}.txt"
    local start_time
    start_time=$(date +%s)

    log_info "Starting test run $run_number/$TOTAL_RUNS"

    if npm run test:playwright > "$run_output" 2>&1; then
        local end_time duration
        end_time=$(date +%s)
        duration=$((end_time - start_time))
        run_times[$run_number]=$duration

        log_success "Test run $run_number completed successfully in ${duration}s"

        # Tally per-test pass/fail counts from the captured log.
        local test_names_list
        test_names_list=$(extract_test_names "$run_output" 2>/dev/null || true)
        for test_name in $test_names_list; do
            if test_passed_in_run "$test_name" "$run_output"; then
                test_successes[$test_name]=$((${test_successes[$test_name]:-0} + 1))
                test_results[$test_name]="pass"
            elif test_failed_in_run "$test_name" "$run_output"; then
                test_failures[$test_name]=$((${test_failures[$test_name]:-0} + 1))
                test_results[$test_name]="fail"
            fi
            test_names[$test_name]=1
        done

        return 0
    else
        local end_time duration
        end_time=$(date +%s)
        duration=$((end_time - start_time))
        run_times[$run_number]=$duration

        log_error "Test run $run_number failed after ${duration}s"

        # Even a failed run usually has partial results worth counting.
        local test_names_list
        test_names_list=$(extract_test_names "$run_output" 2>/dev/null || true)
        for test_name in $test_names_list; do
            test_names[$test_name]=1
            if test_failed_in_run "$test_name" "$run_output"; then
                test_failures[$test_name]=$((${test_failures[$test_name]:-0} + 1))
                test_results[$test_name]="fail"
            fi
        done

        return 1
    fi
}
||||
|
|
||||
|
# Function to generate summary report |
||||
|
# Emit the machine-readable JSON summary to $SUMMARY_FILE.
# Classifies every spec seen in any run as always-passing, always-failing
# or intermittent, and computes an overall success rate across the
# TOTAL_RUNS * total_tests executions.
generate_summary_report() {
    log_info "Generating summary report..."

    local total_tests=0
    local always_passing=0
    local always_failing=0
    local intermittent=0
    local test_name passes fails total

    # Classify every spec observed across the runs.
    for test_name in "${!test_names[@]}"; do
        total_tests=$((total_tests + 1))
        passes=${test_successes[$test_name]:-0}
        fails=${test_failures[$test_name]:-0}
        total=$((passes + fails))

        if [ "$fails" -eq 0 ]; then
            always_passing=$((always_passing + 1))
        elif [ "$passes" -eq 0 ]; then
            always_failing=$((always_failing + 1))
        else
            intermittent=$((intermittent + 1))
        fi
    done

    # Overall success rate across every (run x test) execution.
    local total_runs=$((TOTAL_RUNS * total_tests))
    local total_successes=0
    for passes in "${test_successes[@]}"; do
        total_successes=$((total_successes + passes))
    done
    local overall_success_rate=0
    if [ "$total_runs" -gt 0 ]; then
        overall_success_rate=$((total_successes * 100 / total_runs))
    fi

    # JSON header.
    cat > "$SUMMARY_FILE" << EOF
{
  "timestamp": "$(date -Iseconds)",
  "total_runs": $TOTAL_RUNS,
  "test_results": {
EOF

    # One JSON object per spec; a "," line is written before every entry
    # except the first to keep the document valid.
    local first=true
    local success_rate
    for test_name in "${!test_names[@]}"; do
        passes=${test_successes[$test_name]:-0}
        fails=${test_failures[$test_name]:-0}
        total=$((passes + fails))
        success_rate=$(calculate_percentage "$passes" "$total")

        if [ "$first" = true ]; then
            first=false
        else
            echo "," >> "$SUMMARY_FILE"
        fi

        cat >> "$SUMMARY_FILE" << EOF
    "$test_name": {
      "passes": $passes,
      "failures": $fails,
      "total": $total,
      "success_rate": $success_rate,
      "status": "${test_results[$test_name]:-unknown}"
    }
EOF
    done

    # Close the per-test map and append the aggregate statistics.
    cat >> "$SUMMARY_FILE" << EOF
  },
  "summary_stats": {
    "total_tests": $total_tests,
    "always_passing": $always_passing,
    "always_failing": $always_failing,
    "intermittent": $intermittent,
    "overall_success_rate": $overall_success_rate
  }
}
EOF

    log_success "Summary report generated: $SUMMARY_FILE"
}
||||
|
|
||||
|
# Function to display final results |
||||
|
# Print the human-readable stability report and point the user at the
# generated artifacts.
display_final_results() {
    clear_progress
    echo
    log_info "=== TEST STABILITY ANALYSIS COMPLETE ==="
    echo

    local total_tests=${#test_names[@]}
    local always_passing=0
    local always_failing=0
    local intermittent=0
    local test_name passes fails

    # Bucket every spec by its observed stability.
    for test_name in "${!test_names[@]}"; do
        passes=${test_successes[$test_name]:-0}
        fails=${test_failures[$test_name]:-0}

        if [ "$fails" -eq 0 ]; then
            always_passing=$((always_passing + 1))
        elif [ "$passes" -eq 0 ]; then
            always_failing=$((always_failing + 1))
        else
            intermittent=$((intermittent + 1))
        fi
    done

    echo -e "${GREEN}✅ Always Passing: $always_passing tests${NC}"
    echo -e "${RED}❌ Always Failing: $always_failing tests${NC}"
    echo -e "${YELLOW}⚠️  Intermittent: $intermittent tests${NC}"
    echo -e "${BLUE}📊 Total Tests: $total_tests${NC}"
    echo

    # Flaky specs get listed with their observed success rate.
    if [ "$intermittent" -gt 0 ]; then
        log_warning "Intermittent tests (require investigation):"
        for test_name in "${!test_names[@]}"; do
            passes=${test_successes[$test_name]:-0}
            fails=${test_failures[$test_name]:-0}

            if [ "$passes" -gt 0 ] && [ "$fails" -gt 0 ]; then
                local success_rate
                success_rate=$(calculate_percentage "$passes" "$((passes + fails))")
                echo -e "  ${YELLOW}$test_name: $success_rate% success rate${NC}"
            fi
        done
        echo
    fi

    # Consistently failing specs are called out separately.
    if [ "$always_failing" -gt 0 ]; then
        log_error "Always failing tests (require immediate attention):"
        for test_name in "${!test_names[@]}"; do
            passes=${test_successes[$test_name]:-0}
            fails=${test_failures[$test_name]:-0}

            if [ "$passes" -eq 0 ] && [ "$fails" -gt 0 ]; then
                echo -e "  ${RED}$test_name: 0% success rate${NC}"
            fi
        done
        echo
    fi

    log_info "Detailed results saved to:"
    echo -e "  ${BLUE}Summary: $SUMMARY_FILE${NC}"
    echo -e "  ${BLUE}Log: $LOG_FILE${NC}"
    echo -e "  ${BLUE}Results directory: $RESULTS_DIR${NC}"
}
@ -0,0 +1,118 @@ |
|||||
|
#!/bin/bash |
||||
|
|
||||
|
# Test Stability Runner for TimeSafari (Simple Version) |
||||
|
# Executes the full test suite 10 times and analyzes failure patterns |
||||
|
# Author: Matthew Raymer |
||||
|
|
||||
|
# Source common functions |
||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" |
||||
|
source "${SCRIPT_DIR}/test-stability-common.sh" |
||||
|
|
||||
|
# Override summary file to use text format instead of JSON |
||||
|
SUMMARY_FILE="${RESULTS_DIR}/stability-summary-${TIMESTAMP}.txt" |
||||
|
|
||||
|
# Function to generate simple text summary |
||||
|
# Write a plain-text (non-JSON) summary of all runs to $SUMMARY_FILE,
# including per-spec pass/fail tallies and an overall success rate.
generate_simple_summary() {
    log_info "Generating simple text summary..."

    local total_tests=0
    local always_passing=0
    local always_failing=0
    local intermittent=0
    local test_name passes fails total

    # Classify every spec observed across the runs.
    for test_name in "${!test_names[@]}"; do
        total_tests=$((total_tests + 1))
        passes=${test_successes[$test_name]:-0}
        fails=${test_failures[$test_name]:-0}
        total=$((passes + fails))

        if [ "$fails" -eq 0 ]; then
            always_passing=$((always_passing + 1))
        elif [ "$passes" -eq 0 ]; then
            always_failing=$((always_failing + 1))
        else
            intermittent=$((intermittent + 1))
        fi
    done

    # Overall success rate across every (run x test) execution.
    local total_runs=$((TOTAL_RUNS * total_tests))
    local total_successes=0
    for passes in "${test_successes[@]}"; do
        total_successes=$((total_successes + passes))
    done
    local overall_success_rate=0
    if [ "$total_runs" -gt 0 ]; then
        overall_success_rate=$((total_successes * 100 / total_runs))
    fi

    # Header plus aggregate statistics.
    cat > "$SUMMARY_FILE" << EOF
TimeSafari Test Stability Summary
================================

Generated: $(date)
Total Runs: $TOTAL_RUNS
Total Tests: $total_tests

Summary Statistics:
- Always Passing: $always_passing tests
- Always Failing: $always_failing tests
- Intermittent: $intermittent tests
- Overall Success Rate: $overall_success_rate%

Individual Test Results:
EOF

    # One stanza per spec.
    local success_rate
    for test_name in "${!test_names[@]}"; do
        passes=${test_successes[$test_name]:-0}
        fails=${test_failures[$test_name]:-0}
        total=$((passes + fails))
        success_rate=$(calculate_percentage "$passes" "$total")

        cat >> "$SUMMARY_FILE" << EOF
$test_name:
  Passes: $passes
  Failures: $fails
  Total: $total
  Success Rate: $success_rate%
  Status: ${test_results[$test_name]:-unknown}
EOF
    done

    log_success "Simple summary generated: $SUMMARY_FILE"
}
||||
|
|
||||
|
# Main execution function |
||||
|
# Drive TOTAL_RUNS sequential suite executions, then write the plain-text
# summary and the final console report.
main() {
    log_info "Starting simple test stability analysis with $TOTAL_RUNS runs"
    log_info "Results will be saved to: $RESULTS_DIR"
    echo

    local run_number
    for run_number in $(seq 1 "$TOTAL_RUNS"); do
        track_test_progress "$run_number" "test suite"

        if run_single_test "$run_number"; then
            log_success "Run $run_number completed successfully"
        else
            log_warning "Run $run_number failed, continuing with remaining runs"
        fi

        # Brief pause between runs so the system can settle.
        if [ "$run_number" -lt "$TOTAL_RUNS" ]; then
            sleep 2
        fi
    done

    generate_simple_summary
    display_final_results

    log_success "Simple test stability analysis complete!"
}
||||
|
|
||||
|
# Run main function |
||||
|
main "$@" |
@ -0,0 +1,41 @@ |
|||||
|
#!/bin/bash |
||||
|
|
||||
|
# Test Stability Runner for TimeSafari |
||||
|
# Executes the full test suite 10 times and analyzes failure patterns |
||||
|
# Author: Matthew Raymer |
||||
|
|
||||
|
# Source common functions |
||||
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" |
||||
|
source "${SCRIPT_DIR}/test-stability-common.sh" |
||||
|
|
||||
|
# Main execution function |
||||
|
# Drive TOTAL_RUNS sequential suite executions, then produce the JSON
# summary and the final console report.
main() {
    log_info "Starting test stability analysis with $TOTAL_RUNS runs"
    log_info "Results will be saved to: $RESULTS_DIR"
    echo

    local run_number
    for run_number in $(seq 1 "$TOTAL_RUNS"); do
        track_test_progress "$run_number" "test suite"

        if run_single_test "$run_number"; then
            log_success "Run $run_number completed successfully"
        else
            log_warning "Run $run_number failed, continuing with remaining runs"
        fi

        # Brief pause between runs so the system can settle.
        if [ "$run_number" -lt "$TOTAL_RUNS" ]; then
            sleep 2
        fi
    done

    generate_summary_report
    display_final_results

    log_success "Test stability analysis complete!"
}
||||
|
|
||||
|
# Run main function |
||||
|
main "$@" |
@ -0,0 +1,89 @@ |
|||||
|
#!/bin/zsh |
||||
|
|
||||
|
# Test Stability Runner for TimeSafari (Zsh Version) |
||||
|
# Executes the full test suite 10 times and analyzes failure patterns |
||||
|
# Author: Matthew Raymer |
||||
|
|
||||
|
# Source common functions |
||||
|
SCRIPT_DIR="$(dirname "$0")" |
||||
|
source "${SCRIPT_DIR}/test-stability-common-zsh.sh" |
||||
|
|
||||
|
# Zsh-specific overrides and enhancements |
||||
|
# Note: Associative arrays are now defined in the common file |
||||
|
|
||||
|
# Enhanced progress tracking for zsh |
||||
|
# Zsh-flavoured progress line: a cyan [ZSH] tag, a 50-character bar,
# percentage, run counter, and the current test label in magenta.
track_test_progress_enhanced() {
    local run_number="$1"
    local test_file="$2"

    log_info "Run $run_number/$TOTAL_RUNS: Executing $test_file"

    local pct=$((run_number * 100 / TOTAL_RUNS))
    local filled=$((run_number * 50 / TOTAL_RUNS))

    local bar=""
    local i
    for ((i = 0; i < filled; i++)); do bar+="$PROGRESS_CHAR"; done
    for ((i = filled; i < 50; i++)); do bar+="$EMPTY_CHAR"; done

    # Carriage return (no newline) so successive calls overwrite in place.
    printf "\r${CYAN}[ZSH]${NC} %s [%d%%] (%d/%d) ${MAGENTA}%s${NC}" \
        "$bar" "$pct" "$run_number" "$TOTAL_RUNS" "$test_file"
}
||||
|
|
||||
|
# Enhanced error handling for zsh |
||||
|
# ERR-trap handler: report the failing line number ($1, supplied as
# $LINENO by the trap) and the command's exit code.
handle_zsh_error() {
    # $? must be captured by the very first statement, before any other
    # command can overwrite it.
    local error_code=$?
    local error_line="$1"

    if [ "$error_code" -ne 0 ]; then
        log_error "Zsh error occurred at line $error_line (exit code: $error_code)"
        # Additional zsh-specific error handling can be added here
    fi
}
||||
|
|
||||
|
# Set up zsh error handling |
||||
|
trap 'handle_zsh_error $LINENO' ERR |
||||
|
|
||||
|
# Main execution function with zsh enhancements |
||||
|
# Drive TOTAL_RUNS sequential suite executions with enhanced progress
# output, then produce the JSON summary and final console report.
main() {
    log_info "Starting enhanced test stability analysis with $TOTAL_RUNS runs (Zsh Version)"
    log_info "Results will be saved to: $RESULTS_DIR"
    echo

    local run_number
    for run_number in $(seq 1 "$TOTAL_RUNS"); do
        track_test_progress_enhanced "$run_number" "test suite"

        if run_single_test "$run_number"; then
            log_success "Run $run_number completed successfully"
        else
            log_warning "Run $run_number failed, continuing with remaining runs"
        fi

        # Two-second cool-down with a visible countdown between runs.
        if [ "$run_number" -lt "$TOTAL_RUNS" ]; then
            local i
            for i in 1 2; do
                printf "\r${YELLOW}Waiting...${NC} %d/2" "$i"
                sleep 1
            done
            printf "\r%*s\r" "$(tput cols)" ""
        fi
    done

    generate_summary_report
    display_final_results

    log_success "Enhanced test stability analysis complete! (Zsh Version)"
}
||||
|
|
||||
|
# Run main function |
||||
|
main "$@" |
@ -1,122 +1,492 @@ |
|||||
/** |
/** |
||||
* @file Gift Recording Test Suite |
* @file Gift Recording Test Suite |
||||
* @description Tests TimeSafari's core gift recording functionality, ensuring proper creation, |
* @description Tests TimeSafari's core gift recording functionality with integrated performance tracking |
||||
* validation, and verification of gift records |
* |
||||
* |
* This test covers a complete gift recording flow in TimeSafari with integrated performance tracking. |
||||
* This test verifies: |
* |
||||
* 1. Gift Creation |
* Focus areas: |
||||
* - Random gift title generation |
* - Performance monitoring for every major user step |
||||
* - Random non-zero amount assignment |
* - Gift creation, recording, and verification |
||||
* - Proper recording and signing |
* - Public server integration and validation |
||||
* |
* - Validation of both behavior and responsiveness |
||||
* 2. Gift Verification |
* |
||||
* - Gift appears in home view |
* @version 1.0.0 |
||||
* - Details match input data |
* @author Matthew Raymer |
||||
* - Verifiable claim details accessible |
* @lastModified 2025-08-02 |
||||
* |
* |
||||
* 3. Public Verification |
* ================================================================================ |
||||
* - Gift viewable on public server |
* TEST OVERVIEW |
||||
* - Claim details properly exposed |
* ================================================================================ |
||||
* |
* |
||||
* Test Flow: |
* This test verifies the complete gift recording workflow from data generation to |
||||
* 1. Data Generation |
* public verification, ensuring end-to-end functionality works correctly with |
||||
* - Generate random 4-char string for unique gift ID |
* comprehensive performance monitoring. |
||||
* - Generate random amount (1-99) |
* |
||||
* - Combine with standard "Gift" prefix |
* Core Test Objectives: |
||||
* |
* 1. Gift Creation & Recording |
||||
* 2. Gift Recording |
* - Random gift title generation with uniqueness |
||||
* - Import User 00 (test account) |
* - Random non-zero amount assignment (1-99 range) |
||||
* - Navigate to home |
* - Proper form filling and validation |
||||
* - Close onboarding dialog |
* - JWT signing and submission with performance tracking |
||||
* - Select recipient |
* |
||||
* - Fill gift details |
* 2. Gift Verification & Display |
||||
* - Sign and submit |
* - Gift appears in home view after recording |
||||
* |
* - Details match input data exactly |
||||
* 3. Verification |
* - Verifiable claim details are accessible |
||||
* - Check success notification |
* - UI elements display correctly |
||||
* - Refresh home view |
* |
||||
* - Locate gift in list |
* 3. Public Verification & Integration |
||||
* - Verify gift details |
* - Gift viewable on public endorser server |
||||
* - Check public server view |
* - Claim details properly exposed via API |
||||
* |
* - Cross-platform compatibility (Chromium/Firefox) |
||||
* Test Data: |
* |
||||
* - Gift Title: "Gift [4-char-random]" |
* ================================================================================ |
||||
* - Amount: Random 1-99 |
* TEST FLOW & PROCESS |
||||
* - Recipient: "Unnamed/Unknown" |
* ================================================================================ |
||||
|
* |
||||
|
* Phase 1: Data Generation & Preparation |
||||
|
* ──────────────────────────────────────────────────────────────────────────────── |
||||
|
* 1. Generate unique test data: |
||||
|
* - Random 4-character string for gift ID uniqueness |
||||
|
* - Random amount between 1-99 (non-zero validation) |
||||
|
* - Combine with "Gift " prefix for standard format |
||||
|
* |
||||
|
* 2. User preparation: |
||||
|
* - Import User 00 (test account with known state) |
||||
|
* - Navigate to home page |
||||
|
* - Handle onboarding dialog closure |
||||
|
* |
||||
|
* Phase 2: Gift Recording Process |
||||
|
* ──────────────────────────────────────────────────────────────────────────────── |
||||
|
* 3. Recipient selection: |
||||
|
* - Click "Person" button to open recipient picker |
||||
|
* - Select "Unnamed/Unknown" recipient |
||||
|
* - Verify selection is applied |
||||
|
* |
||||
|
* 4. Gift details entry: |
||||
|
* - Fill gift title with generated unique string |
||||
|
* - Enter random amount in number field |
||||
|
* - Validate form state before submission |
||||
|
* |
||||
|
* 5. Submission and signing: |
||||
|
* - Click "Sign & Send" button |
||||
|
* - Wait for JWT signing process |
||||
|
* - Verify success notification appears |
||||
|
* - Dismiss any info alerts |
||||
|
* |
||||
|
* Phase 3: Verification & Validation |
||||
|
* ──────────────────────────────────────────────────────────────────────────────── |
||||
|
* 6. Home view verification: |
||||
|
* - Refresh home page to load new gift |
||||
|
* - Locate gift in activity list by title |
||||
|
* - Click info link to view details |
||||
|
* |
||||
|
* 7. Details verification: |
||||
|
* - Verify "Verifiable Claim Details" heading |
||||
|
* - Confirm gift title matches exactly |
||||
|
* - Expand Details section for extended info |
||||
|
* |
||||
|
* 8. Public server integration: |
||||
|
* - Click "View on Public Server" link |
||||
|
* - Verify popup opens with correct URL |
||||
|
* - Validate public server accessibility |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* TEST DATA SPECIFICATIONS |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* Gift Title Format: "Gift [4-char-random]" |
||||
|
* - Prefix: "Gift " (with space) |
||||
|
* - Random component: 4-character alphanumeric string |
||||
|
* - Example: "Gift a7b3", "Gift x9y2" |
||||
|
* |
||||
|
* Amount Range: 1-99 (inclusive) |
||||
|
* - Minimum: 1 (non-zero validation) |
||||
|
* - Maximum: 99 (reasonable upper bound) |
||||
|
* - Type: Integer only |
||||
|
* - Example: 42, 7, 99 |
||||
* |
* |
||||
* Key Selectors: |
* Recipient: "Unnamed/Unknown" |
||||
* - Gift title: '[data-testid="giftTitle"]' |
* - Standard test recipient |
||||
* - Amount input: 'input[type="number"]' |
* - No specific DID or contact info |
||||
|
* - Used for all test gifts |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* SELECTOR REFERENCE |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* Form Elements: |
||||
|
* - Gift title input: '[data-testid="giftTitle"]' or 'input[placeholder="What was given"]' |
||||
|
* - Amount input: 'input[type="number"]' or 'input[role="spinbutton"]' |
||||
* - Submit button: 'button[name="Sign & Send"]' |
* - Submit button: 'button[name="Sign & Send"]' |
||||
* - Success alert: 'div[role="alert"]' |
* - Person button: 'button[name="Person"]' |
||||
* - Details section: 'h2[name="Details"]' |
* - Recipient list: 'ul[role="listbox"]' |
||||
* |
* |
||||
* Alert Handling: |
* Navigation & UI: |
||||
* - Closes onboarding dialog |
* - Onboarding close: '[data-testid="closeOnboardingAndFinish"]' |
||||
* - Verifies success message |
* - Home page: './' (relative URL) |
||||
* - Dismisses info alerts |
* - Alert dismissal: 'div[role="alert"] button > svg.fa-xmark' |
||||
* |
* - Success message: 'text="That gift was recorded."' |
||||
* State Requirements: |
* |
||||
* - Clean database state |
* Verification Elements: |
||||
* - User 00 imported |
* - Gift list item: 'li:first-child' (filtered by title) |
||||
* - Available API rate limits |
* - Info link: '[data-testid="circle-info-link"]' |
||||
* |
* - Details heading: 'h2[name="Verifiable Claim Details"]' |
||||
* Related Files: |
* - Details section: 'h2[name="Details", exact="true"]' |
||||
* - Gift recording view: src/views/RecordGiftView.vue |
* - Public server link: 'a[name="View on the Public Server"]' |
||||
* - JWT creation: sw_scripts/safari-notifications.js |
* |
||||
* - Endorser API: src/libs/endorserServer.ts |
* ================================================================================ |
||||
* |
* ERROR HANDLING & DEBUGGING |
||||
* @see Documentation in usage-guide.md for gift recording workflows |
* ================================================================================ |
||||
* @requires @playwright/test |
* |
||||
* @requires ./testUtils - For user management utilities |
* Common Failure Points: |
||||
* |
* 1. Onboarding Dialog |
||||
* @example Basic gift recording |
* - Issue: Dialog doesn't close properly |
||||
* ```typescript
|
* - Debug: Check if closeOnboardingAndFinish button exists |
||||
* await page.getByPlaceholder('What was given').fill('Gift abc123'); |
* - Fix: Add wait for dialog to be visible before clicking |
||||
* await page.getByRole('spinbutton').fill('42'); |
* |
||||
* await page.getByRole('button', { name: 'Sign & Send' }).click(); |
* 2. Recipient Selection |
||||
* await expect(page.getByText('That gift was recorded.')).toBeVisible(); |
* - Issue: "Unnamed" recipient not found |
||||
|
* - Debug: Check if recipient list is populated |
||||
|
* - Fix: Add wait for list to load before filtering |
||||
|
* |
||||
|
* 3. Form Submission |
||||
|
* - Issue: "Sign & Send" button not clickable |
||||
|
* - Debug: Check if form is valid and all fields filled |
||||
|
* - Fix: Add validation before submission |
||||
|
* |
||||
|
* 4. Success Verification |
||||
|
* - Issue: Success message doesn't appear |
||||
|
* - Debug: Check network requests and JWT signing |
||||
|
* - Fix: Add longer timeout for signing process |
||||
|
* |
||||
|
* 5. Home View Refresh |
||||
|
* - Issue: Gift doesn't appear in list |
||||
|
* - Debug: Check if gift was actually recorded |
||||
|
* - Fix: Add wait for home view to reload |
||||
|
* |
||||
|
* 6. Public Server Integration |
||||
|
* - Issue: Popup doesn't open or wrong URL |
||||
|
* - Debug: Check if public server is accessible |
||||
|
* - Fix: Verify endorser server configuration |
||||
|
* |
||||
|
* Debugging Commands: |
||||
|
* ```bash
|
||||
|
* # Run with trace for detailed debugging |
||||
|
* npx playwright test 30-record-gift.spec.ts --trace on |
||||
|
* |
||||
|
* # Run with headed browser for visual debugging |
||||
|
* npx playwright test 30-record-gift.spec.ts --headed |
||||
|
* |
||||
|
* # Run with slow motion for step-by-step debugging |
||||
|
* npx playwright test 30-record-gift.spec.ts --debug |
||||
|
* ``` |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* BROWSER COMPATIBILITY |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* Tested Browsers: |
||||
|
* - Chromium: Primary target, full functionality |
||||
|
* - Firefox: Secondary target, may have timing differences |
||||
|
* |
||||
|
* Browser-Specific Considerations: |
||||
|
* - Firefox: May require longer timeouts for form interactions |
||||
|
* - Chromium: Generally faster, more reliable |
||||
|
* - Both: Popup handling may differ slightly |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* PERFORMANCE CONSIDERATIONS |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* Expected Timings: |
||||
|
* - Data generation: < 1ms |
||||
|
* - User import: 2-5 seconds |
||||
|
* - Form filling: 1-2 seconds |
||||
|
* - JWT signing: 3-8 seconds |
||||
|
* - Home refresh: 2-4 seconds |
||||
|
* - Public server: 1-3 seconds |
||||
|
* |
||||
|
* Total expected runtime: 10-20 seconds |
||||
|
* |
||||
|
* Performance Monitoring: |
||||
|
* - Monitor JWT signing time (most variable) |
||||
|
* - Track home view refresh time |
||||
|
* - Watch for memory leaks in popup handling |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* MAINTENANCE GUIDELINES |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* When Modifying This Test: |
||||
|
* 1. Update version number and lastModified date |
||||
|
* 2. Test on both Chromium and Firefox |
||||
|
* 3. Verify with different random data sets |
||||
|
* 4. Check that public server integration still works |
||||
|
* 5. Update selector references if UI changes |
||||
|
* |
||||
|
* Related Files to Monitor: |
||||
|
* - src/views/RecordGiftView.vue (gift recording UI) |
||||
|
* - src/views/HomeView.vue (gift display) |
||||
|
* - sw_scripts/safari-notifications.js (JWT signing) |
||||
|
* - src/libs/endorserServer.ts (API integration) |
||||
|
* - test-playwright/testUtils.ts (user management) |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* INTEGRATION POINTS |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* Dependencies: |
||||
|
* - User 00 must be available in test data |
||||
|
* - Endorser server must be running and accessible |
||||
|
* - Public server must be configured correctly |
||||
|
* - JWT signing must be functional |
||||
|
* |
||||
|
* API Endpoints Used: |
||||
|
* - POST /api/claims (gift recording) |
||||
|
* - GET /api/claims (public verification) |
||||
|
* - WebSocket connections for real-time updates |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* SECURITY CONSIDERATIONS |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* Test Data Security: |
||||
|
* - Random data prevents test interference |
||||
|
* - No sensitive information in test gifts |
||||
|
* - Public server verification is read-only |
||||
|
* |
||||
|
* JWT Handling: |
||||
|
* - Test uses test user credentials |
||||
|
* - Signing process is isolated |
||||
|
* - No production keys used |
||||
|
* |
||||
|
* ================================================================================ |
||||
|
* RELATED DOCUMENTATION |
||||
|
* ================================================================================ |
||||
|
* |
||||
|
* @see test-playwright/testUtils.ts - User management utilities |
||||
|
* @see test-playwright/README.md - General testing guidelines |
||||
|
* @see docs/user-guides/gift-recording.md - User workflow documentation |
||||
|
* @see src/views/RecordGiftView.vue - Implementation details |
||||
|
* @see sw_scripts/safari-notifications.js - JWT signing implementation |
||||
|
* |
||||
|
* @example Complete test execution |
||||
|
* ```bash
|
||||
|
* # Run this specific test |
||||
|
* npx playwright test 30-record-gift.spec.ts |
||||
|
* |
||||
|
* # Run with detailed output |
||||
|
* npx playwright test 30-record-gift.spec.ts --reporter=list |
||||
|
* |
||||
|
* # Run in headed mode for debugging |
||||
|
* npx playwright test 30-record-gift.spec.ts --headed |
||||
* ``` |
* ``` |
||||
*/ |
*/ |
||||
import { test, expect } from '@playwright/test'; |
import { test, expect } from '@playwright/test'; |
||||
import { importUser } from './testUtils'; |
import { importUserFromAccount } from './testUtils'; |
||||
|
import { |
||||
|
createPerformanceCollector, |
||||
|
attachPerformanceData, |
||||
|
assertPerformanceMetrics |
||||
|
} from './performanceUtils'; |
||||
|
|
||||
test('Record something given', async ({ page }) => { |
/** |
||||
// Generate a random string of a few characters
|
* @test Record something given |
||||
const randomString = Math.random().toString(36).substring(2, 6); |
* @description End-to-end test of gift recording functionality with performance tracking |
||||
|
* @tags gift-recording, e2e, user-workflow, performance |
||||
|
* @timeout 45000ms (45 seconds for JWT signing and API calls) |
||||
|
* |
||||
|
* @process |
||||
|
* 1. Generate unique test data |
||||
|
* 2. Import test user and navigate to home |
||||
|
* 3. Record gift with random title and amount |
||||
|
* 4. Verify gift appears in home view |
||||
|
* 5. Check public server integration |
||||
|
* |
||||
|
* @data |
||||
|
* - Gift title: "Gift [random-4-chars]" |
||||
|
* - Amount: Random 1-99 |
||||
|
* - Recipient: "Unnamed/Unknown" |
||||
|
* |
||||
|
* @verification |
||||
|
* - Success notification appears |
||||
|
* - Gift visible in home view |
||||
|
* - Details match input data |
||||
|
* - Public server accessible |
||||
|
* |
||||
|
* @browsers chromium, firefox |
||||
|
* @retries 2 (for flaky network conditions) |
||||
|
*/ |
||||
|
test('Record something given', async ({ page }, testInfo) => { |
||||
|
// STEP 1: Initialize the performance collector
|
||||
|
const perfCollector = await createPerformanceCollector(page); |
||||
|
|
||||
// Generate a random non-zero single-digit number
|
// STEP 2: Generate unique test data
|
||||
|
const randomString = Math.random().toString(36).substring(2, 6); |
||||
const randomNonZeroNumber = Math.floor(Math.random() * 99) + 1; |
const randomNonZeroNumber = Math.floor(Math.random() * 99) + 1; |
||||
|
|
||||
// Standard title prefix
|
|
||||
const standardTitle = 'Gift '; |
const standardTitle = 'Gift '; |
||||
|
|
||||
// Combine title prefix with the random string
|
|
||||
const finalTitle = standardTitle + randomString; |
const finalTitle = standardTitle + randomString; |
||||
|
|
||||
// Import user 00
|
// STEP 3: Import user 00 and navigate to home page
|
||||
await importUser(page, '00'); |
await perfCollector.measureUserAction('import-user-account', async () => { |
||||
|
await importUserFromAccount(page, '00'); |
||||
// Record something given
|
}); |
||||
await page.goto('./'); |
|
||||
await page.getByTestId('closeOnboardingAndFinish').click(); |
await perfCollector.measureUserAction('initial-navigation', async () => { |
||||
await page.getByRole('button', { name: 'Person' }).click(); |
await page.goto('./'); |
||||
await page.getByRole('listitem').filter({ hasText: 'Unnamed' }).locator('svg').click(); |
}); |
||||
await page.getByPlaceholder('What was given').fill(finalTitle); |
const initialMetrics = await perfCollector.collectNavigationMetrics('home-page-load'); |
||||
await page.getByRole('spinbutton').fill(randomNonZeroNumber.toString()); |
await testInfo.attach('initial-page-load-metrics', { |
||||
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
contentType: 'application/json', |
||||
await expect(page.getByText('That gift was recorded.')).toBeVisible(); |
body: JSON.stringify(initialMetrics, null, 2) |
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
}); |
||||
|
|
||||
// Refresh home view and check gift
|
// STEP 4: Close onboarding dialog
|
||||
await page.goto('./'); |
await perfCollector.measureUserAction('close-onboarding', async () => { |
||||
const item = await page.locator('li:first-child').filter({ hasText: finalTitle }); |
await page.getByTestId('closeOnboardingAndFinish').click(); |
||||
await item.locator('[data-testid="circle-info-link"]').click(); |
}); |
||||
|
|
||||
|
// STEP 4.5: Close any additional dialogs that might be blocking
|
||||
|
await perfCollector.measureUserAction('close-additional-dialogs', async () => { |
||||
|
// Wait a moment for any dialogs to appear
|
||||
|
await page.waitForTimeout(1000); |
||||
|
|
||||
|
// Try to close any remaining dialogs
|
||||
|
const closeButtons = page.locator('button[aria-label*="close"], button[aria-label*="Close"], .dialog-overlay button, [role="dialog"] button'); |
||||
|
const count = await closeButtons.count(); |
||||
|
|
||||
|
for (let i = 0; i < count; i++) { |
||||
|
try { |
||||
|
await closeButtons.nth(i).click({ timeout: 2000 }); |
||||
|
} catch (e) { |
||||
|
// Ignore errors if button is not clickable
|
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Wait for any animations to complete
|
||||
|
await page.waitForTimeout(500); |
||||
|
}); |
||||
|
|
||||
|
// STEP 5: Select recipient
|
||||
|
await perfCollector.measureUserAction('select-recipient', async () => { |
||||
|
await page.getByRole('button', { name: 'Person' }).click(); |
||||
|
await page.getByRole('listitem').filter({ hasText: 'Unnamed' }).locator('svg').click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 6: Fill gift details
|
||||
|
await perfCollector.measureUserAction('fill-gift-details', async () => { |
||||
|
await page.getByPlaceholder('What was given').fill(finalTitle); |
||||
|
await page.getByRole('spinbutton').fill(randomNonZeroNumber.toString()); |
||||
|
}); |
||||
|
|
||||
|
// STEP 7: Submit gift and verify success
|
||||
|
await perfCollector.measureUserAction('submit-gift', async () => { |
||||
|
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
||||
|
await expect(page.getByText('That gift was recorded.')).toBeVisible(); |
||||
|
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 8: Refresh home view and locate gift
|
||||
|
await perfCollector.measureUserAction('refresh-home-view', async () => { |
||||
|
// Try page.reload() instead of goto to see if that helps
|
||||
|
await page.reload(); |
||||
|
}); |
||||
|
await perfCollector.collectNavigationMetrics('home-refresh-load'); |
||||
|
|
||||
|
// Wait for feed to load and gift to appear
|
||||
|
await perfCollector.measureUserAction('wait-for-feed-load', async () => { |
||||
|
// Wait for the feed container to be present
|
||||
|
await page.locator('ul').first().waitFor({ state: 'visible', timeout: 15000 }); |
||||
|
|
||||
|
// Wait for any feed items to load (not just the first one)
|
||||
|
await page.locator('li').first().waitFor({ state: 'visible', timeout: 15000 }); |
||||
|
|
||||
|
// Debug: Check what's actually in the feed
|
||||
|
const feedItems = page.locator('li'); |
||||
|
const count = await feedItems.count(); |
||||
|
|
||||
|
|
||||
|
// Try to find our gift in any position, not just first
|
||||
|
let giftFound = false; |
||||
|
for (let i = 0; i < count; i++) { |
||||
|
try { |
||||
|
const itemText = await feedItems.nth(i).textContent(); |
||||
|
if (itemText?.includes(finalTitle)) { |
||||
|
giftFound = true; |
||||
|
break; |
||||
|
} |
||||
|
} catch (e) { |
||||
|
// Continue to next item
|
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if (!giftFound) { |
||||
|
// Wait a bit more and try again
|
||||
|
await page.waitForTimeout(3000); |
||||
|
|
||||
|
// Check again
|
||||
|
const newCount = await feedItems.count(); |
||||
|
|
||||
|
for (let i = 0; i < newCount; i++) { |
||||
|
try { |
||||
|
const itemText = await feedItems.nth(i).textContent(); |
||||
|
if (itemText?.includes(finalTitle)) { |
||||
|
giftFound = true; |
||||
|
break; |
||||
|
} |
||||
|
} catch (e) { |
||||
|
// Continue to next item
|
||||
|
} |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
if (!giftFound) { |
||||
|
throw new Error(`Gift with title "${finalTitle}" not found in feed after waiting`); |
||||
|
} |
||||
|
}); |
||||
|
|
||||
|
// Find the gift item (could be in any position)
|
||||
|
const item = page.locator('li').filter({ hasText: finalTitle }); |
||||
|
|
||||
|
// STEP 9: View gift details
|
||||
|
await perfCollector.measureUserAction('view-gift-details', async () => { |
||||
|
// Debug: Check what elements are actually present
|
||||
|
|
||||
|
// Wait for the item to be visible
|
||||
|
await item.waitFor({ state: 'visible', timeout: 10000 }); |
||||
|
|
||||
|
// Check if the circle-info-link exists
|
||||
|
const circleInfoLink = item.locator('[data-testid="circle-info-link"]'); |
||||
|
const isVisible = await circleInfoLink.isVisible(); |
||||
|
|
||||
|
// If not visible, let's see what's in the item
|
||||
|
if (!isVisible) { |
||||
|
const itemHtml = await item.innerHTML(); |
||||
|
} |
||||
|
|
||||
|
await circleInfoLink.click(); |
||||
|
}); |
||||
|
|
||||
await expect(page.getByRole('heading', { name: 'Verifiable Claim Details' })).toBeVisible(); |
await expect(page.getByRole('heading', { name: 'Verifiable Claim Details' })).toBeVisible(); |
||||
await expect(page.getByText(finalTitle, { exact: true })).toBeVisible(); |
await expect(page.getByText(finalTitle, { exact: true })).toBeVisible(); |
||||
|
|
||||
|
// STEP 10: Expand details and open public server
|
||||
const page1Promise = page.waitForEvent('popup'); |
const page1Promise = page.waitForEvent('popup'); |
||||
// expand the Details section to see the extended details
|
|
||||
await page.getByRole('heading', { name: 'Details', exact: true }).click(); |
await perfCollector.measureUserAction('expand-details', async () => { |
||||
await page.getByRole('link', { name: 'View on the Public Server' }).click(); |
await page.getByRole('heading', { name: 'Details', exact: true }).click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('open-public-server', async () => { |
||||
|
await page.getByRole('link', { name: 'View on the Public Server' }).click(); |
||||
|
}); |
||||
|
|
||||
const page1 = await page1Promise; |
const page1 = await page1Promise; |
||||
|
|
||||
|
// STEP 11: Attach and validate performance data
|
||||
|
const { webVitals, performanceReport, summary } = await attachPerformanceData(testInfo, perfCollector); |
||||
|
const avgNavigationTime = perfCollector.navigationMetrics.reduce((sum, nav) => |
||||
|
sum + nav.metrics.loadComplete, 0) / perfCollector.navigationMetrics.length; |
||||
|
assertPerformanceMetrics(webVitals, initialMetrics, avgNavigationTime); |
||||
}); |
}); |
@ -1,50 +1,101 @@ |
|||||
import { test, expect, Page } from '@playwright/test'; |
import { test, expect, Page } from '@playwright/test'; |
||||
import { importUser } from './testUtils'; |
import { importUser } from './testUtils'; |
||||
|
import { createPerformanceCollector, attachPerformanceData, assertPerformanceMetrics } from './performanceUtils'; |
||||
|
|
||||
async function testProjectGive(page: Page, selector: string) { |
async function testProjectGive(page: Page, selector: string, testInfo: any) { |
||||
|
// STEP 1: Initialize the performance collector
|
||||
|
const perfCollector = await createPerformanceCollector(page); |
||||
|
|
||||
// Generate a random string of a few characters
|
// STEP 2: Generate unique test data
|
||||
const randomString = Math.random().toString(36).substring(2, 6); |
const randomString = Math.random().toString(36).substring(2, 6); |
||||
|
|
||||
// Generate a random non-zero single-digit number
|
|
||||
const randomNonZeroNumber = Math.floor(Math.random() * 99) + 1; |
const randomNonZeroNumber = Math.floor(Math.random() * 99) + 1; |
||||
|
|
||||
// Standard title prefix
|
|
||||
const standardTitle = 'Gift '; |
const standardTitle = 'Gift '; |
||||
|
|
||||
// Combine title prefix with the random string
|
|
||||
const finalTitle = standardTitle + randomString; |
const finalTitle = standardTitle + randomString; |
||||
|
|
||||
// find a project and enter a give to it and see that it shows
|
// STEP 3: Import user and navigate to discover
|
||||
await importUser(page, '00'); |
await perfCollector.measureUserAction('import-user-account', async () => { |
||||
await page.goto('./discover'); |
await importUser(page, '00'); |
||||
await page.getByTestId('closeOnboardingAndFinish').click(); |
}); |
||||
|
|
||||
await page.locator('ul#listDiscoverResults li:first-child a').click() |
await perfCollector.measureUserAction('navigate-to-discover', async () => { |
||||
// wait for the project page to load
|
await page.goto('./discover'); |
||||
await page.waitForLoadState('networkidle'); |
}); |
||||
// click the give button, inside the first div
|
const initialMetrics = await perfCollector.collectNavigationMetrics('discover-page-load'); |
||||
await page.getByTestId(selector).locator('div:first-child div button').click(); |
await testInfo.attach('initial-page-load-metrics', { |
||||
await page.getByPlaceholder('What was given').fill(finalTitle); |
contentType: 'application/json', |
||||
await page.getByRole('spinbutton').fill(randomNonZeroNumber.toString()); |
body: JSON.stringify(initialMetrics, null, 2) |
||||
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
}); |
||||
await expect(page.getByText('That gift was recorded.')).toBeVisible(); |
|
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
await perfCollector.measureUserAction('close-onboarding', async () => { |
||||
|
await page.getByTestId('closeOnboardingAndFinish').click(); |
||||
// refresh the page
|
}); |
||||
await page.reload(); |
|
||||
// check that the give is in the list
|
await perfCollector.measureUserAction('select-first-project', async () => { |
||||
await page |
await page.locator('ul#listDiscoverResults li:first-child a').click(); |
||||
.getByTestId(selector) |
}); |
||||
.locator('div ul li:first-child') |
|
||||
.filter({ hasText: finalTitle }) |
// STEP 4: Wait for project page to load
|
||||
.isVisible(); |
await perfCollector.measureUserAction('wait-for-project-load', async () => { |
||||
|
await page.waitForLoadState('networkidle'); |
||||
|
}); |
||||
|
|
||||
|
// STEP 5: Handle dialog overlays
|
||||
|
await perfCollector.measureUserAction('close-dialog-overlays', async () => { |
||||
|
await page.waitForTimeout(1000); |
||||
|
const closeButtons = page.locator('button[aria-label*="close"], button[aria-label*="Close"], .dialog-overlay button, [role="dialog"] button'); |
||||
|
const count = await closeButtons.count(); |
||||
|
|
||||
|
for (let i = 0; i < count; i++) { |
||||
|
try { |
||||
|
await closeButtons.nth(i).click({ timeout: 2000 }); |
||||
|
} catch (e) { |
||||
|
// Ignore errors if button is not clickable
|
||||
|
} |
||||
|
} |
||||
|
|
||||
|
await page.waitForTimeout(500); |
||||
|
}); |
||||
|
|
||||
|
// STEP 6: Record gift
|
||||
|
await perfCollector.measureUserAction('click-give-button', async () => { |
||||
|
await page.getByTestId(selector).locator('div:first-child div button').click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('fill-gift-details', async () => { |
||||
|
await page.getByPlaceholder('What was given').fill(finalTitle); |
||||
|
await page.getByRole('spinbutton').fill(randomNonZeroNumber.toString()); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('submit-gift', async () => { |
||||
|
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
||||
|
await expect(page.getByText('That gift was recorded.')).toBeVisible(); |
||||
|
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 7: Verify gift appears in list
|
||||
|
await perfCollector.measureUserAction('refresh-page', async () => { |
||||
|
await page.reload(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-gift-in-list', async () => { |
||||
|
await page |
||||
|
.getByTestId(selector) |
||||
|
.locator('div ul li:first-child') |
||||
|
.filter({ hasText: finalTitle }) |
||||
|
.isVisible(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 8: Attach and validate performance data
|
||||
|
const { webVitals, performanceReport, summary } = await attachPerformanceData(testInfo, perfCollector); |
||||
|
const avgNavigationTime = perfCollector.navigationMetrics.reduce((sum, nav) => |
||||
|
sum + nav.metrics.loadComplete, 0) / perfCollector.navigationMetrics.length; |
||||
|
assertPerformanceMetrics(webVitals, initialMetrics, avgNavigationTime); |
||||
} |
} |
||||
|
|
||||
test('Record a give to a project', async ({ page }) => { |
test('Record a give to a project', async ({ page }, testInfo) => { |
||||
await testProjectGive(page, 'gives-to'); |
await testProjectGive(page, 'gives-to', testInfo); |
||||
}); |
}); |
||||
|
|
||||
test('Record a give from a project', async ({ page }) => { |
test('Record a give from a project', async ({ page }, testInfo) => { |
||||
await testProjectGive(page, 'gives-from'); |
await testProjectGive(page, 'gives-from', testInfo); |
||||
}); |
}); |
||||
|
File diff suppressed because it is too large
@ -1,127 +1,291 @@ |
|||||
import { test, expect, Page } from '@playwright/test'; |
import { test, expect, Page } from '@playwright/test'; |
||||
import { importUser, importUserFromAccount } from './testUtils'; |
import { importUser, importUserFromAccount } from './testUtils'; |
||||
|
import { createPerformanceCollector, attachPerformanceData, assertPerformanceMetrics } from './performanceUtils'; |
||||
|
|
||||
test('Record an offer', async ({ page }) => { |
test('Record an offer', async ({ page }, testInfo) => { |
||||
test.setTimeout(60000); |
test.setTimeout(60000); |
||||
|
|
||||
// Generate a random string of 3 characters, skipping the "0." at the beginning
|
// STEP 1: Initialize the performance collector
|
||||
|
const perfCollector = await createPerformanceCollector(page); |
||||
|
|
||||
|
// STEP 2: Generate unique test data
|
||||
const randomString = Math.random().toString(36).substring(2, 5); |
const randomString = Math.random().toString(36).substring(2, 5); |
||||
// Standard title prefix
|
|
||||
const description = `Offering of ${randomString}`; |
const description = `Offering of ${randomString}`; |
||||
const updatedDescription = `Updated ${description}`; |
const updatedDescription = `Updated ${description}`; |
||||
const randomNonZeroNumber = Math.floor(Math.random() * 998) + 1; |
const randomNonZeroNumber = Math.floor(Math.random() * 998) + 1; |
||||
|
|
||||
// Switch to user 0
|
// STEP 3: Import user and navigate to discover page
|
||||
// await importUser(page);
|
await perfCollector.measureUserAction('import-user-account', async () => { |
||||
// Become User Zero
|
await importUserFromAccount(page, "00"); |
||||
await importUserFromAccount(page, "00"); |
}); |
||||
// Select a project
|
|
||||
await page.goto('./discover'); |
await perfCollector.measureUserAction('navigate-to-discover', async () => { |
||||
await page.getByTestId('closeOnboardingAndFinish').click(); |
await page.goto('./discover'); |
||||
await page.locator('ul#listDiscoverResults li:nth-child(1)').click(); |
}); |
||||
// Record an offer
|
const initialMetrics = await perfCollector.collectNavigationMetrics('discover-page-load'); |
||||
await page.locator('button', { hasText: 'Edit' }).isVisible(); // since the 'edit' takes longer to show, wait for that (lest the click miss)
|
await testInfo.attach('initial-page-load-metrics', { |
||||
await page.getByTestId('offerButton').click(); |
contentType: 'application/json', |
||||
await page.getByTestId('inputDescription').fill(description); |
body: JSON.stringify(initialMetrics, null, 2) |
||||
await page.getByTestId('inputOfferAmount').locator('input').fill(randomNonZeroNumber.toString()); |
}); |
||||
expect(page.getByRole('button', { name: 'Sign & Send' })); |
|
||||
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
// STEP 4: Close onboarding and select project
|
||||
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
await perfCollector.measureUserAction('close-onboarding', async () => { |
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
await page.getByTestId('closeOnboardingAndFinish').click(); |
||||
// go to the offer and check the values
|
}); |
||||
await page.goto('./projects'); |
|
||||
await page.getByRole('link', { name: 'Offers', exact: true }).click(); |
await perfCollector.measureUserAction('select-project', async () => { |
||||
await page.locator('li').filter({ hasText: description }).locator('a').first().click(); |
await page.locator('ul#listDiscoverResults li:nth-child(1)').click(); |
||||
await expect(page.getByRole('heading', { name: 'Verifiable Claim Details' })).toBeVisible(); |
}); |
||||
await expect(page.getByText(description, { exact: true })).toBeVisible(); |
|
||||
await expect(page.getByText('Offered to a bigger plan')).toBeVisible(); |
// STEP 5: Record an offer
|
||||
|
await perfCollector.measureUserAction('wait-for-edit-button', async () => { |
||||
|
await page.locator('button', { hasText: 'Edit' }).isVisible(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-offer-button', async () => { |
||||
|
await page.getByTestId('offerButton').click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('fill-offer-details', async () => { |
||||
|
await page.getByTestId('inputDescription').fill(description); |
||||
|
await page.getByTestId('inputOfferAmount').fill(randomNonZeroNumber.toString()); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('submit-offer', async () => { |
||||
|
expect(page.getByRole('button', { name: 'Sign & Send' })); |
||||
|
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
||||
|
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
||||
|
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 6: Navigate to projects and check offer
|
||||
|
await perfCollector.measureUserAction('navigate-to-projects', async () => { |
||||
|
await page.goto('./projects'); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-offers-tab', async () => { |
||||
|
await page.getByRole('link', { name: 'Offers', exact: true }).click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-offer-details', async () => { |
||||
|
await page.locator('li').filter({ hasText: description }).locator('a').first().click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-offer-details', async () => { |
||||
|
await expect(page.getByRole('heading', { name: 'Verifiable Claim Details' })).toBeVisible(); |
||||
|
await expect(page.getByText(description, { exact: true })).toBeVisible(); |
||||
|
await expect(page.getByText('Offered to a bigger plan')).toBeVisible(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 7: Expand details and check public server
|
||||
const serverPagePromise = page.waitForEvent('popup'); |
const serverPagePromise = page.waitForEvent('popup'); |
||||
// expand the Details section to see the extended details
|
|
||||
await page.getByRole('heading', { name: 'Details', exact: true }).click(); |
await perfCollector.measureUserAction('expand-details', async () => { |
||||
await page.getByRole('link', { name: 'View on the Public Server' }).click(); |
await page.getByRole('heading', { name: 'Details', exact: true }).click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('open-public-server', async () => { |
||||
|
await page.getByRole('link', { name: 'View on the Public Server' }).click(); |
||||
|
}); |
||||
|
|
||||
const serverPage = await serverPagePromise; |
const serverPage = await serverPagePromise; |
||||
await expect(serverPage.getByText(description)).toBeVisible(); |
await perfCollector.measureUserAction('verify-public-server', async () => { |
||||
await expect(serverPage.getByText('did:none:HIDDEN')).toBeVisible(); |
await expect(serverPage.getByText(description)).toBeVisible(); |
||||
// Now update that offer
|
await expect(serverPage.getByText('did:none:HIDDEN')).toBeVisible(); |
||||
|
}); |
||||
// find the edit page and check the old values again
|
|
||||
await page.goto('./projects'); |
// STEP 8: Update the offer
|
||||
await page.getByRole('link', { name: 'Offers', exact: true }).click(); |
await perfCollector.measureUserAction('navigate-back-to-projects', async () => { |
||||
await page.locator('li').filter({ hasText: description }).locator('a').first().click(); |
await page.goto('./projects'); |
||||
await page.getByTestId('editClaimButton').click(); |
}); |
||||
await page.locator('heading', { hasText: 'What is offered' }).isVisible(); |
|
||||
const itemDesc = await page.getByTestId('itemDescription'); |
await perfCollector.measureUserAction('click-offers-tab-again', async () => { |
||||
await expect(itemDesc).toHaveValue(description); |
await page.getByRole('link', { name: 'Offers', exact: true }).click(); |
||||
const amount = await page.getByTestId('inputOfferAmount'); |
}); |
||||
await expect(amount).toHaveValue(randomNonZeroNumber.toString()); |
|
||||
// update the values
|
await perfCollector.measureUserAction('click-offer-to-edit', async () => { |
||||
await itemDesc.fill(updatedDescription); |
await page.locator('li').filter({ hasText: description }).locator('a').first().click(); |
||||
await amount.fill(String(randomNonZeroNumber + 1)); |
}); |
||||
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
|
||||
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
await perfCollector.measureUserAction('click-edit-button', async () => { |
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
await page.getByTestId('editClaimButton').click(); |
||||
// go to the offer claim again and check the updated values
|
}); |
||||
await page.goto('./projects'); |
|
||||
await page.getByRole('link', { name: 'Offers', exact: true }).click(); |
await perfCollector.measureUserAction('verify-edit-form', async () => { |
||||
await page.locator('li').filter({ hasText: description }).locator('a').first().click(); |
await page.locator('heading', { hasText: 'What is offered' }).isVisible(); |
||||
const newItemDesc = page.getByTestId('description'); |
const itemDesc = await page.getByTestId('itemDescription'); |
||||
await expect(newItemDesc).toHaveText(updatedDescription); |
await expect(itemDesc).toHaveValue(description); |
||||
// go to edit page
|
const amount = await page.getByTestId('inputOfferAmount'); |
||||
await page.getByTestId('editClaimButton').click(); |
await expect(amount).toHaveValue(randomNonZeroNumber.toString()); |
||||
const newAmount = page.getByTestId('inputOfferAmount'); |
}); |
||||
await expect(newAmount).toHaveValue((randomNonZeroNumber + 1).toString()); |
|
||||
// go to the home page and check that the offer is shown as new
|
await perfCollector.measureUserAction('update-offer-values', async () => { |
||||
await page.goto('./'); |
const itemDesc = await page.getByTestId('itemDescription'); |
||||
const offerNumElem = page.getByTestId('newOffersToUserProjectsActivityNumber'); |
await itemDesc.fill(updatedDescription); |
||||
// extract the number and check that it's greater than 0 or "50+"
|
const amount = await page.getByTestId('inputOfferAmount'); |
||||
const offerNumText = await offerNumElem.textContent(); |
await amount.fill(String(randomNonZeroNumber + 1)); |
||||
if (offerNumText === null) { |
}); |
||||
throw new Error('Expected Activity Number greater than 0 but got null.'); |
|
||||
} else if (offerNumText === '50+') { |
await perfCollector.measureUserAction('submit-updated-offer', async () => { |
||||
// we're OK
|
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
||||
} else if (parseInt(offerNumText) > 0) { |
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
||||
// we're OK
|
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); |
||||
} else { |
}); |
||||
throw new Error(`Expected Activity Number of greater than 0 but got ${offerNumText}.`); |
|
||||
} |
// STEP 9: Verify updated offer
|
||||
|
await perfCollector.measureUserAction('navigate-to-projects-final', async () => { |
||||
// click on the number of new offers to go to the list page
|
await page.goto('./projects'); |
||||
await offerNumElem.click(); |
}); |
||||
await expect(page.getByText('New Offers To Your Projects', { exact: true })).toBeVisible(); |
|
||||
// get the icon child of the showOffersToUserProjects
|
await perfCollector.measureUserAction('click-offers-tab-final', async () => { |
||||
await page.getByTestId('showOffersToUserProjects').locator('div > svg.fa-chevron-right').click(); |
await page.getByRole('link', { name: 'Offers', exact: true }).click(); |
||||
await expect(page.getByText(description)).toBeVisible(); |
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-updated-offer', async () => { |
||||
|
await page.locator('li').filter({ hasText: description }).locator('a').first().click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-updated-offer', async () => { |
||||
|
const newItemDesc = page.getByTestId('description'); |
||||
|
await expect(newItemDesc).toHaveText(updatedDescription); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-edit-button-final', async () => { |
||||
|
await page.getByTestId('editClaimButton').click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-updated-amount', async () => { |
||||
|
const newAmount = page.getByTestId('inputOfferAmount'); |
||||
|
await expect(newAmount).toHaveValue((randomNonZeroNumber + 1).toString()); |
||||
|
}); |
||||
|
|
||||
|
// STEP 10: Check home page for new offers
|
||||
|
await perfCollector.measureUserAction('navigate-to-home', async () => { |
||||
|
await page.goto('./'); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-new-offers-indicator', async () => { |
||||
|
const offerNumElem = page.getByTestId('newOffersToUserProjectsActivityNumber'); |
||||
|
const offerNumText = await offerNumElem.textContent(); |
||||
|
if (offerNumText === null) { |
||||
|
throw new Error('Expected Activity Number greater than 0 but got null.'); |
||||
|
} else if (offerNumText === '50+') { |
||||
|
// we're OK
|
||||
|
} else if (parseInt(offerNumText) > 0) { |
||||
|
// we're OK
|
||||
|
} else { |
||||
|
throw new Error(`Expected Activity Number of greater than 0 but got ${offerNumText}.`); |
||||
|
} |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-new-offers-number', async () => { |
||||
|
const offerNumElem = page.getByTestId('newOffersToUserProjectsActivityNumber'); |
||||
|
await offerNumElem.click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-new-offers-page', async () => { |
||||
|
await expect(page.getByText('New Offers To Your Projects', { exact: true })).toBeVisible(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('expand-offers-section', async () => { |
||||
|
await page.getByTestId('showOffersToUserProjects').locator('div > svg.fa-chevron-right').click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-offer-in-list', async () => { |
||||
|
await expect(page.getByText(description)).toBeVisible(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 11: Attach and validate performance data
|
||||
|
const { webVitals, performanceReport, summary } = await attachPerformanceData(testInfo, perfCollector); |
||||
|
const avgNavigationTime = perfCollector.navigationMetrics.reduce((sum, nav) => |
||||
|
sum + nav.metrics.loadComplete, 0) / perfCollector.navigationMetrics.length; |
||||
|
assertPerformanceMetrics(webVitals, initialMetrics, avgNavigationTime); |
||||
}); |
}); |
||||
|
|
||||
test('Affirm delivery of an offer', async ({ page }) => { |
test('Affirm delivery of an offer', async ({ page }, testInfo) => { |
||||
// go to the home page and check that the offer is shown as new
|
// STEP 1: Initialize the performance collector
|
||||
// await importUser(page);
|
const perfCollector = await createPerformanceCollector(page); |
||||
|
|
||||
await importUserFromAccount(page, "00"); |
// STEP 2: Import user and navigate to home
|
||||
await page.goto('./'); |
await perfCollector.measureUserAction('import-user-account', async () => { |
||||
await page.getByTestId('closeOnboardingAndFinish').click(); |
await importUserFromAccount(page, "00"); |
||||
const offerNumElem = page.getByTestId('newOffersToUserProjectsActivityNumber'); |
}); |
||||
await expect(offerNumElem).toBeVisible(); |
|
||||
|
|
||||
// click on the number of new offers to go to the list page
|
await perfCollector.measureUserAction('navigate-to-home', async () => { |
||||
await offerNumElem.click(); |
await page.goto('./'); |
||||
|
}); |
||||
// get the link that comes after the showOffersToUserProjects and click it
|
const initialMetrics = await perfCollector.collectNavigationMetrics('home-page-load'); |
||||
await page.getByTestId('showOffersToUserProjects').locator('a').click(); |
await testInfo.attach('initial-page-load-metrics', { |
||||
|
contentType: 'application/json', |
||||
// get the first item of the list and click on the icon with file-lines
|
body: JSON.stringify(initialMetrics, null, 2) |
||||
const firstItem = page.getByTestId('listRecentOffersToUserProjects').locator('li').first(); |
}); |
||||
await expect(firstItem).toBeVisible(); |
|
||||
await firstItem.locator('svg.fa-file-lines').click(); |
await perfCollector.measureUserAction('close-onboarding', async () => { |
||||
await expect(page.getByText('Verifiable Claim Details', { exact: true })).toBeVisible(); |
await page.getByTestId('closeOnboardingAndFinish').click(); |
||||
|
}); |
||||
// click on the 'Affirm Delivery' button
|
|
||||
await page.getByRole('button', { name: 'Affirm Delivery' }).click(); |
// STEP 3: Check new offers indicator
|
||||
// fill our offer info and submit
|
await perfCollector.measureUserAction('verify-new-offers-indicator', async () => { |
||||
await page.getByPlaceholder('What was given').fill('Whatever the offer says'); |
const offerNumElem = page.getByTestId('newOffersToUserProjectsActivityNumber'); |
||||
await page.getByRole('spinbutton').fill('2'); |
await expect(offerNumElem).toBeVisible(); |
||||
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
}); |
||||
await expect(page.getByText('That gift was recorded.')).toBeVisible(); |
|
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
// STEP 4: Navigate to offers list
|
||||
|
await perfCollector.measureUserAction('click-new-offers-number', async () => { |
||||
|
// Close any dialog overlays that might be blocking clicks
|
||||
|
await page.waitForTimeout(1000); |
||||
|
const closeButtons = page.locator('button[aria-label*="close"], button[aria-label*="Close"], .dialog-overlay button, [role="dialog"] button'); |
||||
|
const count = await closeButtons.count(); |
||||
|
|
||||
|
for (let i = 0; i < count; i++) { |
||||
|
try { |
||||
|
await closeButtons.nth(i).click({ timeout: 2000 }); |
||||
|
} catch (e) { |
||||
|
// Ignore errors if button is not clickable
|
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Wait for any animations to complete
|
||||
|
await page.waitForTimeout(500); |
||||
|
|
||||
|
const offerNumElem = page.getByTestId('newOffersToUserProjectsActivityNumber'); |
||||
|
await offerNumElem.click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-offers-link', async () => { |
||||
|
await page.getByTestId('showOffersToUserProjects').locator('a').click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 5: Affirm delivery
|
||||
|
await perfCollector.measureUserAction('select-first-offer', async () => { |
||||
|
const firstItem = page.getByTestId('listRecentOffersToUserProjects').locator('li').first(); |
||||
|
await expect(firstItem).toBeVisible(); |
||||
|
await firstItem.locator('svg.fa-file-lines').click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('verify-claim-details', async () => { |
||||
|
await expect(page.getByText('Verifiable Claim Details', { exact: true })).toBeVisible(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('click-affirm-delivery', async () => { |
||||
|
await page.getByRole('button', { name: 'Affirm Delivery' }).click(); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('fill-delivery-details', async () => { |
||||
|
await page.getByPlaceholder('What was given').fill('Whatever the offer says'); |
||||
|
await page.getByRole('spinbutton').fill('2'); |
||||
|
}); |
||||
|
|
||||
|
await perfCollector.measureUserAction('submit-delivery', async () => { |
||||
|
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
||||
|
await expect(page.getByText('That gift was recorded.')).toBeVisible(); |
||||
|
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 6: Attach and validate performance data
|
||||
|
const { webVitals, performanceReport, summary } = await attachPerformanceData(testInfo, perfCollector); |
||||
|
const avgNavigationTime = perfCollector.navigationMetrics.reduce((sum, nav) => |
||||
|
sum + nav.metrics.loadComplete, 0) / perfCollector.navigationMetrics.length; |
||||
|
assertPerformanceMetrics(webVitals, initialMetrics, avgNavigationTime); |
||||
}); |
}); |
||||
|
|
||||
|
@ -1,94 +1,162 @@ |
|||||
|
/** |
||||
|
* This test covers a complete user flow in TimeSafari with integrated performance tracking. |
||||
|
* |
||||
|
* Focus areas: |
||||
|
* - Performance monitoring for every major user step |
||||
|
* - Multi-user flow using DID switching |
||||
|
* - Offer creation, viewing, and state updates |
||||
|
* - Validation of both behavior and responsiveness |
||||
|
*/ |
||||
|
|
||||
import { test, expect } from '@playwright/test'; |
import { test, expect } from '@playwright/test'; |
||||
import { switchToUser, getTestUserData, importUserFromAccount } from './testUtils'; |
import { switchToUser, importUserFromAccount } from './testUtils'; |
||||
|
import { |
||||
|
createPerformanceCollector, |
||||
|
attachPerformanceData, |
||||
|
assertPerformanceMetrics |
||||
|
} from './performanceUtils'; |
||||
|
|
||||
|
test('New offers for another user', async ({ page }, testInfo) => { |
||||
|
// STEP 1: Initialize the performance collector
|
||||
|
const perfCollector = await createPerformanceCollector(page); |
||||
|
|
||||
test('New offers for another user', async ({ page }) => { |
// STEP 2: Navigate to home page and measure baseline performance
|
||||
await page.goto('./'); |
await perfCollector.measureUserAction('initial-navigation', async () => { |
||||
|
await page.goto('/'); |
||||
|
}); |
||||
|
const initialMetrics = await perfCollector.collectNavigationMetrics('home-page-load'); |
||||
|
await testInfo.attach('initial-page-load-metrics', { |
||||
|
contentType: 'application/json', |
||||
|
body: JSON.stringify(initialMetrics, null, 2) |
||||
|
}); |
||||
|
|
||||
// Get the auto-created DID from the HomeView
|
// STEP 3: Extract the auto-created DID from the page
|
||||
await page.waitForLoadState('networkidle'); |
// Wait for the page to be ready and the DID to be available
|
||||
|
await page.waitForSelector('#Content[data-active-did]', { timeout: 10000 }); |
||||
const autoCreatedDid = await page.getAttribute('#Content', 'data-active-did'); |
const autoCreatedDid = await page.getAttribute('#Content', 'data-active-did'); |
||||
|
if (!autoCreatedDid) throw new Error('Auto-created DID not found in HomeView'); |
||||
if (!autoCreatedDid) { |
|
||||
throw new Error('Auto-created DID not found in HomeView'); |
|
||||
} |
|
||||
|
|
||||
await page.getByTestId('closeOnboardingAndFinish').click(); |
// STEP 4: Close onboarding dialog and confirm no new offers are visible
|
||||
|
await perfCollector.measureUserAction('close-onboarding', async () => { |
||||
|
await page.getByTestId('closeOnboardingAndFinish').click(); |
||||
|
}); |
||||
await expect(page.getByTestId('newDirectOffersActivityNumber')).toBeHidden(); |
await expect(page.getByTestId('newDirectOffersActivityNumber')).toBeHidden(); |
||||
|
|
||||
// Become User Zero
|
// STEP 5: Switch to User Zero, who will create offers
|
||||
await importUserFromAccount(page, "00"); |
await perfCollector.measureUserAction('import-user-account', async () => { |
||||
|
await importUserFromAccount(page, "00"); |
||||
|
}); |
||||
|
|
||||
// As User Zero, add the auto-created DID as a contact
|
// STEP 6: Navigate to contacts page
|
||||
await page.goto('./contacts'); |
await perfCollector.measureUserAction('navigate-to-contacts', async () => { |
||||
await page.getByPlaceholder('URL or DID, Name, Public Key').fill(autoCreatedDid + ', A Friend'); |
await page.goto('/contacts'); |
||||
await expect(page.locator('button > svg.fa-plus')).toBeVisible(); |
}); |
||||
await page.locator('button > svg.fa-plus').click(); |
await perfCollector.collectNavigationMetrics('contacts-page-load'); |
||||
await page.locator('div[role="alert"] button:has-text("No")').click(); // don't register
|
|
||||
await expect(page.locator('div[role="alert"] h4:has-text("Success")')).toBeVisible(); |
|
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
|
||||
await expect(page.locator('div[role="alert"] button > svg.fa-xmark')).toBeHidden(); // ensure alert is gone
|
|
||||
|
|
||||
// show buttons to make offers directly to people
|
// STEP 7: Add the auto-created DID as a contact
|
||||
await page.getByRole('button').filter({ hasText: /See Actions/i }).click(); |
await perfCollector.measureUserAction('add-contact', async () => { |
||||
|
await page.getByPlaceholder('URL or DID, Name, Public Key').fill(autoCreatedDid + ', A Friend'); |
||||
|
await page.locator('button > svg.fa-plus').click(); |
||||
|
await page.locator('div[role="alert"] button:has-text("No")').click(); |
||||
|
await expect(page.locator('div[role="alert"] span:has-text("Success")')).toBeVisible(); |
||||
|
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); |
||||
|
await expect(page.locator('div[role="alert"] button > svg.fa-xmark')).toBeHidden(); |
||||
|
}); |
||||
|
|
||||
// make an offer directly to user 1
|
// STEP 8: Show action buttons for making offers
|
||||
// Generate a random string of 3 characters, skipping the "0." at the beginning
|
await perfCollector.measureUserAction('show-actions', async () => { |
||||
|
await page.getByRole('button').filter({ hasText: /See Actions/i }).click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 9 & 10: Create two offers for the auto-created user
|
||||
const randomString1 = Math.random().toString(36).substring(2, 5); |
const randomString1 = Math.random().toString(36).substring(2, 5); |
||||
await page.getByTestId('offerButton').click(); |
await perfCollector.measureUserAction('create-first-offer', async () => { |
||||
await page.getByTestId('inputDescription').fill(`help of ${randomString1} from #000`); |
await page.getByTestId('offerButton').click(); |
||||
await page.getByTestId('inputOfferAmount').locator('input').fill('1'); |
await page.getByTestId('inputDescription').fill(`help of ${randomString1} from #000`); |
||||
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
await page.getByTestId('inputOfferAmount').fill('1'); |
||||
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
||||
await expect(page.locator('div[role="alert"] button > svg.fa-xmark')).toBeHidden(); // ensure alert is gone
|
await page.locator('div[role="alert"]').filter({ hasText: 'That offer was recorded.' }).locator('button > svg.fa-xmark').click(); |
||||
|
// Wait for alert to be hidden to prevent multiple dialogs
|
||||
// make another offer to user 1
|
await expect(page.locator('div[role="alert"]').filter({ hasText: 'That offer was recorded.' })).toBeHidden(); |
||||
|
}); |
||||
|
|
||||
|
// Add delay between offers to prevent performance issues
|
||||
|
await page.waitForTimeout(500); |
||||
|
|
||||
const randomString2 = Math.random().toString(36).substring(2, 5); |
const randomString2 = Math.random().toString(36).substring(2, 5); |
||||
await page.getByTestId('offerButton').click(); |
await perfCollector.measureUserAction('create-second-offer', async () => { |
||||
await page.getByTestId('inputDescription').fill(`help of ${randomString2} from #000`); |
await page.getByTestId('offerButton').click(); |
||||
await page.getByTestId('inputOfferAmount').locator('input').fill('3'); |
await page.getByTestId('inputDescription').fill(`help of ${randomString2} from #000`); |
||||
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
await page.getByTestId('inputOfferAmount').fill('3'); |
||||
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
await page.getByRole('button', { name: 'Sign & Send' }).click(); |
||||
await page.locator('div[role="alert"] button > svg.fa-xmark').click(); // dismiss info alert
|
await expect(page.getByText('That offer was recorded.')).toBeVisible(); |
||||
await expect(page.locator('div[role="alert"] button > svg.fa-xmark')).toBeHidden(); // ensure alert is gone
|
await page.locator('div[role="alert"]').filter({ hasText: 'That offer was recorded.' }).locator('button > svg.fa-xmark').click(); |
||||
|
// Wait for alert to be hidden to prevent multiple dialogs
|
||||
// Switch back to the auto-created DID (the "another user") to see the offers
|
await expect(page.locator('div[role="alert"]').filter({ hasText: 'That offer was recorded.' })).toBeHidden(); |
||||
await switchToUser(page, autoCreatedDid); |
}); |
||||
await page.goto('./'); |
|
||||
|
// STEP 11: Switch back to the auto-created DID
|
||||
|
await perfCollector.measureUserAction('switch-user', async () => { |
||||
|
await switchToUser(page, autoCreatedDid); |
||||
|
}); |
||||
|
|
||||
|
// STEP 12: Navigate back home as the auto-created user
|
||||
|
await perfCollector.measureUserAction('navigate-home-as-other-user', async () => { |
||||
|
await page.goto('/'); |
||||
|
}); |
||||
|
await perfCollector.collectNavigationMetrics('home-return-load'); |
||||
|
|
||||
|
// STEP 13: Confirm 2 new offers are visible
|
||||
let offerNumElem = page.getByTestId('newDirectOffersActivityNumber'); |
let offerNumElem = page.getByTestId('newDirectOffersActivityNumber'); |
||||
await expect(offerNumElem).toHaveText('2'); |
await expect(offerNumElem).toHaveText('2'); |
||||
|
|
||||
// click on the number of new offers to go to the list page
|
// STEP 14 & 15: View and expand the offers list
|
||||
await offerNumElem.click(); |
await perfCollector.measureUserAction('view-offers-list', async () => { |
||||
|
await offerNumElem.click(); |
||||
|
}); |
||||
await expect(page.getByText('New Offers To You', { exact: true })).toBeVisible(); |
await expect(page.getByText('New Offers To You', { exact: true })).toBeVisible(); |
||||
await page.getByTestId('showOffersToUser').locator('div > svg.fa-chevron-right').click(); |
await perfCollector.measureUserAction('expand-offers', async () => { |
||||
// note that they show in reverse chronologicalorder
|
await page.getByTestId('showOffersToUser').locator('div > svg.fa-chevron-right').click(); |
||||
|
}); |
||||
|
|
||||
|
// STEP 16: Validate both offers are displayed
|
||||
await expect(page.getByText(`help of ${randomString2} from #000`)).toBeVisible(); |
await expect(page.getByText(`help of ${randomString2} from #000`)).toBeVisible(); |
||||
await expect(page.getByText(`help of ${randomString1} from #000`)).toBeVisible(); |
await expect(page.getByText(`help of ${randomString1} from #000`)).toBeVisible(); |
||||
|
|
||||
// click on the latest offer to keep it as "unread"
|
// STEP 17: Mark one offer as read
|
||||
await page.hover(`li:has-text("help of ${randomString2} from #000")`); |
await perfCollector.measureUserAction('mark-offers-as-read', async () => { |
||||
// await page.locator('li').filter({ hasText: `help of ${randomString2} from #000` }).click();
|
const liElem = page.locator('li').filter({ hasText: `help of ${randomString2} from #000` }); |
||||
// await page.locator('div').filter({ hasText: /keep all above/ }).click();
|
// Hover over the li element to make the "keep all above" text visible
|
||||
// now find the "Click to keep all above as new offers" after that list item and click it
|
await liElem.hover(); |
||||
const liElem = page.locator('li').filter({ hasText: `help of ${randomString2} from #000` }); |
await liElem.locator('div').filter({ hasText: /keep all above/ }).click(); |
||||
await liElem.hover(); |
}); |
||||
const keepAboveAsNew = await liElem.locator('div').filter({ hasText: /keep all above/ }); |
|
||||
|
|
||||
await keepAboveAsNew.click(); |
|
||||
|
|
||||
// now see that only one offer is shown as new
|
// STEP 18 & 19: Return home and check that the count has dropped to 1
|
||||
await page.goto('./'); |
await perfCollector.measureUserAction('final-home-navigation', async () => { |
||||
|
await page.goto('/'); |
||||
|
}); |
||||
|
await perfCollector.collectNavigationMetrics('final-home-load'); |
||||
offerNumElem = page.getByTestId('newDirectOffersActivityNumber'); |
offerNumElem = page.getByTestId('newDirectOffersActivityNumber'); |
||||
await expect(offerNumElem).toHaveText('1'); |
await expect(offerNumElem).toHaveText('1'); |
||||
await offerNumElem.click(); |
|
||||
await expect(page.getByText('New Offer To You', { exact: true })).toBeVisible(); |
// STEP 20: Open the offers list again to confirm the remaining offer
|
||||
await page.getByTestId('showOffersToUser').locator('div > svg.fa-chevron-right').click(); |
await perfCollector.measureUserAction('final-offer-check', async () => { |
||||
|
await offerNumElem.click(); |
||||
// now see that no offers are shown as new
|
await expect(page.getByText('New Offer To You', { exact: true })).toBeVisible(); |
||||
await page.goto('./'); |
await page.getByTestId('showOffersToUser').locator('div > svg.fa-chevron-right').click(); |
||||
// wait until the list with ID listLatestActivity has at least one visible item
|
}); |
||||
await page.locator('#listLatestActivity li').first().waitFor({ state: 'visible' }); |
|
||||
|
// STEP 21 & 22: Final verification that the UI reflects the read/unread state correctly
|
||||
|
await perfCollector.measureUserAction('final-verification', async () => { |
||||
|
await page.goto('/'); |
||||
|
await page.locator('#listLatestActivity li').first().waitFor({ state: 'visible' }); |
||||
|
}); |
||||
await expect(page.getByTestId('newDirectOffersActivityNumber')).toBeHidden(); |
await expect(page.getByTestId('newDirectOffersActivityNumber')).toBeHidden(); |
||||
|
|
||||
|
// STEP 23: Attach and validate performance data
|
||||
|
const { webVitals, performanceReport, summary } = await attachPerformanceData(testInfo, perfCollector); |
||||
|
const avgNavigationTime = perfCollector.navigationMetrics.reduce((sum, nav) => |
||||
|
sum + nav.metrics.loadComplete, 0) / perfCollector.navigationMetrics.length; |
||||
|
assertPerformanceMetrics(webVitals, initialMetrics, avgNavigationTime); |
||||
}); |
}); |
||||
|
@ -0,0 +1,343 @@ |
|||||
|
import { Page, TestInfo, expect } from '@playwright/test'; |
||||
|
|
||||
|
// Performance metrics collection utilities
|
||||
|
export class PerformanceCollector { |
||||
|
private page: Page; |
||||
|
public metrics: any; |
||||
|
public navigationMetrics: any[]; |
||||
|
private cdpSession: any; |
||||
|
|
||||
|
constructor(page: Page) { |
||||
|
this.page = page; |
||||
|
this.metrics = {}; |
||||
|
this.navigationMetrics = []; |
||||
|
this.cdpSession = null; |
||||
|
} |
||||
|
|
||||
|
async initialize() { |
||||
|
// Initialize CDP session for detailed metrics (only in Chromium)
|
||||
|
try { |
||||
|
this.cdpSession = await this.page.context().newCDPSession(this.page); |
||||
|
await this.cdpSession.send('Performance.enable'); |
||||
|
} catch (error) { |
||||
|
// CDP not available in Firefox, continue without it
|
||||
|
// Note: This will be captured in test attachments instead of console.log
|
||||
|
} |
||||
|
|
||||
|
// Track network requests
|
||||
|
this.page.on('response', response => { |
||||
|
if (!this.metrics.networkRequests) this.metrics.networkRequests = []; |
||||
|
this.metrics.networkRequests.push({ |
||||
|
url: response.url(), |
||||
|
status: response.status(), |
||||
|
timing: null, // response.timing() is not available in Playwright
|
||||
|
size: response.headers()['content-length'] || 0 |
||||
|
}); |
||||
|
}); |
||||
|
|
||||
|
// Inject performance monitoring script
|
||||
|
await this.page.addInitScript(() => { |
||||
|
(window as any).performanceMarks = {}; |
||||
|
(window as any).markStart = (name: string) => { |
||||
|
(window as any).performanceMarks[name] = performance.now(); |
||||
|
}; |
||||
|
(window as any).markEnd = (name: string) => { |
||||
|
if ((window as any).performanceMarks[name]) { |
||||
|
const duration = performance.now() - (window as any).performanceMarks[name]; |
||||
|
// Note: Browser console logs are kept for debugging performance in browser
|
||||
|
console.log(`Performance: ${name} took ${duration.toFixed(2)}ms`); |
||||
|
return duration; |
||||
|
} |
||||
|
}; |
||||
|
}); |
||||
|
} |
||||
|
|
||||
|
async ensurePerformanceScript() { |
||||
|
// Ensure the performance script is available in the current page context
|
||||
|
await this.page.evaluate(() => { |
||||
|
if (!(window as any).performanceMarks) { |
||||
|
(window as any).performanceMarks = {}; |
||||
|
} |
||||
|
if (!(window as any).markStart) { |
||||
|
(window as any).markStart = (name: string) => { |
||||
|
(window as any).performanceMarks[name] = performance.now(); |
||||
|
}; |
||||
|
} |
||||
|
if (!(window as any).markEnd) { |
||||
|
(window as any).markEnd = (name: string) => { |
||||
|
if ((window as any).performanceMarks[name]) { |
||||
|
const duration = performance.now() - (window as any).performanceMarks[name]; |
||||
|
console.log(`Performance: ${name} took ${duration.toFixed(2)}ms`); |
||||
|
return duration; |
||||
|
} |
||||
|
}; |
||||
|
} |
||||
|
}); |
||||
|
} |
||||
|
|
||||
|
async collectNavigationMetrics(label = 'navigation') { |
||||
|
const startTime = performance.now(); |
||||
|
|
||||
|
const metrics = await this.page.evaluate(() => { |
||||
|
const timing = (performance as any).timing; |
||||
|
const navigation = performance.getEntriesByType('navigation')[0] as any; |
||||
|
|
||||
|
// Firefox-compatible performance metrics
|
||||
|
const paintEntries = performance.getEntriesByType('paint'); |
||||
|
const firstPaint = paintEntries.find((entry: any) => entry.name === 'first-paint')?.startTime || 0; |
||||
|
const firstContentfulPaint = paintEntries.find((entry: any) => entry.name === 'first-contentful-paint')?.startTime || 0; |
||||
|
|
||||
|
// Resource timing (works in both browsers)
|
||||
|
const resourceEntries = performance.getEntriesByType('resource'); |
||||
|
const resourceTiming = resourceEntries.map((entry: any) => ({ |
||||
|
name: entry.name, |
||||
|
duration: entry.duration, |
||||
|
transferSize: entry.transferSize || 0, |
||||
|
decodedBodySize: entry.decodedBodySize || 0 |
||||
|
})); |
||||
|
|
||||
|
return { |
||||
|
// Core timing metrics
|
||||
|
domContentLoaded: timing.domContentLoadedEventEnd - timing.navigationStart, |
||||
|
loadComplete: timing.loadEventEnd - timing.navigationStart, |
||||
|
firstPaint: firstPaint, |
||||
|
firstContentfulPaint: firstContentfulPaint, |
||||
|
|
||||
|
// Navigation API metrics (if available)
|
||||
|
dnsLookup: navigation ? navigation.domainLookupEnd - navigation.domainLookupStart : 0, |
||||
|
tcpConnect: navigation ? navigation.connectEnd - navigation.connectStart : 0, |
||||
|
serverResponse: navigation ? navigation.responseEnd - navigation.requestStart : 0, |
||||
|
|
||||
|
// Resource counts and timing
|
||||
|
resourceCount: resourceEntries.length, |
||||
|
resourceTiming: resourceTiming, |
||||
|
|
||||
|
// Memory usage (Chrome only, null in Firefox)
|
||||
|
memoryUsage: (performance as any).memory ? { |
||||
|
used: (performance as any).memory.usedJSHeapSize, |
||||
|
total: (performance as any).memory.totalJSHeapSize, |
||||
|
limit: (performance as any).memory.jsHeapSizeLimit |
||||
|
} : null, |
||||
|
|
||||
|
// Firefox-specific: Performance marks and measures
|
||||
|
performanceMarks: performance.getEntriesByType('mark').map((mark: any) => ({ |
||||
|
name: mark.name, |
||||
|
startTime: mark.startTime |
||||
|
})), |
||||
|
|
||||
|
// Browser detection
|
||||
|
browser: navigator.userAgent.includes('Firefox') ? 'firefox' : 'chrome' |
||||
|
}; |
||||
|
}); |
||||
|
|
||||
|
const collectTime = performance.now() - startTime; |
||||
|
|
||||
|
this.navigationMetrics.push({ |
||||
|
label, |
||||
|
timestamp: new Date().toISOString(), |
||||
|
metrics, |
||||
|
collectionTime: collectTime |
||||
|
}); |
||||
|
|
||||
|
return metrics; |
||||
|
} |
||||
|
|
||||
|
async collectWebVitals() { |
||||
|
return await this.page.evaluate(() => { |
||||
|
return new Promise((resolve) => { |
||||
|
const vitals: any = {}; |
||||
|
let pendingVitals = 3; // LCP, FID, CLS
|
||||
|
|
||||
|
const checkComplete = () => { |
||||
|
pendingVitals--; |
||||
|
if (pendingVitals <= 0) { |
||||
|
setTimeout(() => resolve(vitals), 100); |
||||
|
} |
||||
|
}; |
||||
|
|
||||
|
// Largest Contentful Paint
|
||||
|
new PerformanceObserver((list) => { |
||||
|
const entries = list.getEntries(); |
||||
|
if (entries.length > 0) { |
||||
|
vitals.lcp = entries[entries.length - 1].startTime; |
||||
|
} |
||||
|
checkComplete(); |
||||
|
}).observe({ entryTypes: ['largest-contentful-paint'] }); |
||||
|
|
||||
|
// First Input Delay
|
||||
|
new PerformanceObserver((list) => { |
||||
|
const entries = list.getEntries(); |
||||
|
if (entries.length > 0) { |
||||
|
vitals.fid = (entries[0] as any).processingStart - entries[0].startTime; |
||||
|
} |
||||
|
checkComplete(); |
||||
|
}).observe({ entryTypes: ['first-input'] }); |
||||
|
|
||||
|
// Cumulative Layout Shift
|
||||
|
let clsValue = 0; |
||||
|
new PerformanceObserver((list) => { |
||||
|
for (const entry of list.getEntries()) { |
||||
|
if (!(entry as any).hadRecentInput) { |
||||
|
clsValue += (entry as any).value; |
||||
|
} |
||||
|
} |
||||
|
vitals.cls = clsValue; |
||||
|
checkComplete(); |
||||
|
}).observe({ entryTypes: ['layout-shift'] }); |
||||
|
|
||||
|
// Fallback timeout
|
||||
|
setTimeout(() => resolve(vitals), 3000); |
||||
|
}); |
||||
|
}); |
||||
|
} |
||||
|
|
||||
|
async measureUserAction(actionName: string, actionFn: () => Promise<void>) { |
||||
|
const startTime = performance.now(); |
||||
|
|
||||
|
// Ensure performance script is available
|
||||
|
await this.ensurePerformanceScript(); |
||||
|
|
||||
|
// Mark start in browser
|
||||
|
await this.page.evaluate((name: string) => { |
||||
|
(window as any).markStart(name); |
||||
|
}, actionName); |
||||
|
|
||||
|
// Execute the action
|
||||
|
await actionFn(); |
||||
|
|
||||
|
// Mark end and collect metrics
|
||||
|
const browserDuration = await this.page.evaluate((name: string) => { |
||||
|
return (window as any).markEnd(name); |
||||
|
}, actionName); |
||||
|
|
||||
|
const totalDuration = performance.now() - startTime; |
||||
|
|
||||
|
if (!this.metrics.userActions) this.metrics.userActions = []; |
||||
|
this.metrics.userActions.push({ |
||||
|
action: actionName, |
||||
|
browserDuration: browserDuration, |
||||
|
totalDuration: totalDuration, |
||||
|
timestamp: new Date().toISOString() |
||||
|
}); |
||||
|
|
||||
|
return { browserDuration, totalDuration }; |
||||
|
} |
||||
|
|
||||
|
async getDetailedMetrics() { |
||||
|
if (this.cdpSession) { |
||||
|
const cdpMetrics = await this.cdpSession.send('Performance.getMetrics'); |
||||
|
this.metrics.cdpMetrics = cdpMetrics.metrics; |
||||
|
} |
||||
|
return this.metrics; |
||||
|
} |
||||
|
|
||||
|
generateReport() { |
||||
|
const report = { |
||||
|
testSummary: { |
||||
|
totalNavigations: this.navigationMetrics.length, |
||||
|
totalUserActions: this.metrics.userActions?.length || 0, |
||||
|
totalNetworkRequests: this.metrics.networkRequests?.length || 0 |
||||
|
}, |
||||
|
navigationMetrics: this.navigationMetrics, |
||||
|
userActionMetrics: this.metrics.userActions || [], |
||||
|
networkSummary: this.metrics.networkRequests ? { |
||||
|
totalRequests: this.metrics.networkRequests.length, |
||||
|
averageResponseTime: 0, // timing not available in Playwright
|
||||
|
errorCount: this.metrics.networkRequests.filter((req: any) => req.status >= 400).length |
||||
|
} : null |
||||
|
}; |
||||
|
|
||||
|
return report; |
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Convenience function to create and initialize a performance collector
|
||||
|
export async function createPerformanceCollector(page: Page): Promise<PerformanceCollector> { |
||||
|
const collector = new PerformanceCollector(page); |
||||
|
await collector.initialize(); |
||||
|
return collector; |
||||
|
} |
||||
|
|
||||
|
// Helper function to attach performance data to test reports
|
||||
|
export async function attachPerformanceData( |
||||
|
testInfo: TestInfo, |
||||
|
collector: PerformanceCollector, |
||||
|
additionalData?: Record<string, any> |
||||
|
) { |
||||
|
// Collect Web Vitals
|
||||
|
const webVitals = await collector.collectWebVitals() as any; |
||||
|
|
||||
|
// Attach Web Vitals to test report
|
||||
|
await testInfo.attach('web-vitals', { |
||||
|
contentType: 'application/json', |
||||
|
body: JSON.stringify(webVitals, null, 2) |
||||
|
}); |
||||
|
|
||||
|
// Generate final performance report
|
||||
|
const performanceReport = collector.generateReport(); |
||||
|
|
||||
|
// Attach performance report to test report
|
||||
|
await testInfo.attach('performance-report', { |
||||
|
contentType: 'application/json', |
||||
|
body: JSON.stringify(performanceReport, null, 2) |
||||
|
}); |
||||
|
|
||||
|
// Attach summary metrics to test report
|
||||
|
const avgNavigationTime = collector.navigationMetrics.reduce((sum, nav) => |
||||
|
sum + nav.metrics.loadComplete, 0) / collector.navigationMetrics.length; |
||||
|
|
||||
|
const summary = { |
||||
|
averageNavigationTime: avgNavigationTime.toFixed(2), |
||||
|
totalTestDuration: collector.metrics.userActions?.reduce((sum: number, action: any) => sum + action.totalDuration, 0).toFixed(2), |
||||
|
slowestAction: collector.metrics.userActions?.reduce((slowest: any, action: any) => |
||||
|
action.totalDuration > (slowest?.totalDuration || 0) ? action : slowest, null)?.action || 'N/A', |
||||
|
networkRequests: performanceReport.testSummary.totalNetworkRequests, |
||||
|
...additionalData |
||||
|
}; |
||||
|
|
||||
|
await testInfo.attach('performance-summary', { |
||||
|
contentType: 'application/json', |
||||
|
body: JSON.stringify(summary, null, 2) |
||||
|
}); |
||||
|
|
||||
|
return { webVitals, performanceReport, summary }; |
||||
|
} |
||||
|
|
||||
|
// Helper function to run performance assertions
|
||||
|
export function assertPerformanceMetrics( |
||||
|
webVitals: any, |
||||
|
initialMetrics: any, |
||||
|
avgNavigationTime: number |
||||
|
) { |
||||
|
// Performance assertions (adjust thresholds as needed)
|
||||
|
expect(avgNavigationTime).toBeLessThan(5000); // Average navigation under 5s
|
||||
|
expect(initialMetrics.loadComplete).toBeLessThan(8000); // Initial load under 8s
|
||||
|
|
||||
|
if (webVitals.lcp) { |
||||
|
expect(webVitals.lcp).toBeLessThan(2500); // LCP under 2.5s (good threshold)
|
||||
|
} |
||||
|
|
||||
|
if (webVitals.fid !== undefined) { |
||||
|
expect(webVitals.fid).toBeLessThan(100); // FID under 100ms (good threshold)
|
||||
|
} |
||||
|
|
||||
|
if (webVitals.cls !== undefined) { |
||||
|
expect(webVitals.cls).toBeLessThan(0.1); // CLS under 0.1 (good threshold)
|
||||
|
} |
||||
|
} |
||||
|
|
||||
|
// Simple performance wrapper for quick tests
|
||||
|
export async function withPerformanceTracking<T>( |
||||
|
page: Page, |
||||
|
testInfo: TestInfo, |
||||
|
testName: string, |
||||
|
testFn: (collector: PerformanceCollector) => Promise<T> |
||||
|
): Promise<T> { |
||||
|
const collector = await createPerformanceCollector(page); |
||||
|
|
||||
|
const result = await testFn(collector); |
||||
|
|
||||
|
await attachPerformanceData(testInfo, collector, { testName }); |
||||
|
|
||||
|
return result; |
||||
|
} |
Loading…
Reference in new issue