feat: add operator console and wire test scripts with event emission

- Add TestEventsPlugin for receiving ADB broadcast intents
- Create operator console UI (console/index.html, console.css, console.js)
- Add test plan structure (plan.json) with phases, tests, and steps
- Wire all test scripts (phase1, phase2, phase3) with step context and events
- Add event emission helpers to alarm-test-lib.sh (step_start, step_pass, etc.)
- Update test-phase1.sh with comprehensive prerequisite verification
- Register TestEventsPlugin in capacitor.plugins.json
- Add console documentation (CONSOLE-USAGE.md, CONSOLE-REMAINING-WORK.md)
- Add test implementation alignment tracking (TEST-IMPLEMENTATION-ALIGNMENT.md)

This enables real-time test progress tracking via structured events
from shell scripts to the operator console UI.
This commit is contained in:
Matthew Raymer
2025-12-29 09:37:12 +00:00
parent b53042d679
commit f6df9e13fb
13 changed files with 3216 additions and 7 deletions

View File

@@ -57,6 +57,138 @@ NC='\033[0m' # No Color
# These are the primary functions that all scripts should use.
# Deprecated functions (print_*, wait_for_*) are kept for backward compatibility.
# ========================================
# Event Emission (Phase B - Live Console Updates)
# ========================================
# Initialize run ID for event emission
: "${DNP_RUN_ID:=$(date '+%Y%m%d_%H%M%S' 2>/dev/null || echo 'unknown')}"
# Emit test event to console via ADB broadcast
# Emit test event to console via ADB broadcast.
# Usage:   emit_event TYPE LEVEL [PHASE] [TEST] [STEP] [MESSAGE] [EXTRA_JSON]
# Example: emit_event "step_start" "INFO" "phase1" "phase1_test0" "p1_t0_s1" "Starting step"
# Globals: DNP_UI_EVENTS (gate), DNP_RUN_ID (run correlation id)
# Returns: 0 always (best-effort; a no-op unless DNP_UI_EVENTS=1)
emit_event() {
  local event_type="$1"
  local level="${2:-INFO}"
  local phase_id="${3:-}"
  local test_id="${4:-}"
  local step_id="${5:-}"
  local message="${6:-}"
  local extra_json="${7:-}"

  # Only emit if UI events enabled
  if [ "${DNP_UI_EVENTS:-0}" != "1" ]; then
    return 0
  fi

  # Build JSON payload with Python. Values are passed via the environment
  # rather than interpolated into the Python source: interpolation broke on
  # quotes/backslashes in messages and was a code-injection vector.
  local payload
  payload=$(
    EV_RUN="${DNP_RUN_ID:-unknown}" EV_TYPE="$event_type" EV_LEVEL="$level" \
    EV_PHASE="$phase_id" EV_TEST="$test_id" EV_STEP="$step_id" \
    EV_MSG="$message" EV_EXTRA="$extra_json" \
    python3 -c '
import json, os
from datetime import datetime
env = os.environ.get
event = {
    "version": "testevent.v1",
    "ts": datetime.now().isoformat(),
    "runId": env("EV_RUN", ""),
    "type": env("EV_TYPE", ""),
    "level": env("EV_LEVEL", ""),
    "phaseId": env("EV_PHASE", ""),
    "testId": env("EV_TEST", ""),
    "stepId": env("EV_STEP", ""),
    "message": env("EV_MSG", ""),
}
extra = env("EV_EXTRA", "")
if extra:
    try:
        event.update(json.loads(extra))
    except ValueError:
        pass  # malformed extra JSON is ignored, base event still emitted
print(json.dumps(event))
' 2>/dev/null
  )

  if [ -z "$payload" ]; then
    # Fallback: simple JSON without Python. NOTE(review): message/ids are not
    # JSON-escaped here; fields containing double quotes will still produce
    # malformed JSON on hosts without python3.
    payload="{\"version\":\"testevent.v1\",\"ts\":\"$(date -Iseconds 2>/dev/null || date '+%Y-%m-%dT%H:%M:%S')\",\"runId\":\"${DNP_RUN_ID:-unknown}\",\"type\":\"${event_type}\",\"level\":\"${level}\",\"phaseId\":\"${phase_id}\",\"testId\":\"${test_id}\",\"stepId\":\"${step_id}\",\"message\":\"${message}\"}"
  fi

  # Send via ADB broadcast
  adb_broadcast_event "$payload"
}
# Send event via ADB broadcast
# Send an already-built JSON event payload to the device via ADB broadcast.
# Usage:   adb_broadcast_event PAYLOAD_JSON
# Globals: ADB_BIN (adb executable)
# Returns: 0 always (delivery is best-effort)
adb_broadcast_event() {
  local payload="$1"
  local action="com.timesafari.dailynotification.TEST_EVENT"

  # `adb shell` re-parses the command line through the device-side shell, so
  # the payload must be single-quoted *on the device* or its JSON double
  # quotes get stripped. Escape embedded single quotes as '\'' and wrap the
  # whole value in literal single quotes. printf (not echo) so payloads
  # starting with '-' or containing backslashes pass through untouched.
  local escaped_payload
  escaped_payload=$(printf '%s' "$payload" | sed "s/'/'\\\\''/g")

  # Send broadcast (errors ignored: console may not be installed/running)
  $ADB_BIN shell am broadcast \
    -a "$action" \
    --es payload "'${escaped_payload}'" \
    >/dev/null 2>&1 || true
}
# Set test context (phase/test/step) for event emission
# Record the current phase/test/step so the event helpers (step_start etc.)
# can tag emitted events without repeating the ids at every call site.
# Usage: set_test_context PHASE_ID TEST_ID STEP_ID
set_test_context() {
  export DNP_PHASE="${1:-}" DNP_TEST="${2:-}" DNP_STEP="${3:-}"
}
# Emit step start event
# Emit a step_start event for the given (or current) step.
# Usage: step_start STEP_ID [MESSAGE]
# No-op unless DNP_UI_EVENTS=1. ${DNP_STEP:-} keeps the default safe under
# 'set -u' when no test context has been set.
step_start() {
  local step_id="${1:-${DNP_STEP:-}}"
  local message="${2:-Starting step}"
  if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
    emit_event "step_start" "INFO" "${DNP_PHASE:-}" "${DNP_TEST:-}" "$step_id" "$message"
  fi
}
# Emit step pass event
# Emit a step_pass event for the given (or current) step.
# Usage: step_pass STEP_ID [MESSAGE]
# No-op unless DNP_UI_EVENTS=1. ${DNP_STEP:-} keeps the default safe under
# 'set -u' when no test context has been set.
step_pass() {
  local step_id="${1:-${DNP_STEP:-}}"
  local message="${2:-Step completed}"
  if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
    emit_event "step_pass" "INFO" "${DNP_PHASE:-}" "${DNP_TEST:-}" "$step_id" "$message"
  fi
}
# Emit step warn event
# Emit a step_warn event (level WARN) for the given (or current) step.
# Usage: step_warn STEP_ID [MESSAGE]
# No-op unless DNP_UI_EVENTS=1. ${DNP_STEP:-} keeps the default safe under
# 'set -u' when no test context has been set.
step_warn() {
  local step_id="${1:-${DNP_STEP:-}}"
  local message="${2:-Step warning}"
  if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
    emit_event "step_warn" "WARN" "${DNP_PHASE:-}" "${DNP_TEST:-}" "$step_id" "$message"
  fi
}
# Emit step fail event
# Emit a step_fail event (level ERROR) for the given (or current) step.
# Usage: step_fail STEP_ID [MESSAGE]
# No-op unless DNP_UI_EVENTS=1. ${DNP_STEP:-} keeps the default safe under
# 'set -u' when no test context has been set.
step_fail() {
  local step_id="${1:-${DNP_STEP:-}}"
  local message="${2:-Step failed}"
  if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
    emit_event "step_fail" "ERROR" "${DNP_PHASE:-}" "${DNP_TEST:-}" "$step_id" "$message"
  fi
}
section() {
echo
echo "========================================"
@@ -97,6 +229,12 @@ ui_prompt() {
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo -e "$1"
echo
# Emit operator_required event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
emit_event "operator_required" "WAIT" "${DNP_PHASE:-}" "${DNP_TEST:-}" "${DNP_STEP:-}" "$1"
fi
read -rp "Press Enter after completing the action above..."
echo
}
@@ -740,6 +878,13 @@ capture_alarms() {
info "Capturing alarms: $label$file"
if $ADB_BIN shell dumpsys alarm > "$file" 2>/dev/null; then
ok "Alarms captured: $file"
# Emit artifact event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
local artifact_json="{\"kind\":\"alarms\",\"name\":\"${label}_alarms\",\"path\":\"${file}\"}"
emit_event "artifact" "EVID" "${DNP_PHASE:-}" "${DNP_TEST:-}" "${DNP_STEP:-}" "Alarms captured: $label" "$artifact_json"
fi
return 0
else
warn "Failed to capture alarms: $label"
@@ -771,6 +916,13 @@ capture_logcat() {
if [ -n "$pattern" ]; then
if $ADB_BIN logcat -d -t "$lines" | grep -E "$pattern" > "$file" 2>/dev/null; then
ok "Logcat captured (filtered): $file"
# Emit artifact event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
local artifact_json="{\"kind\":\"logs\",\"name\":\"${label}_logcat\",\"path\":\"${file}\"}"
emit_event "artifact" "EVID" "${DNP_PHASE:-}" "${DNP_TEST:-}" "${DNP_STEP:-}" "Logcat captured: $label" "$artifact_json"
fi
return 0
else
# Even if grep finds nothing, create empty file to indicate attempt
@@ -781,6 +933,13 @@ capture_logcat() {
else
if $ADB_BIN logcat -d -t "$lines" > "$file" 2>/dev/null; then
ok "Logcat captured: $file"
# Emit artifact event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
local artifact_json="{\"kind\":\"logs\",\"name\":\"${label}_logcat\",\"path\":\"${file}\"}"
emit_event "artifact" "EVID" "${DNP_PHASE:-}" "${DNP_TEST:-}" "${DNP_STEP:-}" "Logcat captured: $label" "$artifact_json"
fi
return 0
else
warn "Failed to capture logcat: $label"
@@ -815,6 +974,13 @@ capture_screenshot() {
if "$ADB_BIN" exec-out screencap -p > "$file" 2>/dev/null; then
if [ -s "$file" ]; then
ok "Screenshot captured: $file"
# Emit artifact event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
local artifact_json="{\"kind\":\"screen\",\"name\":\"${label}_screenshot\",\"path\":\"${file}\"}"
emit_event "artifact" "EVID" "${DNP_PHASE:-}" "${DNP_TEST:-}" "${DNP_STEP:-}" "Screenshot captured: $label" "$artifact_json"
fi
return 0
else
warn "Screenshot file is empty: $file"
@@ -881,6 +1047,11 @@ verdict_pass() {
echo "Next: Continue to next test"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo
# Emit test_verdict event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
emit_event "test_verdict" "INFO" "${DNP_PHASE:-}" "$test_id" "" "Test passed: $message"
fi
}
verdict_warn() {
@@ -904,6 +1075,11 @@ verdict_warn() {
echo "Next: Review evidence and continue"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo
# Emit test_verdict event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
emit_event "test_verdict" "WARN" "${DNP_PHASE:-}" "$test_id" "" "Test warning: $message"
fi
}
verdict_fail() {
@@ -929,6 +1105,11 @@ verdict_fail() {
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo
# Emit test_verdict event if UI events enabled
if [ "${DNP_UI_EVENTS:-0}" = "1" ]; then
emit_event "test_verdict" "ERROR" "${DNP_PHASE:-}" "$test_id" "" "Test failed: $message"
fi
# If release gating is enabled, exit with failure
if [ "${RELEASE_GATE_PHASE3:-0}" = "1" ]; then
error "Release gating enabled: exiting due to test failure"

View File

@@ -2,5 +2,9 @@
{
"name": "DailyNotification",
"classpath": "com.timesafari.dailynotification.DailyNotificationPlugin"
},
{
"name": "TestEvents",
"classpath": "com.timesafari.dailynotification.TestEventsPlugin"
}
]

View File

@@ -0,0 +1,573 @@
/* Console CSS - Textual-inspired design */
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', sans-serif;
font-size: 14px;
line-height: 1.5;
color: #e0e0e0;
background: #1e1e1e;
overflow: hidden;
height: 100vh;
}
.console-container {
display: flex;
flex-direction: column;
height: 100vh;
overflow: hidden;
}
/* Header */
.console-header {
background: #252526;
border-bottom: 1px solid #3e3e42;
padding: 12px 16px;
flex-shrink: 0;
}
.header-row {
display: flex;
gap: 16px;
align-items: center;
flex-wrap: wrap;
}
.header-row:first-child {
margin-bottom: 8px;
}
.console-header h1 {
font-size: 18px;
font-weight: 600;
color: #ffffff;
}
.console-header span {
font-size: 12px;
color: #cccccc;
}
.console-header #device-id,
.console-header #run-id,
.console-header #mode,
.console-header #strictness {
color: #4ec9b0;
font-weight: 500;
}
/* Main Content */
.console-main {
display: flex;
flex: 1;
overflow: hidden;
min-height: 0;
}
/* Sidebar */
.console-sidebar {
width: 280px;
background: #252526;
border-right: 1px solid #3e3e42;
overflow-y: auto;
flex-shrink: 0;
padding: 16px;
}
.sidebar-section h2 {
font-size: 14px;
font-weight: 600;
color: #cccccc;
margin-bottom: 12px;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.phase-item {
margin-bottom: 8px;
}
.phase-header {
display: flex;
align-items: center;
gap: 8px;
padding: 8px;
cursor: pointer;
border-radius: 4px;
user-select: none;
}
.phase-header:hover {
background: #2a2d2e;
}
.phase-status {
font-size: 16px;
width: 20px;
text-align: center;
}
.phase-title {
flex: 1;
font-size: 13px;
color: #cccccc;
}
.phase-progress {
font-size: 11px;
color: #858585;
}
.test-item {
margin-left: 28px;
margin-top: 4px;
padding: 6px 8px;
cursor: pointer;
border-radius: 4px;
display: flex;
align-items: center;
gap: 8px;
}
.test-item:hover {
background: #2a2d2e;
}
.test-item.active {
background: #094771;
}
.test-status {
font-size: 14px;
width: 18px;
text-align: center;
}
.test-title {
flex: 1;
font-size: 12px;
color: #cccccc;
}
/* Content Area */
.console-content {
flex: 1;
overflow-y: auto;
padding: 20px;
background: #1e1e1e;
}
.test-header {
margin-bottom: 20px;
padding-bottom: 16px;
border-bottom: 1px solid #3e3e42;
}
.test-header h2 {
font-size: 18px;
font-weight: 600;
color: #ffffff;
margin-bottom: 8px;
}
.test-purpose {
font-size: 13px;
color: #858585;
line-height: 1.6;
}
.step-checklist-section {
margin-bottom: 24px;
}
.step-checklist-section h3 {
font-size: 14px;
font-weight: 600;
color: #cccccc;
margin-bottom: 12px;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.step-checklist {
background: #252526;
border: 1px solid #3e3e42;
border-radius: 4px;
padding: 12px;
}
.step-item {
display: flex;
align-items: center;
gap: 12px;
padding: 8px;
border-radius: 4px;
cursor: pointer;
margin-bottom: 4px;
}
.step-item:hover {
background: #2a2d2e;
}
.step-item.active {
background: #094771;
}
.step-checkbox {
font-size: 16px;
width: 24px;
text-align: center;
}
.step-number {
font-size: 12px;
color: #858585;
min-width: 60px;
}
.step-name {
flex: 1;
font-size: 13px;
color: #cccccc;
}
.step-type-badge {
font-size: 10px;
padding: 2px 6px;
border-radius: 3px;
background: #3e3e42;
color: #cccccc;
text-transform: uppercase;
}
/* Current Step Frame */
.current-step-section {
margin-bottom: 24px;
}
.current-step-section h3 {
font-size: 14px;
font-weight: 600;
color: #cccccc;
margin-bottom: 12px;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.current-step-frame {
background: #252526;
border: 1px solid #3e3e42;
border-radius: 4px;
padding: 20px;
}
.step-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 20px;
padding-bottom: 12px;
border-bottom: 1px solid #3e3e42;
}
.step-title {
font-size: 16px;
font-weight: 600;
color: #ffffff;
}
.step-type {
font-size: 11px;
padding: 4px 8px;
border-radius: 3px;
background: #094771;
color: #4ec9b0;
text-transform: uppercase;
}
.step-content {
margin-bottom: 20px;
}
.step-section {
margin-bottom: 16px;
}
.step-section h4 {
font-size: 13px;
font-weight: 600;
color: #4ec9b0;
margin-bottom: 8px;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.step-text {
font-size: 13px;
color: #cccccc;
line-height: 1.6;
}
.step-list {
font-size: 13px;
color: #cccccc;
line-height: 1.8;
}
.step-list ul {
list-style: none;
padding-left: 0;
}
.step-list li {
padding-left: 20px;
position: relative;
}
.step-list li:before {
content: "•";
position: absolute;
left: 8px;
color: #4ec9b0;
}
.step-actions {
display: flex;
gap: 12px;
padding-top: 16px;
border-top: 1px solid #3e3e42;
}
/* Buttons */
.btn {
padding: 8px 16px;
border: none;
border-radius: 4px;
font-size: 13px;
font-weight: 500;
cursor: pointer;
transition: all 0.2s;
}
.btn-success {
background: #0e639c;
color: #ffffff;
}
.btn-success:hover {
background: #1177bb;
}
.btn-danger {
background: #a1260d;
color: #ffffff;
}
.btn-danger:hover {
background: #c42e11;
}
.btn-secondary {
background: #3e3e42;
color: #cccccc;
}
.btn-secondary:hover {
background: #4a4a4a;
}
.btn-small {
padding: 4px 8px;
font-size: 11px;
background: #3e3e42;
color: #cccccc;
border: none;
border-radius: 3px;
cursor: pointer;
}
.btn-close {
background: none;
border: none;
color: #cccccc;
font-size: 20px;
cursor: pointer;
padding: 0;
width: 24px;
height: 24px;
display: flex;
align-items: center;
justify-content: center;
}
/* Empty State */
.empty-state {
text-align: center;
padding: 60px 20px;
color: #858585;
}
/* Evidence Panel */
.console-evidence {
width: 300px;
background: #252526;
border-left: 1px solid #3e3e42;
overflow-y: auto;
flex-shrink: 0;
}
.evidence-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 16px;
border-bottom: 1px solid #3e3e42;
}
.evidence-header h3 {
font-size: 14px;
font-weight: 600;
color: #cccccc;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.evidence-list {
padding: 16px;
}
.evidence-item {
display: flex;
align-items: center;
gap: 12px;
padding: 8px;
border-radius: 4px;
margin-bottom: 8px;
cursor: pointer;
}
.evidence-item:hover {
background: #2a2d2e;
}
.evidence-icon {
font-size: 14px;
width: 20px;
text-align: center;
}
.evidence-name {
flex: 1;
font-size: 12px;
color: #cccccc;
}
.evidence-action {
font-size: 11px;
color: #4ec9b0;
text-decoration: none;
}
.evidence-action:hover {
text-decoration: underline;
}
/* Footer - Live Feed */
.console-footer {
height: 200px;
background: #252526;
border-top: 1px solid #3e3e42;
display: flex;
flex-direction: column;
flex-shrink: 0;
}
.footer-header {
display: flex;
justify-content: space-between;
align-items: center;
padding: 8px 16px;
border-bottom: 1px solid #3e3e42;
}
.footer-header h3 {
font-size: 12px;
font-weight: 600;
color: #cccccc;
text-transform: uppercase;
letter-spacing: 0.5px;
}
.live-feed {
flex: 1;
overflow-y: auto;
padding: 8px 16px;
font-family: 'Courier New', monospace;
font-size: 11px;
line-height: 1.6;
}
.feed-line {
margin-bottom: 4px;
white-space: pre-wrap;
word-break: break-all;
}
.feed-line.INFO {
color: #cccccc;
}
.feed-line.WAIT {
color: #ffa500;
}
.feed-line.WARN {
color: #ffaa00;
}
.feed-line.ERROR {
color: #f48771;
}
.feed-line.EVID {
color: #4ec9b0;
}
.feed-timestamp {
color: #858585;
margin-right: 8px;
}
/* Status Icons */
.status-pending { color: #858585; }
.status-running { color: #ffa500; }
.status-pass { color: #4ec9b0; }
.status-warn { color: #ffaa00; }
.status-fail { color: #f48771; }
.status-skip { color: #858585; }
/* Scrollbar */
::-webkit-scrollbar {
width: 8px;
height: 8px;
}
::-webkit-scrollbar-track {
background: #1e1e1e;
}
::-webkit-scrollbar-thumb {
background: #3e3e42;
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: #4a4a4a;
}

View File

@@ -0,0 +1,685 @@
/**
* Daily Notification Plugin Test Console
* Textual-inspired operator console for test execution
*/
// State management
// Central mutable state for the operator console UI.
const ConsoleState = {
  plan: null,         // parsed plan.json (phases -> tests -> steps)
  runId: null,        // identifier for this console session (see generateRunId)
  runState: {},       // per-phase/test/step statuses, persisted to localStorage
  currentPhase: null, // id of the currently selected phase
  currentTest: null,  // id of the currently selected test
  currentStep: null,  // id of the currently selected step
  evidence: {},       // captured artifacts keyed by phaseId -> testId -> array
  feed: []            // live feed lines shown in the footer
};
// Initialize console
// Initialize the console: generate a run id, load plan.json, restore saved
// state, render the UI, and attach event listeners. Errors are reported to
// both the browser console and the live feed.
async function initConsole() {
  try {
    // Generate run ID
    ConsoleState.runId = generateRunId();
    updateHeader();
    // Load test plan. Check the HTTP status explicitly: without this, a 404
    // surfaces later as a confusing JSON parse error.
    const planResponse = await fetch('plan.json');
    if (!planResponse.ok) {
      throw new Error(`Failed to load plan.json: HTTP ${planResponse.status}`);
    }
    ConsoleState.plan = await planResponse.json();
    // Load saved run state
    loadRunState();
    // Render UI
    renderPhaseTree();
    renderEmptyState();
    // Setup event listeners
    setupEventListeners();
    // Setup Capacitor plugin listener (if available)
    setupCapacitorListener();
    // Add initial feed message
    addFeedLine('INFO', 'console', 'Console initialized', 'Console ready');
  } catch (error) {
    console.error('Failed to initialize console:', error);
    addFeedLine('ERROR', 'console', 'Initialization failed', error.message);
  }
}
// Generate run ID
// Build a sortable run identifier: YYYYMMDD_HHMMSS_<random base36 suffix>.
function generateRunId() {
  const now = new Date();
  const datePart = now.toISOString().slice(0, 10).replace(/-/g, '');
  const timePart = now.toTimeString().slice(0, 8).replace(/:/g, '');
  const suffix = Math.random().toString(36).substring(2, 6);
  return [datePart, timePart, suffix].join('_');
}
// Update header
// Fill in the header fields: run id, and (when running inside Capacitor)
// the device model reported by the Device plugin.
function updateHeader() {
  const runIdEl = document.getElementById('run-id');
  if (runIdEl) {
    runIdEl.textContent = ConsoleState.runId;
  }
  // Guard every level: Capacitor may be present without Plugins, or without
  // the Device plugin — the original chain threw a TypeError in that case.
  const device = window.Capacitor
    && window.Capacitor.Plugins
    && window.Capacitor.Plugins.Device;
  if (device) {
    device.getInfo().then(info => {
      const deviceIdEl = document.getElementById('device-id');
      if (deviceIdEl) {
        deviceIdEl.textContent = info.model || 'unknown';
      }
    }).catch(() => {
      // Device info is optional; leave the placeholder text in place.
    });
  }
}
// Load run state from localStorage
// Restore any persisted run state for the current run id from localStorage.
// Corrupt saved data is logged and ignored, leaving the default empty state.
function loadRunState() {
  const key = `runState_${ConsoleState.runId}`;
  const saved = localStorage.getItem(key);
  if (!saved) return;
  try {
    ConsoleState.runState = JSON.parse(saved);
  } catch (e) {
    console.error('Failed to parse saved run state:', e);
  }
}
// Persist the current run state to localStorage (best-effort: quota or
// serialization failures are logged, not thrown).
function saveRunState() {
  const key = `runState_${ConsoleState.runId}`;
  try {
    localStorage.setItem(key, JSON.stringify(ConsoleState.runState));
  } catch (e) {
    console.error('Failed to save run state:', e);
  }
}
// Render phase tree
// Rebuild the sidebar phase tree from the loaded test plan.
function renderPhaseTree() {
  const treeEl = document.getElementById('phase-tree');
  if (!treeEl || !ConsoleState.plan) return;
  treeEl.innerHTML = '';
  ConsoleState.plan.phases.forEach(phase => {
    const phaseEl = createPhaseElement(phase);
    treeEl.appendChild(phaseEl);
  });
}
// Build a sidebar entry for one phase: a clickable header row plus an
// initially collapsed list of its tests.
function createPhaseElement(phase) {
  const phaseDiv = document.createElement('div');
  phaseDiv.className = 'phase-item';
  const phaseState = getPhaseState(phase.id);
  const statusIcon = getStatusIcon(phaseState.status);
  const progress = getPhaseProgress(phase.id);
  const header = document.createElement('div');
  header.className = 'phase-header';
  // NOTE(review): phase.title is inserted as raw HTML — assumed to come from
  // the locally authored plan.json; confirm before loading external plans.
  header.innerHTML = `
<span class="phase-status">${statusIcon}</span>
<span class="phase-title">${phase.title}</span>
<span class="phase-progress">${progress}</span>
`;
  header.onclick = () => togglePhase(phase.id);
  phaseDiv.appendChild(header);
  // Test list (initially hidden, shown when phase expanded)
  const testList = document.createElement('div');
  testList.className = 'test-list';
  testList.style.display = 'none';
  testList.id = `test-list-${phase.id}`;
  phase.tests.forEach(test => {
    const testEl = createTestElement(phase.id, test);
    testList.appendChild(testEl);
  });
  phaseDiv.appendChild(testList);
  return phaseDiv;
}
// Build a sidebar row for one test; clicking it selects the test.
function createTestElement(phaseId, test) {
  const testDiv = document.createElement('div');
  testDiv.className = 'test-item';
  testDiv.id = `test-${phaseId}-${test.id}`;
  const testState = getTestState(phaseId, test.id);
  const statusIcon = getStatusIcon(testState.status);
  // NOTE(review): test.title inserted as raw HTML — same trust assumption
  // as phase titles above.
  testDiv.innerHTML = `
<span class="test-status">${statusIcon}</span>
<span class="test-title">${test.title}</span>
`;
  testDiv.onclick = () => selectTest(phaseId, test.id);
  return testDiv;
}
// Get phase state
// Lazily create and return the mutable state record for a phase.
function getPhaseState(phaseId) {
  const run = ConsoleState.runState;
  if (!run.phases) run.phases = {};
  if (!run.phases[phaseId]) run.phases[phaseId] = { status: 'pending' };
  return run.phases[phaseId];
}
// Lazily create and return the mutable state record for a test within a phase.
function getTestState(phaseId, testId) {
  const run = ConsoleState.runState;
  if (!run.phases) run.phases = {};
  if (!run.phases[phaseId]) run.phases[phaseId] = { tests: {} };
  const phase = run.phases[phaseId];
  if (!phase.tests) phase.tests = {};
  if (!phase.tests[testId]) phase.tests[testId] = { status: 'pending', steps: {} };
  return phase.tests[testId];
}
// Lazily create and return the mutable state record for a step within a test.
function getStepState(phaseId, testId, stepId) {
  const test = getTestState(phaseId, testId);
  if (!test.steps) test.steps = {};
  if (!test.steps[stepId]) test.steps[stepId] = { status: 'pending' };
  return test.steps[stepId];
}
// Get status icon
// Map a status string to its single-character display icon ('·' by default).
function getStatusIcon(status) {
  switch (status) {
    case 'running': return '→';
    case 'pass': return '✓';
    case 'warn': return '⚠';
    case 'fail': return '✗';
    case 'skip': return '⊘';
    case 'pending':
    default:
      return '·';
  }
}
// Get phase progress
// Return "(completed/total)" for a phase; tests with status pass or warn
// count as completed.
function getPhaseProgress(phaseId) {
  const phase = ConsoleState.plan.phases.find(p => p.id === phaseId);
  if (!phase) return '(0/0)';
  const done = phase.tests.filter(test => {
    const status = getTestState(phaseId, test.id).status;
    return status === 'pass' || status === 'warn';
  }).length;
  return `(${done}/${phase.tests.length})`;
}
// Toggle phase expansion
// Show/hide the test list under a phase header.
function togglePhase(phaseId) {
  const testList = document.getElementById(`test-list-${phaseId}`);
  if (!testList) return;
  const isHidden = testList.style.display === 'none';
  testList.style.display = isHidden ? 'block' : 'none';
}
// Make a test the current selection and render its detail view.
function selectTest(phaseId, testId) {
  ConsoleState.currentPhase = phaseId;
  ConsoleState.currentTest = testId;
  // Move the 'active' highlight to the clicked entry.
  document.querySelectorAll('.test-item').forEach(el => el.classList.remove('active'));
  const selected = document.getElementById(`test-${phaseId}-${testId}`);
  if (selected) {
    selected.classList.add('active');
  }
  // Render test view
  renderTestView(phaseId, testId);
}
// Render test view
// Render the main content area for the selected test: header, step
// checklist, and the currently active step frame.
function renderTestView(phaseId, testId) {
  const phase = ConsoleState.plan.phases.find(p => p.id === phaseId);
  if (!phase) return;
  const test = phase.tests.find(t => t.id === testId);
  if (!test) return;
  // Hide empty state
  document.getElementById('empty-state').style.display = 'none';
  // Show test header
  const testHeader = document.getElementById('test-header');
  testHeader.style.display = 'block';
  document.getElementById('test-title').textContent = `${phase.title} / ${test.title}`;
  document.getElementById('test-purpose').textContent = test.purpose;
  // Render step checklist
  renderStepChecklist(phaseId, testId, test.steps);
  // Render current step (first step or active step)
  const activeStepId = getActiveStepId(phaseId, testId, test.steps);
  if (activeStepId) {
    renderCurrentStep(phaseId, testId, activeStepId, test.steps);
  }
  // Show sections
  document.getElementById('step-checklist-section').style.display = 'block';
  document.getElementById('current-step-section').style.display = 'block';
}
// Render the clickable step checklist for a test; clicking a step makes it
// the current step and re-renders both the detail frame and the list.
function renderStepChecklist(phaseId, testId, steps) {
  const checklistEl = document.getElementById('step-checklist');
  if (!checklistEl) return;
  checklistEl.innerHTML = '';
  steps.forEach((step, index) => {
    const stepState = getStepState(phaseId, testId, step.id);
    const statusIcon = getStatusIcon(stepState.status);
    const stepEl = document.createElement('div');
    stepEl.className = 'step-item';
    if (step.id === ConsoleState.currentStep) {
      stepEl.classList.add('active');
    }
    // NOTE(review): step.title/step.type are inserted as raw HTML — assumed
    // to come from the locally authored plan.json; confirm before loading
    // plans from untrusted sources.
    stepEl.innerHTML = `
<span class="step-checkbox">${statusIcon}</span>
<span class="step-number">${index + 1}/${steps.length}</span>
<span class="step-name">${step.title}</span>
<span class="step-type-badge">${step.type}</span>
`;
    stepEl.onclick = () => {
      ConsoleState.currentStep = step.id;
      renderCurrentStep(phaseId, testId, step.id, steps);
      renderStepChecklist(phaseId, testId, steps); // Re-render to update active
    };
    checklistEl.appendChild(stepEl);
  });
}
// Return the id of the first step that has not passed; when every step has
// passed, return the last step's id (null for an empty step list).
function getActiveStepId(phaseId, testId, steps) {
  // Find first step that's not pass
  for (const step of steps) {
    const stepState = getStepState(phaseId, testId, step.id);
    if (stepState.status !== 'pass') {
      return step.id;
    }
  }
  // If all passed, return last step
  return steps.length > 0 ? steps[steps.length - 1].id : null;
}
// Render the "current step" detail frame: title, type (with blocking flag),
// purpose, instructions, expected results, artifacts, and the action buttons.
function renderCurrentStep(phaseId, testId, stepId, steps) {
  const step = steps.find(s => s.id === stepId);
  if (!step) return;
  ConsoleState.currentStep = stepId;
  // Update step header
  document.getElementById('step-title').textContent = step.title;
  document.getElementById('step-type').textContent = step.type + (step.blocking ? ' (blocking)' : '');
  // Update step content
  document.getElementById('step-purpose').textContent = step.purpose || '—';
  document.getElementById('step-instructions').innerHTML = formatList(step.instructions || []);
  document.getElementById('step-expected').innerHTML = formatList(step.expected || []);
  document.getElementById('step-artifacts').innerHTML = formatList(step.artifacts || []);
  // Update step actions
  const stepState = getStepState(phaseId, testId, stepId);
  updateStepActions(phaseId, testId, stepId, stepState);
}
// Format list as HTML
// Render an array of strings as an HTML bullet list; em-dash when empty.
// NOTE(review): items are inserted as raw HTML — assumed to come from the
// trusted plan.json; confirm before feeding external data through here.
function formatList(items) {
  if (!items || items.length === 0) {
    return '—';
  }
  const entries = items.map(item => `<li>${item}</li>`);
  return `<ul>${entries.join('')}</ul>`;
}
// Enable/disable the Done/Fail buttons and set their labels to reflect the
// current step's status.
function updateStepActions(phaseId, testId, stepId, stepState) {
  const btnDone = document.getElementById('btn-step-done');
  const btnFail = document.getElementById('btn-step-fail');
  const isDone = stepState.status === 'pass';
  btnDone.disabled = isDone;
  btnDone.textContent = isDone ? 'Done ✓' : 'Mark Done ✓';
  const isFailed = stepState.status === 'fail';
  btnFail.disabled = isFailed;
  btnFail.textContent = isFailed ? 'Failed ✗' : 'Mark Fail ✗';
}
// Setup event listeners
// Wire up static UI controls: step action buttons, evidence panel close,
// and the live-feed clear button.
function setupEventListeners() {
  // Step action buttons — only act when a phase/test/step is selected.
  document.getElementById('btn-step-done').onclick = () => {
    if (ConsoleState.currentPhase && ConsoleState.currentTest && ConsoleState.currentStep) {
      markStepDone(ConsoleState.currentPhase, ConsoleState.currentTest, ConsoleState.currentStep);
    }
  };
  document.getElementById('btn-step-fail').onclick = () => {
    if (ConsoleState.currentPhase && ConsoleState.currentTest && ConsoleState.currentStep) {
      markStepFail(ConsoleState.currentPhase, ConsoleState.currentTest, ConsoleState.currentStep);
    }
  };
  document.getElementById('btn-step-notes').onclick = () => {
    if (ConsoleState.currentPhase && ConsoleState.currentTest && ConsoleState.currentStep) {
      showStepNotes();
    }
  };
  // Evidence panel close button
  document.getElementById('btn-evidence-close').onclick = () => {
    document.getElementById('evidence-panel').style.display = 'none';
  };
  // Feed clear — drops all buffered lines and re-renders the (empty) feed.
  document.getElementById('btn-feed-clear').onclick = () => {
    ConsoleState.feed = [];
    renderLiveFeed();
  };
}
// Mark step done
// Record a step as passed, persist, roll the status up to the test, and
// refresh the view and feed.
function markStepDone(phaseId, testId, stepId) {
  const state = getStepState(phaseId, testId, stepId);
  state.status = 'pass';
  state.completedAt = new Date().toISOString();
  saveRunState();
  updateTestStatus(phaseId, testId);
  renderTestView(phaseId, testId);
  addFeedLine('INFO', stepId, 'Step completed', 'Step marked as done');
}
// Record a step as failed, persist, roll the status up to the test, and
// refresh the view and feed.
function markStepFail(phaseId, testId, stepId) {
  const state = getStepState(phaseId, testId, stepId);
  state.status = 'fail';
  state.failedAt = new Date().toISOString();
  saveRunState();
  updateTestStatus(phaseId, testId);
  renderTestView(phaseId, testId);
  addFeedLine('ERROR', stepId, 'Step failed', 'Step marked as failed');
}
// Update test status based on steps
// Derive a test's aggregate status from its step statuses:
// any fail -> fail; else any warn -> warn; else all pass -> pass;
// otherwise -> running. Persists state and refreshes the phase tree.
function updateTestStatus(phaseId, testId) {
  const phase = ConsoleState.plan.phases.find(p => p.id === phaseId);
  if (!phase) return;
  const test = phase.tests.find(t => t.id === testId);
  if (!test) return;
  const testState = getTestState(phaseId, testId);
  const statuses = test.steps.map(step => getStepState(phaseId, testId, step.id).status);
  if (statuses.includes('fail')) {
    testState.status = 'fail';
  } else if (statuses.includes('warn')) {
    testState.status = 'warn';
  } else if (statuses.every(s => s === 'pass')) {
    testState.status = 'pass';
  } else {
    testState.status = 'running';
  }
  saveRunState();
  renderPhaseTree(); // Update phase tree to reflect new status
}
// Show step notes
// Prompt the operator for free-form notes on the current step and persist
// them; cancelling the prompt leaves existing notes untouched.
function showStepNotes() {
  const stepState = getStepState(ConsoleState.currentPhase, ConsoleState.currentTest, ConsoleState.currentStep);
  const entered = prompt('Add notes for this step:', stepState.notes || '');
  if (entered === null) return; // operator cancelled
  stepState.notes = entered;
  saveRunState();
}
// Show the "nothing selected" placeholder and hide all test detail sections.
function renderEmptyState() {
  const setDisplay = (id, value) => {
    document.getElementById(id).style.display = value;
  };
  setDisplay('empty-state', 'block');
  setDisplay('test-header', 'none');
  setDisplay('step-checklist-section', 'none');
  setDisplay('current-step-section', 'none');
}
// Add feed line
// Append a line to the live feed, bounded to the last 100 entries, and
// re-render.
function addFeedLine(level, source, category, message) {
  const timestamp = new Date().toLocaleTimeString();
  const line = {
    timestamp,
    level,
    source,
    category,
    message
  };
  ConsoleState.feed.push(line);
  // Keep feed to last 100 lines
  if (ConsoleState.feed.length > 100) {
    ConsoleState.feed.shift();
  }
  renderLiveFeed();
}
// Escape text before inserting it into feed innerHTML. Feed messages come
// from external shell scripts via broadcast events, so without escaping a
// message containing markup would be injected into the page (XSS / broken
// layout).
function escapeFeedHtml(value) {
  return String(value)
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;');
}
// Re-render the whole live feed and keep it scrolled to the newest line.
function renderLiveFeed() {
  const feedEl = document.getElementById('live-feed');
  if (!feedEl) return;
  feedEl.innerHTML = ConsoleState.feed.map(line => {
    return `<div class="feed-line ${escapeFeedHtml(line.level)}">
<span class="feed-timestamp">${escapeFeedHtml(line.timestamp)}</span>
<span>${escapeFeedHtml(line.level)}</span>
<span>${escapeFeedHtml(line.source)}</span>
<span>${escapeFeedHtml(line.message)}</span>
</div>`;
  }).join('');
  // Auto-scroll to bottom
  feedEl.scrollTop = feedEl.scrollHeight;
}
// Setup Capacitor listener for test events
// Subscribe to TestEvents plugin broadcasts when running inside Capacitor.
// Guards each level: Capacitor may exist without Plugins (or without the
// TestEvents plugin registered).
function setupCapacitorListener() {
  const plugins = window.Capacitor && window.Capacitor.Plugins;
  if (plugins && plugins.TestEvents) {
    plugins.TestEvents.addListener('testEvent', (event) => {
      handleTestEvent(event.payload);
    });
  } else {
    // Fallback: poll for events file (if scripts write to /sdcard/Download/dnp-events.jsonl)
    // This is a simple fallback - in production, use broadcast
    console.log('TestEvents plugin not available, using fallback polling');
  }
}
// Apply an incoming test event (JSON string or object) to console state:
// update step/test status, log to the feed, and record artifacts.
function handleTestEvent(eventData) {
  try {
    const event = typeof eventData === 'string' ? JSON.parse(eventData) : eventData;
    // Update step/test status based on event
    if (event.phaseId && event.testId && event.stepId) {
      const stepState = getStepState(event.phaseId, event.testId, event.stepId);
      if (event.type === 'step_start') {
        stepState.status = 'running';
        stepState.startedAt = event.ts;
      } else if (event.type === 'step_pass') {
        stepState.status = 'pass';
        stepState.completedAt = event.ts;
      } else if (event.type === 'step_warn') {
        stepState.status = 'warn';
        stepState.completedAt = event.ts;
      } else if (event.type === 'step_fail') {
        stepState.status = 'fail';
        stepState.failedAt = event.ts;
      }
      updateTestStatus(event.phaseId, event.testId);
      // Re-render if this is the current test
      if (ConsoleState.currentPhase === event.phaseId && ConsoleState.currentTest === event.testId) {
        const phase = ConsoleState.plan.phases.find(p => p.id === event.phaseId);
        if (phase && phase.tests.find(t => t.id === event.testId)) {
          renderTestView(event.phaseId, event.testId);
        }
      }
    }
    // Add to feed
    addFeedLine(event.level || 'INFO', event.stepId || event.testId || 'system', event.type || 'event', event.message || JSON.stringify(event));
    // Handle artifacts. The shell emitter merges its extra JSON
    // ({kind,name,path}) into the TOP LEVEL of the event, so `event.artifact`
    // is never set by the scripts — accept both a nested artifact object and
    // the flattened fields.
    if (event.type === 'artifact') {
      const artifact = event.artifact || { kind: event.kind, name: event.name, path: event.path };
      if (artifact.path || artifact.name) {
        addEvidence(event.phaseId, event.testId, artifact);
      }
    }
    saveRunState();
  } catch (error) {
    console.error('Failed to handle test event:', error);
    addFeedLine('ERROR', 'system', 'event_handler', `Failed to process event: ${error.message}`);
  }
}
// Record an artifact under its phase/test bucket, creating the nested
// containers on first use, then refresh the evidence panel.
function addEvidence(phaseId, testId, artifact) {
    const byPhase = ConsoleState.evidence;
    byPhase[phaseId] = byPhase[phaseId] || {};
    byPhase[phaseId][testId] = byPhase[phaseId][testId] || [];
    byPhase[phaseId][testId].push(artifact);
    renderEvidence(phaseId, testId);
}
// Render the evidence list for (phaseId, testId) into the evidence panel.
//
// Artifact names and paths originate from shell-script events, so they are
// attached via textContent and addEventListener rather than interpolated
// into HTML — the previous inline-onclick string let a quote inside
// artifact.path break out of the attribute (markup/JS injection).
function renderEvidence(phaseId, testId) {
    if (ConsoleState.currentPhase !== phaseId || ConsoleState.currentTest !== testId) {
        return; // Not viewing this test
    }
    const evidenceList = document.getElementById('evidence-list');
    if (!evidenceList) return;
    const artifacts = ConsoleState.evidence[phaseId]?.[testId] || [];
    evidenceList.innerHTML = '';
    if (artifacts.length === 0) {
        // Static placeholder markup — contains no untrusted text.
        evidenceList.innerHTML = '<div style="color: #858585; padding: 20px; text-align: center;">No evidence captured yet</div>';
        return;
    }
    artifacts.forEach(artifact => {
        const item = document.createElement('div');
        item.className = 'evidence-item';

        const iconEl = document.createElement('span');
        iconEl.className = 'evidence-icon';
        iconEl.textContent = getArtifactIcon(artifact.kind);

        const nameEl = document.createElement('span');
        nameEl.className = 'evidence-name';
        nameEl.textContent = artifact.name || artifact.path || 'Unknown';

        const actionEl = document.createElement('a');
        actionEl.href = '#';
        actionEl.className = 'evidence-action';
        actionEl.textContent = 'view';
        actionEl.addEventListener('click', (e) => {
            e.preventDefault(); // keep the '#' href from jumping the page
            viewEvidence(artifact.path || '');
        });

        item.appendChild(iconEl);
        item.appendChild(nameEl);
        item.appendChild(actionEl);
        evidenceList.appendChild(item);
    });
    // Show evidence panel
    document.getElementById('evidence-panel').style.display = 'block';
}
// Map an artifact kind to its display emoji; unknown kinds get a paperclip.
function getArtifactIcon(kind) {
    switch (kind) {
        case 'alarms': return '📋';
        case 'logs': return '📄';
        case 'screen': return '📷';
        case 'notes': return '📝';
        default: return '📎';
    }
}
// Placeholder evidence viewer: log the request to the live feed and show
// an informational dialog. A real implementation would open the file or
// present it in a modal.
function viewEvidence(path) {
    addFeedLine('INFO', 'evidence', 'view', 'Viewing evidence: ' + path);
    alert('Evidence file: ' + path + '\n\nIn production, this would open the file viewer.');
}
// Kick off console initialization once the DOM is ready.
document.addEventListener('DOMContentLoaded', initConsole);

View File

@@ -0,0 +1,114 @@
<!DOCTYPE html>
<!--
  Operator test console for the Daily Notification Plugin.
  Layout: phase/test tree (left sidebar), step checklist + current step
  (center), evidence panel (right, collapsible), live event feed (footer).
  Behavior lives in console.js; styling in console.css.
-->
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="viewport-fit=cover, width=device-width, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no">
<!-- Disable caching so console updates are picked up immediately during test runs -->
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate">
<meta http-equiv="Pragma" content="no-cache">
<meta http-equiv="Expires" content="0">
<title>Daily Notification Plugin — Test Console</title>
<link rel="stylesheet" href="console.css">
</head>
<body>
<div id="app" class="console-container">
<!-- Header: run metadata (device, run id, mode, strictness) filled in by console.js -->
<header class="console-header">
<div class="header-row">
<h1>Daily Notification Plugin — Test Console</h1>
</div>
<div class="header-row">
<span id="device-info">Device: <span id="device-id"></span></span>
<span id="run-info">Run: <span id="run-id"></span></span>
<span id="mode-info">Mode: <span id="mode">ADVISORY</span></span>
<span id="strictness-info">Strictness: <span id="strictness">SOFT</span></span>
</div>
</header>
<!-- Main Content -->
<div class="console-main">
<!-- Left: Phase/Test Tree -->
<aside class="console-sidebar">
<div class="sidebar-section">
<h2>PHASES</h2>
<div id="phase-tree"></div>
</div>
</aside>
<!-- Middle: Step Checklist + Current Step -->
<main class="console-content">
<!-- Test Header -->
<div id="test-header" class="test-header" style="display: none;">
<h2 id="test-title"></h2>
<p id="test-purpose" class="test-purpose"></p>
</div>
<!-- Step Checklist -->
<div id="step-checklist-section" class="step-checklist-section" style="display: none;">
<h3>STEP CHECKLIST</h3>
<div id="step-checklist" class="step-checklist"></div>
</div>
<!-- Current Step Frame -->
<div id="current-step-section" class="current-step-section" style="display: none;">
<h3>CURRENT STEP</h3>
<div id="current-step" class="current-step-frame">
<div class="step-header">
<span id="step-title" class="step-title"></span>
<span id="step-type" class="step-type"></span>
</div>
<div id="step-content" class="step-content">
<div class="step-section">
<h4>Why this step exists</h4>
<div id="step-purpose" class="step-text"></div>
</div>
<div class="step-section">
<h4>Do this now</h4>
<div id="step-instructions" class="step-list"></div>
</div>
<div class="step-section">
<h4>Expected</h4>
<div id="step-expected" class="step-list"></div>
</div>
<div class="step-section">
<h4>Evidence to capture</h4>
<div id="step-artifacts" class="step-list"></div>
</div>
</div>
<div class="step-actions">
<button id="btn-step-done" class="btn btn-success">Mark Done ✓</button>
<button id="btn-step-fail" class="btn btn-danger">Mark Fail ✗</button>
<button id="btn-step-notes" class="btn btn-secondary">Notes</button>
</div>
</div>
</div>
<!-- Empty State -->
<div id="empty-state" class="empty-state">
<p>Select a test from the left sidebar to begin.</p>
</div>
</main>
<!-- Right: Evidence Panel (collapsible) -->
<aside class="console-evidence" id="evidence-panel" style="display: none;">
<div class="evidence-header">
<h3>EVIDENCE</h3>
<button id="btn-evidence-close" class="btn-close">×</button>
</div>
<div id="evidence-list" class="evidence-list"></div>
</aside>
</div>
<!-- Bottom: Live Feed -->
<footer class="console-footer">
<div class="footer-header">
<h3>LIVE FEED</h3>
<button id="btn-feed-clear" class="btn-small">Clear</button>
</div>
<div id="live-feed" class="live-feed"></div>
</footer>
</div>
<script src="console.js"></script>
</body>
</html>

View File

@@ -0,0 +1,793 @@
{
"version": "testplan.v1",
"app": "Daily Notification Plugin",
"phases": [
{
"id": "phase1",
"title": "Daily Rollover & Recovery",
"tests": [
{
"id": "phase1_setup",
"title": "Setup: Pre-flight Checks",
"purpose": "Verify ADB connection, build app, install, check permissions, and configure plugin.",
"steps": [
{
"id": "p1_setup_s1",
"title": "Preflight: ADB Connection",
"type": "AUTO",
"blocking": true,
"instructions": ["Verify ADB device connected", "Check emulator boot status"],
"expected": ["ADB device in 'device' state", "Emulator boot completed"],
"artifacts": []
},
{
"id": "p1_setup_s2",
"title": "Build App",
"type": "AUTO",
"blocking": true,
"instructions": ["Run ./gradlew :app:assembleDebug"],
"expected": ["APK built successfully", "APK found at expected path"],
"artifacts": []
},
{
"id": "p1_setup_s3",
"title": "Install App",
"type": "AUTO",
"blocking": true,
"instructions": ["Uninstall existing app (if present)", "Install new APK", "Verify installation"],
"expected": ["App uninstalled (or not present)", "APK installed successfully", "App in package list"],
"artifacts": []
},
{
"id": "p1_setup_s4",
"title": "Launch App & Check Permissions",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Launch app main activity",
"In app UI, verify:",
" • Notifications: ✅ Granted",
" • Exact Alarms: ✅ Granted",
"If not granted, click 'Request Permissions'"
],
"expected": ["App launched", "Permissions granted"],
"artifacts": ["screenshots/setup_permissions.png"]
},
{
"id": "p1_setup_s5",
"title": "Configure Plugin",
"type": "MANUAL",
"blocking": true,
"instructions": [
"In app UI, click 'Configure Plugin'",
"Wait until both show ✅:",
" • ⚙️ Plugin Settings: ✅ Configured",
" • 🔌 Native Fetcher: ✅ Configured"
],
"expected": ["Plugin configured", "Native fetcher configured"],
"artifacts": ["screenshots/setup_configured.png"]
}
]
},
{
"id": "phase1_smoke",
"title": "Smoke: Schedule One + Verify Pending",
"purpose": "Validate end-to-end scheduling path and ensure exactly one plugin alarm exists.",
"steps": [
{
"id": "p1_smoke_s1",
"title": "Preflight",
"type": "AUTO",
"blocking": true,
"instructions": ["Check ADB connection", "Verify app installed"],
"expected": ["ADB connected", "App installed"],
"artifacts": []
},
{
"id": "p1_smoke_s2",
"title": "Build + Install App",
"type": "AUTO",
"blocking": true,
"instructions": ["Build APK", "Install APK"],
"expected": ["Build successful", "Install successful"],
"artifacts": []
},
{
"id": "p1_smoke_s3",
"title": "Schedule One Notification",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Launch app",
"Click 'Test Notification' to schedule for a few minutes in the future"
],
"expected": ["Notification scheduled", "App shows pending count > 0"],
"artifacts": ["alarms/smoke_after_schedule.txt", "screenshots/smoke_after_schedule.png"]
},
{
"id": "p1_smoke_s4",
"title": "Verify Exactly 1 Plugin Alarm Exists",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check dumpsys alarm for plugin alarms"],
"expected": ["Exactly 1 plugin NOTIFICATION alarm found", "No duplicate alarms"],
"artifacts": ["alarms/smoke_verification.txt"]
},
{
"id": "p1_smoke_s5",
"title": "Capture Evidence",
"type": "EVIDENCE",
"blocking": false,
"instructions": ["Capture alarms dump", "Capture logcat", "Capture screenshot"],
"expected": ["Evidence files created"],
"artifacts": ["alarms/smoke_final.txt", "logs/smoke_final_logcat.txt", "screenshots/smoke_final.png"]
},
{
"id": "p1_smoke_s6",
"title": "Verdict + Export Summary",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded (PASS/WARN/FAIL)"],
"artifacts": ["summary.json", "summary.md"]
}
]
},
{
"id": "phase1_test0",
"title": "Daily Rollover Verification",
"purpose": "Verify that after a notification fires, the next day's schedule is correctly computed and only ONE alarm exists.",
"steps": [
{
"id": "p1_t0_s1",
"title": "Capture Initial State",
"type": "EVIDENCE",
"blocking": false,
"instructions": ["Capture alarms dump", "Capture logcat"],
"expected": ["Evidence files created"],
"artifacts": ["alarms/test0_initial.txt", "logs/test0_initial_logcat.txt"]
},
{
"id": "p1_t0_s2",
"title": "Schedule Test Notification",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Launch app",
"Schedule a daily notification for a time very soon (e.g., 1-2 minutes from now)"
],
"expected": ["Notification scheduled", "Exactly 1 alarm exists"],
"artifacts": ["alarms/test0_after_schedule.txt", "screenshots/test0_after_schedule.png"]
},
{
"id": "p1_t0_s3",
"title": "Advance Time Past Midnight",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Settings → System → Date & time",
"Disable auto time",
"Set time to 23:59, then to 00:01"
],
"expected": ["Time advanced past midnight", "Rollover logic triggered"],
"artifacts": []
},
{
"id": "p1_t0_s4",
"title": "Observe Rollover Logs",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check logcat for rollover start/end messages"],
"expected": ["Rollover started log found", "Rollover completed log found"],
"artifacts": ["logs/test0_rollover_logcat.txt"]
},
{
"id": "p1_t0_s5",
"title": "Capture Post-Rollover Evidence",
"type": "EVIDENCE",
"blocking": false,
"instructions": ["Capture alarms dump", "Capture logcat", "Capture screenshot"],
"expected": ["Evidence files created"],
"artifacts": ["alarms/test0_after_rollover.txt", "logs/test0_after_rollover_logcat.txt", "screenshots/test0_after_rollover.png"]
},
{
"id": "p1_t0_s6",
"title": "Verify Schedule State",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check alarm count", "Verify next-day schedule"],
"expected": ["Exactly 1 alarm exists", "Alarm time is for tomorrow (24h later)", "No duplicate alarms"],
"artifacts": []
},
{
"id": "p1_t0_s7",
"title": "Verdict",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded (PASS/WARN/FAIL)"],
"artifacts": []
},
{
"id": "p1_t0_s8",
"title": "Export Summary",
"type": "EVIDENCE",
"blocking": false,
"instructions": ["Generate summary files"],
"expected": ["Summary files created"],
"artifacts": ["summary.json", "summary.md"]
}
]
},
{
"id": "phase1_test1",
"title": "Force-Stop Recovery - Database Restoration",
"purpose": "Verify that after force-stop (which clears alarms), recovery uses the database to rebuild alarms on app relaunch.",
"steps": [
{
"id": "p1_t1_s1",
"title": "Clean Start - Verify No Lingering Alarms",
"type": "AUTO",
"blocking": true,
"instructions": ["Check for existing plugin alarms", "Reset app state if needed"],
"expected": ["No existing plugin alarms", "Clean state confirmed"],
"artifacts": ["alarms/test1_initial.txt"]
},
{
"id": "p1_t1_s2",
"title": "Schedule Notification",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Launch app",
"Schedule at least one future notification"
],
"expected": ["Notification scheduled", "Alarm exists in system"],
"artifacts": ["alarms/test1_before_force_stop.txt"]
},
{
"id": "p1_t1_s3",
"title": "Force Stop App",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Settings → Apps → Daily Notification Test",
"Force stop the app"
],
"expected": ["App force stopped", "Alarms cleared (on most devices)"],
"artifacts": ["alarms/test1_after_force_stop.txt"]
},
{
"id": "p1_t1_s4",
"title": "Relaunch App & Verify Recovery",
"type": "ASSERT",
"blocking": true,
"instructions": [
"Launch app",
"Check logs for recovery scenario detection",
"Verify alarms recreated from database"
],
"expected": ["FORCE_STOP scenario detected", "Alarms recreated", "Recovery successful"],
"artifacts": ["logs/test1_recovery_logcat.txt", "alarms/test1_after_recovery.txt"]
},
{
"id": "p1_t1_s5",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase1_test2",
"title": "Schedule Update Verification",
"purpose": "Verify schedule updates work correctly and preserve one-per-day semantics.",
"steps": [
{
"id": "p1_t2_s1",
"title": "Schedule Initial Notification",
"type": "MANUAL",
"blocking": true,
"instructions": ["Launch app", "Schedule notification for time A"],
"expected": ["Notification scheduled", "1 alarm exists"],
"artifacts": ["alarms/test2_initial.txt"]
},
{
"id": "p1_t2_s2",
"title": "Update Schedule",
"type": "MANUAL",
"blocking": true,
"instructions": ["Update notification to time B"],
"expected": ["Schedule updated", "Old alarm cancelled", "New alarm scheduled"],
"artifacts": ["alarms/test2_after_update.txt"]
},
{
"id": "p1_t2_s3",
"title": "Verify One-Per-Day Semantics",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check alarm count", "Verify only one alarm exists"],
"expected": ["Exactly 1 alarm exists", "No duplicate alarms"],
"artifacts": []
},
{
"id": "p1_t2_s4",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase1_test3",
"title": "Recovery Timeout",
"purpose": "Verify recovery timeout behavior when alarms cannot be recreated.",
"steps": [
{
"id": "p1_t3_s1",
"title": "Setup: Schedule Notification",
"type": "MANUAL",
"blocking": true,
"instructions": ["Launch app", "Schedule notification"],
"expected": ["Notification scheduled"],
"artifacts": []
},
{
"id": "p1_t3_s2",
"title": "Force Stop & Simulate Timeout",
"type": "MANUAL",
"blocking": true,
"instructions": ["Force stop app", "Wait for timeout period"],
"expected": ["Timeout detected", "Recovery attempted"],
"artifacts": ["logs/test3_timeout_logcat.txt"]
},
{
"id": "p1_t3_s3",
"title": "Verify Timeout Handling",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check logs for timeout messages", "Verify fallback behavior"],
"expected": ["Timeout logged", "Fallback triggered"],
"artifacts": []
},
{
"id": "p1_t3_s4",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase1_test4",
"title": "Invalid Data Handling",
"purpose": "Verify plugin handles invalid/corrupted data gracefully.",
"steps": [
{
"id": "p1_t4_s1",
"title": "Inject Invalid Data",
"type": "MANUAL",
"blocking": true,
"instructions": ["Corrupt database or preferences", "Launch app"],
"expected": ["Invalid data detected"],
"artifacts": []
},
{
"id": "p1_t4_s2",
"title": "Verify Error Handling",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check logs for error messages", "Verify app doesn't crash"],
"expected": ["Error logged", "App continues running", "Recovery attempted"],
"artifacts": ["logs/test4_error_logcat.txt"]
},
{
"id": "p1_t4_s3",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
}
]
},
{
"id": "phase2",
"title": "Force Stop Recovery",
"tests": [
{
"id": "phase2_test1",
"title": "Force Stop Alarms Cleared",
"purpose": "Verify force stop detection and alarm rescheduling when alarms are cleared.",
"steps": [
{
"id": "p2_t1_s1",
"title": "Launch App & Check Plugin Status",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Launch app",
"Verify plugin configured",
"Schedule at least one future notification"
],
"expected": ["Plugin configured", "Notification scheduled"],
"artifacts": ["alarms/phase2_test1_initial.txt", "logs/phase2_test1_initial_logcat.txt"]
},
{
"id": "p2_t1_s2",
"title": "Verify Alarms Scheduled",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check alarm count", "Verify exactly 1 plugin alarm"],
"expected": ["1 plugin alarm exists", "Alarm details visible"],
"artifacts": ["alarms/phase2_test1_before_force_stop.txt"]
},
{
"id": "p2_t1_s3",
"title": "Force Stop App",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Settings → Apps → Daily Notification Test",
"Force stop the app"
],
"expected": ["App force stopped", "Alarms cleared (on most devices)"],
"artifacts": ["alarms/phase2_test1_after_force_stop.txt"]
},
{
"id": "p2_t1_s4",
"title": "Check Alarms After Force Stop",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check alarm count", "Verify alarms cleared"],
"expected": ["0 plugin alarms (or alarms cleared)", "System confirms force stop"],
"artifacts": []
},
{
"id": "p2_t1_s5",
"title": "Relaunch App & Verify Recovery",
"type": "ASSERT",
"blocking": true,
"instructions": [
"Launch app",
"Check logs for FORCE_STOP scenario",
"Verify alarms recreated"
],
"expected": ["FORCE_STOP scenario detected", "Alarms recreated", "Recovery successful"],
"artifacts": ["logs/phase2_test1_recovery_logcat.txt", "alarms/phase2_test1_after_recovery.txt"]
},
{
"id": "p2_t1_s6",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase2_test2",
"title": "Force Stop Alarms Intact",
"purpose": "Verify force stop detection when alarms remain intact (some devices don't clear alarms on force stop).",
"steps": [
{
"id": "p2_t2_s1",
"title": "Schedule Notification",
"type": "MANUAL",
"blocking": true,
"instructions": ["Launch app", "Schedule notification"],
"expected": ["Notification scheduled"],
"artifacts": ["alarms/phase2_test2_initial.txt"]
},
{
"id": "p2_t2_s2",
"title": "Force Stop App",
"type": "MANUAL",
"blocking": true,
"instructions": ["Force stop app"],
"expected": ["App force stopped"],
"artifacts": ["alarms/phase2_test2_after_force_stop.txt"]
},
{
"id": "p2_t2_s3",
"title": "Verify Alarms Intact",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check alarm count", "Verify alarms still exist"],
"expected": ["Alarms still exist", "No recovery needed"],
"artifacts": []
},
{
"id": "p2_t2_s4",
"title": "Relaunch & Verify Behavior",
"type": "ASSERT",
"blocking": true,
"instructions": ["Launch app", "Check logs", "Verify no duplicate alarms"],
"expected": ["No duplicate alarms", "Correct behavior"],
"artifacts": ["logs/phase2_test2_logcat.txt"]
},
{
"id": "p2_t2_s5",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase2_test3",
"title": "First Launch / No Schedules",
"purpose": "Verify app behavior on first launch when no schedules exist.",
"steps": [
{
"id": "p2_t3_s1",
"title": "Fresh Install",
"type": "AUTO",
"blocking": true,
"instructions": ["Uninstall app", "Install fresh APK"],
"expected": ["App installed", "No existing data"],
"artifacts": []
},
{
"id": "p2_t3_s2",
"title": "First Launch",
"type": "MANUAL",
"blocking": true,
"instructions": ["Launch app for first time", "Don't schedule anything"],
"expected": ["App launches", "No crashes"],
"artifacts": ["logs/phase2_test3_first_launch_logcat.txt"]
},
{
"id": "p2_t3_s3",
"title": "Verify No Alarms",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check alarm count", "Verify no alarms scheduled"],
"expected": ["0 plugin alarms", "No errors"],
"artifacts": ["alarms/phase2_test3_no_schedules.txt"]
},
{
"id": "p2_t3_s4",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
}
]
},
{
"id": "phase3",
"title": "Boot Recovery",
"tests": [
{
"id": "phase3_test1",
"title": "Boot with Future Alarms",
"purpose": "Verify alarms are recreated on boot when schedules have future run times.",
"steps": [
{
"id": "p3_t1_s1",
"title": "Schedule Notification",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Launch app",
"Verify plugin configured",
"Schedule at least one future notification"
],
"expected": ["Notification scheduled", "1 alarm exists"],
"artifacts": ["alarms/phase3_test1_initial.txt", "logs/phase3_test1_initial_logcat.txt"]
},
{
"id": "p3_t1_s2",
"title": "Verify Alarms Scheduled",
"type": "ASSERT",
"blocking": true,
"instructions": ["Check alarm count", "Verify alarm details"],
"expected": ["1 plugin alarm exists", "Alarm time is in future"],
"artifacts": ["alarms/phase3_test1_before_reboot.txt"]
},
{
"id": "p3_t1_s3",
"title": "Reboot Emulator",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Reboot emulator (adb reboot)",
"Wait 30-60 seconds for boot to complete"
],
"expected": ["Emulator rebooted", "Boot completed"],
"artifacts": []
},
{
"id": "p3_t1_s4",
"title": "Verify Boot Recovery",
"type": "ASSERT",
"blocking": true,
"instructions": [
"Check logs for boot recovery",
"Verify alarms recreated",
"Check alarm count"
],
"expected": ["BOOT scenario detected", "Alarms recreated", "1 alarm exists"],
"artifacts": ["logs/phase3_test1_boot_recovery_logcat.txt", "alarms/phase3_test1_after_boot.txt"]
},
{
"id": "p3_t1_s5",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase3_test2",
"title": "Boot with Past Alarms (Missed Alarms)",
"purpose": "Verify boot recovery handles missed alarms (alarms that should have fired while device was off).",
"steps": [
{
"id": "p3_t2_s1",
"title": "Schedule Notification in Past",
"type": "MANUAL",
"blocking": true,
"instructions": [
"Schedule notification for time in past",
"Or schedule for future, then advance clock past alarm time"
],
"expected": ["Alarm time is in past"],
"artifacts": ["alarms/phase3_test2_initial.txt"]
},
{
"id": "p3_t2_s2",
"title": "Reboot Emulator",
"type": "MANUAL",
"blocking": true,
"instructions": ["Reboot emulator", "Wait for boot"],
"expected": ["Emulator rebooted"],
"artifacts": []
},
{
"id": "p3_t2_s3",
"title": "Verify Missed Alarm Handling",
"type": "ASSERT",
"blocking": true,
"instructions": [
"Check logs for missed alarm detection",
"Verify next-day schedule created"
],
"expected": ["Missed alarm detected", "Next-day schedule created", "Recovery successful"],
"artifacts": ["logs/phase3_test2_missed_alarm_logcat.txt"]
},
{
"id": "p3_t2_s4",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase3_test3",
"title": "Boot with No Schedules",
"purpose": "Verify boot recovery behavior when no schedules exist.",
"steps": [
{
"id": "p3_t3_s1",
"title": "Fresh Install (No Schedules)",
"type": "AUTO",
"blocking": true,
"instructions": ["Uninstall app", "Install fresh APK", "Don't schedule anything"],
"expected": ["App installed", "No schedules"],
"artifacts": []
},
{
"id": "p3_t3_s2",
"title": "Reboot Emulator",
"type": "MANUAL",
"blocking": true,
"instructions": ["Reboot emulator", "Wait for boot"],
"expected": ["Emulator rebooted"],
"artifacts": []
},
{
"id": "p3_t3_s3",
"title": "Verify Boot Recovery (No Action)",
"type": "ASSERT",
"blocking": true,
"instructions": [
"Check logs for boot recovery",
"Verify no alarms created",
"Verify no errors"
],
"expected": ["Boot recovery detected", "No alarms created (correct)", "No errors"],
"artifacts": ["logs/phase3_test3_boot_no_schedules_logcat.txt"]
},
{
"id": "p3_t3_s4",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
},
{
"id": "phase3_test4",
"title": "Silent Boot Recovery (Don't Open App)",
"purpose": "Verify boot recovery works without opening the app (boot receiver handles recovery).",
"steps": [
{
"id": "p3_t4_s1",
"title": "Schedule Notification",
"type": "MANUAL",
"blocking": true,
"instructions": ["Launch app", "Schedule notification"],
"expected": ["Notification scheduled"],
"artifacts": ["alarms/phase3_test4_initial.txt"]
},
{
"id": "p3_t4_s2",
"title": "Reboot Emulator",
"type": "MANUAL",
"blocking": true,
"instructions": ["Reboot emulator", "Wait for boot", "DO NOT open app"],
"expected": ["Emulator rebooted", "App not launched"],
"artifacts": []
},
{
"id": "p3_t4_s3",
"title": "Verify Silent Recovery",
"type": "ASSERT",
"blocking": true,
"instructions": [
"Check logs for boot receiver activity",
"Verify alarms recreated without app launch",
"Wait 10-15 seconds for recovery"
],
"expected": ["Boot receiver triggered", "Alarms recreated", "Recovery successful"],
"artifacts": ["logs/phase3_test4_silent_recovery_logcat.txt", "alarms/phase3_test4_after_silent_boot.txt"]
},
{
"id": "p3_t4_s4",
"title": "Verdict + Export",
"type": "DECISION",
"blocking": true,
"instructions": ["Review evidence", "Record verdict"],
"expected": ["Verdict recorded"],
"artifacts": ["summary.json"]
}
]
}
]
}
]
}

View File

@@ -162,6 +162,25 @@
}
}
// Format date/time with seconds normalized to :00
// Render a timestamp as "MM/DD/YYYY, HH:MM:00 AM/PM", forcing the seconds
// field to :00 so displayed times line up with minute-granular schedules.
// Falsy/zero timestamps mean nothing is scheduled.
function formatDateTimeNormalized(timestamp) {
    if (!timestamp || timestamp === 0) {
        return 'None scheduled';
    }
    const when = new Date(timestamp);
    when.setSeconds(0, 0); // zero out seconds and milliseconds
    return when.toLocaleString('en-US', {
        month: '2-digit',
        day: '2-digit',
        year: 'numeric',
        hour: '2-digit',
        minute: '2-digit',
        second: '2-digit',
        hour12: true
    });
}
function loadPluginStatus() {
console.log('loadPluginStatus called');
const pluginStatusContent = document.getElementById('pluginStatusContent');
@@ -175,7 +194,7 @@
}
window.DailyNotification.getNotificationStatus()
.then(result => {
const nextTime = result.nextNotificationTime ? new Date(result.nextNotificationTime).toLocaleString() : 'None scheduled';
const nextTime = formatDateTimeNormalized(result.nextNotificationTime);
const hasSchedules = result.isEnabled || (result.pending && result.pending > 0);
const statusIcon = hasSchedules ? '✅' : '⏸️';
pluginStatusContent.innerHTML = `${statusIcon} Active Schedules: ${hasSchedules ? 'Yes' : 'No'}<br>
@@ -233,8 +252,11 @@
priority: 'high'
})
.then(() => {
const prefetchTimeReadable = prefetchTime.toLocaleTimeString();
const notificationTimeReadable = notificationTime.toLocaleTimeString();
// Normalize seconds to :00 for display
prefetchTime.setSeconds(0, 0);
notificationTime.setSeconds(0, 0);
const prefetchTimeReadable = prefetchTime.toLocaleTimeString('en-US', { hour: '2-digit', minute: '2-digit', second: '2-digit', hour12: true });
const notificationTimeReadable = notificationTime.toLocaleTimeString('en-US', { hour: '2-digit', minute: '2-digit', second: '2-digit', hour12: true });
status.innerHTML = '✅ Notification scheduled!<br>' +
'📥 Prefetch: ' + prefetchTimeReadable + ' (' + prefetchTimeString + ')<br>' +
'🔔 Notification: ' + notificationTimeReadable + ' (' + notificationTimeString + ')<br><br>' +
@@ -417,12 +439,16 @@
}
}
// Check for notification delivery periodically
// Track last known nextNotificationTime to detect changes
let lastKnownNextNotificationTime = null;
// Check for notification delivery and status updates periodically
function checkNotificationDelivery() {
if (!window.DailyNotification) return;
window.DailyNotification.getNotificationStatus()
.then(result => {
// Check for notification delivery
if (result.lastNotificationTime) {
const lastTime = new Date(result.lastNotificationTime);
const now = new Date();
@@ -435,15 +461,37 @@
if (indicator && timeSpan) {
indicator.style.display = 'block';
timeSpan.textContent = `Received at ${lastTime.toLocaleTimeString()}`;
// Normalize seconds to :00
lastTime.setSeconds(0, 0);
timeSpan.textContent = `Received at ${lastTime.toLocaleTimeString('en-US', { hour: '2-digit', minute: '2-digit', second: '2-digit', hour12: true })}`;
// Hide after 30 seconds
setTimeout(() => {
indicator.style.display = 'none';
}, 30000);
// Force immediate refresh when notification is received (rollover may have occurred)
setTimeout(() => {
loadPluginStatus();
}, 1000); // Wait 1 second for rollover to complete
}
}
}
// Detect if nextNotificationTime changed (rollover occurred)
const currentNextTime = result.nextNotificationTime;
if (currentNextTime && currentNextTime !== lastKnownNextNotificationTime) {
if (lastKnownNextNotificationTime !== null) {
console.log('Next notification time changed - rollover detected!');
// Force immediate refresh
loadPluginStatus();
}
lastKnownNextNotificationTime = currentNextTime;
}
// Auto-refresh plugin status periodically to show updated next notification time after rollover
// This ensures the UI updates when the plugin reschedules the notification
loadPluginStatus();
})
.catch(error => {
// Silently fail - this is just for visual feedback
@@ -459,8 +507,19 @@
loadPermissionStatus();
loadChannelStatus();
// Check for notification delivery every 5 seconds
setInterval(checkNotificationDelivery, 5000);
// Initialize last known next notification time
if (window.DailyNotification) {
window.DailyNotification.getNotificationStatus()
.then(result => {
lastKnownNextNotificationTime = result.nextNotificationTime;
console.log('Initialized nextNotificationTime:', lastKnownNextNotificationTime);
})
.catch(() => {});
}
// Check for notification delivery and status updates every 3 seconds (more frequent)
// This ensures UI updates quickly when rollover occurs
setInterval(checkNotificationDelivery, 3000);
}, 500);
});

View File

@@ -0,0 +1,91 @@
package com.timesafari.dailynotification;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.os.Build;
import android.util.Log;

import com.getcapacitor.JSObject;
import com.getcapacitor.Plugin;
import com.getcapacitor.PluginCall;
import com.getcapacitor.PluginMethod;
import com.getcapacitor.annotation.CapacitorPlugin;
/**
* TestEventsPlugin - Receives test events from shell scripts via ADB broadcasts
*
* This plugin allows test scripts (test-phase*.sh) to send structured events
* to the test console UI in real-time via Android broadcast intents.
*
* Usage from shell:
* adb shell am broadcast \
* -a com.timesafari.dailynotification.TEST_EVENT \
* --es payload '{"version":"testevent.v1","ts":"...","type":"step_start",...}'
*/
@CapacitorPlugin(name = "TestEvents")
public class TestEventsPlugin extends Plugin {
private static final String TAG = "TestEventsPlugin";
private static final String BROADCAST_ACTION = "com.timesafari.dailynotification.TEST_EVENT";
private static final String EXTRA_PAYLOAD = "payload";
private BroadcastReceiver testEventReceiver;
@Override
public void load() {
super.load();
// Register broadcast receiver
testEventReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (BROADCAST_ACTION.equals(intent.getAction())) {
String payload = intent.getStringExtra(EXTRA_PAYLOAD);
if (payload != null) {
Log.d(TAG, "Received test event: " + payload);
notifyListeners("testEvent", new JSObject().put("payload", payload));
}
}
}
};
IntentFilter filter = new IntentFilter(BROADCAST_ACTION);
getContext().registerReceiver(testEventReceiver, filter);
Log.d(TAG, "TestEventsPlugin loaded, broadcast receiver registered");
}
@Override
public void handleOnDestroy() {
super.handleOnDestroy();
// Unregister receiver
if (testEventReceiver != null) {
try {
getContext().unregisterReceiver(testEventReceiver);
Log.d(TAG, "TestEventsPlugin unregistered");
} catch (IllegalArgumentException e) {
// Receiver was not registered, ignore
Log.w(TAG, "Receiver was not registered: " + e.getMessage());
}
}
}
/**
* Plugin method to manually trigger an event (for testing)
*/
@PluginMethod
public void emitEvent(PluginCall call) {
String payload = call.getString("payload");
if (payload == null) {
call.reject("Missing payload parameter");
return;
}
Log.d(TAG, "Manually emitting test event: " + payload);
notifyListeners("testEvent", new JSObject().put("payload", payload));
call.resolve();
}
}

View File

@@ -0,0 +1,166 @@
# Console Implementation - Remaining Work
## ✅ Completed
1. **Test Plan JSON** (`plan.json`) - All phases/tests/steps defined
2. **Console UI** (`console/index.html`, `console.js`, `console.css`) - Full Textual-style interface
3. **TestEventsPlugin** - Android plugin to receive ADB broadcasts
4. **Event System** - Library functions emit events (`ui_prompt`, `capture_*`, `verdict_*`)
5. **Helper Functions** - `set_test_context()`, `step_start()`, `step_pass()`, `step_warn()`, `step_fail()`
6. **Example Wiring** - Test 0 and partial Test 1 in `test-phase1.sh` demonstrate pattern
7. **Plugin Registration** - `TestEventsPlugin` added to `capacitor.plugins.json`
8. **Documentation** - Usage guide created
## 🔧 Critical Fix Applied
**TestEventsPlugin Registration** - Added to `capacitor.plugins.json` (required for plugin to work)
## 📋 Remaining Work
### 1. Complete Wiring of test-phase1.sh
**Status**: Partially complete (Test 0 done, Test 1 started, Tests 2-4 not wired)
**Pattern to apply**:
```bash
# At test start
set_test_context "phase1" "phase1_testX" ""
# For each step
set_test_context "phase1" "phase1_testX" "p1_tX_sY"
step_start "p1_tX_sY" "Step description"
# ... do work ...
step_pass "p1_tX_sY" "Step completed"
```
**Tests to wire**:
- ✅ Test 0: Daily Rollover Verification (complete)
- ⚠️ Test 1: Force-Stop Recovery (partially done, needs completion)
- ❌ Test 2: Schedule Update Verification (not wired)
- ❌ Test 3: Recovery Timeout (not wired)
- ❌ Test 4: Invalid Data Handling (not wired)
**Step IDs from plan.json**:
- Test 1: `p1_t1_s1` through `p1_t1_s5`
- Test 2: `p1_t2_s1` through `p1_t2_s4`
- Test 3: `p1_t3_s1` through `p1_t3_s2`
- Test 4: `p1_t4_s1` through `p1_t4_s4`
### 2. Wire test-phase2.sh
**Status**: Not started
**Tests to wire**:
- Test 1: Force Stop Alarms Cleared (`phase2_test1`)
- Test 2: Force Stop Alarms Intact (`phase2_test2`)
- Test 3: First Launch / No Schedules (`phase2_test3`)
**Step IDs from plan.json**:
- Test 1: `p2_t1_s1` through `p2_t1_s5`
- Test 2: `p2_t2_s1` through `p2_t2_s5`
- Test 3: `p2_t3_s1` through `p2_t3_s4`
### 3. Wire test-phase3.sh
**Status**: Not started
**Tests to wire**:
- Test 1: Boot with Future Alarms (`phase3_test1`)
- Test 2: Boot with Past Alarms (`phase3_test2`)
- Test 3: Boot with No Schedules (`phase3_test3`)
- Test 4: Silent Boot Recovery (`phase3_test4`)
**Step IDs from plan.json**:
- Test 1: `p3_t1_s1` through `p3_t1_s5`
- Test 2: `p3_t2_s1` through `p3_t2_s5`
- Test 3: `p3_t3_s1` through `p3_t3_s4`
- Test 4: `p3_t4_s1` through `p3_t4_s4`
### 4. Testing & Validation
**Steps**:
1. Build Android app: `cd test-apps/android-test-app && ./gradlew assembleDebug`
2. Install on emulator/device
3. Navigate to console: Open app → should redirect to `/console/`
4. Verify console loads: Check browser console for errors
5. Test Phase A (UI-only):
- Select a test
- Manually mark steps as done/fail
- Verify state persists
6. Test Phase B (Live events):
- Set `export DNP_UI_EVENTS=1`
- Run `./test-phase1.sh 0` (just test 0)
- Verify events appear in console
- Verify step status updates automatically
## 🎯 Quick Start: Wiring a Test Function
Here's a complete example for wiring a test:
```bash
test_example() {
section "TEST: Example Test"
# Set test context (no step yet)
set_test_context "phase1" "phase1_example" ""
# Step 1: Setup
set_test_context "phase1" "phase1_example" "p1_ex_s1"
step_start "p1_ex_s1" "Setting up test"
capture_alarms "example_initial"
step_pass "p1_ex_s1" "Setup complete"
# Step 2: Execute
set_test_context "phase1" "phase1_example" "p1_ex_s2"
step_start "p1_ex_s2" "Executing test"
# ... do work ...
if verification_passed; then  # replace with your actual success check
step_pass "p1_ex_s2" "Execution complete"
else
step_fail "p1_ex_s2" "Execution failed"
fi
# Step 3: Verify
set_test_context "phase1" "phase1_example" "p1_ex_s3"
step_start "p1_ex_s3" "Verifying results"
# ... verify ...
step_pass "p1_ex_s3" "Verification passed"
# Verdict (automatically emits event)
set_test_context "phase1" "phase1_example" "p1_ex_s4"
verdict_pass "example_test" "Test passed"
}
```
## 📝 Notes
- **Step IDs must match plan.json** - Check `console/plan.json` for exact step IDs
- **Context must be set before step events** - Call `set_test_context()` before `step_start()`
- **Verdict functions auto-emit events** - No need to manually emit verdict events
- **Evidence capture auto-emits events** - `capture_alarms()`, `capture_logcat()`, `capture_screenshot()` emit events automatically
- **Operator prompts auto-emit events** - `ui_prompt()` emits `operator_required` events automatically
## 🔍 Verification Checklist
After wiring tests, verify:
- [ ] All test functions have `set_test_context()` at start
- [ ] All major steps have `step_start()` and `step_pass/fail/warn()`
- [ ] Step IDs match `plan.json` exactly
- [ ] Verdict functions are called (they emit events automatically)
- [ ] Evidence capture functions are called (they emit events automatically)
- [ ] Scripts run without errors
- [ ] Console receives events when `DNP_UI_EVENTS=1` is set
## 🚀 Priority Order
1. **Complete test-phase1.sh** (highest priority - most used)
2. **Wire test-phase2.sh** (medium priority)
3. **Wire test-phase3.sh** (medium priority)
4. **Test integration** (validate everything works)
## 📚 Reference
- **Usage Guide**: `docs/CONSOLE-USAGE.md`
- **Test Plan**: `app/src/main/assets/public/console/plan.json`
- **Event Functions**: `alarm-test-lib.sh` (functions: `emit_event`, `set_test_context`, `step_*`)

View File

@@ -0,0 +1,165 @@
# Test Console Usage Guide
## Overview
The Test Console is a Textual-inspired operator interface that provides a structured view of test execution, real-time progress tracking, and evidence management.
## Features
### Phase A (UI-Only Mode)
- **Test Plan Rendering**: Visual tree of phases, tests, and steps
- **Step Checklists**: Track progress with ✓/✗/⚠/→/· status indicators
- **Current Step Frame**: Detailed instructions for the current step
- **Evidence Panel**: View captured artifacts (alarms, logs, screenshots)
- **Live Feed**: Real-time event stream
- **State Persistence**: Progress saved to localStorage
### Phase B (Live Updates)
- **ADB Broadcast Integration**: Scripts send events via `adb shell am broadcast`
- **Real-Time Updates**: Console updates automatically as tests run
- **Automatic Status**: Step/test status updates from script events
## Enabling Live Updates
To enable live event streaming from test scripts:
```bash
export DNP_UI_EVENTS=1
./test-phase1.sh
```
## Test Context Setup
In test scripts, set context before executing steps:
```bash
# Set phase/test/step context
set_test_context "phase1" "phase1_test0" "p1_t0_s1"
# Emit step events
step_start "p1_t0_s1" "Starting step"
# ... do work ...
step_pass "p1_t0_s1" "Step completed"
```
## Event Types
### Step Events
- `step_start` - Step execution begins
- `step_pass` - Step completed successfully
- `step_warn` - Step completed with warnings
- `step_fail` - Step failed
### Operator Events
- `operator_required` - Manual action needed (from `ui_prompt()`)
### Evidence Events
- `artifact` - Evidence captured (from `capture_*()` functions)
### Verdict Events
- `test_verdict` - Test completed (from `verdict_*()` functions)
## Step ID Mapping
Step IDs follow the pattern: `p{phase}_{test}_s{step}`
Examples:
- `p1_t0_s1` = Phase 1, Test 0, Step 1
- `p2_t1_s3` = Phase 2, Test 1, Step 3
- `p3_t4_s2` = Phase 3, Test 4, Step 2
## Console Navigation
1. **Select Test**: Click a test in the left sidebar
2. **View Steps**: Step checklist shows in the middle panel
3. **Follow Instructions**: Current step frame shows what to do
4. **Mark Progress**: Use "Mark Done ✓" or "Mark Fail ✗" buttons
5. **View Evidence**: Evidence panel shows captured artifacts
## Manual Step Completion
If scripts aren't driving the console (Phase A only), operators can manually mark steps:
1. Select the step in the checklist
2. Click "Mark Done ✓" or "Mark Fail ✗"
3. Add notes if needed
4. Progress is saved automatically
## Evidence Management
Evidence is automatically captured when scripts call:
- `capture_alarms()``alarms/` directory
- `capture_logcat()``logs/` directory
- `capture_screenshot()``screens/` directory
Evidence appears in the console's evidence panel when:
- Scripts emit `artifact` events (Phase B)
- Operators manually add evidence (Phase A)
## Run ID
Each console session generates a unique Run ID:
- Format: `YYYYMMDD_HHMMSS_xxxx`
- Used for localStorage keys and evidence organization
- Displayed in console header
## Troubleshooting
### Events Not Appearing
1. **Check Event Emission**: Ensure `DNP_UI_EVENTS=1` is set
2. **Check ADB Connection**: Verify device is connected
3. **Check Plugin**: Ensure `TestEventsPlugin` is registered in Capacitor
4. **Check Broadcast**: Verify broadcast action matches in plugin
### Console Not Loading
1. **Check Plan JSON**: Verify `plan.json` exists and is valid
2. **Check Browser Console**: Look for JavaScript errors
3. **Check Capacitor**: Ensure Capacitor is initialized
### Steps Not Updating
1. **Check Context**: Verify `set_test_context()` is called
2. **Check Step IDs**: Ensure step IDs match `plan.json`
3. **Check Events**: Verify events are being emitted
## Example: Wiring a Test Function
```bash
test_example() {
section "TEST: Example Test"
# Set test context
set_test_context "phase1" "phase1_example" ""
# Step 1: Setup
set_test_context "phase1" "phase1_example" "p1_ex_s1"
step_start "p1_ex_s1" "Setting up test"
capture_alarms "example_initial"
step_pass "p1_ex_s1" "Setup complete"
# Step 2: Execute
set_test_context "phase1" "phase1_example" "p1_ex_s2"
step_start "p1_ex_s2" "Executing test"
# ... do work ...
step_pass "p1_ex_s2" "Execution complete"
# Step 3: Verify
set_test_context "phase1" "phase1_example" "p1_ex_s3"
step_start "p1_ex_s3" "Verifying results"
# ... verify ...
step_pass "p1_ex_s3" "Verification passed"
# Verdict
set_test_context "phase1" "phase1_example" "p1_ex_s4"
verdict_pass "example_test" "Test passed"
}
```
## Next Steps
1. **Wire Remaining Tests**: Add `set_test_context()` and `step_*()` calls to all test functions
2. **Test Console**: Build app, enable events, run a test script
3. **Refine UI**: Adjust styling, add features as needed

View File

@@ -0,0 +1,215 @@
# Test Implementation Alignment with Documentation
**Last Updated:** 2025-12-29
**Purpose:** Document how test scripts align with golden run specifications and runbook guidance
---
## Overview
The test implementation is guided by three types of documentation:
1. **Golden Run Documents** (`PHASE1_TEST0_GOLDEN.md`, `PHASE1_TEST1_GOLDEN.md`)
- Define **what a successful test looks like**
- Specify expected outputs, UI states, logcat patterns
- Provide pass/fail checklists
- **These are the test specifications**
2. **Runbook** (`RUNBOOK-TESTING.md`)
- Provides **operator guidance**
- Documents how to run tests, interpret results
- Troubleshooting and common issues
3. **Console Documentation** (`CONSOLE-USAGE.md`, `CONSOLE-REMAINING-WORK.md`)
- Documents the operator console UI
- Event-driven test execution
---
## How Golden Runs Guide Implementation
### PHASE1_TEST0_GOLDEN.md Requirements
**Step 4 (lines 54-59) specifies prerequisites:**
```
4. Confirmed plugin status in the UI:
- ⚙️ Plugin Settings: ✅ Configured
- 🔌 Native Fetcher: ✅ Configured
- 🔔 Notifications: ✅ Granted
- ⏰ Exact Alarms: ✅ Granted
- 📢 Channel: ✅ Enabled (High)
```
**Current Implementation:**
- ✅ Checks notification permissions (`check_permissions()`)
- ✅ Checks plugin configuration (`check_plugin_configured()`)
- ✅ Verifies exact alarms permission (via `dumpsys package`)
- ✅ Verifies channel status (via logcat)
- ✅ Comprehensive `verify_all_prerequisites()` function added
- ✅ Final UI verification prompt for all 5 items (aligned with golden run step 4)
**Alignment Status:**
**FULLY ALIGNED** - Test 0 now verifies all 5 prerequisites as specified in the golden run:
1. Plugin Settings: Configured (via `check_plugin_configured()`)
2. Native Fetcher: Configured (via logs + plugin config check)
3. Notifications: Granted (via `check_permissions()`)
4. Exact Alarms: Granted (via `dumpsys package`)
5. Channel: Enabled (High) (via logcat + UI verification)
The implementation includes both programmatic checks and a final UI verification prompt that matches the golden run's step 4.
### Pass/Fail Checklist Alignment
**Golden Run Checklist (lines 172-194):**
1. **Script Output:**
- ✅ "Found 1 notification alarm (expected: 1)" - **Implemented**
- ✅ "Notification alarms after rollover: 1" - **Implemented**
- ✅ "TEST 0 PASSED" verdict - **Implemented**
2. **UI State:**
- ⚠️ "Before scheduling: Active Schedules: No" - **Not explicitly checked**
- ⚠️ "After scheduling: Active Schedules: Yes" - **Not explicitly checked**
- ⚠️ "After rollover: Active Schedules: Yes" - **Not explicitly checked**
3. **dumpsys alarm:**
- ✅ Exactly one RTC_WAKEUP alarm - **Implemented**
- ⚠️ origWhen timestamp 24h later - **Not explicitly verified**
4. **logcat:**
- ⚠️ `source=TEST_NOTIFICATION` sequence - **Not explicitly checked**
- ⚠️ `source=ROLLOVER_ON_FIRE` sequence - **Not explicitly checked**
- ✅ No duplicate DNP-SCHEDULE entries - **Partially checked**
**Current Implementation Status:**
- Core functionality: ✅ Aligned
- Detailed verification: ⚠️ Partial alignment
- UI state checks: ❌ Not implemented
- Logcat pattern verification: ⚠️ Partial
---
## How Runbook Guides Implementation
### RUNBOOK-TESTING.md Structure
**Section 3: Phase 1: Daily Rollover & Recovery**
**Test Descriptions (lines 144-186):**
- Defines what each test should do
- Provides time estimates
- Lists key steps
**Current Implementation:**
- ✅ Test purposes match runbook descriptions
- ✅ Test steps align with runbook key steps
- ✅ Time estimates are documented
**Evidence Location (lines 187-194):**
- Specifies where evidence should be saved
- Current implementation: ✅ Aligned (`runs/<RUN_ID>/`)
---
## Alignment Recommendations
### High Priority
1. ✅ **Add Prerequisite Verification to Test 0** - **COMPLETED**
- ✅ Added `verify_all_prerequisites()` function
- ✅ Verifies all 5 UI status items (Plugin Settings, Native Fetcher, Notifications, Exact Alarms, Channel)
- ✅ Includes programmatic checks and final UI verification prompt
- ✅ Aligned with golden run step 4
2. **Add UI State Checks**
- Verify "Active Schedules" state before/after scheduling
- Verify "Next Notification" time updates correctly
- Can be done via UI inspection or plugin API
3. **Add Logcat Pattern Verification**
- Check for `source=TEST_NOTIFICATION` sequence
- Check for `source=ROLLOVER_ON_FIRE` sequence
- Verify timing relationships match golden run
### Medium Priority
4. **Add Alarm Timestamp Verification**
- Verify `origWhen` is exactly 24h after initial time
- Can extract from `dumpsys alarm` output
5. **Document Manual vs Automated Checks**
- Clearly distinguish what script verifies vs what operator verifies
- Align with golden run's manual verification steps
### Low Priority
6. **Add Screenshot Verification**
- Golden run references screenshots
- Could add automated screenshot comparison (future)
---
## Current Implementation vs Golden Run
### Test 0: Daily Rollover Verification
| Requirement | Golden Run | Current Implementation | Status |
|------------|------------|------------------------|--------|
| Prerequisites (5 items) | ✅ All verified | ✅ All verified | ✅ Aligned |
| Schedule notification | ✅ Manual | ✅ Manual | ✅ Aligned |
| Wait for fire/advance time | ✅ Manual | ✅ Manual | ✅ Aligned |
| Verify alarm count | ✅ 1 alarm | ✅ 1 alarm | ✅ Aligned |
| Verify rollover | ✅ Tomorrow scheduled | ✅ Tomorrow scheduled | ✅ Aligned |
| UI state checks | ✅ Before/after | ❌ Not checked | Gap |
| Logcat patterns | ✅ Sequences verified | ⚠️ Partial | Partial |
| Alarm timestamp | ✅ 24h verified | ❌ Not verified | Gap |
### Test 1: Force-Stop Recovery
| Requirement | Golden Run | Current Implementation | Status |
|------------|------------|------------------------|--------|
| Clean start | ✅ Auto-reset | ✅ Auto-reset | ✅ Aligned |
| Schedule notification | ✅ Manual | ✅ Manual | ✅ Aligned |
| Force-stop app | ✅ Manual | ✅ Manual | ✅ Aligned |
| Verify alarms cleared | ✅ 0 alarms | ✅ 0 alarms | ✅ Aligned |
| Relaunch app | ✅ Manual | ✅ Manual | ✅ Aligned |
| Verify recovery | ✅ 1 alarm restored | ✅ 1 alarm restored | ✅ Aligned |
| Recovery logs | ✅ FORCE_STOP scenario | ✅ FORCE_STOP scenario | ✅ Aligned |
---
## Next Steps
1. **Enhance Test 0 Prerequisites**
- Add explicit checks for exact alarms and channel status
- Use plugin API to verify all 5 items programmatically
2. **Add UI State Verification**
- Check "Active Schedules" state via plugin API or UI inspection
- Verify "Next Notification" time updates
3. **Add Logcat Pattern Checks**
- Verify `source=TEST_NOTIFICATION` and `source=ROLLOVER_ON_FIRE` sequences
- Check timing relationships
4. **Update Golden Run Documents**
- Document which checks are automated vs manual
- Clarify operator responsibilities
---
## Conclusion
**Current State:**
- Core test functionality: ✅ Well aligned with golden runs
- Detailed verification: ⚠️ Partial alignment
- Prerequisites: ✅ All 5 items verified (Plugin Settings, Native Fetcher, Notifications, Exact Alarms, Channel)
**Key Insight:**
The golden run documents specify **what** should be verified, but don't always specify **how** (automated vs manual). Our implementation should:
1. Automate what can be automated
2. Clearly document what requires manual verification
3. Align with the golden run's verification sequence
**Priority:**
Focus on closing the remaining verification gaps (UI state checks, logcat pattern sequences, alarm timestamp verification) to fully align with the golden run specification; the prerequisite checks are already complete.

View File

@@ -39,6 +39,9 @@ SELECTED_TESTS=()
test1_force_stop_cleared_alarms() {
section "TEST 1: Force Stop Alarms Cleared"
# Set test context
set_test_context "phase2" "phase2_test1" ""
info "Purpose: Verify force stop detection and alarm rescheduling when alarms are cleared."
info "Expected time: 5-8 minutes"
info "Automatable: Partial (requires manual force-stop verification)"
@@ -47,6 +50,8 @@ test1_force_stop_cleared_alarms() {
pause
# Capture initial state
set_test_context "phase2" "phase2_test1" "p2_t1_s1"
step_start "p2_t1_s1" "Launch app & check plugin status"
capture_alarms "phase2_test1_initial"
capture_logcat "phase2_test1_initial" "DNP" 50
@@ -62,10 +67,14 @@ test1_force_stop_cleared_alarms() {
ui_prompt "2) Now schedule at least one future notification (e.g., click 'Test Notification' to schedule for a few minutes in the future)."
step_pass "p2_t1_s1" "Plugin configured and notification scheduled"
# Capture before force-stop state
capture_alarms "phase2_test1_before_force_stop"
substep "Step 2: Verify alarms are scheduled"
set_test_context "phase2" "phase2_test1" "p2_t1_s2"
step_start "p2_t1_s2" "Verify alarms scheduled"
show_alarms
local before_count system_count
before_count="$(get_plugin_alarm_count)"
@@ -75,21 +84,29 @@ test1_force_stop_cleared_alarms() {
if [[ "$before_count" -eq 0 ]]; then
warn "No plugin alarms found before force stop; TEST 1 may not be meaningful."
step_warn "p2_t1_s2" "No alarms found"
elif [[ "$before_count" -eq 1 ]]; then
ok "Single plugin alarm confirmed (one per day)"
step_pass "p2_t1_s2" "Alarms verified"
else
warn "Found $before_count plugin alarms (expected: 1)"
step_warn "p2_t1_s2" "Unexpected alarm count"
fi
pause
substep "Step 3: Force stop app (should clear alarms on many devices)"
set_test_context "phase2" "phase2_test1" "p2_t1_s3"
step_start "p2_t1_s3" "Force stop app"
force_stop_app
# Capture after force-stop state
capture_alarms "phase2_test1_after_force_stop"
step_pass "p2_t1_s3" "App force stopped"
substep "Step 4: Check alarms after force stop"
set_test_context "phase2" "phase2_test1" "p2_t1_s4"
step_start "p2_t1_s4" "Check alarms after force stop"
local after_count system_after
after_count="$(get_plugin_alarm_count)"
system_after="$(get_system_alarm_count)"
@@ -100,10 +117,14 @@ test1_force_stop_cleared_alarms() {
if [[ "$after_count" -gt 0 ]]; then
if [[ "$STRICTNESS" == "hard" ]]; then
error "Plugin alarms still present after force stop (strict mode: hard)"
step_fail "p2_t1_s4" "Alarms not cleared"
else
warn "Plugin alarms still present after force stop. This device/OS may not clear alarms on force stop."
warn "TEST 1 will continue but may not fully validate FORCE_STOP scenario."
step_warn "p2_t1_s4" "Alarms may not be cleared (device-specific)"
fi
else
step_pass "p2_t1_s4" "Alarms cleared"
fi
pause
@@ -115,6 +136,8 @@ test1_force_stop_cleared_alarms() {
info "Boot flag cleared (if it existed)"
substep "Step 5: Launch app (triggers recovery) and capture logs"
set_test_context "phase2" "phase2_test1" "p2_t1_s5"
step_start "p2_t1_s5" "Relaunch app & verify recovery"
clear_logs
launch_app
sleep 5 # give recovery a moment to run
@@ -191,6 +214,13 @@ test1_force_stop_cleared_alarms() {
fi
# Emit verdict
if [[ "$test1_passed" == "true" ]]; then
step_pass "p2_t1_s5" "Recovery successful"
else
step_fail "p2_t1_s5" "Recovery failed or inconclusive"
fi
set_test_context "phase2" "phase2_test1" "p2_t1_s6"
if [[ "$test1_passed" == "true" ]]; then
verdict_pass "phase2_test1_force_stop_cleared" "$test1_message"
elif [[ "$STRICTNESS" == "hard" ]]; then
@@ -212,6 +242,9 @@ test1_force_stop_cleared_alarms() {
test2_force_stop_intact_alarms() {
section "TEST 2: Force Stop / Process Stop Alarms Intact"
# Set test context
set_test_context "phase2" "phase2_test2" ""
info "Purpose: Verify that heavy FORCE_STOP recovery does not run when alarms are still present."
info "Expected time: 4-6 minutes"
info "Automatable: Partial (requires manual verification)"
@@ -220,6 +253,8 @@ test2_force_stop_intact_alarms() {
pause
# Capture initial state
set_test_context "phase2" "phase2_test2" "p2_t2_s1"
step_start "p2_t2_s1" "Schedule notification"
capture_alarms "phase2_test2_initial"
capture_logcat "phase2_test2_initial" "DNP" 50
@@ -229,10 +264,14 @@ test2_force_stop_intact_alarms() {
Press Enter when done."
step_pass "p2_t2_s1" "Notification scheduled"
# Capture before soft stop state
capture_alarms "phase2_test2_before_soft_stop"
substep "Step 2: Verify alarms are scheduled"
set_test_context "phase2" "phase2_test2" "p2_t2_s2"
step_start "p2_t2_s2" "Force stop app"
show_alarms
local before system_before
before="$(get_plugin_alarm_count)"
@@ -242,24 +281,32 @@ test2_force_stop_intact_alarms() {
if [[ "$before" -eq 0 ]]; then
warn "No plugin alarms found; TEST 2 may not be meaningful."
step_warn "p2_t2_s2" "No alarms found"
elif [[ "$before" -eq 1 ]]; then
ok "Single plugin alarm confirmed (one per day)"
step_pass "p2_t2_s2" "Alarms verified"
else
warn "Found $before plugin alarms (expected: 1)"
step_warn "p2_t2_s2" "Unexpected alarm count"
fi
pause
substep "Step 3: Simulate a 'soft' stop or process kill that does NOT clear alarms"
set_test_context "phase2" "phase2_test2" "p2_t2_s2"
step_start "p2_t2_s2" "Force stop app"
info "Killing app process (non-destructive - may not clear alarms)..."
$ADB_BIN shell am kill "$APP_ID" || true
sleep 2
ok "Kill signal sent (soft stop)"
step_pass "p2_t2_s2" "App force stopped"
# Capture after soft stop state
capture_alarms "phase2_test2_after_soft_stop"
substep "Step 4: Verify alarms are still scheduled"
set_test_context "phase2" "phase2_test2" "p2_t2_s3"
step_start "p2_t2_s3" "Verify alarms intact"
local after system_after
after="$(get_plugin_alarm_count)"
system_after="$(get_system_alarm_count)"
@@ -270,14 +317,20 @@ test2_force_stop_intact_alarms() {
if [[ "$after" -eq 0 ]]; then
if [[ "$STRICTNESS" == "hard" ]]; then
error "Alarms cleared after soft stop (strict mode: hard)"
step_fail "p2_t2_s3" "Alarms cleared"
else
warn "Alarms appear cleared after soft stop; this environment may not distinguish TEST 2 well."
step_warn "p2_t2_s3" "Alarms may be cleared"
fi
else
step_pass "p2_t2_s3" "Alarms intact"
fi
pause
substep "Step 5: Relaunch app and check recovery logs"
set_test_context "phase2" "phase2_test2" "p2_t2_s4"
step_start "p2_t2_s4" "Relaunch & verify behavior"
clear_logs
launch_app
sleep 5
@@ -341,7 +394,14 @@ test2_force_stop_intact_alarms() {
fi
fi
if [[ "$test2_passed" == "true" ]]; then
step_pass "p2_t2_s4" "Recovery behavior correct"
else
step_fail "p2_t2_s4" "Recovery behavior incorrect"
fi
# Emit verdict
set_test_context "phase2" "phase2_test2" "p2_t2_s5"
if [[ "$test2_passed" == "true" ]]; then
verdict_pass "phase2_test2_force_stop_intact" "$test2_message"
elif [[ "$STRICTNESS" == "hard" ]]; then
@@ -360,6 +420,9 @@ test2_force_stop_intact_alarms() {
test3_first_launch_no_schedules() {
section "TEST 3: First Launch / No Schedules Safeguard"
# Set test context
set_test_context "phase2" "phase2_test3" ""
info "Purpose: Ensure force-stop recovery is NOT triggered when DB is empty or plugin isn't configured."
info "Expected time: 3-5 minutes"
info "Automatable: Yes"
@@ -368,6 +431,8 @@ test3_first_launch_no_schedules() {
pause
# Capture initial state (before uninstall)
set_test_context "phase2" "phase2_test3" "p2_t3_s1"
step_start "p2_t3_s1" "Fresh install"
capture_alarms "phase2_test3_initial"
substep "Step 1: Uninstall app to clear DB/state"
@@ -379,8 +444,10 @@ test3_first_launch_no_schedules() {
substep "Step 2: Reinstall app"
if $ADB_BIN install -r "$APK_PATH"; then
ok "App installed"
step_pass "p2_t3_s1" "Fresh install complete"
else
error "Reinstall failed"
step_fail "p2_t3_s1" "Reinstall failed"
exit 1
fi
@@ -391,6 +458,8 @@ test3_first_launch_no_schedules() {
pause
substep "Step 3: Launch app for the first time"
set_test_context "phase2" "phase2_test3" "p2_t3_s2"
step_start "p2_t3_s2" "Launch app & verify no recovery"
launch_app
sleep 5
@@ -400,6 +469,8 @@ test3_first_launch_no_schedules() {
capture_screenshot "phase2_test3_after_first_launch"
substep "Step 4: Collect logs and ensure no force-stop recovery ran"
set_test_context "phase2" "phase2_test3" "p2_t3_s3"
step_start "p2_t3_s3" "Verify no recovery ran"
local logs
logs="$(get_recovery_logs)"
echo "$logs"
@@ -443,7 +514,14 @@ test3_first_launch_no_schedules() {
fi
fi
if [[ "$test3_passed" == "true" ]]; then
step_pass "p2_t3_s3" "No recovery ran (correct)"
else
step_fail "p2_t3_s3" "Recovery ran when it shouldn't"
fi
# Emit verdict
set_test_context "phase2" "phase2_test3" "p2_t3_s4"
if [[ "$test3_passed" == "true" ]]; then
verdict_pass "phase2_test3_first_launch_no_schedules" "$test3_message"
elif [[ "$STRICTNESS" == "hard" ]]; then

View File

@@ -51,6 +51,9 @@ extract_scenario_from_logs() {
test1_boot_future_alarms() {
section "TEST 1: Boot with Future Alarms"
# Set test context
set_test_context "phase3" "phase3_test1" ""
info "Purpose: Verify alarms are recreated on boot when schedules have future run times."
info "Expected time: 2-3 minutes (includes 30-60s reboot)"
info "Automatable: Partial (requires manual reboot confirmation)"
@@ -60,6 +63,8 @@ test1_boot_future_alarms() {
pause
# Capture initial state
set_test_context "phase3" "phase3_test1" "p3_t1_s1"
step_start "p3_t1_s1" "Launch app & check plugin status"
capture_alarms "phase3_test1_initial"
capture_logcat "phase3_test1_initial" "DNP" 50
@@ -75,10 +80,14 @@ test1_boot_future_alarms() {
ui_prompt "2) Now schedule at least one future notification (e.g., click 'Test Notification' to schedule for a few minutes in the future)."
step_pass "p3_t1_s1" "Plugin configured and notification scheduled"
# Capture before reboot state
capture_alarms "phase3_test1_before_reboot"
substep "Step 2: Verify alarms are scheduled"
set_test_context "phase3" "phase3_test1" "p3_t1_s2"
step_start "p3_t1_s2" "Verify alarms scheduled"
show_alarms
local before_count system_before
before_count="$(get_plugin_alarm_count)"
@@ -88,20 +97,28 @@ test1_boot_future_alarms() {
if [[ "$before_count" -eq 0 ]]; then
warn "No plugin alarms found before reboot; TEST 1 may not be meaningful."
step_warn "p3_t1_s2" "No alarms found"
elif [[ "$before_count" -eq 1 ]]; then
ok "Single plugin alarm confirmed (one per day)"
step_pass "p3_t1_s2" "Alarms verified"
else
warn "Found $before_count plugin alarms (expected: 1)"
step_warn "p3_t1_s2" "Unexpected alarm count"
fi
pause
substep "Step 3: Reboot emulator"
set_test_context "phase3" "phase3_test1" "p3_t1_s3"
step_start "p3_t1_s3" "Reboot emulator"
warn "The emulator will reboot now. This will take 30-60 seconds."
pause
reboot_emulator
step_pass "p3_t1_s3" "Emulator rebooted"
substep "Step 4: Collect boot recovery logs"
set_test_context "phase3" "phase3_test1" "p3_t1_s4"
step_start "p3_t1_s4" "Collect boot recovery logs"
info "Collecting recovery logs from boot..."
sleep 2 # Give recovery a moment to complete
@@ -167,11 +184,16 @@ test1_boot_future_alarms() {
warn "Alarms were not recreated despite recovery success. Check alarm scheduling logic."
test1_message="Boot recovery succeeded but alarms not recreated (rescheduled=$rescheduled, after_count=$after_count)"
test1_passed=false
step_fail "p3_t1_s4" "Alarms not recreated"
elif [[ "$after_count" -gt 0 && "$test1_passed" == "true" ]]; then
ok "Alarms successfully recreated after boot (after_count=$after_count)"
step_pass "p3_t1_s4" "Boot recovery successful"
else
step_fail "p3_t1_s4" "Boot recovery failed"
fi
# Emit verdict
set_test_context "phase3" "phase3_test1" "p3_t1_s5"
if [[ "$test1_passed" == "true" ]]; then
verdict_pass "phase3_test1_boot_future_alarms" "$test1_message"
else
@@ -188,6 +210,9 @@ test1_boot_future_alarms() {
test2_boot_past_alarms() {
section "TEST 2: Boot with Past Alarms"
# Set test context
set_test_context "phase3" "phase3_test2" ""
info "Purpose: Verify missed alarms are detected and next occurrence is scheduled on boot."
info "Expected time: 5-6 minutes (includes 3min wait + 30-60s reboot)"
info "Automatable: Partial (requires manual time advancement or wait)"
@@ -198,6 +223,8 @@ test2_boot_past_alarms() {
pause
# Capture initial state
set_test_context "phase3" "phase3_test2" "p3_t2_s1"
step_start "p3_t2_s1" "Schedule notification for past time"
capture_alarms "phase3_test2_initial"
capture_logcat "phase3_test2_initial" "DNP" 50
@@ -215,18 +242,25 @@ test2_boot_past_alarms() {
After scheduling, we'll wait for the alarm time to pass, then reboot."
step_pass "p3_t2_s1" "Notification scheduled"
# Capture before wait state
capture_alarms "phase3_test2_before_wait"
substep "Step 2: Wait for alarm time to pass"
set_test_context "phase3" "phase3_test2" "p3_t2_s2"
step_start "p3_t2_s2" "Wait for alarm time to pass"
info "Waiting 3 minutes for scheduled alarm time to pass..."
warn "You can manually advance system time if needed (requires root/emulator)"
sleep 180 # Wait 3 minutes
step_pass "p3_t2_s2" "Alarm time passed"
# Capture after wait state
capture_alarms "phase3_test2_after_wait"
substep "Step 3: Verify alarm time has passed"
set_test_context "phase3" "phase3_test2" "p3_t2_s3"
step_start "p3_t2_s3" "Reboot emulator"
info "Alarm time should now be in the past"
show_alarms
@@ -236,8 +270,11 @@ test2_boot_past_alarms() {
warn "The emulator will reboot now. This will take 30-60 seconds."
pause
reboot_emulator
step_pass "p3_t2_s3" "Emulator rebooted"
substep "Step 5: Collect boot recovery logs"
set_test_context "phase3" "phase3_test2" "p3_t2_s4"
step_start "p3_t2_s4" "Collect boot recovery logs"
info "Collecting recovery logs from boot..."
sleep 2
@@ -288,7 +325,14 @@ test2_boot_past_alarms() {
test2_message="No missed alarms detected. Verify alarm time actually passed before reboot (missed=$missed, rescheduled=$rescheduled)"
fi
if [[ "$test2_passed" == "true" ]]; then
step_pass "p3_t2_s4" "Past alarms detected and rescheduled"
else
step_fail "p3_t2_s4" "Past alarms not detected"
fi
# Emit verdict
set_test_context "phase3" "phase3_test2" "p3_t2_s5"
if [[ "$test2_passed" == "true" ]]; then
verdict_pass "phase3_test2_boot_past_alarms" "$test2_message"
else
@@ -305,6 +349,9 @@ test2_boot_past_alarms() {
test3_boot_no_schedules() {
section "TEST 3: Boot with No Schedules"
# Set test context
set_test_context "phase3" "phase3_test3" ""
info "Purpose: Verify boot recovery handles empty database gracefully."
info "Expected time: 2-3 minutes (includes 30-60s reboot)"
info "Automatable: Yes"
@@ -314,6 +361,8 @@ test3_boot_no_schedules() {
pause
# Capture initial state (before uninstall)
set_test_context "phase3" "phase3_test3" "p3_t3_s1"
step_start "p3_t3_s1" "Fresh install"
capture_alarms "phase3_test3_initial"
substep "Step 1: Uninstall app to clear DB/state"
@@ -325,8 +374,10 @@ test3_boot_no_schedules() {
substep "Step 2: Reinstall app"
if $ADB_BIN install -r "$APK_PATH"; then
ok "App installed"
step_pass "p3_t3_s1" "Fresh install complete"
else
error "Reinstall failed"
step_fail "p3_t3_s1" "Reinstall failed"
exit 1
fi
@@ -337,12 +388,17 @@ test3_boot_no_schedules() {
pause
substep "Step 3: Reboot emulator WITHOUT scheduling anything"
set_test_context "phase3" "phase3_test3" "p3_t3_s2"
step_start "p3_t3_s2" "Reboot without schedules"
warn "Do NOT schedule any notifications. The app should have no schedules in the database."
warn "The emulator will reboot now. This will take 30-60 seconds."
pause
reboot_emulator
step_pass "p3_t3_s2" "Emulator rebooted"
substep "Step 4: Collect boot recovery logs"
set_test_context "phase3" "phase3_test3" "p3_t3_s3"
step_start "p3_t3_s3" "Verify no recovery ran"
info "Collecting recovery logs from boot..."
sleep 2
@@ -390,7 +446,14 @@ test3_boot_no_schedules() {
test3_message="Logs present but no rescheduling; review scenario handling to ensure it's explicit about NONE / NO_SCHEDULES (scenario=${scenario:-<none>}, rescheduled=$rescheduled)"
fi
if [[ "$test3_passed" == "true" ]]; then
step_pass "p3_t3_s3" "No recovery ran (correct)"
else
step_fail "p3_t3_s3" "Recovery ran when it shouldn't"
fi
# Emit verdict
set_test_context "phase3" "phase3_test3" "p3_t3_s4"
if [[ "$test3_passed" == "true" ]]; then
verdict_pass "phase3_test3_boot_no_schedules" "$test3_message"
else
@@ -407,6 +470,9 @@ test3_boot_no_schedules() {
test4_silent_boot_recovery() {
section "TEST 4: Silent Boot Recovery (App Never Opened)"
# Set test context
set_test_context "phase3" "phase3_test4" ""
info "Purpose: Verify boot recovery occurs even when the app is never opened after reboot."
info "Expected time: 2-3 minutes (includes 30-60s reboot)"
info "Automatable: Partial (requires manual verification that app was not opened)"
@@ -416,6 +482,8 @@ test4_silent_boot_recovery() {
pause
# Capture initial state
set_test_context "phase3" "phase3_test4" "p3_t4_s1"
step_start "p3_t4_s1" "Schedule notification"
capture_alarms "phase3_test4_initial"
capture_logcat "phase3_test4_initial" "DNP" 50
@@ -431,10 +499,14 @@ test4_silent_boot_recovery() {
ui_prompt "2) Click 'Test Notification' to schedule a notification for a few minutes in the future."
step_pass "p3_t4_s1" "Notification scheduled"
# Capture before reboot state
capture_alarms "phase3_test4_before_reboot"
substep "Step 2: Verify alarms are scheduled"
set_test_context "phase3" "phase3_test4" "p3_t4_s2"
step_start "p3_t4_s2" "Reboot without opening app"
show_alarms
local before_count system_before
before_count="$(get_plugin_alarm_count)"
@@ -444,10 +516,13 @@ test4_silent_boot_recovery() {
if [[ "$before_count" -eq 0 ]]; then
warn "No plugin alarms found; TEST 4 may not be meaningful."
step_warn "p3_t4_s2" "No alarms found"
elif [[ "$before_count" -eq 1 ]]; then
ok "Single plugin alarm confirmed (one per day)"
step_pass "p3_t4_s2" "Alarms verified"
else
warn "Found $before_count plugin alarms (expected: 1)"
step_warn "p3_t4_s2" "Unexpected alarm count"
fi
pause
@@ -457,8 +532,11 @@ test4_silent_boot_recovery() {
warn "The emulator will reboot now. This will take 30-60 seconds."
pause
reboot_emulator
step_pass "p3_t4_s2" "Emulator rebooted (app not opened)"
substep "Step 4: Collect boot recovery logs (without opening app)"
set_test_context "phase3" "phase3_test4" "p3_t4_s3"
step_start "p3_t4_s3" "Collect boot recovery logs"
info "Collecting recovery logs from boot (app was NOT opened)..."
sleep 2
@@ -518,7 +596,14 @@ test4_silent_boot_recovery() {
test4_message="Boot recovery not detected. Verify boot receiver is registered and has BOOT_COMPLETED permission (scenario=${scenario:-<none>}, rescheduled=$rescheduled)"
fi
if [[ "$test4_passed" == "true" ]]; then
step_pass "p3_t4_s3" "Silent boot recovery successful"
else
step_fail "p3_t4_s3" "Silent boot recovery failed"
fi
# Emit verdict
set_test_context "phase3" "phase3_test4" "p3_t4_s4"
if [[ "$test4_passed" == "true" ]]; then
verdict_pass "phase3_test4_silent_boot_recovery" "$test4_message"
else