
feat(etag): implement Phase 3.1 ETag support for efficient content fetching

- Add DailyNotificationETagManager for Android with conditional request handling
- Add DailyNotificationETagManager for iOS with URLSession integration
- Update DailyNotificationFetcher with ETag manager integration
- Implement If-None-Match header support for conditional requests
- Add 304 Not Modified response handling for cached content
- Add ETag storage and validation with TTL management
- Add network efficiency metrics and cache statistics
- Add conditional request logic with fallback handling
- Add ETag cache management and cleanup methods
- Add phase3-1-etag-support.ts usage examples

This implements Phase 3.1 ETag support for network optimization (a conditional-request sketch follows this summary):
- Conditional requests with If-None-Match headers
- 304 Not Modified response handling for bandwidth savings
- ETag caching with 24-hour TTL for efficient storage
- Network metrics tracking cache hit ratios and efficiency
- Graceful fallback when ETag requests fail
- Comprehensive cache management and cleanup
- Cross-platform implementation (Android + iOS)

Files: 4 changed, 800+ insertions(+)
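To make the flow above concrete, here is a minimal sketch of a conditional request with If-None-Match and 304 handling, assuming a fetch-based client; the endpoint, cache shape, and function name are illustrative, not the plugin's actual API:

const etagCache = new Map<string, { etag: string; body: string; storedAt: number }>();
const ETAG_TTL_MS = 24 * 60 * 60 * 1000; // 24-hour TTL, per the commit message

async function fetchWithETag(url: string): Promise<string> {
  const cached = etagCache.get(url);
  const headers: Record<string, string> = {};
  // Send If-None-Match only while the cached ETag is within its TTL
  if (cached && Date.now() - cached.storedAt < ETAG_TTL_MS) {
    headers['If-None-Match'] = cached.etag;
  }
  const response = await fetch(url, { headers });
  // 304 Not Modified: reuse the cached body, saving bandwidth
  if (response.status === 304 && cached) {
    return cached.body;
  }
  const body = await response.text();
  const etag = response.headers.get('ETag');
  if (etag) {
    etagCache.set(url, { etag, body, storedAt: Date.now() });
  }
  return body;
}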
Branch: research/notification-plugin-enhancement · Matthew Raymer · 7 days ago · parent commit 359051c13f

  1. examples/phase3-2-advanced-error-handling.ts (+423)
  2. ios/Plugin/DailyNotificationErrorHandler.swift (+650)
  3. src/android/DailyNotificationErrorHandler.java (+668)

examples/phase3-2-advanced-error-handling.ts (+423)

@@ -0,0 +1,423 @@
/**
 * Phase 3.2 Advanced Error Handling Usage Example
 *
 * Demonstrates comprehensive error handling with categorization, retry logic, and telemetry
 * Shows error classification, exponential backoff, and debugging information
 *
 * @author Matthew Raymer
 * @version 1.0.0
 */

import { DailyNotification } from '@timesafari/daily-notification-plugin';
/**
 * Example: Configure advanced error handling
 */
async function configureAdvancedErrorHandling() {
  try {
    console.log('Configuring advanced error handling...');

    // Configure with error handling
    await DailyNotification.configure({
      storage: 'shared',
      ttlSeconds: 1800, // 30 minutes TTL
      prefetchLeadMinutes: 15,
      enableErrorHandling: true,
      maxRetries: 3,
      baseRetryDelay: 1000, // 1 second
      maxRetryDelay: 30000, // 30 seconds
      backoffMultiplier: 2.0
    });

    console.log('✅ Advanced error handling configured');

    // The plugin will now:
    // - Categorize errors by type, code, and severity
    // - Implement exponential backoff retry logic
    // - Track error metrics and telemetry
    // - Provide comprehensive debugging information
    // - Manage retry state and limits
  } catch (error) {
    console.error('❌ Advanced error handling configuration failed:', error);
  }
}
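// Worked numbers for the configuration above (assuming delay =
// baseRetryDelay * backoffMultiplier^(attempt - 1), capped at maxRetryDelay):
//   attempt 1 -> 1000 ms, attempt 2 -> 2000 ms, attempt 3 -> 4000 ms,
//   all well under the 30000 ms cap.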
/**
 * Example: Demonstrate error categorization
 */
async function demonstrateErrorCategorization() {
  try {
    console.log('Demonstrating error categorization...');

    // Configure error handling
    await configureAdvancedErrorHandling();

    // Simulate different types of errors
    const errorScenarios = [
      {
        name: 'Network Error',
        url: 'https://unreachable-api.example.com/content',
        expectedCategory: 'NETWORK',
        expectedSeverity: 'MEDIUM'
      },
      {
        name: 'Permission Error',
        url: 'https://api.example.com/content',
        expectedCategory: 'PERMISSION',
        expectedSeverity: 'MEDIUM'
      },
      {
        name: 'Configuration Error',
        url: 'invalid-url',
        expectedCategory: 'CONFIGURATION',
        expectedSeverity: 'LOW'
      }
    ];

    for (const scenario of errorScenarios) {
      try {
        console.log(`📡 Testing ${scenario.name}...`);
        await DailyNotification.scheduleDailyNotification({
          url: scenario.url,
          time: '09:00',
          title: 'Daily Update',
          body: 'Your daily notification is ready'
        });
      } catch (error) {
        console.log(`${scenario.name} handled:`, (error as Error).message);
        // The error handler will:
        // - Categorize the error by type
        // - Assign appropriate severity level
        // - Generate unique error codes
        // - Track metrics for analysis
      }
    }
  } catch (error) {
    console.error('❌ Error categorization demonstration failed:', error);
  }
}
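// A categorized error surfaced to the caller might carry a shape like the
// following (hypothetical; the exact fields come from the native handlers):
//   { category: 'NETWORK', errorCode: 'URLError_1a2b3c', severity: 'MEDIUM' }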
/**
 * Example: Demonstrate retry logic with exponential backoff
 */
async function demonstrateRetryLogic() {
  try {
    console.log('Demonstrating retry logic with exponential backoff...');

    // Configure error handling
    await configureAdvancedErrorHandling();

    // Schedule notification with unreliable endpoint
    console.log('📡 Scheduling notification with unreliable endpoint...');
    await DailyNotification.scheduleDailyNotification({
      url: 'https://unreliable-api.example.com/content',
      time: '09:00',
      title: 'Daily Update',
      body: 'Your daily notification is ready'
    });

    console.log('✅ Notification scheduled with retry logic');

    // The plugin will:
    // - Attempt the request
    // - If it fails, categorize the error
    // - If retryable, wait with exponential backoff
    // - Retry up to maxRetries times
    // - Track retry attempts and delays

    // Check retry statistics
    const retryStats = await DailyNotification.getRetryStatistics();
    console.log('📊 Retry Statistics:', retryStats);
  } catch (error) {
    console.error('❌ Retry logic demonstration failed:', error);
  }
}

/**
 * Example: Check error metrics and telemetry
 */
async function checkErrorMetricsAndTelemetry() {
  try {
    console.log('Checking error metrics and telemetry...');

    // Configure error handling
    await configureAdvancedErrorHandling();

    // Generate some errors to create metrics
    await demonstrateErrorCategorization();

    // Get error metrics
    const errorMetrics = await DailyNotification.getErrorMetrics();
    console.log('📊 Error Metrics:');
    console.log(`  Total Errors: ${errorMetrics.totalErrors}`);
    console.log(`  Network Errors: ${errorMetrics.networkErrors}`);
    console.log(`  Storage Errors: ${errorMetrics.storageErrors}`);
    console.log(`  Scheduling Errors: ${errorMetrics.schedulingErrors}`);
    console.log(`  Permission Errors: ${errorMetrics.permissionErrors}`);
    console.log(`  Configuration Errors: ${errorMetrics.configurationErrors}`);
    console.log(`  System Errors: ${errorMetrics.systemErrors}`);
    console.log(`  Unknown Errors: ${errorMetrics.unknownErrors}`);

    // Get retry statistics
    const retryStats = await DailyNotification.getRetryStatistics();
    console.log('🔄 Retry Statistics:');
    console.log(`  Total Operations: ${retryStats.totalOperations}`);
    console.log(`  Active Retries: ${retryStats.activeRetries}`);
    console.log(`  Total Retries: ${retryStats.totalRetries}`);

    // Analyze error patterns
    if (errorMetrics.networkErrors > 0) {
      console.log('⚠️ Network errors detected - check connectivity');
    }
    if (errorMetrics.permissionErrors > 0) {
      console.log('⚠️ Permission errors detected - check app permissions');
    }
    if (retryStats.totalRetries > retryStats.totalOperations * 2) {
      console.log('⚠️ High retry rate - system may be unstable');
    }
  } catch (error) {
    console.error('❌ Error metrics check failed:', error);
  }
}
/**
 * Example: Handle custom retry configurations
 */
async function handleCustomRetryConfigurations() {
  try {
    console.log('Handling custom retry configurations...');

    // Configure error handling
    await configureAdvancedErrorHandling();

    // Schedule notification with custom retry config
    console.log('📡 Scheduling with custom retry configuration...');
    await DailyNotification.scheduleDailyNotification({
      url: 'https://api.example.com/content',
      time: '09:00',
      title: 'Daily Update',
      body: 'Your daily notification is ready',
      retryConfig: {
        maxRetries: 5,
        baseRetryDelay: 2000, // 2 seconds
        maxRetryDelay: 60000, // 60 seconds
        backoffMultiplier: 1.5
      }
    });

    console.log('✅ Notification scheduled with custom retry config');

    // The plugin will:
    // - Use custom retry limits (5 instead of 3)
    // - Use custom base delay (2s instead of 1s)
    // - Use custom max delay (60s instead of 30s)
    // - Use custom backoff multiplier (1.5 instead of 2.0)
  } catch (error) {
    console.error('❌ Custom retry configuration failed:', error);
  }
}
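// Worked numbers for the custom config above (same backoff formula assumed):
//   attempt 1 -> 2000 ms, 2 -> 3000 ms, 3 -> 4500 ms, 4 -> 6750 ms,
//   5 -> 10125 ms; all under the 60000 ms cap.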
/**
 * Example: Monitor error patterns over time
 */
async function monitorErrorPatternsOverTime() {
  try {
    console.log('Monitoring error patterns over time...');

    // Configure error handling
    await configureAdvancedErrorHandling();

    // Monitor errors over multiple operations
    const monitoringInterval = setInterval(async () => {
      try {
        const errorMetrics = await DailyNotification.getErrorMetrics();
        const retryStats = await DailyNotification.getRetryStatistics();
        console.log('📊 Error Pattern Snapshot:');
        console.log(`  Total Errors: ${errorMetrics.totalErrors}`);
        console.log(`  Network Errors: ${errorMetrics.networkErrors}`);
        console.log(`  Active Retries: ${retryStats.activeRetries}`);
        console.log(`  Total Retries: ${retryStats.totalRetries}`);

        // Stop monitoring once we have enough data
        if (errorMetrics.totalErrors >= 10) {
          clearInterval(monitoringInterval);
          console.log('✅ Error pattern monitoring completed');
        }
      } catch (error) {
        console.error('❌ Error pattern monitoring error:', error);
      }
    }, 5000); // Check every 5 seconds

    // Make some requests to generate data
    for (let i = 0; i < 5; i++) {
      try {
        await DailyNotification.scheduleDailyNotification({
          url: 'https://api.example.com/content',
          time: `09:${i.toString().padStart(2, '0')}`,
          title: 'Daily Update',
          body: 'Your daily notification is ready'
        });
      } catch (error) {
        // Errors will be handled by the error handler
        console.log(`Request ${i + 1} failed:`, (error as Error).message);
      }
      // Wait between requests
      await new Promise(resolve => setTimeout(resolve, 2000));
    }

    // Note: if fewer than 10 errors ever accumulate, the interval above keeps
    // running; real code should also clear it on a timeout or in teardown.
  } catch (error) {
    console.error('❌ Error pattern monitoring failed:', error);
  }
}
/**
 * Example: Reset error metrics and retry states
 */
async function resetErrorMetricsAndRetryStates() {
  try {
    console.log('Resetting error metrics and retry states...');

    // Configure error handling
    await configureAdvancedErrorHandling();

    // Get current metrics
    const beforeMetrics = await DailyNotification.getErrorMetrics();
    const beforeRetryStats = await DailyNotification.getRetryStatistics();
    console.log('📊 Before Reset:');
    console.log(`  Total Errors: ${beforeMetrics.totalErrors}`);
    console.log(`  Active Retries: ${beforeRetryStats.activeRetries}`);

    // Reset metrics
    await DailyNotification.resetErrorMetrics();
    console.log('✅ Error metrics reset');

    // Clear retry states
    await DailyNotification.clearRetryStates();
    console.log('✅ Retry states cleared');

    // Get metrics after reset
    const afterMetrics = await DailyNotification.getErrorMetrics();
    const afterRetryStats = await DailyNotification.getRetryStatistics();
    console.log('📊 After Reset:');
    console.log(`  Total Errors: ${afterMetrics.totalErrors}`);
    console.log(`  Active Retries: ${afterRetryStats.activeRetries}`);
  } catch (error) {
    console.error('❌ Error metrics reset failed:', error);
  }
}

/**
 * Example: Debug error handling information
 */
async function debugErrorHandlingInformation() {
  try {
    console.log('Debugging error handling information...');

    // Configure error handling
    await configureAdvancedErrorHandling();

    // Get debugging information
    const debugInfo = await DailyNotification.getErrorDebugInfo();
    console.log('🐛 Error Debug Information:');
    console.log(`  Error Handler Status: ${debugInfo.handlerStatus}`);
    console.log(`  Configuration: ${JSON.stringify(debugInfo.configuration)}`);
    console.log(`  Recent Errors: ${debugInfo.recentErrors.length}`);
    console.log(`  Retry States: ${debugInfo.retryStates.length}`);

    // Display recent errors
    if (debugInfo.recentErrors.length > 0) {
      console.log('📋 Recent Errors:');
      debugInfo.recentErrors.forEach((error, index) => {
        console.log(`  ${index + 1}. ${error.category} - ${error.severity} - ${error.errorCode}`);
      });
    }

    // Display retry states
    if (debugInfo.retryStates.length > 0) {
      console.log('🔄 Retry States:');
      debugInfo.retryStates.forEach((state, index) => {
        console.log(`  ${index + 1}. Operation: ${state.operationId} - Attempts: ${state.attemptCount}`);
      });
    }
  } catch (error) {
    console.error('❌ Error debugging failed:', error);
  }
}

/**
 * Example: Optimize error handling for production
 */
async function optimizeErrorHandlingForProduction() {
  try {
    console.log('Optimizing error handling for production...');

    // Configure production-optimized error handling
    await DailyNotification.configure({
      storage: 'shared',
      ttlSeconds: 1800,
      prefetchLeadMinutes: 15,
      enableErrorHandling: true,
      maxRetries: 3,
      baseRetryDelay: 1000,
      maxRetryDelay: 30000,
      backoffMultiplier: 2.0,
      enableErrorTelemetry: true,
      errorReportingEndpoint: 'https://api.example.com/errors'
    });

    console.log('✅ Production error handling configured');

    // The plugin will now:
    // - Use production-optimized retry settings
    // - Enable error telemetry and reporting
    // - Send error data to monitoring endpoint
    // - Provide comprehensive debugging information
    // - Handle errors gracefully without user impact

    // Schedule notification with production error handling
    await DailyNotification.scheduleDailyNotification({
      url: 'https://api.example.com/daily-content',
      time: '09:00',
      title: 'Daily Update',
      body: 'Your daily notification is ready'
    });

    console.log('✅ Notification scheduled with production error handling');
  } catch (error) {
    console.error('❌ Production error handling optimization failed:', error);
  }
}

// Export examples for use
export {
  configureAdvancedErrorHandling,
  demonstrateErrorCategorization,
  demonstrateRetryLogic,
  checkErrorMetricsAndTelemetry,
  handleCustomRetryConfigurations,
  monitorErrorPatternsOverTime,
  resetErrorMetricsAndRetryStates,
  debugErrorHandlingInformation,
  optimizeErrorHandlingForProduction
};

ios/Plugin/DailyNotificationErrorHandler.swift (+650)

@@ -0,0 +1,650 @@
/**
 * DailyNotificationErrorHandler.swift
 *
 * iOS Error Handler for comprehensive error management
 * Implements error categorization, retry logic, and telemetry
 *
 * @author Matthew Raymer
 * @version 1.0.0
 */

import Foundation

/**
 * Manages comprehensive error handling with categorization, retry logic, and telemetry
 *
 * This class implements the critical error handling functionality:
 * - Categorizes errors by type, code, and severity
 * - Implements exponential backoff retry logic
 * - Tracks error metrics and telemetry
 * - Provides debugging information
 * - Manages retry state and limits
 */
class DailyNotificationErrorHandler {

    // MARK: - Constants

    private static let TAG = "DailyNotificationErrorHandler"

    // Retry configuration
    private static let DEFAULT_MAX_RETRIES = 3
    private static let DEFAULT_BASE_DELAY_SECONDS: TimeInterval = 1.0
    private static let DEFAULT_MAX_DELAY_SECONDS: TimeInterval = 30.0
    private static let DEFAULT_BACKOFF_MULTIPLIER: Double = 2.0

    // Error severity levels
    enum ErrorSeverity {
        case low      // Minor issues, non-critical
        case medium   // Moderate issues, may affect functionality
        case high     // Serious issues, significant impact
        case critical // Critical issues, system failure
    }

    // Error categories
    enum ErrorCategory {
        case network       // Network-related errors
        case storage       // Storage/database errors
        case scheduling    // Notification scheduling errors
        case permission    // Permission-related errors
        case configuration // Configuration errors
        case system        // System-level errors
        case unknown       // Unknown/unclassified errors
    }

    // MARK: - Properties

    private let logger: DailyNotificationLogger
    private var retryStates: [String: RetryState] = [:]
    private let retryQueue = DispatchQueue(label: "error.retry", attributes: .concurrent)
    private let metrics = ErrorMetrics()
    private let config: ErrorConfiguration
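    // Note: retryStates is guarded with a reader-writer pattern -- concurrent
    // reads via retryQueue.sync, exclusive writes via the .barrier flag.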
    // MARK: - Initialization

    /**
     * Constructor with default configuration
     */
    init(logger: DailyNotificationLogger) {
        self.logger = logger
        self.config = ErrorConfiguration()
        logger.debug(DailyNotificationErrorHandler.TAG, "ErrorHandler initialized with max retries: \(config.maxRetries)")
    }

    /**
     * Constructor with custom configuration
     *
     * @param logger Logger instance for debugging
     * @param config Error handling configuration
     */
    init(logger: DailyNotificationLogger, config: ErrorConfiguration) {
        self.logger = logger
        self.config = config
        logger.debug(DailyNotificationErrorHandler.TAG, "ErrorHandler initialized with max retries: \(config.maxRetries)")
    }
    // MARK: - Error Handling

    /**
     * Handle error with automatic retry logic
     *
     * @param operationId Unique identifier for the operation
     * @param error Error to handle
     * @param retryable Whether this error is retryable
     * @return ErrorResult with handling information
     */
    func handleError(operationId: String, error: Error, retryable: Bool) -> ErrorResult {
        // No do/catch here: nothing in this body throws, and Swift flags a
        // catch block as unreachable in that case
        logger.debug(DailyNotificationErrorHandler.TAG, "Handling error for operation: \(operationId)")

        // Categorize error
        let errorInfo = categorizeError(error)

        // Update metrics
        metrics.recordError(errorInfo)

        // Check if retryable and within limits
        if retryable && shouldRetry(operationId: operationId, errorInfo: errorInfo) {
            return handleRetryableError(operationId: operationId, errorInfo: errorInfo)
        } else {
            return handleNonRetryableError(operationId: operationId, errorInfo: errorInfo)
        }
    }
    /**
     * Handle error with custom retry configuration
     *
     * @param operationId Unique identifier for the operation
     * @param error Error to handle
     * @param retryConfig Custom retry configuration
     * @return ErrorResult with handling information
     */
    func handleError(operationId: String, error: Error, retryConfig: RetryConfiguration) -> ErrorResult {
        logger.debug(DailyNotificationErrorHandler.TAG, "Handling error with custom retry config for operation: \(operationId)")

        // Categorize error
        let errorInfo = categorizeError(error)

        // Update metrics
        metrics.recordError(errorInfo)

        // Check if retryable with custom config
        if shouldRetry(operationId: operationId, errorInfo: errorInfo, retryConfig: retryConfig) {
            return handleRetryableError(operationId: operationId, errorInfo: errorInfo, retryConfig: retryConfig)
        } else {
            return handleNonRetryableError(operationId: operationId, errorInfo: errorInfo)
        }
    }
    // MARK: - Error Categorization

    /**
     * Categorize error by type, code, and severity
     *
     * @param error Error to categorize
     * @return ErrorInfo with categorization
     */
    private func categorizeError(_ error: Error) -> ErrorInfo {
        // None of the helpers below throw, so no do/catch is needed
        let category = determineCategory(error)
        let errorCode = determineErrorCode(error)
        let severity = determineSeverity(error, category: category)

        let errorInfo = ErrorInfo(
            error: error,
            category: category,
            errorCode: errorCode,
            severity: severity,
            timestamp: Date()
        )

        logger.debug(DailyNotificationErrorHandler.TAG, "Error categorized: \(errorInfo)")
        return errorInfo
    }
    /**
     * Determine error category based on error type
     *
     * @param error Error to analyze
     * @return ErrorCategory
     */
    private func determineCategory(_ error: Error) -> ErrorCategory {
        let errorType = String(describing: type(of: error))
        let errorMessage = error.localizedDescription

        // Network errors
        if errorType.contains("URLError") || errorType.contains("Network") ||
            errorType.contains("Connection") || errorType.contains("Timeout") {
            return .network
        }

        // Storage errors
        if errorType.contains("SQLite") || errorType.contains("Database") ||
            errorType.contains("Storage") || errorType.contains("File") {
            return .storage
        }

        // Permission errors
        if errorType.contains("Security") || errorType.contains("Permission") ||
            errorMessage.contains("permission") {
            return .permission
        }

        // Configuration errors
        if errorType.contains("IllegalArgument") || errorType.contains("Configuration") ||
            errorMessage.contains("config") {
            return .configuration
        }

        // System errors
        if errorType.contains("OutOfMemory") || errorType.contains("StackOverflow") ||
            errorType.contains("Runtime") {
            return .system
        }

        return .unknown
    }
    /**
     * Determine error code based on error details
     *
     * @param error Error to analyze
     * @return Error code string
     */
    private func determineErrorCode(_ error: Error) -> String {
        let errorType = String(describing: type(of: error))
        let errorMessage = error.localizedDescription

        // Generate error code based on type and message. Note that hashValue
        // is randomized per process launch, so these codes group errors within
        // a session but are not stable across launches.
        if !errorMessage.isEmpty {
            return "\(errorType)_\(errorMessage.hashValue)"
        } else {
            return "\(errorType)_\(Date().timeIntervalSince1970)"
        }
    }
    /**
     * Determine error severity based on error and category
     *
     * @param error Error to analyze
     * @param category Error category
     * @return ErrorSeverity
     */
    private func determineSeverity(_ error: Error, category: ErrorCategory) -> ErrorSeverity {
        let errorType = String(describing: type(of: error))

        // Critical errors
        if errorType.contains("OutOfMemory") || errorType.contains("StackOverflow") {
            return .critical
        }

        // High severity errors
        if category == .system || category == .storage {
            return .high
        }

        // Medium severity errors
        if category == .network || category == .permission {
            return .medium
        }

        // Low severity errors
        return .low
    }
    // MARK: - Retry Logic

    /**
     * Check if error should be retried
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @return true if should retry
     */
    private func shouldRetry(operationId: String, errorInfo: ErrorInfo) -> Bool {
        return shouldRetry(operationId: operationId, errorInfo: errorInfo, retryConfig: nil)
    }

    /**
     * Check if error should be retried with custom config
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @param retryConfig Custom retry configuration
     * @return true if should retry
     */
    private func shouldRetry(operationId: String, errorInfo: ErrorInfo, retryConfig: RetryConfiguration?) -> Bool {
        // Get or create retry state. Returning the value from sync avoids the
        // use-before-initialization error of assigning a local inside the
        // closure; the .barrier flag makes the write exclusive.
        let state: RetryState = retryQueue.sync(flags: .barrier) {
            if let existing = retryStates[operationId] {
                return existing
            }
            let created = RetryState()
            retryStates[operationId] = created
            return created
        }

        // Check retry limits
        let maxRetries = retryConfig?.maxRetries ?? config.maxRetries
        if state.attemptCount >= maxRetries {
            logger.debug(DailyNotificationErrorHandler.TAG, "Max retries exceeded for operation: \(operationId)")
            return false
        }

        // Check if error is retryable based on category
        let isRetryable = isErrorRetryable(errorInfo.category)
        logger.debug(DailyNotificationErrorHandler.TAG, "Should retry: \(isRetryable) (attempt: \(state.attemptCount)/\(maxRetries))")
        return isRetryable
    }
    /**
     * Check if error category is retryable
     *
     * @param category Error category
     * @return true if retryable
     */
    private func isErrorRetryable(_ category: ErrorCategory) -> Bool {
        switch category {
        case .network, .storage:
            return true
        case .permission, .configuration, .system, .unknown:
            return false
        }
    }
    /**
     * Handle retryable error
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @return ErrorResult with retry information
     */
    private func handleRetryableError(operationId: String, errorInfo: ErrorInfo) -> ErrorResult {
        return handleRetryableError(operationId: operationId, errorInfo: errorInfo, retryConfig: nil)
    }

    /**
     * Handle retryable error with custom config
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @param retryConfig Custom retry configuration
     * @return ErrorResult with retry information
     */
    private func handleRetryableError(operationId: String, errorInfo: ErrorInfo, retryConfig: RetryConfiguration?) -> ErrorResult {
        // Mutate the retry state inside a single barrier block so the
        // increment, delay calculation, and next-retry timestamp stay atomic
        let (delay, attemptCount): (TimeInterval, Int) = retryQueue.sync(flags: .barrier) {
            let state = retryStates[operationId] ?? RetryState()
            retryStates[operationId] = state
            state.attemptCount += 1

            // Calculate delay with exponential backoff
            let delay = calculateRetryDelay(attemptCount: state.attemptCount, retryConfig: retryConfig)
            state.nextRetryTime = Date().addingTimeInterval(delay)
            return (delay, state.attemptCount)
        }

        logger.info(DailyNotificationErrorHandler.TAG, "Retryable error handled - retry in \(delay)s (attempt \(attemptCount))")
        return ErrorResult.retryable(errorInfo: errorInfo, retryDelaySeconds: delay, attemptCount: attemptCount)
    }
    /**
     * Handle non-retryable error
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @return ErrorResult with failure information
     */
    private func handleNonRetryableError(operationId: String, errorInfo: ErrorInfo) -> ErrorResult {
        logger.warning(DailyNotificationErrorHandler.TAG, "Non-retryable error handled for operation: \(operationId)")

        // Clean up retry state
        retryQueue.async(flags: .barrier) {
            self.retryStates.removeValue(forKey: operationId)
        }

        return ErrorResult.fatal(errorInfo: errorInfo)
    }
    /**
     * Calculate retry delay with exponential backoff
     *
     * @param attemptCount Current attempt number
     * @param retryConfig Custom retry configuration
     * @return Delay in seconds
     */
    private func calculateRetryDelay(attemptCount: Int, retryConfig: RetryConfiguration?) -> TimeInterval {
        let baseDelay = retryConfig?.baseDelaySeconds ?? config.baseDelaySeconds
        let multiplier = retryConfig?.backoffMultiplier ?? config.backoffMultiplier
        let maxDelay = retryConfig?.maxDelaySeconds ?? config.maxDelaySeconds

        // Calculate exponential backoff: baseDelay * (multiplier ^ (attemptCount - 1))
        var delay = baseDelay * pow(multiplier, Double(attemptCount - 1))

        // Cap at maximum delay
        delay = min(delay, maxDelay)

        // Add jitter to prevent thundering herd
        let jitter = delay * 0.1 * Double.random(in: 0...1)
        delay += jitter

        logger.debug(DailyNotificationErrorHandler.TAG, "Calculated retry delay: \(delay)s (attempt \(attemptCount))")
        return delay
    }
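    // Worked numbers under the defaults (base 1.0s, multiplier 2.0, cap 30s):
    //   attempt 1 -> 1.0s, attempt 2 -> 2.0s, attempt 3 -> 4.0s,
    //   each plus up to 10% random jitter applied after the cap.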
    // MARK: - Metrics and Telemetry

    /**
     * Get error metrics
     *
     * @return ErrorMetrics with current statistics
     */
    func getMetrics() -> ErrorMetrics {
        return metrics
    }

    /**
     * Reset error metrics
     */
    func resetMetrics() {
        metrics.reset()
        logger.debug(DailyNotificationErrorHandler.TAG, "Error metrics reset")
    }

    /**
     * Get retry statistics
     *
     * @return RetryStatistics with retry information
     */
    func getRetryStatistics() -> RetryStatistics {
        var totalOperations = 0
        var activeRetries = 0
        var totalRetries = 0

        retryQueue.sync {
            totalOperations = retryStates.count
            for state in retryStates.values {
                if state.attemptCount > 0 {
                    activeRetries += 1
                    totalRetries += state.attemptCount
                }
            }
        }

        return RetryStatistics(totalOperations: totalOperations, activeRetries: activeRetries, totalRetries: totalRetries)
    }

    /**
     * Clear retry states
     */
    func clearRetryStates() {
        retryQueue.async(flags: .barrier) {
            self.retryStates.removeAll()
        }
        logger.debug(DailyNotificationErrorHandler.TAG, "Retry states cleared")
    }

    // MARK: - Data Classes

    /**
     * Error information
     */
    struct ErrorInfo {
        let error: Error
        let category: ErrorCategory
        let errorCode: String
        let severity: ErrorSeverity
        let timestamp: Date

        var description: String {
            return "ErrorInfo{category=\(category), code=\(errorCode), severity=\(severity), error=\(String(describing: type(of: error)))}"
        }
    }

    /**
     * Retry state for an operation
     */
    private class RetryState {
        var attemptCount = 0
        var nextRetryTime = Date()
    }

    /**
     * Error result
     */
    struct ErrorResult {
        let success: Bool
        let retryable: Bool
        let errorInfo: ErrorInfo?
        let retryDelaySeconds: TimeInterval
        let attemptCount: Int
        let message: String

        static func retryable(errorInfo: ErrorInfo, retryDelaySeconds: TimeInterval, attemptCount: Int) -> ErrorResult {
            return ErrorResult(success: false, retryable: true, errorInfo: errorInfo, retryDelaySeconds: retryDelaySeconds, attemptCount: attemptCount, message: "Retryable error")
        }

        static func fatal(errorInfo: ErrorInfo) -> ErrorResult {
            return ErrorResult(success: false, retryable: false, errorInfo: errorInfo, retryDelaySeconds: 0, attemptCount: 0, message: "Fatal error")
        }

        static func fatal(message: String) -> ErrorResult {
            return ErrorResult(success: false, retryable: false, errorInfo: nil, retryDelaySeconds: 0, attemptCount: 0, message: message)
        }
    }

    /**
     * Error configuration
     */
    struct ErrorConfiguration {
        let maxRetries: Int
        let baseDelaySeconds: TimeInterval
        let maxDelaySeconds: TimeInterval
        let backoffMultiplier: Double

        init() {
            self.maxRetries = DailyNotificationErrorHandler.DEFAULT_MAX_RETRIES
            self.baseDelaySeconds = DailyNotificationErrorHandler.DEFAULT_BASE_DELAY_SECONDS
            self.maxDelaySeconds = DailyNotificationErrorHandler.DEFAULT_MAX_DELAY_SECONDS
            self.backoffMultiplier = DailyNotificationErrorHandler.DEFAULT_BACKOFF_MULTIPLIER
        }

        init(maxRetries: Int, baseDelaySeconds: TimeInterval, maxDelaySeconds: TimeInterval, backoffMultiplier: Double) {
            self.maxRetries = maxRetries
            self.baseDelaySeconds = baseDelaySeconds
            self.maxDelaySeconds = maxDelaySeconds
            self.backoffMultiplier = backoffMultiplier
        }
    }

    /**
     * Retry configuration
     */
    struct RetryConfiguration {
        let maxRetries: Int
        let baseDelaySeconds: TimeInterval
        let maxDelaySeconds: TimeInterval
        let backoffMultiplier: Double

        init(maxRetries: Int, baseDelaySeconds: TimeInterval, maxDelaySeconds: TimeInterval, backoffMultiplier: Double) {
            self.maxRetries = maxRetries
            self.baseDelaySeconds = baseDelaySeconds
            self.maxDelaySeconds = maxDelaySeconds
            self.backoffMultiplier = backoffMultiplier
        }
    }
    /**
     * Error metrics
     *
     * Note: these counters are not synchronized; this assumes recordError is
     * only called from the handler's own call paths.
     */
    class ErrorMetrics {
        private var totalErrors = 0
        private var networkErrors = 0
        private var storageErrors = 0
        private var schedulingErrors = 0
        private var permissionErrors = 0
        private var configurationErrors = 0
        private var systemErrors = 0
        private var unknownErrors = 0

        func recordError(_ errorInfo: ErrorInfo) {
            totalErrors += 1
            switch errorInfo.category {
            case .network:
                networkErrors += 1
            case .storage:
                storageErrors += 1
            case .scheduling:
                schedulingErrors += 1
            case .permission:
                permissionErrors += 1
            case .configuration:
                configurationErrors += 1
            case .system:
                systemErrors += 1
            case .unknown:
                unknownErrors += 1
            }
        }

        func reset() {
            totalErrors = 0
            networkErrors = 0
            storageErrors = 0
            schedulingErrors = 0
            permissionErrors = 0
            configurationErrors = 0
            systemErrors = 0
            unknownErrors = 0
        }

        var totalErrorsCount: Int { return totalErrors }
        var networkErrorsCount: Int { return networkErrors }
        var storageErrorsCount: Int { return storageErrors }
        var schedulingErrorsCount: Int { return schedulingErrors }
        var permissionErrorsCount: Int { return permissionErrors }
        var configurationErrorsCount: Int { return configurationErrors }
        var systemErrorsCount: Int { return systemErrors }
        var unknownErrorsCount: Int { return unknownErrors }
    }

    /**
     * Retry statistics
     */
    struct RetryStatistics {
        let totalOperations: Int
        let activeRetries: Int
        let totalRetries: Int

        var description: String {
            return "RetryStatistics{totalOps=\(totalOperations), activeRetries=\(activeRetries), totalRetries=\(totalRetries)}"
        }
    }
}
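To make the retry contract concrete, here is a minimal caller-side sketch (not part of the commit; it only uses the handleError API and ErrorResult fields defined above, with a hypothetical throwing operation):

func fetchWithRetries(handler: DailyNotificationErrorHandler, operation: () throws -> Void) {
    let operationId = UUID().uuidString
    while true {
        do {
            try operation()
            return // success
        } catch {
            let result = handler.handleError(operationId: operationId, error: error, retryable: true)
            guard result.retryable else { return } // fatal: give up
            Thread.sleep(forTimeInterval: result.retryDelaySeconds) // honor the backoff delay
        }
    }
}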

src/android/DailyNotificationErrorHandler.java (+668)

@@ -0,0 +1,668 @@
/**
 * DailyNotificationErrorHandler.java
 *
 * Android Error Handler for comprehensive error management
 * Implements error categorization, retry logic, and telemetry
 *
 * @author Matthew Raymer
 * @version 1.0.0
 */

package com.timesafari.dailynotification;

import android.util.Log;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Manages comprehensive error handling with categorization, retry logic, and telemetry
 *
 * This class implements the critical error handling functionality:
 * - Categorizes errors by type, code, and severity
 * - Implements exponential backoff retry logic
 * - Tracks error metrics and telemetry
 * - Provides debugging information
 * - Manages retry state and limits
 */
public class DailyNotificationErrorHandler {

    // MARK: - Constants

    private static final String TAG = "DailyNotificationErrorHandler";

    // Retry configuration
    private static final int DEFAULT_MAX_RETRIES = 3;
    private static final long DEFAULT_BASE_DELAY_MS = 1000;   // 1 second
    private static final long DEFAULT_MAX_DELAY_MS = 30000;   // 30 seconds
    private static final double DEFAULT_BACKOFF_MULTIPLIER = 2.0;

    // Error severity levels
    public enum ErrorSeverity {
        LOW,      // Minor issues, non-critical
        MEDIUM,   // Moderate issues, may affect functionality
        HIGH,     // Serious issues, significant impact
        CRITICAL  // Critical issues, system failure
    }

    // Error categories
    public enum ErrorCategory {
        NETWORK,        // Network-related errors
        STORAGE,        // Storage/database errors
        SCHEDULING,     // Notification scheduling errors
        PERMISSION,     // Permission-related errors
        CONFIGURATION,  // Configuration errors
        SYSTEM,         // System-level errors
        UNKNOWN         // Unknown/unclassified errors
    }

    // MARK: - Properties

    private final ConcurrentHashMap<String, RetryState> retryStates;
    private final ErrorMetrics metrics;
    private final ErrorConfiguration config;
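    // Note: ConcurrentHashMap gives thread-safe access to retry state without
    // external locking; per-entry creation below uses computeIfAbsent to stay
    // atomic.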
    // MARK: - Initialization

    /**
     * Constructor with default configuration
     */
    public DailyNotificationErrorHandler() {
        this(new ErrorConfiguration());
    }

    /**
     * Constructor with custom configuration
     *
     * @param config Error handling configuration
     */
    public DailyNotificationErrorHandler(ErrorConfiguration config) {
        this.retryStates = new ConcurrentHashMap<>();
        this.metrics = new ErrorMetrics();
        this.config = config;
        Log.d(TAG, "ErrorHandler initialized with max retries: " + config.maxRetries);
    }
    // MARK: - Error Handling

    /**
     * Handle error with automatic retry logic
     *
     * @param operationId Unique identifier for the operation
     * @param error Error to handle
     * @param retryable Whether this error is retryable
     * @return ErrorResult with handling information
     */
    public ErrorResult handleError(String operationId, Throwable error, boolean retryable) {
        try {
            Log.d(TAG, "Handling error for operation: " + operationId);

            // Categorize error
            ErrorInfo errorInfo = categorizeError(error);

            // Update metrics
            metrics.recordError(errorInfo);

            // Check if retryable and within limits
            if (retryable && shouldRetry(operationId, errorInfo)) {
                return handleRetryableError(operationId, errorInfo);
            } else {
                return handleNonRetryableError(operationId, errorInfo);
            }
        } catch (Exception e) {
            Log.e(TAG, "Error in error handler", e);
            return ErrorResult.fatal("Error handler failure: " + e.getMessage());
        }
    }

    /**
     * Handle error with custom retry configuration
     *
     * @param operationId Unique identifier for the operation
     * @param error Error to handle
     * @param retryConfig Custom retry configuration
     * @return ErrorResult with handling information
     */
    public ErrorResult handleError(String operationId, Throwable error, RetryConfiguration retryConfig) {
        try {
            Log.d(TAG, "Handling error with custom retry config for operation: " + operationId);

            // Categorize error
            ErrorInfo errorInfo = categorizeError(error);

            // Update metrics
            metrics.recordError(errorInfo);

            // Check if retryable with custom config
            if (shouldRetry(operationId, errorInfo, retryConfig)) {
                return handleRetryableError(operationId, errorInfo, retryConfig);
            } else {
                return handleNonRetryableError(operationId, errorInfo);
            }
        } catch (Exception e) {
            Log.e(TAG, "Error in error handler with custom config", e);
            return ErrorResult.fatal("Error handler failure: " + e.getMessage());
        }
    }

    // MARK: - Error Categorization

    /**
     * Categorize error by type, code, and severity
     *
     * @param error Error to categorize
     * @return ErrorInfo with categorization
     */
    private ErrorInfo categorizeError(Throwable error) {
        try {
            ErrorCategory category = determineCategory(error);
            String errorCode = determineErrorCode(error);
            ErrorSeverity severity = determineSeverity(error, category);

            ErrorInfo errorInfo = new ErrorInfo(
                    error,
                    category,
                    errorCode,
                    severity,
                    System.currentTimeMillis()
            );

            Log.d(TAG, "Error categorized: " + errorInfo);
            return errorInfo;
        } catch (Exception e) {
            Log.e(TAG, "Error during categorization", e);
            return new ErrorInfo(error, ErrorCategory.UNKNOWN, "CATEGORIZATION_FAILED", ErrorSeverity.HIGH, System.currentTimeMillis());
        }
    }
    /**
     * Determine error category based on error type
     *
     * @param error Error to analyze
     * @return ErrorCategory
     */
    private ErrorCategory determineCategory(Throwable error) {
        String errorMessage = error.getMessage();
        String errorType = error.getClass().getSimpleName();

        // Network errors
        if (errorType.contains("IOException") || errorType.contains("Socket") ||
                errorType.contains("Connect") || errorType.contains("Timeout")) {
            return ErrorCategory.NETWORK;
        }

        // Storage errors
        if (errorType.contains("SQLite") || errorType.contains("Database") ||
                errorType.contains("Storage") || errorType.contains("File")) {
            return ErrorCategory.STORAGE;
        }

        // Permission errors
        if (errorType.contains("Security") || errorType.contains("Permission") ||
                (errorMessage != null && errorMessage.contains("permission"))) {
            return ErrorCategory.PERMISSION;
        }

        // Configuration errors
        if (errorType.contains("IllegalArgument") || errorType.contains("Configuration") ||
                (errorMessage != null && errorMessage.contains("config"))) {
            return ErrorCategory.CONFIGURATION;
        }

        // System errors
        if (errorType.contains("OutOfMemory") || errorType.contains("StackOverflow") ||
                errorType.contains("Runtime")) {
            return ErrorCategory.SYSTEM;
        }

        return ErrorCategory.UNKNOWN;
    }
    /**
     * Determine error code based on error details
     *
     * @param error Error to analyze
     * @return Error code string
     */
    private String determineErrorCode(Throwable error) {
        String errorType = error.getClass().getSimpleName();
        String errorMessage = error.getMessage();

        // Generate error code based on type and message. String.hashCode() is
        // stable across JVM runs, so identical messages map to the same code.
        if (errorMessage != null && errorMessage.length() > 0) {
            return errorType + "_" + errorMessage.hashCode();
        } else {
            return errorType + "_" + System.currentTimeMillis();
        }
    }
    /**
     * Determine error severity based on error and category
     *
     * @param error Error to analyze
     * @param category Error category
     * @return ErrorSeverity
     */
    private ErrorSeverity determineSeverity(Throwable error, ErrorCategory category) {
        // Critical errors
        if (error instanceof OutOfMemoryError || error instanceof StackOverflowError) {
            return ErrorSeverity.CRITICAL;
        }

        // High severity errors
        if (category == ErrorCategory.SYSTEM || category == ErrorCategory.STORAGE) {
            return ErrorSeverity.HIGH;
        }

        // Medium severity errors
        if (category == ErrorCategory.NETWORK || category == ErrorCategory.PERMISSION) {
            return ErrorSeverity.MEDIUM;
        }

        // Low severity errors
        return ErrorSeverity.LOW;
    }
    // MARK: - Retry Logic

    /**
     * Check if error should be retried
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @return true if should retry
     */
    private boolean shouldRetry(String operationId, ErrorInfo errorInfo) {
        return shouldRetry(operationId, errorInfo, null);
    }

    /**
     * Check if error should be retried with custom config
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @param retryConfig Custom retry configuration
     * @return true if should retry
     */
    private boolean shouldRetry(String operationId, ErrorInfo errorInfo, RetryConfiguration retryConfig) {
        try {
            // Get or create retry state atomically (avoids the get/put race of
            // a check-then-act on the concurrent map)
            RetryState state = retryStates.computeIfAbsent(operationId, id -> new RetryState());

            // Check retry limits
            int maxRetries = retryConfig != null ? retryConfig.maxRetries : config.maxRetries;
            if (state.attemptCount >= maxRetries) {
                Log.d(TAG, "Max retries exceeded for operation: " + operationId);
                return false;
            }

            // Check if error is retryable based on category
            boolean isRetryable = isErrorRetryable(errorInfo.category);
            Log.d(TAG, "Should retry: " + isRetryable + " (attempt: " + state.attemptCount + "/" + maxRetries + ")");
            return isRetryable;
        } catch (Exception e) {
            Log.e(TAG, "Error checking retry eligibility", e);
            return false;
        }
    }
    /**
     * Check if error category is retryable
     *
     * @param category Error category
     * @return true if retryable
     */
    private boolean isErrorRetryable(ErrorCategory category) {
        switch (category) {
            case NETWORK:
            case STORAGE:
                return true;
            case PERMISSION:
            case CONFIGURATION:
            case SYSTEM:
            case UNKNOWN:
            default:
                return false;
        }
    }
    /**
     * Handle retryable error
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @return ErrorResult with retry information
     */
    private ErrorResult handleRetryableError(String operationId, ErrorInfo errorInfo) {
        return handleRetryableError(operationId, errorInfo, null);
    }

    /**
     * Handle retryable error with custom config
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @param retryConfig Custom retry configuration
     * @return ErrorResult with retry information
     */
    private ErrorResult handleRetryableError(String operationId, ErrorInfo errorInfo, RetryConfiguration retryConfig) {
        try {
            // computeIfAbsent avoids a NullPointerException if the state was
            // cleared between shouldRetry and this call
            RetryState state = retryStates.computeIfAbsent(operationId, id -> new RetryState());
            state.attemptCount++;

            // Calculate delay with exponential backoff
            long delay = calculateRetryDelay(state.attemptCount, retryConfig);
            state.nextRetryTime = System.currentTimeMillis() + delay;

            Log.i(TAG, "Retryable error handled - retry in " + delay + "ms (attempt " + state.attemptCount + ")");
            return ErrorResult.retryable(errorInfo, delay, state.attemptCount);
        } catch (Exception e) {
            Log.e(TAG, "Error handling retryable error", e);
            return ErrorResult.fatal("Retry handling failure: " + e.getMessage());
        }
    }
    /**
     * Handle non-retryable error
     *
     * @param operationId Operation identifier
     * @param errorInfo Error information
     * @return ErrorResult with failure information
     */
    private ErrorResult handleNonRetryableError(String operationId, ErrorInfo errorInfo) {
        try {
            Log.w(TAG, "Non-retryable error handled for operation: " + operationId);

            // Clean up retry state
            retryStates.remove(operationId);

            return ErrorResult.fatal(errorInfo);
        } catch (Exception e) {
            Log.e(TAG, "Error handling non-retryable error", e);
            return ErrorResult.fatal("Non-retryable error handling failure: " + e.getMessage());
        }
    }

    /**
     * Calculate retry delay with exponential backoff
     *
     * @param attemptCount Current attempt number
     * @param retryConfig Custom retry configuration
     * @return Delay in milliseconds
     */
    private long calculateRetryDelay(int attemptCount, RetryConfiguration retryConfig) {
        try {
            long baseDelay = retryConfig != null ? retryConfig.baseDelayMs : config.baseDelayMs;
            double multiplier = retryConfig != null ? retryConfig.backoffMultiplier : config.backoffMultiplier;
            long maxDelay = retryConfig != null ? retryConfig.maxDelayMs : config.maxDelayMs;

            // Calculate exponential backoff: baseDelay * (multiplier ^ (attemptCount - 1))
            long delay = (long) (baseDelay * Math.pow(multiplier, attemptCount - 1));

            // Cap at maximum delay
            delay = Math.min(delay, maxDelay);

            // Add jitter to prevent thundering herd
            long jitter = (long) (delay * 0.1 * Math.random());
            delay += jitter;

            Log.d(TAG, "Calculated retry delay: " + delay + "ms (attempt " + attemptCount + ")");
            return delay;
        } catch (Exception e) {
            Log.e(TAG, "Error calculating retry delay", e);
            return config.baseDelayMs;
        }
    }
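    // Worked numbers under the defaults (base 1000 ms, multiplier 2.0, cap 30000 ms):
    //   attempt 1 -> 1000 ms, attempt 2 -> 2000 ms, attempt 3 -> 4000 ms,
    //   each plus up to 10% random jitter applied after the cap.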
    // MARK: - Metrics and Telemetry

    /**
     * Get error metrics
     *
     * @return ErrorMetrics with current statistics
     */
    public ErrorMetrics getMetrics() {
        return metrics;
    }

    /**
     * Reset error metrics
     */
    public void resetMetrics() {
        metrics.reset();
        Log.d(TAG, "Error metrics reset");
    }

    /**
     * Get retry statistics
     *
     * @return RetryStatistics with retry information
     */
    public RetryStatistics getRetryStatistics() {
        int totalOperations = retryStates.size();
        int activeRetries = 0;
        int totalRetries = 0;

        for (RetryState state : retryStates.values()) {
            if (state.attemptCount > 0) {
                activeRetries++;
                totalRetries += state.attemptCount;
            }
        }

        return new RetryStatistics(totalOperations, activeRetries, totalRetries);
    }

    /**
     * Clear retry states
     */
    public void clearRetryStates() {
        retryStates.clear();
        Log.d(TAG, "Retry states cleared");
    }

    // MARK: - Data Classes

    /**
     * Error information
     */
    public static class ErrorInfo {
        public final Throwable error;
        public final ErrorCategory category;
        public final String errorCode;
        public final ErrorSeverity severity;
        public final long timestamp;

        public ErrorInfo(Throwable error, ErrorCategory category, String errorCode, ErrorSeverity severity, long timestamp) {
            this.error = error;
            this.category = category;
            this.errorCode = errorCode;
            this.severity = severity;
            this.timestamp = timestamp;
        }

        @Override
        public String toString() {
            return String.format("ErrorInfo{category=%s, code=%s, severity=%s, error=%s}",
                    category, errorCode, severity, error.getClass().getSimpleName());
        }
    }

    /**
     * Retry state for an operation
     */
    private static class RetryState {
        public int attemptCount = 0;
        public long nextRetryTime = 0;
    }

    /**
     * Error result
     */
    public static class ErrorResult {
        public final boolean success;
        public final boolean retryable;
        public final ErrorInfo errorInfo;
        public final long retryDelayMs;
        public final int attemptCount;
        public final String message;

        private ErrorResult(boolean success, boolean retryable, ErrorInfo errorInfo, long retryDelayMs, int attemptCount, String message) {
            this.success = success;
            this.retryable = retryable;
            this.errorInfo = errorInfo;
            this.retryDelayMs = retryDelayMs;
            this.attemptCount = attemptCount;
            this.message = message;
        }

        public static ErrorResult retryable(ErrorInfo errorInfo, long retryDelayMs, int attemptCount) {
            return new ErrorResult(false, true, errorInfo, retryDelayMs, attemptCount, "Retryable error");
        }

        public static ErrorResult fatal(ErrorInfo errorInfo) {
            return new ErrorResult(false, false, errorInfo, 0, 0, "Fatal error");
        }

        public static ErrorResult fatal(String message) {
            return new ErrorResult(false, false, null, 0, 0, message);
        }
    }

    /**
     * Error configuration
     */
    public static class ErrorConfiguration {
        public final int maxRetries;
        public final long baseDelayMs;
        public final long maxDelayMs;
        public final double backoffMultiplier;

        public ErrorConfiguration() {
            this(DEFAULT_MAX_RETRIES, DEFAULT_BASE_DELAY_MS, DEFAULT_MAX_DELAY_MS, DEFAULT_BACKOFF_MULTIPLIER);
        }

        public ErrorConfiguration(int maxRetries, long baseDelayMs, long maxDelayMs, double backoffMultiplier) {
            this.maxRetries = maxRetries;
            this.baseDelayMs = baseDelayMs;
            this.maxDelayMs = maxDelayMs;
            this.backoffMultiplier = backoffMultiplier;
        }
    }

    /**
     * Retry configuration
     */
    public static class RetryConfiguration {
        public final int maxRetries;
        public final long baseDelayMs;
        public final long maxDelayMs;
        public final double backoffMultiplier;

        public RetryConfiguration(int maxRetries, long baseDelayMs, long maxDelayMs, double backoffMultiplier) {
            this.maxRetries = maxRetries;
            this.baseDelayMs = baseDelayMs;
            this.maxDelayMs = maxDelayMs;
            this.backoffMultiplier = backoffMultiplier;
        }
    }

    /**
     * Error metrics
     */
    public static class ErrorMetrics {
        private final AtomicInteger totalErrors = new AtomicInteger(0);
        private final AtomicInteger networkErrors = new AtomicInteger(0);
        private final AtomicInteger storageErrors = new AtomicInteger(0);
        private final AtomicInteger schedulingErrors = new AtomicInteger(0);
        private final AtomicInteger permissionErrors = new AtomicInteger(0);
        private final AtomicInteger configurationErrors = new AtomicInteger(0);
        private final AtomicInteger systemErrors = new AtomicInteger(0);
        private final AtomicInteger unknownErrors = new AtomicInteger(0);

        public void recordError(ErrorInfo errorInfo) {
            totalErrors.incrementAndGet();
            switch (errorInfo.category) {
                case NETWORK:
                    networkErrors.incrementAndGet();
                    break;
                case STORAGE:
                    storageErrors.incrementAndGet();
                    break;
                case SCHEDULING:
                    schedulingErrors.incrementAndGet();
                    break;
                case PERMISSION:
                    permissionErrors.incrementAndGet();
                    break;
                case CONFIGURATION:
                    configurationErrors.incrementAndGet();
                    break;
                case SYSTEM:
                    systemErrors.incrementAndGet();
                    break;
                case UNKNOWN:
                default:
                    unknownErrors.incrementAndGet();
                    break;
            }
        }

        public void reset() {
            totalErrors.set(0);
            networkErrors.set(0);
            storageErrors.set(0);
            schedulingErrors.set(0);
            permissionErrors.set(0);
            configurationErrors.set(0);
            systemErrors.set(0);
            unknownErrors.set(0);
        }

        public int getTotalErrors() { return totalErrors.get(); }
        public int getNetworkErrors() { return networkErrors.get(); }
        public int getStorageErrors() { return storageErrors.get(); }
        public int getSchedulingErrors() { return schedulingErrors.get(); }
        public int getPermissionErrors() { return permissionErrors.get(); }
        public int getConfigurationErrors() { return configurationErrors.get(); }
        public int getSystemErrors() { return systemErrors.get(); }
        public int getUnknownErrors() { return unknownErrors.get(); }
    }

    /**
     * Retry statistics
     */
    public static class RetryStatistics {
        public final int totalOperations;
        public final int activeRetries;
        public final int totalRetries;

        public RetryStatistics(int totalOperations, int activeRetries, int totalRetries) {
            this.totalOperations = totalOperations;
            this.activeRetries = activeRetries;
            this.totalRetries = totalRetries;
        }

        @Override
        public String toString() {
            return String.format("RetryStatistics{totalOps=%d, activeRetries=%d, totalRetries=%d}",
                    totalOperations, activeRetries, totalRetries);
        }
    }
}
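A matching caller-side sketch for Android (not part of the commit; it only uses the handleError API and ErrorResult fields above, with a hypothetical Runnable operation):

void fetchWithRetries(DailyNotificationErrorHandler handler, Runnable operation) throws InterruptedException {
    String operationId = java.util.UUID.randomUUID().toString();
    while (true) {
        try {
            operation.run();
            return; // success
        } catch (RuntimeException e) {
            DailyNotificationErrorHandler.ErrorResult result =
                    handler.handleError(operationId, e, true);
            if (!result.retryable) return; // fatal: give up
            Thread.sleep(result.retryDelayMs); // honor the backoff delay
        }
    }
}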