
Merge branch 'master' into performance-optimizations-testing

pull/159/head
Matthew Raymer, 22 hours ago
commit c0104dbb99
100 changed files, with the number of changed lines shown for each:

1. .cursor/rules/adr_template.mdc (4)
2. .cursor/rules/app/architectural_decision_record.mdc (246)
3. .cursor/rules/app/timesafari.mdc (413)
4. .cursor/rules/architecture/README.md (75)
5. .cursor/rules/architecture/build_architecture_guard.mdc (295)
6. .cursor/rules/asset_configuration.mdc (49)
7. .cursor/rules/base_context.mdc (96)
8. .cursor/rules/database/absurd-sql.mdc (158)
9. .cursor/rules/database/legacy_dexie.mdc (5)
10. .cursor/rules/development/type_safety_guide.mdc (47)
11. .cursor/rules/docs/markdown-automation.mdc (79)
12. .cursor/rules/docs/markdown.mdc (36)
13. .cursor/rules/features/camera-implementation.mdc (6)
14. .cursor/rules/harbor_pilot_universal.mdc (206)
15. .cursor/rules/historical_comment_management.mdc (236)
16. .cursor/rules/investigation_report_example.mdc (81)
17. .cursor/rules/logging_standards.mdc (222)
18. .cursor/rules/realistic_time_estimation.mdc (348)
19. .cursor/rules/research_diagnostic.mdc (4)
20. .cursor/rules/software_development.mdc (209)
21. .cursor/rules/time.mdc (329)
22. .cursor/rules/workflow/version_control.mdc (321)
23. .dockerignore (2)
24. .env.test (2)
25. .github/workflows/asset-validation.yml (142)
26. .github/workflows/playwright.yml (27)
27. .husky/_/husky.sh (40)
28. .husky/commit-msg (10)
29. .husky/pre-commit (15)
30. .husky/pre-push (27)
31. BUILDING.md (433)
32. CHANGELOG.md (47)
33. README-BUILD-GUARD.md (290)
34. README-PR-TEMPLATE.md (82)
35. README.md (279)
36. TASK_storage.md (8)
37. android/app/build.gradle (4)
38. android/build.gradle (2)
39. doc/DEEP_LINKS.md (3)
40. doc/README.md (3)
41. doc/asset-migration-plan.md (1)
42. doc/build-modernization-context.md (8)
43. doc/circular-dependency-analysis.md (17)
44. doc/component-communication-guide.md (14)
45. doc/cors-disabled-for-universal-images.md (14)
46. doc/cors-image-loading-solution.md (26)
47. doc/database-migration-guide.md (1)
48. doc/debug-hook-guide.md (187)
49. doc/electron-cleanup-summary.md (6)
50. doc/electron-console-cleanup.md (21)
51. doc/error-diagnostics-log.md (13)
52. doc/image-hosting-guide.md (21)
53. doc/logging-configuration.md (2)
54. doc/migration-fence-definition.md (21)
55. doc/migration-progress-tracker.md (69)
56. doc/migration-quick-reference.md (1)
57. doc/migration-readiness-summary.md (31)
58. doc/migration-roadmap-next-steps.md (31)
59. doc/migration-to-wa-sqlite.md (29)
60. doc/platformservicemixin-completion-plan.md (29)
61. doc/qr-code-implementation-guide.md (28)
62. doc/secure-storage-implementation.md (9)
63. doc/sharebufferarray_spectre_security.md (14)
64. doc/storage-implementation-checklist.md (29)
65. doc/usage-guide.md (35)
66. docker/README.md (32)
67. electron/README-BUILDING.md (29)
68. electron/README.md (32)
69. index.html (2)
70. ios/App/App.xcodeproj/project.pbxproj (8)
71. ios/App/app_privacy_manifest_fixer/CHANGELOG.md (15)
72. ios/App/app_privacy_manifest_fixer/README.md (7)
73. ios/App/app_privacy_manifest_fixer/README.zh-CN.md (7)
74. package-lock.json (3247)
75. package.json (31)
76. pull_request_template.md (47)
77. resources/README.md (3)
78. scripts/README.md (7)
79. scripts/build-android.sh (34)
80. scripts/build-arch-guard.sh (187)
81. scripts/build-ios.sh (8)
82. scripts/check-dependencies.sh (110)
83. scripts/clean-android.sh (62)
84. scripts/fix-markdown.sh (19)
85. scripts/git-hooks/README.md (124)
86. scripts/git-hooks/debug-checker.config (86)
87. scripts/git-hooks/pre-commit (252)
88. scripts/install-debug-hook.sh (171)
89. scripts/setup-markdown-hooks.sh (214)
90. scripts/test-debug-hook.sh (117)
91. scripts/validate-markdown.sh (19)
92. src/components/FeedFilters.vue (24)
93. src/interfaces/common.ts (6)
94. src/interfaces/deepLinks.ts (28)
95. src/main.capacitor.ts (120)
96. src/main.ts (26)
97. src/router/index.ts (72)
98. src/services/ProfileService.ts (77)
99. src/services/deepLinks.ts (352)
100. src/views/AccountViewView.vue (48)

4  .cursor/rules/adr_template.mdc

@@ -9,7 +9,9 @@

The guidance under `## Context` (between the `## Context` and `## Decision`
headings) is re-wrapped across three lines; its wording is unchanged:

[Describe the forces at play, including technological, political, social, and
project local. These forces are probably in tension, and should be called out as
such. The language in this section is value-neutral. It is simply describing facts.]

246  .cursor/rules/app/architectural_decision_record.mdc

The rule changes from an always-applied rule with an empty description to an
opt-in rule with new front matter:

---
description: when you need to understand the system architecture or make changes that impact the system architecture
alwaysApply: false
---

The body gains author/date/status metadata, drops the `---` separators between
sections, re-wraps long lines, and adds Collaboration Hooks, Self-Check, and
status sections. The updated rule reads:

# TimeSafari Cross-Platform Architecture Guide
**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Architecture guidelines
## 1. Platform Support Matrix
| Feature | Web (PWA) | Capacitor (Mobile) | Electron (Desktop) |
|---------|-----------|--------------------|-------------------|
| QR Code Scanning | WebInlineQRScanner | @capacitor-mlkit/barcode-scanning | Not Implemented |
| Deep Linking | URL Parameters | App URL Open Events | Not Implemented |
| File System | Limited (Browser API) | Capacitor Filesystem | Electron fs |
| Camera Access | MediaDevices API | Capacitor Camera | Not Implemented |
| Platform Detection | Web APIs | Capacitor.isNativePlatform() | process.env checks |
## 2. Project Structure
### Core Directories
```
src/
├── components/ # Vue components
├── services/ # Platform services and business logic
├── views/ # Page components
├── router/ # Vue router configuration
├── types/ # TypeScript type definitions
├── utils/ # Utility functions
├── lib/ # Core libraries
├── platforms/ # Platform-specific implementations
├── electron/ # Electron-specific code
├── constants/ # Application constants
├── db/ # Database related code
├── interfaces/ # TypeScript interfaces
└── assets/ # Static assets
```
### Entry Points
- `main.ts` → Base entry
- `main.common.ts` → Shared init
- `main.capacitor.ts` → Mobile entry
- `main.electron.ts` → Electron entry
- `main.web.ts` → Web entry
## 3. Service Architecture
### Service Organization
```tree
services/
├── QRScanner/
│ ├── WebInlineQRScanner.ts
│ └── interfaces.ts
├── platforms/
│ ├── WebPlatformService.ts
│ ├── CapacitorPlatformService.ts
│ └── ElectronPlatformService.ts
└── factory/
└── PlatformServiceFactory.ts
```
### Factory Pattern
Use a **singleton factory** to select platform services via
`process.env.VITE_PLATFORM`.
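
As an illustration of the factory described above, a minimal sketch might look
like this; the `PlatformService` methods and class bodies are assumptions, and
only the class and file names mirror the tree shown earlier.

```ts
// Minimal sketch of a singleton factory keyed off VITE_PLATFORM.
// The PlatformService methods here are illustrative placeholders.
export interface PlatformService {
  takePhoto(): Promise<Blob | null>;
  readFile(path: string): Promise<string>;
}

class WebPlatformService implements PlatformService {
  async takePhoto() { return null; }                       // MediaDevices in a real impl
  async readFile(path: string) { return `web:${path}`; }
}

class CapacitorPlatformService implements PlatformService {
  async takePhoto() { return null; }                       // Capacitor Camera in a real impl
  async readFile(path: string) { return `capacitor:${path}`; }
}

export class PlatformServiceFactory {
  private static instance: PlatformService | null = null;

  static getInstance(): PlatformService {
    if (!this.instance) {
      // VITE_PLATFORM is injected by the per-platform Vite configs
      this.instance =
        process.env.VITE_PLATFORM === "capacitor"
          ? new CapacitorPlatformService()
          : new WebPlatformService();
    }
    return this.instance;
  }
}
```

Every caller then goes through `PlatformServiceFactory.getInstance()` and
receives the same instance back.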
## 4. Feature Guidelines
### QR Code Scanning
- Define `QRScannerService` interface (see the sketch after this list).
- Implement platform-specific classes (`WebInlineQRScanner`, Capacitor,
etc).
- Provide `addListener` and `onStream` hooks for composability.
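
A minimal sketch of what that interface could look like; only `addListener` and
`onStream` come from the guideline above, the remaining members are assumptions.

```ts
export interface ScanListener {
  onScan(value: string): void;
  onError?(error: Error): void;
}

export interface QRScannerService {
  checkPermissions(): Promise<boolean>;
  startScan(): Promise<void>;
  stopScan(): Promise<void>;
  // Composability hooks called out in the guideline above
  addListener(listener: ScanListener): () => void;                 // returns an unsubscribe fn
  onStream(callback: (stream: MediaStream | null) => void): void;
}
```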
### Deep Linking
- URL format: `timesafari://<route>[/<param>][?query=value]`
- Web: `router.beforeEach` → parse query
- Capacitor: `App.addListener("appUrlOpen", …)` (see the sketch below)
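
A sketch of the Capacitor side, assuming the `timesafari://<route>[/<param>]`
format above; the parsing is simplified and is not the project's actual handler.

```ts
import { App, type URLOpenListenerEvent } from "@capacitor/app";
import type { Router } from "vue-router";

export function registerDeepLinks(router: Router): void {
  App.addListener("appUrlOpen", (event: URLOpenListenerEvent) => {
    // e.g. timesafari://claim/123?view=details
    const url = new URL(event.url);
    const path = `/${url.host}${url.pathname}`; // "/claim/123"
    router.push({ path, query: Object.fromEntries(url.searchParams) });
  });
}
```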
## 5. Build Process
- `vite.config.common.mts` → shared config
- Platform configs: `vite.config.web.mts`, `.capacitor.mts`,
`.electron.mts`
- Use `process.env.VITE_PLATFORM` for conditional loading.
```bash
npm run build:web
npm run build:capacitor
npm run build:electron
```
## 6. Testing Strategy
- **Unit tests** for services.
- **Playwright** for Web + Capacitor:
- `playwright.config-local.ts` includes web + Pixel 5.
- **Electron tests**: add `spectron` or Playwright-Electron.
- Mark tests with platform tags:
```ts
test.skip(!process.env.MOBILE_TEST, "Mobile-only test");
```
> 🔗 **Human Hook:** Before merging new tests, hold a short sync (≤15
> min) with QA to align on coverage and flaky test risks.
## 7. Error Handling
- Global Vue error handler → logs with component name (see the sketch after this list).
- Platform-specific wrappers log API errors with platform prefix
(`[Capacitor API Error]`, etc).
- Use structured logging (not `console.log`).
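
A minimal sketch of such a handler; the `logger` object stands in for the
project's structured-logging utility.

```ts
import type { App, ComponentPublicInstance } from "vue";

// Stand-in for the real structured logger
const logger = {
  error: (message: string, detail?: unknown) => console.error(message, detail),
};

export function installGlobalErrorHandler(app: App): void {
  app.config.errorHandler = (
    err: unknown,
    instance: ComponentPublicInstance | null,
    info: string,
  ) => {
    const component = instance?.$options?.name ?? "UnknownComponent";
    logger.error(`[Web Error] ${component}: ${info}`, err);
  };
}
```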
## 8. Best Practices
- Keep platform code **isolated** in `platforms/`.
- Always define a **shared interface** first.
- Use feature detection, not platform detection, when possible.
- Dependency injection for services → improves testability.
- Maintain **Competence Hooks** in PRs (2–3 prompts for dev
discussion).
## 9. Dependency Management
- Key deps: `@capacitor/core`, `electron`, `vue`.
- Use conditional `import()` for platform-specific libs (see the sketch below).
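
For example, a conditional `import()` might look like the sketch below; the
exact plugin call is an assumption based on the package named in the platform
matrix.

```ts
export async function scanBarcode(): Promise<string | null> {
  if (process.env.VITE_PLATFORM === "capacitor") {
    // Loaded only in mobile builds so web/electron bundles stay lean
    const { BarcodeScanner } = await import("@capacitor-mlkit/barcode-scanning");
    const result = await BarcodeScanner.scan();
    return result.barcodes[0]?.rawValue ?? null;
  }
  return null; // web/electron fall back to their own implementations
}
```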
## 10. Security Considerations
- **Permissions**: Always check + request gracefully.
- **Storage**: Secure storage for sensitive data; encrypt when possible.
- **Audits**: Schedule quarterly security reviews.
## 11. ADR Process
- All major architecture choices → log in `doc/adr/`.
- Use ADR template with Context, Decision, Consequences, Status.
- Link related ADRs in PR descriptions.
> 🔗 **Human Hook:** When proposing a new ADR, schedule a 30-min
> design sync for discussion, not just async review.
## 12. Collaboration Hooks

- **QR features**: Sync with Security before merging → permissions &
privacy.
- **New platform builds**: Demo in team meeting → confirm UX
differences.
- **Critical ADRs**: Present in guild or architecture review.

## Self-Check

- [ ] Does this feature implement a shared interface?
- [ ] Are fallbacks + errors handled gracefully?
- [ ] Have relevant ADRs been updated/linked?
- [ ] Did I add competence hooks or prompts for the team?
- [ ] Was human interaction (sync/review/demo) scheduled?

---

**Status**: Active architecture guidelines
**Priority**: High
**Estimated Effort**: Ongoing reference
**Dependencies**: Vue 3, Capacitor, Electron, Vite
**Stakeholders**: Development team, Architecture team

413  .cursor/rules/app/timesafari.mdc

The rule is rewritten from top to bottom (316 lines before, 181 after) and the
old front matter block (`description:`, `globs:`, `alwaysApply: true`) is
dropped. The new version follows; the previous version is reproduced after it
for reference.

# Time Safari Context

**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Core application context

## Project Overview

Time Safari is an application designed to foster community building through
gifts, gratitude, and collaborative projects. The app makes it easy and
intuitive for users of any age and capability to recognize contributions,
build trust networks, and organize collective action. It is built on services
that preserve privacy and data sovereignty.

## Core Goals

1. **Connect**: Make it easy, rewarding, and non-threatening for people to
connect with others who have similar interests, and to initiate activities
together.
2. **Reveal**: Widely advertise the great support and rewards that are being
given and accepted freely, especially non-monetary ones, showing the impact
gifts make in people's lives.

## Technical Foundation

### Architecture

- **Privacy-preserving claims architecture** via endorser.ch
- **Decentralized Identifiers (DIDs)**: User identities based on
public/private key pairs stored on devices
- **Cryptographic Verification**: All claims and confirmations are
cryptographically signed
- **User-Controlled Visibility**: Users explicitly control who can see their
identifiers and data
- **Cross-Platform**: Web (PWA), Mobile (Capacitor), Desktop (Electron)

### Current Database State

- **Database**: SQLite via Absurd SQL (browser) and native SQLite
(mobile/desktop)
- **Legacy Support**: IndexedDB (Dexie) for backward compatibility
- **Status**: Modern database architecture fully implemented

### Core Technologies

- **Frontend**: Vue 3 + TypeScript + vue-facing-decorator
- **Styling**: TailwindCSS
- **Build**: Vite with platform-specific configs
- **Testing**: Playwright E2E, Jest unit tests
- **Database**: SQLite (Absurd SQL in browser), IndexedDB (legacy)
- **State**: Pinia stores
- **Platform Services**: Abstracted behind interfaces with factory pattern

## Development Principles

### Code Organization

- **Platform Services**: Abstract platform-specific code behind interfaces
- **Service Factory**: Use `PlatformServiceFactory` for platform selection
- **Type Safety**: Strict TypeScript, no `any` types, use type guards (see
the sketch below)
- **Modern Architecture**: Use current platform service patterns
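
A small illustration of the type-guard rule; the `GiveRecord` shape is
hypothetical.

```ts
interface GiveRecord {
  id: string;
  amount: number;
}

function isGiveRecord(value: unknown): value is GiveRecord {
  return (
    typeof value === "object" &&
    value !== null &&
    typeof (value as GiveRecord).id === "string" &&
    typeof (value as GiveRecord).amount === "number"
  );
}

export function parseGiveRecord(json: string): GiveRecord | null {
  const parsed: unknown = JSON.parse(json);
  return isGiveRecord(parsed) ? parsed : null;
}
```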
### Architecture Patterns

- **Dependency Injection**: Services injected via mixins and factory pattern
- **Interface Segregation**: Small, focused interfaces over large ones
- **Composition over Inheritance**: Prefer mixins and composition
- **Single Responsibility**: Each component/service has one clear purpose

### Testing Strategy

- **E2E**: Playwright for critical user journeys
- **Unit**: Jest with F.I.R.S.T. principles
- **Platform Coverage**: Web + Capacitor (Pixel 5) in CI
- **Quality Assurance**: Comprehensive testing and validation

## Current Development Focus

### Active Development

- **Feature Development**: Build new functionality using modern platform
services
- **Performance Optimization**: Improve app performance and user experience
- **Platform Enhancement**: Leverage platform-specific capabilities
- **Code Quality**: Maintain high standards and best practices

### Development Metrics

- **Code Quality**: High standards maintained across all platforms
- **Performance**: Optimized for all target devices
- **Testing**: Comprehensive coverage maintained
- **User Experience**: Focus on intuitive, accessible interfaces

## Platform-Specific Considerations

### Web (PWA)

- **QR Scanning**: WebInlineQRScanner
- **Deep Linking**: URL parameters
- **File System**: Limited browser APIs
- **Build**: `npm run build:web` (development build)

### Mobile (Capacitor)

- **QR Scanning**: @capacitor-mlkit/barcode-scanning
- **Deep Linking**: App URL open events
- **File System**: Capacitor Filesystem
- **Build**: `npm run build:capacitor`

### Desktop (Electron)

- **File System**: Node.js fs
- **Build**: `npm run build:electron`
- **Distribution**: AppImage, DEB, DMG packages

## Development Workflow

### Build Commands

```bash
# Web (development)
npm run build:web

# Mobile
npm run build:capacitor
npm run build:native

# Desktop
npm run build:electron
npm run build:electron:appimage
npm run build:electron:deb
npm run build:electron:dmg
```

### Testing Commands

```bash
# Web E2E
npm run test:web

# Mobile
npm run test:mobile
npm run test:android
npm run test:ios

# Type checking
npm run type-check
npm run lint-fix
```

## Key Constraints

1. **Privacy First**: User identifiers remain private except when explicitly
shared
2. **Platform Compatibility**: Features must work across all target platforms
3. **Performance**: Must remain performant on older/simpler devices
4. **Modern Architecture**: New features should use current platform services
5. **Offline Capability**: Key functionality should work offline when feasible

## Use Cases to Support

1. **Community Building**: Tools for finding others with shared interests
2. **Project Coordination**: Easy proposal and collaboration on projects
3. **Reputation Building**: Showcasing contributions and reliability
4. **Governance**: Facilitating decision-making and collective governance

## Resources

- **Testing**: `docs/migration-testing/`
- **Architecture**: `docs/architecture-decisions.md`
- **Build Context**: `docs/build-modernization-context.md`

---

## Status: Active application context

- **Priority**: Critical
- **Estimated Effort**: Ongoing reference
- **Dependencies**: Vue 3, TypeScript, SQLite, Capacitor, Electron
- **Stakeholders**: Development team, Product team

For reference, the previous version of the rule, replaced by this merge, read
as follows:

# Time Safari Context

## Project Overview

Time Safari is an application designed to foster community building through
gifts, gratitude, and collaborative projects. The app should make it extremely
easy and intuitive for users of any age and capability to recognize
contributions, build trust networks, and organize collective action. It is
built on services that preserve privacy and data sovereignty.

The ultimate goals of Time Safari are two-fold:

1. **Connect** Make it easy, rewarding, and non-threatening for people to
connect with others who have similar interests, and to initiate activities
together. This helps people accomplish and learn from other individuals in
less-structured environments; moreover, it helps them discover who they want
to continue to support and with whom they want to maintain relationships.
2. **Reveal** Widely advertise the great support and rewards that are being
given and accepted freely, especially non-monetary ones. Using visuals and
text, display the kind of impact that gifts are making in the lives of others.
Also show useful and engaging reports of project statistics and personal
accomplishments.

## Core Approaches

Time Safari should help everyday users build meaningful connections and
organize collective efforts by:

1. **Recognizing Contributions**: Creating permanent, verifiable records of
gifts and contributions people give to each other and their communities.
2. **Facilitating Collaboration**: Making it ridiculously easy for people to
ask for or propose help on projects and interests that matter to them.
3. **Building Trust Networks**: Enabling users to maintain their network and
activity visibility. Developing reputation through verified contributions and
references, which can be selectively shown to others outside the network.
4. **Preserving Privacy**: Ensuring personal identifiers are only shared with
explicitly authorized contacts, allowing private individuals including
children to participate safely.
5. **Engaging Content**: Displaying people's records in compelling stories,
and highlighting those projects that are lifting people's lives long-term,
both in physical support and in emotional-spiritual-creative thriving.

## Technical Foundation

This application is built on a privacy-preserving claims architecture (via
endorser.ch) with these key characteristics:

- **Decentralized Identifiers (DIDs)**: User identities are based on
public/private key pairs stored on their devices
- **Cryptographic Verification**: All claims and confirmations are
cryptographically signed
- **User-Controlled Visibility**: Users explicitly control who can see their
identifiers and data
- **Merkle-Chained Claims**: Claims are cryptographically chained for
verification and integrity
- **Native and Web App**: Works on Capacitor (iOS, Android), Desktop (Electron
and CEFPython), and web browsers

## User Journey

The typical progression of usage follows these stages:

1. **Gratitude & Recognition**: Users begin by expressing and recording
gratitude for gifts received, building a foundation of acknowledgment.
2. **Project Proposals**: Users propose projects and ideas, reaching out to
connect with others who share similar interests.
3. **Action Triggers**: Offers of help serve as triggers and motivations to
execute proposed projects, moving from ideas to action.

## Context for LLM Development

When developing new functionality for Time Safari, consider these design
principles:

1. **Accessibility First**: Features should be usable by non-technical users
with minimal learning curve.
2. **Privacy by Design**: All features must respect user privacy and data
sovereignty.
3. **Progressive Enhancement**: Core functionality should work across all
devices, with richer experiences where supported.
4. **Voluntary Collaboration**: The system should enable but never coerce
participation.
5. **Trust Building**: Features should help build verifiable trust between
users.
6. **Network Effects**: Consider how features scale as more users join the
platform.
7. **Low Resource Requirements**: The system should be lightweight enough to
run on inexpensive devices users already own.

## Use Cases to Support

LLM development should focus on enhancing these key use cases:

1. **Community Building**: Tools that help people find others with shared
interests and values.
2. **Project Coordination**: Features that make it easy to propose
collaborative projects and to submit suggestions and offers to existing ones.
3. **Reputation Building**: Methods for users to showcase their contributions
and reliability, in contexts where they explicitly reveal that information.
4. **Governance Experimentation**: Features that facilitate decision-making
and collective governance.

## Constraints

When developing new features, be mindful of these constraints:

1. **Privacy Preservation**: User identifiers must remain private except when
explicitly shared.
2. **Platform Limitations**: Features must work within the constraints of the
target app platforms, while aiming to leverage the best platform technology
available.
3. **Endorser API Limitations**: Backend features are constrained by the
endorser.ch API capabilities.
4. **Performance on Low-End Devices**: The application should remain
performant on older/simpler devices.
5. **Offline-First When Possible**: Key functionality should work offline
when feasible.
## Project Technologies
- Typescript using ES6 classes using vue-facing-decorator
- TailwindCSS
- Vite Build Tool
- Playwright E2E testing
- IndexedDB
- Camera, Image uploads, QR Code reader, ...
## Mobile Features
- Deep Linking
- Local Notifications via a custom Capacitor plugin
## Project Architecture
- The application must work on web browser, PWA (Progressive Web Application),
desktop via Electron, and mobile via Capacitor
- Building for each platform is managed via Vite
## Core Development Principles
### DRY development
- **Code Reuse**
- Extract common functionality into utility functions
- Create reusable components for UI patterns
- Implement service classes for shared business logic
- Use mixins for cross-cutting concerns
- Leverage TypeScript interfaces for shared type definitions
- **Component Patterns**
- Create base components for common UI elements
- Implement higher-order components for shared behavior
- Use slot patterns for flexible component composition
- Create composable services for business logic
- Implement factory patterns for component creation
- **State Management**
- Centralize state in Pinia stores
- Use computed properties for derived state
- Implement shared state selectors
- Create reusable state mutations
- Use action creators for common operations
- **Error Handling**
- Implement centralized error handling
- Create reusable error components
- Use error boundary components
- Implement consistent error logging
- Create error type definitions
- **Type Definitions**
- Create shared interfaces for common data structures
- Use type aliases for complex types
- Implement generic types for reusable components
- Create utility types for common patterns
- Use discriminated unions for state management
- **API Integration**
- Create reusable API client classes
- Implement request/response interceptors
- Use consistent error handling patterns
- Create type-safe API endpoints
- Implement caching strategies
- **Platform Services**
- Abstract platform-specific code behind interfaces
- Create platform-agnostic service layers
- Implement feature detection
- Use dependency injection for services
- Create service factories
- **Testing**
- Create reusable test utilities
- Implement test factories
- Use shared test configurations
- Create reusable test helpers
- Implement consistent test patterns
- F.I.R.S.T. (for Unit Tests)
F – Fast
I – Independent
R – Repeatable
S – Self-validating
T – Timely
### SOLID Principles
- **Single Responsibility**: Each class/component should have only one reason to
change
- Components should focus on one specific feature (e.g., QR scanning, DID management)
- Services should handle one type of functionality (e.g., platform services,
crypto services)
- Utilities should provide focused helper functions
- **Open/Closed**: Software entities should be open for extension but closed for
modification
- Use interfaces for service definitions
- Implement plugin architecture for platform-specific features
- Allow component behavior extension through props and events
- **Liskov Substitution**: Objects should be replaceable with their subtypes
- Platform services should work consistently across web/mobile
- Authentication providers should be interchangeable
- Storage implementations should be swappable
- **Interface Segregation**: Clients shouldn't depend on interfaces they don't use
- Break down large service interfaces into smaller, focused ones
- Component props should be minimal and purposeful
- Event emissions should be specific and targeted
- **Dependency Inversion**: High-level modules shouldn't depend on low-level modules
- Use dependency injection for services
- Abstract platform-specific code behind interfaces
- Implement factory patterns for component creation
### Law of Demeter
- Components should only communicate with immediate dependencies
- Avoid chaining method calls (e.g., `this.service.getUser().getProfile().getName()`)
- Use mediator patterns for complex component interactions
- Implement facade patterns for subsystem access
- Keep component communication through defined events and props
### Composition over Inheritance
- Prefer building components through composition
- Use mixins for shared functionality
- Implement feature toggles through props
- Create higher-order components for common patterns
- Use service composition for complex features
### Interface Segregation
- Define clear interfaces for services
- Keep component APIs minimal and focused
- Split large interfaces into smaller, specific ones
- Use TypeScript interfaces for type definitions
- Implement role-based interfaces for different use cases
### Fail Fast
- Validate inputs early in the process
- Use TypeScript strict mode
- Implement comprehensive error handling
- Add runtime checks for critical operations
- Use assertions for development-time validation
### Principle of Least Astonishment
- Follow Vue.js conventions consistently
- Use familiar naming patterns
- Implement predictable component behaviors
- Maintain consistent error handling
- Keep UI interactions intuitive
### Information Hiding
- Encapsulate implementation details
- Use private class members
- Implement proper access modifiers
- Hide complex logic behind simple interfaces
- Use TypeScript's access modifiers effectively
### Single Source of Truth
- Use Pinia for state management
- Maintain one source for user data
- Centralize configuration management
- Use computed properties for derived state
- Implement proper state synchronization
### Principle of Least Privilege
- Implement proper access control
- Use minimal required permissions
- Follow privacy-by-design principles
- Restrict component access to necessary data
- Implement proper authentication/authorization

75  .cursor/rules/architecture/README.md (new file)
# Architecture Rules Directory
**Author**: Matthew Raymer
**Date**: 2025-08-20
**Status**: 🎯 **ACTIVE** - Architecture protection guidelines
## Overview
This directory contains MDC (Model Directive Configuration) rules that protect
critical architectural components of the TimeSafari project. These rules ensure
that changes to system architecture follow proper review, testing, and
documentation procedures.
## Available Rules
### Build Architecture Guard (`build_architecture_guard.mdc`)
Protects the multi-platform build system including:
- Vite configuration files
- Build scripts and automation
- Platform-specific configurations (iOS, Android, Electron, Web)
- Docker and deployment infrastructure
- CI/CD pipeline components
**When to use**: Any time you're modifying build scripts, configuration files,
or deployment processes.
**Authorization levels**:
- **Level 1**: Minor changes (review required)
- **Level 2**: Moderate changes (testing required)
- **Level 3**: Major changes (ADR required)
## Usage Guidelines
### For Developers
1. **Check the rule**: Before making architectural changes, review the relevant
rule
2. **Follow the process**: Use the appropriate authorization level
3. **Complete validation**: Run through the required checklist
4. **Update documentation**: Keep BUILDING.md and related docs current
### For Reviewers
1. **Verify authorization**: Ensure changes match the required level
2. **Check testing**: Confirm appropriate testing has been completed
3. **Validate documentation**: Ensure BUILDING.md reflects changes
4. **Assess risk**: Consider impact on other platforms and systems
## Integration with Other Rules
- **Version Control**: Works with `workflow/version_control.mdc`
- **Research & Diagnostic**: Supports `research_diagnostic.mdc` for
investigations
- **Software Development**: Aligns with development best practices
- **Markdown Automation**: Integrates with `docs/markdown-automation.mdc` for
consistent documentation formatting
## Emergency Procedures
If architectural changes cause system failures:
1. **Immediate rollback** to last known working state
2. **Document the failure** with full error details
3. **Investigate root cause** using diagnostic workflows
4. **Update procedures** to prevent future failures
---
**Status**: Active architecture protection
**Priority**: Critical
**Maintainer**: Development team
**Next Review**: 2025-09-20

295  .cursor/rules/architecture/build_architecture_guard.mdc (new file)
---
description: Guards against unauthorized changes to the TimeSafari building
architecture
alwaysApply: false
---
# Build Architecture Guard Directive
**Author**: Matthew Raymer
**Date**: 2025-08-20
**Status**: 🎯 **ACTIVE** - Build system protection guidelines
## Purpose
Protect the TimeSafari building architecture from unauthorized changes that
could break the multi-platform build pipeline, deployment processes, or
development workflow. This directive ensures all build system modifications
follow proper review, testing, and documentation procedures.
## Protected Architecture Components
### Core Build Infrastructure
- **Vite Configuration Files**: `vite.config.*.mts` files
- **Build Scripts**: All scripts in `scripts/` directory
- **Package Scripts**: `package.json` build-related scripts
- **Platform Configs**: `capacitor.config.ts`, `electron/`, `android/`,
`ios/`
- **Docker Configuration**: `Dockerfile`, `docker-compose.yml`
- **Environment Files**: `.env.*`, `.nvmrc`, `.node-version`
### Critical Build Dependencies
- **Build Tools**: Vite, Capacitor, Electron, Android SDK, Xcode
- **Asset Management**: `capacitor-assets.config.json`, asset scripts
- **Testing Infrastructure**: Playwright, Jest, mobile test scripts
- **CI/CD Pipeline**: GitHub Actions, build validation scripts
- **Service Worker Assembly**: `sw_scripts/`, `sw_combine.js`, WASM copy steps
## Change Authorization Requirements
### Level 1: Minor Changes (Requires Review)
- Documentation updates to `BUILDING.md`
- Non-breaking script improvements
- Test additions or improvements
- Asset configuration updates
**Process**: Code review + basic testing
### Level 2: Moderate Changes (Requires Testing)
- New build script additions
- Environment variable changes
- Dependency version updates
- Platform-specific optimizations
**Process**: Code review + platform testing + documentation update
### Level 3: Major Changes (Requires ADR)
- Build system architecture changes
- New platform support
- Breaking changes to build scripts
- Major dependency migrations
**Process**: ADR creation + comprehensive testing + team review
## Prohibited Actions
### ❌ Never Allow Without ADR
- **Delete or rename** core build scripts
- **Modify** `package.json` build script names
- **Change** Vite configuration structure
- **Remove** platform-specific build targets
- **Alter** Docker build process
- **Modify** CI/CD pipeline without testing
### ❌ Never Allow Without Testing
- **Update** build dependencies
- **Change** environment configurations
- **Modify** asset generation scripts
- **Alter** test infrastructure
- **Update** platform SDK versions
## Required Validation Checklist
### Before Any Build System Change
- [ ] **Impact Assessment**: Which platforms are affected?
- [ ] **Testing Plan**: How will this be tested across platforms?
- [ ] **Rollback Plan**: How can this be reverted if it breaks?
- [ ] **Documentation**: Will `BUILDING.md` need updates?
- [ ] **Dependencies**: Are all required tools available?
### After Build System Change
- [ ] **Web Platform**: Does `npm run build:web:dev` work?
- [ ] **Mobile Platforms**: Do iOS/Android builds succeed?
- [ ] **Desktop Platform**: Does Electron build and run?
- [ ] **Tests Pass**: Do all build-related tests pass?
- [ ] **Documentation Updated**: Is `BUILDING.md` current?
## Specific Test Commands (Minimum Required)
### Web Platform
- **Development**: `npm run build:web:dev` - serve and load app
- **Production**: `npm run build:web:prod` - verify SW and WASM present
### Mobile Platforms
- **Android**: `npm run build:android:test` or `:prod` - confirm assets copied
- **iOS**: `npm run build:ios:test` or `:prod` - verify build succeeds
### Desktop Platform
- **Electron**: `npm run build:electron:dev` and packaging for target OS
- **Verify**: Single-instance behavior and app boot
### Auto-run (if affected)
- **Test Mode**: `npm run auto-run:test` and platform variants
- **Production Mode**: `npm run auto-run:prod` and platform variants
### Clean and Rebuild
- Run relevant `clean:*` scripts and ensure re-build works
## Emergency Procedures
### Build System Broken
1. **Immediate**: Revert to last known working commit
2. **Investigation**: Create issue with full error details
3. **Testing**: Verify all platforms work after revert
4. **Documentation**: Update `BUILDING.md` with failure notes
### Platform-Specific Failure
1. **Isolate**: Identify which platform is affected
2. **Test Others**: Verify other platforms still work
3. **Rollback**: Revert platform-specific changes
4. **Investigation**: Debug in isolated environment
## Integration Points
### With Version Control
- **Branch Protection**: Require reviews for build script changes
- **Commit Messages**: Must reference ADR for major changes
- **Testing**: All build changes must pass CI/CD pipeline
### With Documentation
- **BUILDING.md**: Must be updated for any script changes
- **README.md**: Must reflect new build requirements
- **CHANGELOG.md**: Must document breaking build changes
### With Testing
- **Pre-commit**: Run basic build validation
- **CI/CD**: Full platform build testing
- **Manual Testing**: Human verification of critical paths
## Risk Matrix & Required Validation
### Environment Handling
- **Trigger**: Change to `.env.*` loading / variable names
- **Validation**: Prove `dev/test/prod` builds; show environment echo in logs
### Script Flow
- **Trigger**: Reorder steps (prebuild → build → package), new flags
- **Validation**: Dry-run + normal run, show exit codes & timing
### Platform Packaging
- **Trigger**: Electron NSIS/DMG/AppImage, Android/iOS bundle
- **Validation**: Produce installer/artifact and open it; verify single-instance,
icons, signing
### Service Worker / WASM
- **Trigger**: `sw_combine.js`, WASM copy path
- **Validation**: Verify combined SW exists and is injected; page loads offline;
WASM present
### Docker
- **Trigger**: New base image, build args
- **Validation**: Build image locally; run container; list produced `/dist`
### Signing/Notarization
- **Trigger**: Cert path/profiles
- **Validation**: Show signing logs + verify on target OS
## PR Template (Paste into Description)
- [ ] **Level**: L1 / L2 / L3 + justification
- [ ] **Files & platforms touched**:
- [ ] **Risk triggers & mitigations**:
- [ ] **Commands run (paste logs)**:
- [ ] **Artifacts (names + sha256)**:
- [ ] **Docs updated (sections/links)**:
- [ ] **Rollback steps verified**:
- [ ] **CI**: Jobs passing and artifacts uploaded
## Rollback Playbook
### Immediate Rollback
1. `git revert` or `git reset --hard <prev>`; restore prior `scripts/` or config
files
2. Rebuild affected targets; verify old behavior returns
3. Post-mortem notes → update this guard and `BUILDING.md` if gaps found
### Rollback Verification
- **Web**: `npm run build:web:dev` and `npm run build:web:prod`
- **Mobile**: `npm run build:android:test` and `npm run build:ios:test`
- **Desktop**: `npm run build:electron:dev` and packaging commands
- **Clean**: Run relevant `clean:*` scripts and verify re-build works
## ADR Trigger List
Raise an ADR when you propose any of:
- **New build stage** or reorder of canonical stages
- **Replacement of packager** / packaging format
- **New environment model** or secure secret handling scheme
- **New service worker assembly** strategy or cache policy
- **New Docker base** or multi-stage pipeline
- **Relocation of build outputs** or directory conventions
**ADR must include**: motivation, alternatives, risks, validation plan, rollback,
doc diffs.
## Competence Hooks
### Why This Works
- **Prevents Build Failures**: Catches issues before they reach production
- **Maintains Consistency**: Ensures all platforms build identically
- **Reduces Debugging Time**: Prevents build system regressions
### Common Pitfalls
- **Silent Failures**: Changes that work on one platform but break others
- **Dependency Conflicts**: Updates that create version incompatibilities
- **Documentation Drift**: Build scripts that don't match documentation
### Next Skill Unlock
- Learn to test build changes across all platforms simultaneously
### Teach-back
- "What three platforms must I test before committing a build script change?"
## Collaboration Hooks
### Team Review Requirements
- **Platform Owners**: iOS, Android, Electron, Web specialists
- **DevOps**: CI/CD pipeline maintainers
- **QA**: Testing infrastructure owners
### Discussion Prompts
- "Which platforms will be affected by this build change?"
- "How can we test this change without breaking existing builds?"
- "What's our rollback plan if this change fails?"
## Self-Check (Before Allowing Changes)
- [ ] **Authorization Level**: Is this change appropriate for the level?
- [ ] **Testing Plan**: Is there a comprehensive testing strategy?
- [ ] **Documentation**: Will BUILDING.md be updated?
- [ ] **Rollback**: Is there a safe rollback mechanism?
- [ ] **Team Review**: Have appropriate stakeholders been consulted?
- [ ] **CI/CD**: Will this pass the build pipeline?
---
**Status**: Active build system protection
**Priority**: Critical
**Estimated Effort**: Ongoing vigilance
**Dependencies**: All build system components
**Stakeholders**: Development team, DevOps, Platform owners
**Next Review**: 2025-09-20

49  .cursor/rules/asset_configuration.mdc

@@ -1,32 +1,61 @@
---
description: when doing anything with capacitor assets
alwaysApply: false
---

# Asset Configuration Directive

**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Asset management guidelines

*Scope: Assets Only (icons, splashes, image pipelines) — not overall build
orchestration*

## Intent

- Version **asset configuration files** (optionally dev-time generated).
- **Do not** version platform asset outputs (Android/iOS/Electron); generate
them **at build-time** with standard tools.
- Keep existing per-platform build scripts unchanged.

## Source of Truth

- **Preferred (Capacitor default):** `resources/` as the single master source.
- **Alternative:** `assets/` is acceptable **only** if `capacitor-assets` is
explicitly configured to read from it.
- **Never** maintain both `resources/` and `assets/` as parallel sources.
Migrate and delete the redundant folder.

## Config Files

- Live under: `config/assets/` (committed).
- Examples:
  - `config/assets/capacitor-assets.config.json` (or the path the tool
expects)
  - `config/assets/android.assets.json`
  - `config/assets/ios.assets.json`
  - `config/assets/common.assets.yaml` (optional shared layer)
- **Dev-time generation allowed** for these configs; **build-time
generation is forbidden**.

## Build-Time Behavior

- Build generates platform assets (not configs) using the standard chain:

```bash
npm run build:capacitor        # web build via Vite (.mts)
npx cap sync
npx capacitor-assets generate  # produces platform assets; not committed
# then platform-specific build steps
```

---

**Status**: Active asset management directive
**Priority**: Medium
**Estimated Effort**: Ongoing reference
**Dependencies**: capacitor-assets toolchain
**Stakeholders**: Development team, Build team

96  .cursor/rules/base_context.mdc

@@ -1,3 +1,6 @@

A front matter block is added ahead of the JSON toggle block (the
`"coaching_level": "standard"` lines that follow it are unchanged context):

---
alwaysApply: true
---

@@ -10,7 +13,12 @@

# Base Context — Human Competence First

**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Core interaction guidelines

## Purpose

All interactions must *increase the human's competence over time* while
completing the task efficiently. The model may handle menial work and memory
extension, but must also promote learning, autonomy, and healthy work habits.

@@ -21,57 +29,79 @@ machine-driven steps.

## Principles

1. Competence over convenience: finish the task *and* leave the human more
capable next time.
2. Mentorship, not lectures: be concise, concrete, and immediately applicable.
3. Transparency: show assumptions, limits, and uncertainty; cite when
non-obvious.
4. Optional scaffolding: include small, skimmable learning hooks that do not
bloat output.
5. Time respect: default to **lean output**; offer opt-in depth via toggles.
6. Psychological safety: encourage, never condescend; no medical/clinical
advice. No censorship!
7. Reusability: structure outputs so they can be saved, searched, reused, and
repurposed.
8. **Collaborative Bias**: Favor solutions that invite human review,
discussion, and iteration. When in doubt, ask "Who should this be shown
to?" or "Which human input would improve this?"

## Toggle Definitions

### coaching_level

Determines the depth of learning support: `light` (short hooks),
`standard` (balanced), `deep` (detailed).

### socratic_max_questions

The number of clarifying questions the model may ask before proceeding.
If >0, questions should be targeted, minimal, and followed by reasonable
assumptions if unanswered.

### verbosity

'terse' (just a sentence), `concise` (minimum commentary), `normal`
(balanced explanation), or other project-defined levels.

### timebox_minutes

*integer or null* — When set to a positive integer (e.g., `5`), this acts
as a **time budget** guiding the model to prioritize delivering the most
essential parts of the task within that constraint.

Behavior when set:

1. **Prioritize Core Output** — Deliver the minimum viable solution or
result first.
2. **Limit Commentary** — Competence Hooks and Collaboration Hooks must be
shorter than normal.
3. **Signal Skipped Depth** — Omitted details should be listed under
*Deferred for depth*.
4. **Order by Value** — Start with blocking or high-value items, then
proceed to nice-to-haves if budget allows.

If `null`, there is no timebox — the model can produce full-depth
responses.

### format_enforcement

`strict` (reject outputs with format drift) or `relaxed` (minor deviations
acceptable).
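
The five toggles above could be modeled in TypeScript roughly as follows; apart
from `coaching_level: "standard"`, which appears in the JSON block at the top
of the rule, the default values are assumptions.

```ts
type CoachingLevel = "light" | "standard" | "deep";
type Verbosity = "terse" | "concise" | "normal";
type FormatEnforcement = "strict" | "relaxed";

export interface BaseContextToggles {
  coaching_level: CoachingLevel;
  socratic_max_questions: number;
  verbosity: Verbosity;
  timebox_minutes: number | null; // null disables the time budget
  format_enforcement: FormatEnforcement;
}

export const defaultToggles: BaseContextToggles = {
  coaching_level: "standard",
  socratic_max_questions: 2,
  verbosity: "concise",
  timebox_minutes: null,
  format_enforcement: "relaxed",
};
```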
## Modes (select or combine) ## Modes (select or combine)
- **Doer**: produce the artifact fast, minimal commentary. - **Doer**: produce the artifact fast, minimal commentary.
- **Mentor**: add short "why/how" notes + next-step pointers. - **Mentor**: add short "why/how" notes + next-step pointers.
- **Socratic**: ask up to N targeted questions when requirements are ambiguous. - **Socratic**: ask up to N targeted questions when requirements are
ambiguous.
- **Pair-Programmer/Pair-Writer**: explain tradeoffs as you implement. - **Pair-Programmer/Pair-Writer**: explain tradeoffs as you implement.
- **Facilitator**: structure output to be reviewable, commentable, and ready for group discussion. - **Facilitator**: structure output to be reviewable, commentable, and
ready for group discussion.
Default: Doer + short Mentor notes. Default: Doer + short Mentor notes.
## Competence & Collaboration Levers (keep lightweight) ## Competence & Collaboration Levers (keep lightweight)
- "Why this works" (≤3 bullets) - "Why this works" (≤3 bullets)
- "Common pitfalls" (≤3 bullets) - "Common pitfalls" (≤3 bullets)
- "Next skill unlock" (1 tiny action or reading) - "Next skill unlock" (1 tiny action or reading)
@ -79,6 +109,7 @@ Default: Doer + short Mentor notes.
- "Discussion prompts" (≤2 short questions for peers/stakeholders) - "Discussion prompts" (≤2 short questions for peers/stakeholders)
## Output Contract (apply to every deliverable) ## Output Contract (apply to every deliverable)
- Clear **Objective** (1 line) - Clear **Objective** (1 line)
- **Result** (artifact/code/answer) - **Result** (artifact/code/answer)
- **Use/Run** (how to apply/test) - **Use/Run** (how to apply/test)
@ -88,8 +119,10 @@ Default: Doer + short Mentor notes.
- **References** (if used; links or titles)

## Do-Not

- No filler, hedging, or moralizing.
- No medical/mental-health advice; keep "healthy habits" to general work
  practices.
- No invented facts; mark uncertainty plainly.
- No censorship.
- Avoid outputs that bypass human review when such review is valuable.
@ -100,11 +133,22 @@ Default: Doer + short Mentor notes.
- **research_diagnostic.mdc**: For investigation and research workflows

## Self-Check (model, before responding)

- [ ] Task done *and* at least one competence lever included (≤120 words
  total).
- [ ] At least one collaboration/discussion hook present.
- [ ] Output follows the **Output Contract** sections.
- [ ] Toggles respected; verbosity remains concise.
- [ ] Uncertainties/assumptions surfaced.
- [ ] No disallowed content.
---
**Status**: Active core guidelines
**Priority**: Critical
**Estimated Effort**: Ongoing reference
**Dependencies**: None (base ruleset)
**Stakeholders**: All AI interactions

158
.cursor/rules/database/absurd-sql.mdc

@ -1,13 +1,23 @@
---
globs: **/db/databaseUtil.ts, **/interfaces/absurd-sql.d.ts, **/src/registerSQLWorker.js, **/services/AbsurdSqlDatabaseService.ts
alwaysApply: false
---

# Absurd SQL - Cursor Development Guide

**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Database development guidelines

## Project Overview

Absurd SQL is a backend implementation for sql.js that enables persistent
SQLite databases in the browser by using IndexedDB as a block storage system.
This guide provides rules and best practices for developing with this project
in Cursor.

## Project Structure

```
absurd-sql/
├── src/                    # Place source code here
@ -19,37 +29,46 @@ absurd-sql/
## Directives

### 1. Worker Thread Requirements

- All SQL operations MUST be performed in a worker thread
- Main thread should only handle worker initialization and communication
- Never block the main thread with database operations

### 2. Code Organization

- Keep worker code in separate files (e.g., `*.worker.js`)
- Use ES modules for imports/exports
- Follow the project's existing module structure

### 3. Required Headers

When developing locally or deploying, ensure these headers are set:

```
Cross-Origin-Opener-Policy: same-origin
Cross-Origin-Embedder-Policy: require-corp
```
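For local development, the dev server can send these headers itself. A minimal
sketch, assuming the project is served by Vite (`vite.config.ts`); any server
or reverse proxy that fronts the app can set the same two headers instead:

```typescript
// vite.config.ts - a sketch; assumes Vite is the dev server.
import { defineConfig } from 'vite';

export default defineConfig({
  server: {
    headers: {
      // Required for SharedArrayBuffer, and therefore Absurd SQL's fast path.
      'Cross-Origin-Opener-Policy': 'same-origin',
      'Cross-Origin-Embedder-Policy': 'require-corp',
    },
  },
});
```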
### 4. Browser Compatibility

- Primary target: Modern browsers with SharedArrayBuffer support
- Fallback mode: Safari (with limitations)
- Always test in both modes

### 5. Database Configuration

Recommended database settings:

```sql
PRAGMA journal_mode=MEMORY;
PRAGMA page_size=8192;
```
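A minimal sketch of applying these settings right after the database is opened;
`db` is assumed to be an open sql.js database handle and is typed structurally
here for illustration:

```typescript
// Apply the recommended PRAGMAs once, immediately after opening the database.
// Note: page_size only takes effect on a new/empty database (or after VACUUM).
function applyRecommendedSettings(db: { exec(sql: string): unknown }): void {
  db.exec(`
    PRAGMA journal_mode=MEMORY;
    PRAGMA page_size=8192;
  `);
}
```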
### 6. Development Workflow

1. Install dependencies:

```bash
yarn add @jlongster/sql.js absurd-sql
```
@ -58,45 +77,52 @@ PRAGMA page_size=8192;
- `yarn jest` → run all tests
- `yarn serve` → launch development server

### 7. Testing Guidelines

- Write tests for both SharedArrayBuffer and fallback modes
- Use Jest for testing
- Include performance benchmarks for critical operations

### 8. Performance Considerations

- Use bulk operations when possible
- Monitor read/write performance
- Consider using transactions for multiple operations
- Avoid unnecessary database connections
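A minimal sketch of wrapping a multi-row write in one explicit transaction,
using the sql.js `exec`/`prepare`/`run` calls; the table, columns, and loose
`SqlJsDb` type are illustrative, not the project's real interfaces:

```typescript
// `db` is an open sql.js Database (from @jlongster/sql.js); typed loosely
// because the fork may not ship its own type declarations.
type SqlJsDb = {
  exec(sql: string): unknown;
  prepare(sql: string): { run(params: unknown[]): void; free(): void };
};

// Batch inserts inside a single transaction instead of one implicit
// transaction per statement; roll back if any row fails.
function insertContacts(
  db: SqlJsDb,
  contacts: { did: string; name: string }[],
): void {
  db.exec('BEGIN TRANSACTION');
  try {
    const stmt = db.prepare('INSERT INTO contacts (did, name) VALUES (?, ?)');
    for (const c of contacts) {
      stmt.run([c.did, c.name]);
    }
    stmt.free();
    db.exec('COMMIT');
  } catch (err) {
    db.exec('ROLLBACK');
    throw err;
  }
}
```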
### 9. Error Handling

- Implement proper error handling for:
  - Worker initialization failures
  - Database connection issues
  - Concurrent access conflicts (in fallback mode)
  - Storage quota exceeded scenarios
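A minimal sketch of guarding database startup. The `setupDatabase` helper is
only declared here to keep the example standalone (see the Database Setup
pattern below); the quota check keys off `QuotaExceededError`, the standard
DOMException name browsers raise when storage is exhausted:

```typescript
// `setupDatabase()` is the initialization helper sketched under
// "Database Setup" below; declared here only for a self-contained example.
declare function setupDatabase(): Promise<unknown>;

async function openDatabaseSafely(): Promise<unknown> {
  try {
    return await setupDatabase();
  } catch (err) {
    if (err instanceof DOMException && err.name === 'QuotaExceededError') {
      // Storage quota exhausted: route the user to a "free up space" flow
      // rather than retrying blindly.
      return null;
    }
    // Worker initialization or connection failures: rethrow so the caller
    // can report the error and decide whether to retry.
    throw err;
  }
}
```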
### 10. Security Best Practices

- Never expose database operations directly to the client
- Validate all SQL queries
- Implement proper access controls
- Handle sensitive data appropriately

### 11. Code Style

- Follow ESLint configuration
- Use async/await for asynchronous operations
- Document complex database operations
- Include comments for non-obvious optimizations

### 12. Debugging

- Use `jest-debug` for debugging tests
- Monitor IndexedDB usage in browser dev tools
- Check worker communication in console
- Use performance monitoring tools

## Required Patterns

### Worker Initialization

```javascript
import { initBackend } from 'absurd-sql/dist/indexeddb-main-thread';
@ -107,6 +133,7 @@ function init() {
```
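The block above is truncated by this diff. For orientation, a minimal
main-thread sketch following the upstream absurd-sql README pattern; the
worker filename is illustrative and should be the project's actual worker
entry:

```typescript
// main thread - spin up the SQL worker and hand it to absurd-sql's backend.
import { initBackend } from 'absurd-sql/dist/indexeddb-main-thread';

function init(): void {
  // Worker filename is illustrative; use the project's real worker entry.
  const worker = new Worker(
    new URL('./registerSQLWorker.js', import.meta.url),
    { type: 'module' },
  );
  // Lets the worker fall back when SharedArrayBuffer is unavailable.
  initBackend(worker);
}

init();
```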
### Database Setup

```javascript
import initSqlJs from '@jlongster/sql.js';
import { SQLiteFS } from 'absurd-sql';
@ -124,24 +151,39 @@ async function setupDatabase() {
}
```
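Since the middle of that block is elided in this diff, here is a minimal
end-to-end sketch based on the upstream absurd-sql README; the mount path,
database filename, and `locateFile` handling are illustrative and may differ
from the project's actual `setupDatabase()`:

```typescript
// worker thread - open a persistent SQLite database backed by IndexedDB.
import initSqlJs from '@jlongster/sql.js';
import { SQLiteFS } from 'absurd-sql';
import IndexedDBBackend from 'absurd-sql/dist/indexeddb-backend';

async function setupDatabase() {
  const SQL = await initSqlJs({ locateFile: (file: string) => file });
  const sqlFS = new SQLiteFS(SQL.FS, new IndexedDBBackend());
  SQL.register_for_idb(sqlFS);

  // Mount the IndexedDB-backed filesystem; path and filename are illustrative.
  SQL.FS.mkdir('/sql');
  SQL.FS.mount(sqlFS, {}, '/sql');

  const db = new SQL.Database('/sql/timesafari.sqlite', { filename: true });
  db.exec(`
    PRAGMA page_size=8192;
    PRAGMA journal_mode=MEMORY;
  `);
  return db;
}
```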
## Troubleshooting

### Common Issues

1. SharedArrayBuffer not available
   - Check COOP/COEP headers
   - Verify browser support
   - Test fallback mode

2. Worker initialization failures
   - Check file paths
   - Verify module imports
   - Check browser console for errors

3. Performance issues
   - Monitor IndexedDB usage
   - Check for unnecessary operations
   - Verify transaction usage
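A quick check, runnable in the app or pasted into the browser console, that
tells you whether the headers from section 3 actually took effect. These are
standard browser globals, no library calls involved:

```typescript
// False here means the page is not cross-origin isolated, so Absurd SQL
// runs in fallback mode (or fails on browsers without that support).
const canUseSharedArrayBuffer =
  typeof SharedArrayBuffer !== 'undefined' && crossOriginIsolated === true;
```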
## Resources

- [Project Demo](https://priceless-keller-d097e5.netlify.app/)
- [Example Project](https://github.com/jlongster/absurd-example-project)
- [Blog Post](https://jlongster.com/future-sql-web)
- [SQL.js Documentation](https://github.com/sql-js/sql.js/)

---

**Status**: Active database development guidelines
**Priority**: High
**Estimated Effort**: Ongoing reference
**Dependencies**: Absurd SQL, SQL.js, IndexedDB
**Stakeholders**: Development team, Database team

5
.cursor/rules/database/legacy_dexie.mdc

@ -2,4 +2,7 @@
globs: **/databaseUtil.ts,**/AccountViewView.vue,**/ContactsView.vue,**/DatabaseMigration.vue,**/NewIdentifierView.vue
alwaysApply: false
---

# What to do with Dexie

All references in the codebase to Dexie apply only to migration from IndexedDB
to SQLite and will be deprecated in future versions.

47
.cursor/rules/development/type_safety_guide.mdc

@ -1,5 +1,5 @@
---
description: when dealing with types and TypeScript
alwaysApply: false
---

```json
@ -15,8 +15,8 @@ alwaysApply: false
# TypeScript Type Safety Guidelines

**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Type safety enforcement

## Overview
@ -28,7 +28,8 @@ Practical rules to keep TypeScript strict and predictable. Minimize exceptions.
   - Use explicit types. If unknown, use `unknown` and **narrow** via guards.

2. **Error handling uses guards**

   - Reuse guards from `src/interfaces/**` (e.g., `isDatabaseError`,
     `isApiError`).
   - Catch with `unknown`; never cast to `any`.

3. **Dynamic property access is type‑safe**
@ -40,12 +41,30 @@ Practical rules to keep TypeScript strict and predictable. Minimize exceptions.
   - Avoid `(obj as any)[k]`.
## Type Safety Enforcement
### Core Type Safety Rules
- **No `any` Types**: Use explicit types or `unknown` with proper type guards
- **Error Handling Uses Guards**: Implement and reuse type guards from `src/interfaces/**`
- **Dynamic Property Access**: Use `keyof` + `in` checks for type-safe property access
### Type Guard Patterns
- **API Errors**: Use `isApiError(error)` guards for API error handling
- **Database Errors**: Use `isDatabaseError(error)` guards for database operations
- **Axios Errors**: Implement `isAxiosError(error)` guards for HTTP error handling
### Implementation Guidelines
- **Avoid Type Assertions**: Replace `as any` with proper type guards and interfaces
- **Narrow Types Properly**: Use type guards to narrow `unknown` types safely
- **Document Type Decisions**: Explain complex type structures and their purpose
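A minimal sketch of these rules in practice. `DatabaseError`, `saveSettings`,
and `updateTheme` are illustrative stand-ins (the real guards live under
`src/interfaces/**`), and the logger import follows the project logging
standard:

```typescript
import { logger } from '@/utils/logger';

// Illustrative error shape; the project's actual guards live in
// src/interfaces/** and should be reused instead of redefined.
interface DatabaseError {
  code: string;
  message: string;
}

// Type guard: narrows `unknown` without ever casting to `any`.
function isDatabaseError(error: unknown): error is DatabaseError {
  return (
    typeof error === 'object' &&
    error !== null &&
    'code' in error &&
    'message' in error
  );
}

// Hypothetical persistence call, used only to demonstrate the catch pattern.
declare function saveSettings(patch: Record<string, unknown>): Promise<void>;

async function updateTheme(theme: string): Promise<void> {
  try {
    await saveSettings({ theme });
  } catch (error: unknown) {
    if (isDatabaseError(error)) {
      logger.error('[Settings] Save failed', error.code, error.message);
      return;
    }
    throw error;
  }
}

// Dynamic property access with keyof + in instead of `(obj as any)[k]`.
function readSetting<T extends object>(obj: T, key: string): T[keyof T] | undefined {
  return key in obj ? obj[key as keyof T] : undefined;
}
```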
## Minimal Special Cases (document in PR when used)

- **Vue refs / instances**: Use `ComponentPublicInstance` or specific
  component types for dynamic refs.
- **3rd‑party libs without types**: Narrow immediately to a **known
  interface**; do not leave `any` hanging.

## Patterns (short)
@ -106,3 +125,15 @@ const keys = Object.keys(newSettings).filter(
- TS Handbook — https://www.typescriptlang.org/docs/
- TS‑ESLint — https://typescript-eslint.io/rules/
- Vue 3 + TS — https://vuejs.org/guide/typescript/
---
**Status**: Active type safety guidelines
**Priority**: High
**Estimated Effort**: Ongoing reference
**Dependencies**: TypeScript, ESLint, Vue 3
**Stakeholders**: Development team

79
.cursor/rules/docs/markdown-automation.mdc

@ -0,0 +1,79 @@
---
alwaysApply: true
---
# Markdown Automation System
**Author**: Matthew Raymer
**Date**: 2025-08-20
**Status**: 🎯 **ACTIVE** - Markdown formatting automation
## Overview
The Markdown Automation System ensures your markdown formatting standards are
followed **during content generation** by AI agents, not just applied after the
fact.
## AI-First Approach
### **Primary Method**: AI Agent Compliance
- **AI agents follow markdown rules** while generating content
- **No post-generation fixes needed** - content is compliant from creation
- **Consistent formatting** across all generated documentation
### **Secondary Method**: Automated Validation
- **Pre-commit hooks** catch any remaining issues
- **GitHub Actions** validate formatting before merge
- **Manual tools** for bulk fixes when needed
## How It Works
### 1. **AI Agent Compliance** (Primary)
- **When**: Every time AI generates markdown content
- **What**: AI follows markdown rules during generation
- **Result**: Content is properly formatted from creation
### 2. **Pre-commit Hooks** (Backup)
- **When**: Every time you commit
- **What**: Catches any remaining formatting issues
- **Result**: Clean, properly formatted markdown files
### 3. **GitHub Actions** (Pre-merge)
- **When**: Every pull request
- **What**: Validates markdown formatting across all files
- **Result**: Blocks merge if formatting issues exist
## AI Agent Rules Integration
The AI agent follows markdown rules defined in `.cursor/rules/docs/markdown.mdc`:
- **alwaysApply: true** - Rules are enforced during generation
- **Line Length**: AI never generates lines > 80 characters
- **Blank Lines**: AI adds proper spacing around all elements
- **Structure**: AI uses established templates and patterns
## Available Commands
### NPM Scripts
- **`npm run markdown:setup`** - Install the automation system
- **`npm run markdown:fix`** - Fix formatting in all markdown files
- **`npm run markdown:check`** - Validate formatting without fixing
## Benefits
- **No more manual fixes** - AI generates compliant content from start
- **Consistent style** - All files follow same standards
- **Faster development** - No need to fix formatting manually
---
**Status**: Active automation system
**Priority**: High
**Maintainer**: Development team
**Next Review**: 2025-09-20

36
.cursor/rules/docs/markdown.mdc

@ -1,5 +1,5 @@
---
globs: ["*.md", "*.mdc"]
alwaysApply: false
---

# Cursor Markdown Ruleset for TimeSafari Documentation
@ -10,6 +10,36 @@ This ruleset enforces consistent markdown formatting standards across all projec
documentation, ensuring readability, maintainability, and compliance with
markdownlint best practices.
**⚠️ CRITICAL FOR AI AGENTS**: These rules must be followed DURING content
generation, not applied after the fact. Always generate markdown that complies
with these standards from the start.
## AI Generation Guidelines
### **MANDATORY**: Follow These Rules While Writing
When generating markdown content, you MUST:
1. **Line Length**: Never exceed 80 characters per line
2. **Blank Lines**: Always add blank lines around headings, lists, and code
blocks
3. **Structure**: Use proper heading hierarchy and document templates
4. **Formatting**: Apply consistent formatting patterns immediately
### **DO NOT**: Generate content that violates these rules
- ❌ Generate long lines that need breaking
- ❌ Create content without proper blank line spacing
- ❌ Use inconsistent formatting patterns
- ❌ Assume post-processing will fix violations
### **DO**: Generate compliant content from the start
- ✅ Write within 80-character limits
- ✅ Add blank lines around all structural elements
- ✅ Use established templates and patterns
- ✅ Apply formatting standards immediately
## General Formatting Standards ## General Formatting Standards
### Line Length ### Line Length
@ -330,3 +360,7 @@ Description of current situation or problem.
### Security

### Performance
```
## Features ❌ (Duplicate heading)
### Security
### Performance
```

6
.cursor/rules/features/camera-implementation.mdc

@ -1,13 +1,13 @@
---
description: when dealing with cameras in the application
alwaysApply: false
---

# Camera Implementation Documentation

## Overview

This document describes how camera functionality is implemented across the
TimeSafari application. The application uses cameras for two main purposes:

1. QR Code scanning
2. Photo capture

206
.cursor/rules/harbor_pilot_universal.mdc

@ -0,0 +1,206 @@
---
alwaysApply: true
inherits: base_context.mdc
---
```json
{
"coaching_level": "standard",
"socratic_max_questions": 2,
"verbosity": "concise",
"timebox_minutes": 10,
"format_enforcement": "strict"
}
```
# Harbor Pilot — Universal Directive for Human-Facing Technical Guides
**Author**: System/Shared
**Date**: 2025-08-21 (UTC)
**Status**: 🚢 ACTIVE — General ruleset extending *Base Context — Human Competence First*
> **Alignment with Base Context**
> - **Purpose fit**: Prioritizes human competence and collaboration while delivering reproducible artifacts.
> - **Output Contract**: This directive **adds universal constraints** for any technical topic while **inheriting** the Base Context contract sections.
> - **Toggles honored**: Uses the same toggle semantics; defaults above can be overridden by the caller.
---
## Objective
Produce a **developer-grade, reproducible guide** for any technical topic that onboards a competent practitioner **without meta narration** and **with evidence-backed steps**.
## Scope & Constraints
- **One Markdown document** as the deliverable.
- Use **absolute dates** in **UTC** (e.g., `2025-08-21T14:22Z`) — avoid “today/yesterday”.
- Include at least **one diagram** (Mermaid preferred). Choose the most fitting type:
- `sequenceDiagram` (protocols/flows), `flowchart`, `stateDiagram`, `gantt` (timelines), or `classDiagram` (schemas).
- Provide runnable examples where applicable:
- **APIs**: `curl` + one client library (e.g., `httpx` for Python).
- **CLIs**: literal command blocks and expected output snippets.
- **Code**: minimal, self-contained samples (language appropriate).
- Cite **evidence** for *Works/Doesn’t* items (timestamps, filenames, line numbers, IDs/status codes, or logs).
- If something is unknown, output `TODO:<missing>` — **never invent**.
## Required Sections (extends Base Output Contract)
Follow this exact order **after** the Base Contract’s **Objective → Result → Use/Run** headers:
1. **Context & Scope**
- Problem statement, audience, in/out-of-scope bullets.
2. **Artifacts & Links**
- Repos/PRs, design docs, datasets/HARs/pcaps, scripts/tools, dashboards.
3. **Environment & Preconditions**
- OS/runtime, versions/build IDs, services/endpoints/URLs, credentials/auth mode (describe acquisition, do not expose secrets).
4. **Architecture / Process Overview**
- Short prose + **one diagram** selected from the list above.
5. **Interfaces & Contracts (choose one)**
- **API-based**: Endpoint table (*Step, Method, Path/URL, Auth, Key Headers/Params, Sample Req/Resp ref*).
- **Data/Files**: I/O contract table (*Source, Format, Schema/Columns, Size, Validation rules*).
- **Systems/Hardware**: Interfaces table (*Port/Bus, Protocol, Voltage/Timing, Constraints*).
6. **Repro: End-to-End Procedure**
- Minimal copy-paste steps with code/commands and **expected outputs**.
7. **What Works (with Evidence)**
- Each item: **Time (UTC)** • **Artifact/Req IDs** • **Status/Result** • **Where to verify**.
8. **What Doesn’t (Evidence & Hypotheses)**
- Each failure: locus (file/endpoint/module), evidence snippet; short hypothesis and **next probe**.
9. **Risks, Limits, Assumptions**
- SLOs/limits, rate/size caps, security boundaries (CORS/CSRF/ACLs), retries/backoff/idempotency patterns.
10. **Next Steps (Owner • Exit Criteria • Target Date)**
- Actionable, assigned, and time-bound.
11. **References**
- Canonical docs, specs, tickets, prior analyses.
> **Competence Hooks (per Base Context; keep lightweight):**
> - *Why this works* (≤3 bullets) — core invariants or guarantees.
> - *Common pitfalls* (≤3 bullets) — the traps we saw in evidence.
> - *Next skill unlock* (1 line) — the next capability to implement/learn.
> - *Teach-back* (1 line) — prompt the reader to restate the flow/architecture.
> **Collaboration Hooks (per Base Context):**
> - Name reviewers for **Interfaces & Contracts** and the **diagram**.
> - Short **sign-off checklist** before merging/publishing the guide.
## Do / Don’t (Base-aligned)
- **Do** quantify progress only against a defined scope with acceptance criteria.
- **Do** include minimal sample payloads/headers or I/O schemas; redact sensitive values.
- **Do** keep commentary lean; if timeboxed, move depth to **Deferred for depth**.
- **Don’t** use marketing language or meta narration (“Perfect!”, “tool called”, “new chat”).
- **Don’t** include IDE-specific chatter or internal rules unrelated to the task.
## Validation Checklist (self-check before returning)
- [ ] All Required Sections present and ordered.
- [ ] Diagram compiles (basic Mermaid syntax) and fits the problem.
- [ ] If API-based, **Auth** and **Key Headers/Params** are listed for each endpoint.
- [ ] Repro section includes commands/code **and expected outputs**.
- [ ] Every Works/Doesn’t item has **UTC timestamp**, **status/result**, and **verifiable evidence**.
- [ ] Next Steps include **Owner**, **Exit Criteria**, **Target Date**.
- [ ] Unknowns are `TODO:<missing>` — no fabrication.
- [ ] Base **Output Contract** sections satisfied (Objective/Result/Use/Run/Competence/Collaboration/Assumptions/References).
## Universal Template (fill-in)
```markdown
# <Title> — Working Notes (As of YYYY-MM-DDTHH:MMZ)
## Objective
<one line>
## Result
<link to the produced guide file or say “this document”>
## Use/Run
<how to apply/test and where to run samples>
## Context & Scope
- Audience: <role(s)>
- In scope: <bullets>
- Out of scope: <bullets>
## Artifacts & Links
- Repo/PR: <link>
- Data/Logs: <paths or links>
- Scripts/Tools: <paths>
- Dashboards: <links>
## Environment & Preconditions
- OS/Runtime: <details>
- Versions/Builds: <list>
- Services/Endpoints: <list>
- Auth mode: <Bearer/Session/Keys + how acquired>
## Architecture / Process Overview
<short prose>
```mermaid
<one suitable diagram: sequenceDiagram | flowchart | stateDiagram | gantt | classDiagram>
```
## Interfaces & Contracts
### If API-based
| Step | Method | Path/URL | Auth | Key Headers/Params | Sample |
|---|---|---|---|---|---|
| <…> | <…> | <…> | <…> | <…> | below |
### If Data/Files
| Source | Format | Schema/Columns | Size | Validation |
|---|---|---|---|---|
| <…> | <…> | <…> | <…> | <…> |
### If Systems/Hardware
| Interface | Protocol | Timing/Voltage | Constraints | Notes |
|---|---|---|---|---|
| <…> | <…> | <…> | <…> | <…> |
## Repro: End-to-End Procedure
```bash
# commands / curl examples (redacted where necessary)
```
```python
# minimal client library example (language appropriate)
```
> Expected output: <snippet/checks>
## What Works (Evidence)
- ✅ <short statement>
- **Time**: <YYYY-MM-DDTHH:MMZ>
- **Evidence**: file/line/log or request id/status
- **Verify at**: <where>
## What Doesn’t (Evidence & Hypotheses)
- ❌ <short failure> at `<component/endpoint/file>`
- **Time**: <YYYY-MM-DDTHH:MMZ>
- **Evidence**: <snippet/id/status>
- **Hypothesis**: <short>
- **Next probe**: <short>
## Risks, Limits, Assumptions
<bullets: limits, security boundaries, retries/backoff, idempotency, SLOs>
## Next Steps
| Owner | Task | Exit Criteria | Target Date (UTC) |
|---|---|---|---|
| <name> | <action> | <measurable outcome> | <YYYY-MM-DD> |
## References
<links/titles>
## Competence Hooks
- *Why this works*: <≤3 bullets>
- *Common pitfalls*: <≤3 bullets>
- *Next skill unlock*: <1 line>
- *Teach-back*: <1 line>
## Collaboration Hooks
- Reviewers: <names/roles>
- Sign-off checklist: <≤5 checks>
## Assumptions & Limits
<bullets>
## Deferred for depth
<park deeper material here to respect timeboxing>
```
---
**Notes for Implementers:**
- Respect Base *Do-Not* (no filler, no invented facts, no censorship).
- Prefer clarity over completeness when timeboxed; capture unknowns explicitly.
- Apply historical comment management rules (see `.cursor/rules/historical_comment_management.mdc`)
- Apply realistic time estimation rules (see `.cursor/rules/realistic_time_estimation.mdc`)

236
.cursor/rules/historical_comment_management.mdc

@ -0,0 +1,236 @@
---
description: when comments are generated by the model
alwaysApply: false
---
# Historical Comment Management — Harbor Pilot Directive
> **Agent role**: When encountering historical comments about removed methods, deprecated patterns, or architectural changes, apply these guidelines to maintain code clarity and developer guidance.
## 🎯 Purpose
Historical comments should either be **removed entirely** or **transformed into actionable guidance** for future developers. Avoid keeping comments that merely state what was removed without explaining why or what to do instead.
## 📋 Decision Framework
### Remove Historical Comments When:
- **Obsolete Information**: Comment describes functionality that no longer exists
- **No Action Required**: Comment doesn't help future developers make decisions
- **Outdated Context**: Comment refers to old patterns that are no longer relevant
- **Self-Evident**: The current code clearly shows the current approach
### Transform Historical Comments When:
- **Architectural Context**: The change represents a significant pattern shift
- **Migration Guidance**: Future developers might need to understand the evolution
- **Decision Rationale**: The "why" behind the change is still relevant
- **Alternative Approaches**: The comment can guide future implementation choices
## 🔄 Transformation Patterns
### 1. From Removal Notice to Migration Note
```typescript
// ❌ REMOVE THIS
// turnOffNotifyingFlags method removed - notification state is now managed by NotificationSection component
// ✅ TRANSFORM TO THIS
// Note: Notification state management has been migrated to NotificationSection component
// which handles its own lifecycle and persistence via PlatformServiceMixin
```
### 2. From Deprecation Notice to Implementation Guide
```typescript
// ❌ REMOVE THIS
// This will be handled by the NewComponent now
// No need to call oldMethod() as it's no longer needed
// ✅ TRANSFORM TO THIS
// Note: This functionality has been migrated to NewComponent
// which provides better separation of concerns and testability
```
### 3. From Historical Note to Architectural Context
```typescript
// ❌ REMOVE THIS
// Old approach: used direct database calls
// New approach: uses service layer
// ✅ TRANSFORM TO THIS
// Note: Database access has been abstracted through service layer
// for better testability and platform independence
```
## 🚫 Anti-Patterns to Remove
- Comments that only state what was removed
- Comments that don't explain the current approach
- Comments that reference non-existent methods
- Comments that are self-evident from the code
- Comments that don't help future decision-making
## ✅ Best Practices
### When Keeping Historical Context:
1. **Explain the "Why"**: Why was the change made?
2. **Describe the "What"**: What is the current approach?
3. **Provide Context**: When might this information be useful?
4. **Use Actionable Language**: Guide future decisions, not just document history
### When Removing Historical Context:
1. **Verify Obsoleteness**: Ensure the information is truly outdated
2. **Check for Dependencies**: Ensure no other code references the old approach
3. **Update Related Docs**: If removing from code, consider adding to documentation
4. **Preserve in Git History**: The change is preserved in version control
## 🔍 Implementation Checklist
- [ ] Identify historical comments about removed/deprecated functionality
- [ ] Determine if comment provides actionable guidance
- [ ] Transform useful comments into migration notes or architectural context
- [ ] Remove comments that are purely historical without guidance value
- [ ] Ensure remaining comments explain current approach and rationale
- [ ] Update related documentation if significant context is removed
## 📚 Examples
### Good Historical Comment (Keep & Transform)
```typescript
// Note: Database access has been migrated from direct IndexedDB calls to PlatformServiceMixin
// This provides better platform abstraction and consistent error handling across web/mobile/desktop
// When adding new database operations, use this.$getContact(), this.$saveSettings(), etc.
```
### Bad Historical Comment (Remove)
```typescript
// Old method getContactFromDB() removed - now handled by PlatformServiceMixin
// No need to call the old method anymore
```
## 🎯 Integration with Harbor Pilot
This rule works in conjunction with:
- **Component Creation Ideals**: Maintains architectural consistency
- **Migration Patterns**: Documents evolution of patterns
- **Code Review Guidelines**: Ensures comments provide value
## 📝 Version History
### v1.0.0 (2025-08-21)
- Initial creation based on notification system cleanup
- Established decision framework for historical comment management
- Added transformation patterns and anti-patterns
- Integrated with existing Harbor Pilot architecture rules

81
.cursor/rules/investigation_report_example.mdc

@ -1,76 +1,117 @@
# Investigation Report Example

**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Investigation methodology example

## Investigation — Registration Dialog Test Flakiness

## Objective

Identify root cause of flaky tests related to registration dialogs in contact
import scenarios.

## System Map

- User action → ContactInputForm → ContactsView.addContact() →
  handleRegistrationPrompt()
- setTimeout(1000ms) → Modal dialog → User response → Registration API call
- Test execution → Wait for dialog → Assert dialog content → Click response
  button

## Findings (Evidence)

- **1-second timeout causes flakiness** — evidence:
  `src/views/ContactsView.vue:971-1000`; setTimeout(..., 1000) in
  handleRegistrationPrompt()
- **Import flow bypasses dialogs** — evidence:
  `src/views/ContactImportView.vue:500-520`; importContacts() calls
  $insertContact() directly, no handleRegistrationPrompt()
- **Dialog only appears in direct add flow** — evidence:
  `src/views/ContactsView.vue:774-800`; addContact() calls
  handleRegistrationPrompt() after database insert

## Hypotheses & Failure Modes

- H1: 1-second timeout makes dialog appearance unpredictable; would fail when
  tests run faster than 1000ms
- H2: Test environment timing differs from development; watch for CI vs local
  test differences

## Corrections

- Updated: "Multiple dialogs interfere with imports" → "Import flow never
  triggers dialogs - they only appear in direct contact addition"
- Updated: "Complex batch registration needed" → "Simple timeout removal and
  test mode flag sufficient"

## Diagnostics (Next Checks)

- [ ] Repro on CI environment vs local
- [ ] Measure actual dialog appearance timing
- [ ] Test with setTimeout removed
- [ ] Verify import flow doesn't call handleRegistrationPrompt

## Risks & Scope

- Impacted: Contact addition tests, registration workflow tests; Data: None;
  Users: Test suite reliability

## Decision / Next Steps

- Owner: Development Team; By: 2025-01-28
- Action: Remove 1-second timeout + add test mode flag; Exit criteria: Tests
  pass consistently

## References

- `src/views/ContactsView.vue:971-1000`
- `src/views/ContactImportView.vue:500-520`
- `src/views/ContactsView.vue:774-800`

## Competence Hooks

- Why this works: Code path tracing revealed separate execution flows,
  evidence disproved initial assumptions
- Common pitfalls: Assuming related functionality without tracing execution
  paths, over-engineering solutions to imaginary problems
- Next skill: Learn to trace code execution before proposing architectural
  changes
- Teach-back: "What evidence shows that contact imports bypass registration
  dialogs?"

## Key Learning Points

### Evidence-First Approach

This investigation demonstrates the importance of:

1. **Tracing actual code execution** rather than making assumptions
2. **Citing specific evidence** with file:line references
3. **Validating problem scope** before proposing solutions
4. **Considering simpler alternatives** before complex architectural changes

### Code Path Tracing Value

By tracing the execution paths, we discovered:

- Import flow and direct add flow are completely separate
- The "multiple dialog interference" problem didn't exist
- A simple timeout removal would solve the actual issue

### Prevention of Over-Engineering

The investigation prevented:

- Unnecessary database schema changes
- Complex batch registration systems
- Migration scripts for non-existent problems
- Architectural changes based on assumptions

---
**Status**: Active investigation methodology
**Priority**: High
**Estimated Effort**: Ongoing reference
**Dependencies**: software_development.mdc
**Stakeholders**: Development team, QA team

222
.cursor/rules/logging_standards.mdc

@ -0,0 +1,222 @@
# Agent Contract — TimeSafari Logging (Unified, MANDATORY)
**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Mandatory logging standards
## Overview
This document defines unified logging standards for the TimeSafari project,
ensuring consistent, rest-parameter logging style using the project logger.
No `console.*` methods are allowed in production code.
## Scope and Goals
**Scope**: Applies to all diffs and generated code in this workspace unless
explicitly exempted below.
**Goal**: One consistent, rest-parameter logging style using the project
logger; no `console.*` in production code.
## Non‑Negotiables (DO THIS)
- You **MUST** use the project logger; **DO NOT** use any `console.*`
methods.
- Import exactly as:
- `import { logger } from '@/utils/logger'`
- If `@` alias is unavailable, compute the correct relative path (do not
fail).
- Call signatures use **rest parameters**: `logger.info(message, ...args)`
- Prefer primitives/IDs and small objects in `...args`; **never build a
throwaway object** just to "wrap context".
- Production defaults: Web = `warn+`, Electron = `error`, Dev/Capacitor =
`info+` (override via `VITE_LOG_LEVEL`).
- **Database persistence**: `info|warn|error` are persisted; `debug` is not.
Use `logger.toDb(msg, level?)` for DB-only.
## Available Logger API (Authoritative)
- `logger.debug(message, ...args)` — verbose internals, timings, input/output
shapes
- `logger.log(message, ...args)` — synonym of `info` for general info
- `logger.info(message, ...args)` — lifecycle, state changes, success paths
- `logger.warn(message, ...args)` — recoverable issues, retries, degraded mode
- `logger.error(message, ...args)` — failures, thrown exceptions, aborts
- `logger.toDb(message, level?)` — DB-only entry (default level = `info`)
- `logger.toConsoleAndDb(message, isError)` — console + DB (use sparingly)
- `logger.withContext(componentName)` — returns a scoped logger
## Level Guidelines (Use These Heuristics)
### DEBUG
Use for method entry/exit, computed values, filters, loops, retries, and
external call payload sizes.
```typescript
logger.debug('[HomeView] reloadFeedOnChange() called');
logger.debug('[HomeView] Current filter settings',
settings.filterFeedByVisible,
settings.filterFeedByNearby,
settings.searchBoxes?.length ?? 0);
logger.debug('[FeedFilters] Toggling nearby filter',
this.isNearby, this.settingChanged, this.activeDid);
```
**Avoid**: Vague messages (`'Processing data'`).
### INFO
Use for user-visible lifecycle and completed operations.
```typescript
logger.info('[StartView] Component mounted', process.env.VITE_PLATFORM);
logger.info('[StartView] User selected new seed generation');
logger.info('[SearchAreaView] Search box stored',
searchBox.name, searchBox.bbox);
logger.info('[ContactQRScanShowView] Contact registration OK',
contact.did);
```
**Avoid**: Diagnostic details that belong in `debug`.
### WARN
Use for recoverable issues, fallbacks, unexpected-but-handled conditions.
```typescript
logger.warn('[ContactQRScanShowView] Invalid scan result – no value',
resultType);
logger.warn('[ContactQRScanShowView] Invalid QR format – no JWT in URL');
logger.warn('[ContactQRScanShowView] JWT missing "own" field');
```
**Avoid**: Hard failures (those are `error`).
### ERROR
Use for unrecoverable failures, data integrity issues, and thrown
exceptions.
```typescript
logger.error('[HomeView Settings] initializeIdentity() failed', err);
logger.error('[StartView] Failed to load initialization data', error);
logger.error('[ContactQRScanShowView] Error processing contact QR',
error, rawValue);
```
**Avoid**: Expected user cancels (use `info`/`debug`).
## Context Hygiene (Consistent, Minimal, Helpful)
- **Component context**: Prefer scoped logger.
```typescript
const log = logger.withContext('UserService');
log.info('User created', userId);
log.error('Failed to create user', error);
```
If not using `withContext`, prefix message with `[ComponentName]`.
- **Emojis**: Optional and minimal for visual scanning. Recommended set:
- Start/finish: 🚀 / ✅
- Retry/loop: 🔄
- External call: 📡
- Data/metrics: 📊
- Inspection: 🔍
- **Sensitive data**: Never log secrets (tokens, keys, passwords) or
payloads >10KB. Prefer IDs over objects; redact/hash when needed.
## Migration — Auto‑Rewrites (Apply Every Time)
- Exact transforms:
- `console.debug(...)` → `logger.debug(...)`
- `console.log(...)` → `logger.log(...)` (or `logger.info(...)` when
clearly stateful)
- `console.info(...)` → `logger.info(...)`
- `console.warn(...)` → `logger.warn(...)`
- `console.error(...)` → `logger.error(...)`
- Multi-arg handling:
- First arg becomes `message` (stringify safely if non-string).
- Remaining args map 1:1 to `...args`:
`console.info(msg, a, b)` → `logger.info(String(msg), a, b)`
- Sole `Error`:
- `console.error(err)` → `logger.error(err.message, err)`
- **Object-wrapping cleanup**: Replace `{{ userId, meta }}` wrappers with
separate args:
`logger.info('User signed in', userId, meta)`
## DB Logging Rules
- `debug` **never** persists automatically.
- `info|warn|error` persist automatically.
- For DB-only events (no console), call `logger.toDb('Message',
'info'|'warn'|'error')`.
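A minimal sketch of a DB-only entry using the `logger.toDb(message, level?)`
signature listed above; the messages and levels are illustrative:

```typescript
import { logger } from '@/utils/logger';

// Persisted to the database only, no console output; level defaults to 'info'.
logger.toDb('Nightly contact sync completed');

// Persist a recoverable problem at warn level for later inspection.
logger.toDb('Contact sync retried after transient network failure', 'warn');
```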
## Exceptions (Tightly Scoped)
Allowed paths (still prefer logger):
- `**/*.test.*`, `**/*.spec.*`
- `scripts/dev/**`, `scripts/migrate/**`
To intentionally keep `console.*`, add a pragma on the previous line:
```typescript
// cursor:allow-console reason="short justification"
console.log('temporary output');
```
Without the pragma, rewrite to `logger.*`.
## CI & Diff Enforcement
- Do not introduce `console.*` anywhere outside allowed, pragma'd spots.
- If an import is missing, insert it and resolve alias/relative path
correctly.
- Enforce rest-parameter call shape in reviews; replace object-wrapped
context.
- Ensure environment log level rules remain intact (`VITE_LOG_LEVEL`
respected).
## Quick Before/After
### **Before**
```typescript
console.log('User signed in', user.id, meta);
console.error('Failed to update profile', err);
console.info('Filter toggled', this.hasVisibleDid);
```
### **After**
```typescript
import { logger } from '@/utils/logger';
logger.info('User signed in', user.id, meta);
logger.error('Failed to update profile', err);
logger.debug('[FeedFilters] Filter toggled', this.hasVisibleDid);
```
## Checklist (for every PR)
- [ ] No `console.*` (or properly pragma'd in the allowed locations)
- [ ] Correct import path for `logger`
- [ ] Rest-parameter call shape (`message, ...args`)
- [ ] Right level chosen (debug/info/warn/error)
- [ ] No secrets / oversized payloads / throwaway context objects
- [ ] Component context provided (scoped logger or `[Component]` prefix)
---
**Status**: Active and enforced
**Priority**: Critical
**Estimated Effort**: Ongoing reference
**Dependencies**: TimeSafari logger utility
**Stakeholders**: Development team, Code review team

348
.cursor/rules/realistic_time_estimation.mdc

@ -0,0 +1,348 @@
---
description: when generating text that has project task work estimates
alwaysApply: false
---
# No Time Estimates — Harbor Pilot Directive
> **Agent role**: **DO NOT MAKE TIME ESTIMATES**. Instead, use phases, milestones, and complexity levels. Time estimates are consistently wrong and create unrealistic expectations.
## 🎯 Purpose
Development time estimates are consistently wrong and create unrealistic expectations. This rule ensures we focus on phases, milestones, and complexity rather than trying to predict specific timeframes.
## 🚨 Critical Rule
**DO NOT MAKE TIME ESTIMATES**
- **Never provide specific time estimates** - they are always wrong
- **Use phases and milestones** instead of days/weeks
- **Focus on complexity and dependencies** rather than time
- **Set expectations based on progress, not deadlines**
## 📊 Planning Framework (Not Time Estimates)
### **Complexity Categories**
- **Simple**: Text changes, styling updates, minor bug fixes
- **Medium**: New features, refactoring, component updates
- **Complex**: Architecture changes, integrations, cross-platform work
- **Unknown**: New technologies, APIs, or approaches
### **Platform Complexity**
- **Single platform**: Web-only or mobile-only changes
- **Two platforms**: Web + mobile or web + desktop
- **Three platforms**: Web + mobile + desktop
- **Cross-platform consistency**: Ensuring behavior matches across all platforms
### **Testing Complexity**
- **Basic**: Unit tests for new functionality
- **Comprehensive**: Integration tests, cross-platform testing
- **User acceptance**: User testing, feedback integration
## 🔍 Planning Process (No Time Estimates)
### **Step 1: Break Down the Work**
- Identify all subtasks and dependencies
- Group related work into logical phases
- Identify critical path and blockers
### **Step 2: Define Phases and Milestones**
- **Phase 1**: Foundation work (basic fixes, core functionality)
- **Phase 2**: Enhancement work (new features, integrations)
- **Phase 3**: Polish work (testing, user experience, edge cases)
### **Step 3: Identify Dependencies**
- **Technical dependencies**: What must be built first
- **Platform dependencies**: What works on which platforms
- **Testing dependencies**: What can be tested when
### **Step 4: Set Progress Milestones**
- **Milestone 1**: Basic functionality working
- **Milestone 2**: All platforms supported
- **Milestone 3**: Fully tested and polished
## 📋 Planning Checklist (No Time Estimates)
- [ ] Work broken down into logical phases
- [ ] Dependencies identified and mapped
- [ ] Milestones defined with clear criteria
- [ ] Complexity levels assigned to each phase
- [ ] Platform requirements identified
- [ ] Testing strategy planned
- [ ] Risk factors identified
- [ ] Success criteria defined
## 🎯 Example Planning (No Time Estimates)
### **Example 1: Simple Feature**
```
Phase 1: Core implementation
- Basic functionality
- Single platform support
- Unit tests
Phase 2: Platform expansion
- Multi-platform support
- Integration tests
Phase 3: Polish
- User testing
- Edge case handling
```
### **Example 2: Complex Cross-Platform Feature**
```
Phase 1: Foundation
- Architecture design
- Core service implementation
- Basic web platform support
Phase 2: Platform Integration
- Mobile platform support
- Desktop platform support
- Cross-platform consistency
Phase 3: Testing & Polish
- Comprehensive testing
- Error handling
- User experience refinement
```
## 🚫 Anti-Patterns to Avoid
- **"This should take X days"** - Red flag for time estimation
- **"Just a few hours"** - Ignores complexity and testing
- **"Similar to X"** - Without considering differences
- **"Quick fix"** - Nothing is ever quick in software
- **"No testing needed"** - Testing always takes effort
## ✅ Best Practices
### **When Planning:**
1. **Break down everything** - no work is too small to plan
2. **Consider all platforms** - web, mobile, desktop differences
3. **Include testing strategy** - unit, integration, and user testing
4. **Account for unknowns** - there are always surprises
5. **Focus on dependencies** - what blocks what
### **When Presenting Plans:**
1. **Show the phases** - explain the logical progression
2. **Highlight dependencies** - what could block progress
3. **Define milestones** - clear success criteria
4. **Identify risks** - what could go wrong
5. **Suggest alternatives** - ways to reduce scope or complexity
## 🔄 Continuous Improvement
### **Track Progress**
- Record planned vs. actual phases completed
- Identify what took longer than expected
- Learn from complexity misjudgments
- Adjust planning process based on experience
### **Learn from Experience**
- **Underestimated complexity**: Increase complexity categories
- **Missed dependencies**: Improve dependency mapping
- **Platform surprises**: Better platform research upfront
## 🎯 Integration with Harbor Pilot
This rule works in conjunction with:
- **Project Planning**: Focuses on phases and milestones
- **Resource Allocation**: Based on complexity, not time
- **Risk Management**: Identifies blockers and dependencies
- **Stakeholder Communication**: Sets progress-based expectations
## 📝 Version History
### v2.0.0 (2025-08-21)
- **Major Change**: Completely removed time estimation approach
- **New Focus**: Phases, milestones, and complexity-based planning
- **Eliminated**: All time multipliers, estimates, and calculations
- **Added**: Dependency mapping and progress milestone framework
### v1.0.0 (2025-08-21)
- Initial creation based on user feedback about estimation accuracy
- ~~Established realistic estimation multipliers and process~~
- ~~Added comprehensive estimation checklist and examples~~
- Integrated with Harbor Pilot planning and risk management
---
## 🚨 Remember

**DO NOT MAKE TIME ESTIMATES. Use phases, milestones, and complexity instead.
Your first estimate is wrong, and your second is probably still wrong. Focus
on progress, not deadlines.**
- Learn from complexity misjudgments
- Adjust planning process based on experience
### **Learn from Experience**
- **Underestimated complexity**: Increase complexity categories
- **Missed dependencies**: Improve dependency mapping
- **Platform surprises**: Better platform research upfront
## 🎯 Integration with Harbor Pilot
This rule works in conjunction with:
- **Project Planning**: Focuses on phases and milestones
- **Resource Allocation**: Based on complexity, not time
- **Risk Management**: Identifies blockers and dependencies
- **Stakeholder Communication**: Sets progress-based expectations
## 📝 Version History
### v2.0.0 (2025-08-21)
- **Major Change**: Completely removed time estimation approach
- **New Focus**: Phases, milestones, and complexity-based planning
- **Eliminated**: All time multipliers, estimates, and calculations
- **Added**: Dependency mapping and progress milestone framework
### v1.0.0 (2025-08-21)
- Initial creation based on user feedback about estimation accuracy
- ~~Established realistic estimation multipliers and process~~
- ~~Added comprehensive estimation checklist and examples~~
- Integrated with Harbor Pilot planning and risk management
---
## 🚨 Remember
**DO NOT MAKE TIME ESTIMATES. Use phases, milestones, and complexity instead. Focus on progress, not deadlines.**
**Your first estimate is wrong. Your second estimate is probably still wrong.**

4
.cursor/rules/research_diagnostic.mdc

@ -31,6 +31,7 @@ steps—**not** code changes.
## Enhanced with Software Development Ruleset
When investigating software issues, also apply:
- **Code Path Tracing**: Required for technical investigations
- **Evidence Validation**: Ensure claims are code-backed
- **Solution Complexity Assessment**: Justify architectural changes
@ -117,6 +118,7 @@ Copy/paste and fill:
## Code Path Tracing (Required for Software Investigations)
Before proposing solutions, trace the actual execution path:
- [ ] **Entry Points**: Identify where the flow begins (user action, API call, etc.)
- [ ] **Component Flow**: Map which components/methods are involved
- [ ] **Data Path**: Track how data moves through the system
@ -136,11 +138,13 @@ Before proposing solutions, trace the actual execution path:
## Integration with Other Rulesets
### With software_development.mdc
- **Enhanced Evidence Validation**: Use code path tracing for technical investigations
- **Architecture Assessment**: Apply complexity justification to proposed solutions
- **Impact Analysis**: Assess effects on existing systems before recommendations
### With base_context.mdc
- **Competence Building**: Focus on technical investigation skills
- **Collaboration**: Structure outputs for team review and discussion

209
.cursor/rules/software_development.mdc

@ -1,69 +1,144 @@
---
alwaysApply: true
---
# Software Development Ruleset
**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Core development guidelines
## Purpose
Specialized guidelines for software development tasks including code review,
debugging, architecture decisions, and testing.
## Core Principles
### 1. Evidence-First Development
- **Code Citations Required**: Always cite specific file:line references when
making claims
- **Execution Path Tracing**: Trace actual code execution before proposing
architectural changes
- **Assumption Validation**: Flag assumptions as "assumed" vs "evidence-based"
### 2. Code Review Standards
- **Trace Before Proposing**: Always trace execution paths before suggesting
changes
- **Evidence Over Inference**: Prefer code citations over logical deductions
- **Scope Validation**: Confirm the actual scope of problems before proposing
solutions
### 3. Problem-Solution Validation
- **Problem Scope**: Does the solution address the actual problem?
- **Evidence Alignment**: Does the solution match the evidence?
- **Complexity Justification**: Is added complexity justified by real needs?
- **Alternative Analysis**: What simpler solutions were considered?
### 4. Dependency Management & Environment Validation
- **Pre-build Validation**: Always validate critical dependencies before executing
build scripts
- **Environment Consistency**: Ensure team members have identical development
environments
- **Dependency Verification**: Check that required packages are installed and
accessible
- **Path Resolution**: Use `npx` for local dependencies to avoid PATH issues
## Required Workflows
### Before Proposing Changes
- [ ] **Code Path Tracing**: Map execution flow from entry to exit
- [ ] **Evidence Collection**: Gather specific code citations and logs
- [ ] **Assumption Surfacing**: Identify what's proven vs. inferred
- [ ] **Scope Validation**: Confirm the actual extent of the problem
- [ ] **Dependency Validation**: Verify all required dependencies are available
and accessible
### During Solution Design
- [ ] **Evidence Alignment**: Ensure solution addresses proven problems
- [ ] **Complexity Assessment**: Justify any added complexity
- [ ] **Alternative Evaluation**: Consider simpler approaches first
- [ ] **Impact Analysis**: Assess effects on existing systems
- [ ] **Environment Impact**: Assess how changes affect team member setups
## Software-Specific Competence Hooks
### Evidence Validation
- **"What code path proves this claim?"**
- **"How does data actually flow through the system?"**
- **"What am I assuming vs. what can I prove?"**
### Code Tracing
- **"What's the execution path from user action to system response?"**
- **"Which components actually interact in this scenario?"**
- **"Where does the data originate and where does it end up?"**
### Architecture Decisions
- **"What evidence shows this change is necessary?"**
- **"What simpler solution could achieve the same goal?"**
- **"How does this change affect the existing system architecture?"**
### Dependency & Environment Management
- **"What dependencies does this feature require and are they properly
declared?"**
- **"How will this change affect team member development environments?"**
- **"What validation can we add to catch dependency issues early?"**
## Dependency Management Best Practices
### Pre-build Validation
- **Check Critical Dependencies**: Validate essential tools before executing build
scripts
- **Use npx for Local Dependencies**: Prefer `npx tsx` over direct `tsx` to
avoid PATH issues
- **Environment Consistency**: Ensure all team members have identical dependency
versions
### Common Pitfalls
- **Missing npm install**: Team members cloning without running `npm install`
- **PATH Issues**: Direct command execution vs. npm script execution differences
- **Version Mismatches**: Different Node.js/npm versions across team members
### Validation Strategies
- **Dependency Check Scripts**: Implement pre-build validation for critical
dependencies
- **Environment Requirements**: Document and enforce minimum Node.js/npm versions
- **Onboarding Checklist**: Standardize team member setup procedures
### Error Messages and Guidance
- **Specific Error Context**: Provide clear guidance when dependency issues occur
- **Actionable Solutions**: Direct users to specific commands (`npm install`,
`npm run check:dependencies`)
- **Environment Diagnostics**: Implement comprehensive environment validation
tools
### Build Script Enhancements
- **Early Validation**: Check dependencies before starting build processes
- **Graceful Degradation**: Continue builds when possible but warn about issues
- **Helpful Tips**: Remind users about dependency management best practices
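The validation strategies above call for a pre-build dependency check script; the sketch below is one minimal way to implement it. The file name `scripts/check-dependencies.ts` and the package list are assumptions for illustration, not the project's actual script.

```typescript
// Hypothetical scripts/check-dependencies.ts — fail fast with actionable
// guidance when critical local dependencies are missing.
import { existsSync } from "node:fs";
import { join } from "node:path";

// Illustrative package list; a real script would list the project's own tools.
const critical = ["tsx", "vite"];

const missing = critical.filter(
  (pkg) => !existsSync(join("node_modules", pkg, "package.json")),
);

if (missing.length > 0) {
  console.error(`Missing local dependencies: ${missing.join(", ")}`);
  console.error("Run `npm install`, then invoke tools via `npx` to avoid PATH issues.");
  process.exit(1);
}
console.log("Critical dependencies present");
```

Wired up as an npm script (e.g. `npx tsx scripts/check-dependencies.ts`), a check like this can run as the first step of build scripts so the failure mode is a clear message rather than a confusing mid-build error.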
## Integration with Other Rulesets
### With base_context.mdc
- Inherits generic competence principles
- Adds software-specific evidence requirements
- Maintains collaboration and learning focus
### With research_diagnostic.mdc
- Enhances investigation with code path tracing
- Adds evidence validation to diagnostic workflow
- Strengthens problem identification accuracy
@ -71,6 +146,7 @@ Specialized guidelines for software development tasks including code review, deb
## Usage Guidelines
### When to Use This Ruleset
- Code reviews and architectural decisions
- Bug investigation and debugging
- Performance optimization
@ -78,101 +154,72 @@ Specialized guidelines for software development tasks including code review, deb
- Testing strategy development
### When to Combine with Others
- **base_context + software_development**: General development tasks
- **research_diagnostic + software_development**: Technical investigations
- **All three**: Complex architectural decisions or major refactoring
## Self-Check (model, before responding)
- [ ] Code path traced and documented
- [ ] Evidence cited with specific file:line references
- [ ] Assumptions clearly flagged as proven vs. inferred
- [ ] Solution complexity justified by evidence
- [ ] Simpler alternatives considered and documented
- [ ] Impact on existing systems assessed
- [ ] Dependencies validated and accessible
- [ ] Environment impact assessed for team members
- [ ] Pre-build validation implemented where appropriate
## Additional Core Principles
### 4. Dependency Management & Environment Validation
- **Pre-build Validation**: Always validate critical dependencies before executing build scripts
- **Environment Consistency**: Ensure team members have identical development environments
- **Dependency Verification**: Check that required packages are installed and accessible
- **Path Resolution**: Use `npx` for local dependencies to avoid PATH issues
## Additional Required Workflows
### Dependency Validation (Before Proposing Changes)
- [ ] **Dependency Validation**: Verify all required dependencies are available and accessible
### Environment Impact Assessment (During Solution Design)
- [ ] **Environment Impact**: Assess how changes affect team member setups
## Additional Competence Hooks
### Dependency & Environment Management
- **"What dependencies does this feature require and are they properly declared?"**
- **"How will this change affect team member development environments?"**
- **"What validation can we add to catch dependency issues early?"**
## Dependency Management Best Practices
### Pre-build Validation
- **Check Critical Dependencies**: Validate essential tools before executing build scripts
- **Use npx for Local Dependencies**: Prefer `npx tsx` over direct `tsx` to avoid PATH issues
- **Environment Consistency**: Ensure all team members have identical dependency versions
### Common Pitfalls
- **Missing npm install**: Team members cloning without running `npm install`
- **PATH Issues**: Direct command execution vs. npm script execution differences
- **Version Mismatches**: Different Node.js/npm versions across team members
### Validation Strategies
- **Dependency Check Scripts**: Implement pre-build validation for critical dependencies
- **Environment Requirements**: Document and enforce minimum Node.js/npm versions
- **Onboarding Checklist**: Standardize team member setup procedures
### Error Messages and Guidance
- **Specific Error Context**: Provide clear guidance when dependency issues occur
- **Actionable Solutions**: Direct users to specific commands (`npm install`, `npm run check:dependencies`)
- **Environment Diagnostics**: Implement comprehensive environment validation tools
### Build Script Enhancements
- **Early Validation**: Check dependencies before starting build processes
- **Graceful Degradation**: Continue builds when possible but warn about issues
- **Helpful Tips**: Remind users about dependency management best practices
- **Narrow Types Properly**: Use type guards to narrow `unknown` types safely
- **Document Type Decisions**: Explain complex type structures and their purpose

329
.cursor/rules/time.mdc

@ -0,0 +1,329 @@
---
alwaysApply: true
---
# Time Handling in Development Workflow
**Author**: Matthew Raymer
**Date**: 2025-08-17
**Status**: 🎯 **ACTIVE** - Production Ready
## Overview
This guide establishes **how time should be referenced and used** across the
development workflow. It is not tied to any one project, but applies to **all
feature development, issue investigations, ADRs, and documentation**.
## General Principles
- **Explicit over relative**: Always prefer absolute dates (`2025-08-17`) over
relative references like "last week."
- **ISO 8601 Standard**: Use `YYYY-MM-DD` format for all date references in
docs, issues, ADRs, and commits.
- **Time zones**: Default to **UTC** unless explicitly tied to user-facing
behavior.
- **Precision**: Only specify as much precision as needed (date vs. datetime vs.
timestamp).
- **Consistency**: Align time references across ADRs, commits, and investigation
reports.
## In Documentation & ADRs
- Record decision dates using **absolute ISO dates**.
- For ongoing timelines, state start and end explicitly (e.g., `2025-08-01` →
`2025-08-17`).
- Avoid ambiguous terms like *recently*, *last month*, or *soon*.
- For time-based experiments (e.g., A/B tests), always include:
- Start date
- Expected duration
- Review date checkpoint
## In Code & Commits
- Use **UTC timestamps** in logs, DB migrations, and serialized formats.
- In commits, link changes to **date-bound ADRs or investigation docs**.
- For migrations, include both **applied date** and **intended version window**.
- Use constants for known fixed dates; avoid hardcoding arbitrary strings.
## In Investigations & Research
- Capture **when** an issue occurred (absolute time or version tag).
- When describing failures: note whether they are **time-sensitive** (e.g., after
migrations, cache expirations).
- Record diagnostic timelines in ISO format (not relative).
- For performance regressions, annotate both **baseline timeframe** and
**measurement timeframe**.
## Collaboration Hooks
- During reviews, verify **time references are clear, absolute, and
standardized**.
- In syncs, reframe relative terms ("this week") into shared absolute
references.
- Tag ADRs with both **date created** and **review by** checkpoints.
## Self-Check Before Submitting
- [ ] Did I check the time using the **developer's actual system time and
timezone**?
- [ ] Am I using absolute ISO dates?
- [ ] Is UTC assumed unless specified otherwise?
- [ ] Did I avoid ambiguous relative terms?
- [ ] If duration matters, did I specify both start and end?
- [ ] For future work, did I include a review/revisit date?
## Real-Time Context in Developer Interactions
- The model must always resolve **"current time"** using the **developer's
actual system time and timezone**.
- When generating timestamps (e.g., in investigation logs, ADRs, or examples),
the model should:
- Use the **developer's current local time** by default.
- Indicate the timezone explicitly (e.g., `2025-08-17T10:32-05:00`).
- Optionally provide UTC alongside if context requires cross-team clarity.
- When interpreting relative terms like *now*, *today*, *last week*:
- Resolve them against the **developer's current time**.
- Convert them into **absolute ISO-8601 values** in the output.
## LLM Time Checking Instructions
**CRITICAL**: The LLM must actively query the system for current time rather
than assuming or inventing times.
### How to Check Current Time
#### 1. **Query System Time (Required)**
- **Always start** by querying the current system time using available tools
- **Never assume** what the current time is
- **Never use** placeholder values like "current time" or "now"
#### 2. **Available Time Query Methods**
- **System Clock**: Use `date` command or equivalent system time function
- **Programming Language**: Use language-specific time functions (e.g.,
`Date.now()`, `datetime.now()`)
- **Environment Variables**: Check for time-related environment variables
- **API Calls**: Use time service APIs if available
#### 3. **Required Time Information**
When querying time, always obtain:
- **Current Date**: YYYY-MM-DD format
- **Current Time**: HH:MM:SS format (24-hour)
- **Timezone**: Current system timezone or UTC offset
- **UTC Equivalent**: Convert local time to UTC for cross-team clarity
#### 4. **Time Query Examples**
```bash
# Example: Query system time
$ date
# Expected output: Mon Aug 17 10:32:45 EDT 2025
# Example: Query UTC time
$ date -u
# Expected output: Mon Aug 17 14:32:45 UTC 2025
```
```python
# Example: Python time query
import datetime
current_time = datetime.datetime.now()
utc_time = datetime.datetime.utcnow()
print(f"Local: {current_time}")
print(f"UTC: {utc_time}")
```
```javascript
// Example: JavaScript time query
const now = new Date();
const utc = new Date().toISOString();
console.log(`Local: ${now}`);
console.log(`UTC: ${utc}`);
```
#### 5. **LLM Time Checking Workflow**
1. **Query**: Actively query system for current time
2. **Validate**: Confirm time data is reasonable and current
3. **Format**: Convert to ISO 8601 format
4. **Context**: Provide both local and UTC times when helpful
5. **Document**: Show the source of time information
#### 6. **Error Handling for Time Queries**
- **If time query fails**: Ask user for current time or use "unknown time"
with explanation
- **If timezone unclear**: Default to UTC and ask for clarification
- **If time seems wrong**: Verify with user before proceeding
- **Always log**: Record when and how time was obtained
#### 7. **Time Query Verification**
Before using queried time, verify:
- [ ] Time is recent (within last few minutes)
- [ ] Timezone information is available
- [ ] UTC conversion is accurate
- [ ] Format follows ISO 8601 standard
## Model Behavior Rules
- **Never invent a "fake now"**: All "current time" references must come from
the real system clock available at runtime.
- **Check developer time zone**: If ambiguous, ask for clarification (e.g.,
"Should I use UTC or your local timezone?").
- **Format for clarity**:
- Local time: `YYYY-MM-DDTHH:mm±hh:mm`
- UTC equivalent (if needed): `YYYY-MM-DDTHH:mmZ`
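A minimal TypeScript sketch of producing both formats from the real system clock; the helper name is illustrative, not an existing project API.

```typescript
// Format "now" as local ISO-8601 with numeric offset, plus its UTC equivalent.
function localIsoWithOffset(date: Date = new Date()): string {
  const pad = (n: number) => String(n).padStart(2, "0");
  const tzMin = -date.getTimezoneOffset(); // minutes east of UTC
  const sign = tzMin >= 0 ? "+" : "-";
  const abs = Math.abs(tzMin);
  return (
    `${date.getFullYear()}-${pad(date.getMonth() + 1)}-${pad(date.getDate())}` +
    `T${pad(date.getHours())}:${pad(date.getMinutes())}` +
    `${sign}${pad(Math.floor(abs / 60))}:${pad(abs % 60)}`
  );
}

const now = new Date();
console.log(`Local: ${localIsoWithOffset(now)}`);         // e.g. 2025-08-17T10:32-05:00
console.log(`UTC:   ${now.toISOString().slice(0, 16)}Z`); // e.g. 2025-08-17T15:32Z
```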
## Examples
### Good
- "Feature flag rollout started on `2025-08-01` and will be reviewed on
`2025-08-21`."
- "Migration applied on `2025-07-15T14:00Z`."
- "Issue reproduced on `2025-08-17T09:00-05:00 (local)` /
`2025-08-17T14:00Z (UTC)`."
### Bad
- "Feature flag rolled out last week."
- "Migration applied recently."
- "Now is August, so we assume this was last month."
### More Examples
#### Issue Reports
- ✅ **Good**: "User reported login failure at `2025-08-17T14:30:00Z`. Issue
persisted until `2025-08-17T15:45:00Z`."
- ❌ **Bad**: "User reported login failure earlier today. Issue lasted for a
while."
#### Release Planning
- ✅ **Good**: "Feature X scheduled for release on `2025-08-25`. Testing
window: `2025-08-20` to `2025-08-24`."
- ❌ **Bad**: "Feature X will be released next week after testing."
#### Performance Monitoring
- ✅ **Good**: "Baseline performance measured on `2025-08-10T09:00:00Z`.
Regression detected on `2025-08-15T14:00:00Z`."
- ❌ **Bad**: "Performance was good last week but got worse this week."
## Technical Implementation Notes
### UTC Storage Principle
- **Store all timestamps in UTC** in databases, logs, and serialized formats
- **Convert to local time only for user display**
- **Use ISO 8601 format** for all storage: `YYYY-MM-DDTHH:mm:ss.sssZ`
### Common Implementation Patterns
#### Database Storage
```sql
-- ✅ Good: Store in UTC
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-- ❌ Bad: Store in local time (session-local clock, no timezone context)
created_at TIMESTAMP DEFAULT LOCALTIMESTAMP,
updated_at TIMESTAMP DEFAULT LOCALTIMESTAMP
```
#### API Responses
```json
// ✅ Good: Include both UTC and local time
{
"eventTime": "2025-08-17T14:00:00Z",
"localTime": "2025-08-17T10:00:00-04:00",
"timezone": "America/New_York"
}
// ❌ Bad: Only local time
{
"eventTime": "2025-08-17T10:00:00-04:00"
}
```
#### Logging
```python
# ✅ Good: Log in UTC with timezone info
logger.info(f"User action at {datetime.utcnow().isoformat()}Z (UTC)")
# ❌ Bad: Log in local time
logger.info(f"User action at {datetime.now()}")
```
### Timezone Handling Best Practices
#### 1. Always Store Timezone Information
- Include IANA timezone identifier (e.g., `America/New_York`)
- Store UTC offset at time of creation
- Handle daylight saving time transitions automatically
#### 2. User Display Considerations
- Convert UTC to user's preferred timezone
- Show timezone abbreviation when helpful
- Use relative time for recent events ("2 hours ago")
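A brief TypeScript sketch of these display rules using the standard `Intl` APIs (function names are illustrative):

```typescript
// Convert a stored UTC timestamp to a user's IANA timezone, with abbreviation.
function displayInTimezone(utcIso: string, timeZone: string): string {
  return new Intl.DateTimeFormat("en-US", {
    timeZone,
    year: "numeric",
    month: "short",
    day: "2-digit",
    hour: "2-digit",
    minute: "2-digit",
    timeZoneName: "short", // e.g. "EDT"
  }).format(new Date(utcIso));
}

// Relative phrasing for recent events ("2 hours ago").
function relativeHours(utcIso: string, now: Date = new Date()): string {
  const diffHours = Math.round((new Date(utcIso).getTime() - now.getTime()) / 3_600_000);
  return new Intl.RelativeTimeFormat("en", { numeric: "auto" }).format(diffHours, "hour");
}

console.log(displayInTimezone("2025-08-17T14:00:00Z", "America/New_York"));
// e.g. "Aug 17, 2025, 10:00 AM EDT"
```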
#### 3. Edge Case Handling
- **Daylight Saving Time**: Use timezone-aware libraries
- **Leap Seconds**: Handle gracefully (rare but important)
- **Invalid Times**: Validate before processing
### Common Mistakes to Avoid
#### 1. Timezone Confusion
- ❌ **Don't**: Assume server timezone is user timezone
- ✅ **Do**: Always convert UTC to user's local time for display
#### 2. Format Inconsistency
- ❌ **Don't**: Mix different time formats in the same system
- ✅ **Do**: Standardize on ISO 8601 for all storage
#### 3. Relative Time References
- ❌ **Don't**: Use relative terms in persistent storage
- ✅ **Do**: Convert relative terms to absolute timestamps immediately
## References
- [ISO 8601 Date and Time Standard](https://en.wikipedia.org/wiki/ISO_8601)
- [IANA Timezone Database](https://www.iana.org/time-zones)
- [ADR Template](./adr_template.md)
- [Research & Diagnostic Workflow](./research_diagnostic.mdc)
---
**Rule of Thumb**: Every time reference in development artifacts should be
**clear in 6 months without context**, and aligned to the **developer's actual
current time**.
**Technical Rule of Thumb**: **Store in UTC, display in local time, always
include timezone context.**
---
**Status**: Active
**Version**: 1.0
**Maintainer**: Matthew Raymer
**Next Review**: 2025-09-17

321
.cursor/rules/workflow/version_control.mdc

@ -1,102 +1,306 @@
---
description: interacting with git
alwaysApply: false
---
# Directive: Peaceful Co-Existence with Developers
**Author**: Matthew Raymer
**Date**: 2025-08-19
**Status**: 🎯 **ACTIVE** - Version control guidelines
## 1) Version-Control Ownership
- **MUST NOT** run `git add`, `git commit`, or any write action.
- **MUST** leave staging/committing to the developer.
## 2) Source of Truth for Commit Text
- **MUST** derive messages **only** from:
- files **staged** for commit (primary), and
- files **awaiting staging** (context).
- **MUST** use the **diffs** to inform content.
- **MUST NOT** invent changes or imply work not present in diffs.
## 3) Mandatory Preview Flow
- **ALWAYS** present, before any real commit:
- file list + brief per-file notes,
- a **draft commit message** (copy-paste ready),
- nothing auto-applied.
## 4) Version Synchronization Requirements
- **MUST** check for version changes in `package.json` before committing
- **MUST** ensure `CHANGELOG.md` is updated when `package.json` version
changes
- **MUST** validate version format consistency between both files
- **MUST** include version bump commits in changelog with proper semantic
versioning
### Version Sync Checklist (Before Commit)
- [ ] `package.json` version matches latest `CHANGELOG.md` entry
- [ ] New version follows semantic versioning
(MAJOR.MINOR.PATCH[-PRERELEASE])
- [ ] Changelog entry includes all significant changes since last version
- [ ] Version bump commit message follows `build(version): bump to X.Y.Z`
format
- [ ] Breaking changes properly documented with migration notes
- [ ] Alert developer in chat message that version has been updated
### Version Change Detection
- **Check for version changes** in staged/unstaged `package.json`
- **Alert developer** if version changed but changelog not updated
- **Suggest changelog update** with proper format and content
- **Validate semantic versioning** compliance
### Implementation Notes
- **Version Detection**: Compare `package.json` version field with latest
changelog entry
- **Semantic Validation**: Ensure version follows `X.Y.Z[-PRERELEASE]`
format
- **Changelog Format**: Follow [Keep a Changelog](https://keepachangelog.com/)
standards
- **Breaking Changes**: Use `!` in commit message and `BREAKING CHANGE:`
in changelog
- **Pre-release Versions**: Include beta/alpha/rc suffixes in both files
consistently
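A minimal sketch of that detection step (file layout, regex, and output are assumptions for illustration, not the project's actual tooling):

```typescript
// Compare package.json's version with the latest CHANGELOG.md entry and
// validate semantic-versioning format.
import { readFileSync } from "node:fs";

const pkgVersion: string = JSON.parse(readFileSync("package.json", "utf8")).version;
const changelog = readFileSync("CHANGELOG.md", "utf8");

// Treat the first "## [x.y.z]" heading as the latest entry (Keep a Changelog style).
const latest = changelog.match(/^## \[(\d+\.\d+\.\d+(?:-[0-9A-Za-z.-]+)?)\]/m)?.[1];
const semverOk = /^\d+\.\d+\.\d+(-[0-9A-Za-z.-]+)?$/.test(pkgVersion);

if (!semverOk) {
  console.error(`${pkgVersion} does not follow MAJOR.MINOR.PATCH[-PRERELEASE]`);
  process.exit(1);
}
if (latest !== pkgVersion) {
  console.error(`package.json is ${pkgVersion} but latest changelog entry is ${latest ?? "missing"}`);
  process.exit(1);
}
console.log(`Version ${pkgVersion} is in sync with CHANGELOG.md`);
```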
## Commit Message Format (Normative)
### A. Subject Line (required)
```
<type>(<scope>)<!>: <summary>
```
- **type** (lowercase, Conventional Commits):
`feat|fix|refactor|perf|docs|test|build|chore|ci|revert`
- **scope**: optional module/package/area (e.g., `api`, `ui/login`, `db`)
- **!**: include when a breaking change is introduced
- **summary**: imperative mood, ≤ 72 chars, no trailing period
**Examples**
- `fix(api): handle null token in refresh path`
- `feat(ui/login)!: require OTP after 3 failed attempts`
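For illustration only, a rough TypeScript check of the subject-line rules above (this is not the project's commitlint configuration):

```typescript
// Validate "<type>(<scope>)<!>: <summary>" against the constraints above.
const SUBJECT_RE =
  /^(feat|fix|refactor|perf|docs|test|build|chore|ci|revert)(\([\w\/.-]+\))?!?: \S.*[^.]$/;

function checkSubject(subject: string): string[] {
  const problems: string[] = [];
  if (subject.length > 72) problems.push("subject exceeds 72 characters");
  if (!SUBJECT_RE.test(subject)) {
    problems.push("subject does not match <type>(<scope>)<!>: <summary>");
  }
  return problems;
}

console.log(checkSubject("fix(api): handle null token in refresh path")); // []
console.log(checkSubject("Fixed stuff.")); // flagged: unknown type, trailing period
```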
### B. Body (optional, when it adds non-obvious value)
- One blank line after subject.
- Wrap at ~72 chars.
- Explain **what** and **why**, not line-by-line "how".
- Include brief notes like tests passing or TS/lint issues resolved
**only if material**.
**Body checklist**
- [ ] Problem/symptom being addressed
- [ ] High-level approach or rationale
- [ ] Risks, tradeoffs, or follow-ups (if any)
### C. Footer (optional)
- Issue refs: `Closes #123`, `Refs #456`
- Breaking change (alternative to `!`):
`BREAKING CHANGE: <impact + migration note>`
- Authors: `Co-authored-by: Name <email>`
- Security: `CVE-XXXX-YYYY: <short note>` (if applicable)
## Content Guidance
### Include (when relevant)
- Specific fixes/features delivered
- Symptoms/problems fixed
- Brief note that tests passed or TS/lint errors resolved
### Avoid
- Vague: *improved, enhanced, better*
- Trivialities: tiny docs, one-liners, pure lint cleanups (separate,
focused commits if needed)
- Redundancy: generic blurbs repeated across files
- Multi-purpose dumps: keep commits **narrow and focused**
- Long explanations that good inline code comments already cover
**Guiding Principle:** Let code and inline docs speak. Use commits to
highlight what isn't obvious.
## Copy-Paste Templates
### Minimal (no body)
```text
<type>(<scope>): <summary>
```
### Standard (with body & footer)
```text
<type>(<scope>)<!>: <summary>
<why-this-change?>
<what-it-does?>
<risks-or-follow-ups?>
Closes #<id>
BREAKING CHANGE: <impact + migration>
Co-authored-by: <Name> <email>
```
## Assistant Output Checklist (before showing the draft)
- [ ] List changed files + 1–2 line notes per file
- [ ] Provide **one** focused draft message (subject/body/footer)
- [ ] Subject ≤ 72 chars, imperative mood, correct `type(scope)!` syntax
- [ ] Body only if it adds non-obvious value
- [ ] No invented changes; aligns strictly with diffs
- [ ] Render as a single copy-paste block for the developer
---
**Status**: Active version control guidelines
**Priority**: High
**Estimated Effort**: Ongoing reference
**Dependencies**: git, package.json, CHANGELOG.md
**Stakeholders**: Development team, AI assistants
- [ ] No invented changes; aligns strictly with diffs
- [ ] Render as a single copy-paste block for the developer
## 1) Version-Control Ownership
- **MUST NOT** run `git add`, `git commit`, or any write action.
- **MUST** leave staging/committing to the developer.
## 2) Source of Truth for Commit Text
- **MUST** derive messages **only** from:
- files **staged** for commit (primary), and
- files **awaiting staging** (context).
- **MUST** use the **diffs** to inform content.
- **MUST NOT** invent changes or imply work not present in diffs.
## 3) Mandatory Preview Flow
- **ALWAYS** present, before any real commit:
- file list + brief per-file notes,
- a **draft commit message** (copy-paste ready),
- nothing auto-applied.
## 4) Version Synchronization Requirements
- **MUST** check for version changes in `package.json` before committing
- **MUST** ensure `CHANGELOG.md` is updated when `package.json` version
changes
- **MUST** validate version format consistency between both files
- **MUST** include version bump commits in changelog with proper semantic
versioning
### Version Sync Checklist (Before Commit)
- [ ] `package.json` version matches latest `CHANGELOG.md` entry
- [ ] New version follows semantic versioning
(MAJOR.MINOR.PATCH[-PRERELEASE])
- [ ] Changelog entry includes all significant changes since last version
- [ ] Version bump commit message follows `build(version): bump to X.Y.Z`
format
- [ ] Breaking changes properly documented with migration notes
- [ ] Alert developer in chat message that version has been updated
### Version Change Detection
- **Check for version changes** in staged/unstaged `package.json`
- **Alert developer** if version changed but changelog not updated
- **Suggest changelog update** with proper format and content
- **Validate semantic versioning** compliance
### Implementation Notes
- **Version Detection**: Compare `package.json` version field with latest
changelog entry
- **Semantic Validation**: Ensure version follows `X.Y.Z[-PRERELEASE]`
format
- **Changelog Format**: Follow [Keep a Changelog](https://keepachangelog.com/)
standards
- **Breaking Changes**: Use `!` in commit message and `BREAKING CHANGE:`
in changelog
- **Pre-release Versions**: Include beta/alpha/rc suffixes in both files
consistently
## Commit Message Format (Normative)
### A. Subject Line (required)
```
<type>(<scope>)<!>: <summary>
```
- **type** (lowercase, Conventional Commits):
`feat|fix|refactor|perf|docs|test|build|chore|ci|revert`
- **scope**: optional module/package/area (e.g., `api`, `ui/login`, `db`)
- **!**: include when a breaking change is introduced
- **summary**: imperative mood, ≤ 72 chars, no trailing period
**Examples**
- `fix(api): handle null token in refresh path`
- `feat(ui/login)!: require OTP after 3 failed attempts`
## Minimal (no body) ### B. Body (optional, when it adds non-obvious value)
- One blank line after subject.
- Wrap at ~72 chars.
- Explain **what** and **why**, not line-by-line "how".
- Include brief notes like tests passing or TS/lint issues resolved
**only if material**.
**Body checklist**
- [ ] Problem/symptom being addressed
- [ ] High-level approach or rationale
- [ ] Risks, tradeoffs, or follow-ups (if any)
### C. Footer (optional)
- Issue refs: `Closes #123`, `Refs #456`
- Breaking change (alternative to `!`):
`BREAKING CHANGE: <impact + migration note>`
- Authors: `Co-authored-by: Name <email>`
- Security: `CVE-XXXX-YYYY: <short note>` (if applicable)
## Content Guidance
### Include (when relevant)
- Specific fixes/features delivered
- Symptoms/problems fixed
- Brief note that tests passed or TS/lint errors resolved
### Avoid
- Vague: *improved, enhanced, better*
- Trivialities: tiny docs, one-liners, pure lint cleanups (separate,
focused commits if needed)
- Redundancy: generic blurbs repeated across files
- Multi-purpose dumps: keep commits **narrow and focused**
- Long explanations that good inline code comments already cover
**Guiding Principle:** Let code and inline docs speak. Use commits to
highlight what isn't obvious.
## Copy-Paste Templates
### Minimal (no body)
```text ```text
<type>(<scope>): <summary> <type>(<scope>): <summary>
``` ```
## Standard (with body & footer) ### Standard (with body & footer)
```text ```text
<type>(<scope>)<!>: <summary> <type>(<scope>)<!>: <summary>
@ -110,13 +314,22 @@ BREAKING CHANGE: <impact + migration>
Co-authored-by: <Name> <email>
```
## Assistant Output Checklist (before showing the draft)
- [ ] List changed files + 1–2 line notes per file
- [ ] Provide **one** focused draft message (subject/body/footer)
- [ ] Subject ≤ 72 chars, imperative mood, correct `type(scope)!` syntax
- [ ] Body only if it adds non-obvious value
- [ ] No invented changes; aligns strictly with diffs
- [ ] Render as a single copy-paste block for the developer
---
**Status**: Active version control guidelines
**Priority**: High
**Estimated Effort**: Ongoing reference
**Dependencies**: git, package.json, CHANGELOG.md
**Stakeholders**: Development team, AI assistants

2
.dockerignore

@ -140,7 +140,7 @@ docker-compose*
.dockerignore
# CI/CD files
.github
.gitlab-ci.yml
.travis.yml
.circleci

2
.env.test

@ -7,7 +7,7 @@ VITE_LOG_LEVEL=info
TIME_SAFARI_APP_TITLE="TimeSafari_Test" TIME_SAFARI_APP_TITLE="TimeSafari_Test"
VITE_APP_SERVER=https://test.timesafari.app VITE_APP_SERVER=https://test.timesafari.app
# This is the claim ID for actions in the BVC project, with the JWT ID on this environment (not production).
VITE_BVC_MEETUPS_PROJECT_CLAIM_ID=https://endorser.ch/entity/01HWE8FWHQ1YGP7GFZYYPS272F VITE_BVC_MEETUPS_PROJECT_CLAIM_ID=https://endorser.ch/entity/01HWE8FWHQ1YGP7GFZYYPS272F
VITE_DEFAULT_ENDORSER_API_SERVER=https://test-api.endorser.ch VITE_DEFAULT_ENDORSER_API_SERVER=https://test-api.endorser.ch

142
.github/workflows/asset-validation.yml

@ -1,142 +0,0 @@
name: Asset Validation & CI Safeguards
on:
pull_request:
paths:
- 'resources/**'
- 'config/assets/**'
- 'capacitor-assets.config.json'
- 'capacitor.config.ts'
- 'capacitor.config.json'
push:
branches: [main, develop]
paths:
- 'resources/**'
- 'config/assets/**'
- 'capacitor-assets.config.json'
- 'capacitor.config.ts'
- 'capacitor.config.json'
jobs:
asset-validation:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version-file: '.nvmrc'
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Validate asset configuration
run: npm run assets:validate
- name: Check for committed platform assets (Android)
run: |
if git ls-files -z android/app/src/main/res | grep -E '(AppIcon.*\.png|Splash.*\.png|mipmap-.*/ic_launcher.*\.png)' > /dev/null; then
echo "❌ Android platform assets found in VCS - these should be generated at build-time"
git ls-files -z android/app/src/main/res | grep -E '(AppIcon.*\.png|Splash.*\.png|mipmap-.*/ic_launcher.*\.png)'
exit 1
fi
echo "✅ No Android platform assets committed"
- name: Check for committed platform assets (iOS)
run: |
if git ls-files -z ios/App/App/Assets.xcassets | grep -E '(AppIcon.*\.png|Splash.*\.png)' > /dev/null; then
echo "❌ iOS platform assets found in VCS - these should be generated at build-time"
git ls-files -z ios/App/App/Assets.xcassets | grep -E '(AppIcon.*\.png|Splash.*\.png)'
exit 1
fi
echo "✅ No iOS platform assets committed"
- name: Test asset generation
run: |
echo "🧪 Testing asset generation workflow..."
npm run build:capacitor
npx cap sync
npx capacitor-assets generate --dry-run || npx capacitor-assets generate
echo "✅ Asset generation test completed"
- name: Verify clean tree after build
run: |
if [ -n "$(git status --porcelain)" ]; then
echo "❌ Dirty tree after build - asset configs were modified"
git status
git diff
exit 1
fi
echo "✅ Build completed with clean tree"
schema-validation:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version-file: '.nvmrc'
cache: 'npm'
- name: Install dependencies
run: npm ci
- name: Validate schema compliance
run: |
echo "🔍 Validating schema compliance..."
node -e "
const fs = require('fs');
const config = JSON.parse(fs.readFileSync('capacitor-assets.config.json', 'utf8'));
const schema = JSON.parse(fs.readFileSync('config/assets/schema.json', 'utf8'));
// Basic schema validation
if (!config.icon || !config.splash) {
throw new Error('Missing required sections: icon and splash');
}
if (!config.icon.source || !config.splash.source) {
throw new Error('Missing required source fields');
}
if (!/^resources\/.*\.(png|svg)$/.test(config.icon.source)) {
throw new Error('Icon source must be in resources/ directory');
}
if (!/^resources\/.*\.(png|svg)$/.test(config.splash.source)) {
throw new Error('Splash source must be in resources/ directory');
}
console.log('✅ Schema validation passed');
"
- name: Check source file existence
run: |
echo "📁 Checking source file existence..."
node -e "
const fs = require('fs');
const config = JSON.parse(fs.readFileSync('capacitor-assets.config.json', 'utf8'));
const requiredFiles = [
config.icon.source,
config.splash.source
];
if (config.splash.darkSource) {
requiredFiles.push(config.splash.darkSource);
}
const missingFiles = requiredFiles.filter(file => !fs.existsSync(file));
if (missingFiles.length > 0) {
console.error('❌ Missing source files:', missingFiles);
process.exit(1);
}
console.log('✅ All source files exist');
"

27
.github/workflows/playwright.yml

@ -1,27 +0,0 @@
name: Playwright Tests
on:
push:
branches: [ main, master ]
pull_request:
branches: [ main, master ]
jobs:
test:
timeout-minutes: 60
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: lts/*
- name: Install dependencies
run: npm ci
- name: Install Playwright Browsers
run: npx playwright install --with-deps
- name: Run Playwright tests
run: npx playwright test
- uses: actions/upload-artifact@v4
if: always()
with:
name: playwright-report
path: playwright-report/
retention-days: 30

40
.husky/_/husky.sh

@ -0,0 +1,40 @@
#!/usr/bin/env sh
#
# Husky Helper Script
# This file is sourced by all Husky hooks
#
if [ -z "$husky_skip_init" ]; then
debug () {
if [ "$HUSKY_DEBUG" = "1" ]; then
echo "husky (debug) - $1"
fi
}
readonly hook_name="$(basename -- "$0")"
debug "starting $hook_name..."
if [ "$HUSKY" = "0" ]; then
debug "HUSKY env variable is set to 0, skipping hook"
exit 0
fi
if [ -f ~/.huskyrc ]; then
debug "sourcing ~/.huskyrc"
. ~/.huskyrc
fi
readonly husky_skip_init=1
export husky_skip_init
sh -e "$0" "$@"
exitCode="$?"
if [ $exitCode != 0 ]; then
echo "husky - $hook_name hook exited with code $exitCode (error)"
fi
if [ $exitCode = 127 ]; then
echo "husky - command not found in PATH=$PATH"
fi
exit $exitCode
fi

10
.husky/commit-msg

@ -0,0 +1,10 @@
#!/usr/bin/env bash
#
# Husky Commit Message Hook
# Validates commit message format using commitlint
#
. "$(dirname -- "$0")/_/husky.sh"
# Run commitlint but don't fail the commit (|| true)
# This provides helpful feedback without blocking commits
npx commitlint --edit "$1" || true

15
.husky/pre-commit

@ -0,0 +1,15 @@
#!/usr/bin/env bash
#
# Husky Pre-commit Hook
# Runs Build Architecture Guard to check staged files
#
. "$(dirname -- "$0")/_/husky.sh"
echo "🔍 Running Build Architecture Guard (pre-commit)..."
bash ./scripts/build-arch-guard.sh --staged || {
echo
echo "💡 To bypass this check for emergency commits, use:"
echo " git commit --no-verify"
echo
exit 1
}

27
.husky/pre-push

@ -0,0 +1,27 @@
#!/usr/bin/env bash
#
# Husky Pre-push Hook
# Runs Build Architecture Guard to check commits being pushed
#
. "$(dirname -- "$0")/_/husky.sh"
echo "🔍 Running Build Architecture Guard (pre-push)..."
# Get the remote branch we're pushing to
REMOTE_BRANCH="origin/$(git rev-parse --abbrev-ref HEAD)"
# Check if remote branch exists
if git show-ref --verify --quiet "refs/remotes/$REMOTE_BRANCH"; then
RANGE="$REMOTE_BRANCH...HEAD"
else
# If remote branch doesn't exist, check last commit
RANGE="HEAD~1..HEAD"
fi
bash ./scripts/build-arch-guard.sh --range "$RANGE" || {
echo
echo "💡 To bypass this check for emergency pushes, use:"
echo " git push --no-verify"
echo
exit 1
}

433
BUILDING.md

File diff suppressed because it is too large

47
CHANGELOG.md

@ -5,72 +5,89 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.0.7] - 2025.08.18
### Fixed
- Deep link for onboard-meeting-members
## [1.0.6] - 2025.08.09
### Fixed
- Deep link errors where none would validate
## [1.0.5] - 2025.07.24
### Fixed
- Export & import of contacts corrupted contact methods
## [1.0.4] - 2025.07.20 - 002f2407208d56cc59c0aa7c880535ae4cbace8b
### Fixed
- Deep link for invite-one-accept
## [1.0.3] - 2025.07.12 - a9a8ba217cd6015321911e98e6843e988dc2c4ae
### Changed
- Photo is pinned to profile mode
### Fixed
- Deep link URLs (and other prod settings)
- Error in BVC begin view
## [1.0.2] - 2025.06.20 - 276e0a741bc327de3380c4e508cccb7fee58c06d
### Added
- Version on feed title
## [1.0.1] - 2025.06.20
### Added
- Allow a user to block someone else's content from view
## [1.0.0] - 2025.06.20 - 5aa693de6337e5dbb278bfddc6bd39094bc14f73
### Added
- Web-oriented migration from IndexedDB to SQLite
## [0.5.8]
### Added
- /deep-link/ path for URLs that are shared with people
### Changed
- External links now go to /deep-link/...
- Feed visuals now have arrow imagery from giver to receiver
## [0.4.7]
### Fixed
- Cameras everywhere
### Changed
- IndexedDB -> SQLite
## [0.4.5] - 2025.02.23
### Added
- Total amounts of gives on project page
### Changed in DB or environment
- Requires Endorser.ch version 4.2.6+
## [0.4.4] - 2025.02.17

290
README-BUILD-GUARD.md

@@ -0,0 +1,290 @@
# Build Architecture Guard - Husky Implementation
## Overview
The Build Architecture Guard protects your build system by enforcing
documentation requirements through **Git hooks**. When you modify
build-critical files, the system automatically blocks commits/pushes
until you update `BUILDING.md`.
## 🎯 **Why Husky-Only?**
**Advantages:**
- ✅ **Immediate feedback** - Hooks run before commit/push
- ✅ **Works everywhere** - No server-side CI/CD required
- ✅ **Simple setup** - One tool, one configuration
- ✅ **Fast execution** - No network delays or server queues
- ✅ **Offline support** - Works without internet connection
**Trade-offs:**
- ⚠️ **Can be bypassed** - `git commit --no-verify` or `git push --no-verify`
- ⚠️ **Developer discipline** - Relies on team following the rules
## 🏗️ **Architecture**
```text
Developer Workflow:
1. Modify build files (scripts/, vite.config.*, etc.)
2. Try to commit → Husky pre-commit hook runs
3. Guard script checks if BUILDING.md was updated
4. ✅ Commit succeeds if docs updated
5. ❌ Commit blocked if docs missing
```
## 🚀 **Quick Start**
### 1. Install Dependencies
```bash
npm install
npm run prepare # Sets up Husky hooks
```
### 2. Test the System
```bash
# Modify a build file without updating BUILDING.md
echo "# test" >> scripts/test.sh
# Try to commit (should be blocked)
git add scripts/test.sh
git commit -m "test: add build script"
# ❌ Hook blocks commit with helpful message
```
### 3. Fix and Retry
```bash
# Update BUILDING.md with your changes
echo "## New Build Script" >> BUILDING.md
echo "Added test.sh for testing purposes" >> BUILDING.md
# Now commit should succeed
git add BUILDING.md
git commit -m "feat: add test build script with docs"
# ✅ Commit succeeds
```
## 🔧 **How It Works**
### Pre-commit Hook (`.husky/pre-commit`)
- **When**: Every `git commit`
- **What**: Runs `./scripts/build-arch-guard.sh --staged`
- **Result**: Blocks commit if build files changed without BUILDING.md update
### Pre-push Hook (`.husky/pre-push`)
- **When**: Every `git push`
- **What**: Runs `./scripts/build-arch-guard.sh --range`
- **Result**: Blocks push if commits contain undocumented build changes
### Guard Script (`scripts/build-arch-guard.sh`)
- **Detects**: Changes to build-sensitive file patterns
- **Validates**: BUILDING.md was updated alongside changes
- **Reports**: Clear error messages with guidance
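The script itself is not reproduced here, but the check it performs boils down to comparing the changed file list against the sensitive patterns and requiring `BUILDING.md` in the same change set. A rough sketch of that idea (illustrative only, not the actual `build-arch-guard.sh`; the pattern list is an assumption based on the Protected File Patterns section below):
```bash
#!/bin/bash
# Sketch only — not the real scripts/build-arch-guard.sh; patterns are illustrative.
SENSITIVE=("vite.config.*" "package.json" "scripts/*" "electron/*" "android/*" "ios/*" "Dockerfile")

# Collect changed files: staged changes for pre-commit, a commit range for pre-push
case "$1" in
  --staged) CHANGED=$(git diff --name-only --cached) ;;
  --range)  CHANGED=$(git diff --name-only "$2") ;;
  *)        echo "usage: $0 --staged | --range <rev-range>" >&2; exit 2 ;;
esac

needs_docs=false
for file in $CHANGED; do
  for pattern in "${SENSITIVE[@]}"; do
    [[ $file == $pattern ]] && needs_docs=true   # bash glob match against each pattern
  done
done

if [ "$needs_docs" = true ] && ! printf '%s\n' "$CHANGED" | grep -qx "BUILDING.md"; then
  echo "❌ Build-sensitive files changed but BUILDING.md not updated" >&2
  exit 1
fi
```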
## 📁 **Protected File Patterns**
The guard script monitors these paths for changes:
```text
Build Configuration:
├── vite.config.* # Vite configuration
├── capacitor.config.ts # Capacitor configuration
├── package.json # Package configuration
├── package-lock.json # Lock files
├── yarn.lock
└── pnpm-lock.yaml
Build Scripts:
├── scripts/** # All build and automation scripts
├── electron/** # Electron build files
├── android/** # Android build configuration
├── ios/** # iOS build configuration
├── sw_scripts/** # Service worker scripts
└── sw_combine.js # Service worker combination
Deployment:
├── Dockerfile # Docker configuration
└── docker/** # Docker services
```
## 🎭 **Usage Scenarios**
### Scenario 1: Adding a New Build Script
```bash
# ❌ This will be blocked
echo '#!/bin/bash' > scripts/new-build.sh
git add scripts/new-build.sh
git commit -m "feat: add new build script"
# Hook blocks: "Build-sensitive files changed but BUILDING.md not updated"
# ✅ This will succeed
echo '#!/bin/bash' > scripts/new-build.sh
echo '## New Build Script' >> BUILDING.md
echo 'Added new-build.sh for feature X' >> BUILDING.md
git add scripts/new-build.sh BUILDING.md
git commit -m "feat: add new build script with docs"
# ✅ Commit succeeds
```
### Scenario 2: Updating Vite Configuration
```bash
# ❌ This will be blocked
echo 'export default { newOption: true }' >> vite.config.ts
git add vite.config.ts
git commit -m "config: add new vite option"
# Hook blocks: "Build-sensitive files changed but BUILDING.md not updated"
# ✅ This will succeed
echo 'export default { newOption: true }' >> vite.config.ts
echo '### New Vite Option' >> BUILDING.md
echo 'Added newOption for improved performance' >> BUILDING.md
git add vite.config.ts BUILDING.md
git commit -m "config: add new vite option with docs"
# ✅ Commit succeeds
```
## 🚨 **Emergency Bypass**
**⚠️ Use sparingly and only for emergencies:**
```bash
# Skip pre-commit hook
git commit -m "emergency: critical fix" --no-verify
# Skip pre-push hook
git push --no-verify
# Remember to update BUILDING.md later!
```
## 🔍 **Troubleshooting**
### Hooks Not Running
```bash
# Reinstall hooks
npm run prepare
# Check hook files exist and are executable
ls -la .husky/
chmod +x .husky/*
# Verify Git hooks path
git config core.hooksPath
# Should show: .husky
```
### Guard Script Issues
```bash
# Test guard script manually
./scripts/build-arch-guard.sh --help
# Check script permissions
chmod +x scripts/build-arch-guard.sh
# Test with specific files
./scripts/build-arch-guard.sh --staged
```
### False Positives
```bash
# If guard blocks legitimate changes, check:
# 1. Are you modifying a protected file pattern?
# 2. Did you update BUILDING.md?
# 3. Is BUILDING.md staged for commit?
# View what the guard sees
git diff --name-only --cached
```
## 📋 **Best Practices**
### For Developers
1. **Update BUILDING.md first** - Document changes before implementing
2. **Test locally** - Run `./scripts/build-arch-guard.sh --staged` before committing
3. **Use descriptive commits** - Include context about build changes
4. **Don't bypass lightly** - Only use `--no-verify` for true emergencies
### For Teams
1. **Document the system** - Ensure everyone understands the guard
2. **Review BUILDING.md updates** - Verify documentation quality
3. **Monitor bypass usage** - Track when hooks are skipped
4. **Regular audits** - Check that BUILDING.md stays current
### For Maintainers
1. **Update protected patterns** - Modify `scripts/build-arch-guard.sh` as needed
2. **Monitor effectiveness** - Track how often the guard catches issues
3. **Team training** - Help developers understand the system
4. **Continuous improvement** - Refine patterns and error messages
## 🔄 **Customization**
### Adding New Protected Paths
Edit `scripts/build-arch-guard.sh`:
```bash
SENSITIVE=(
# ... existing patterns ...
"new-pattern/**" # Add your new pattern
"*.config.js" # Add file extensions
)
```
### Modifying Error Messages
Edit the guard script to customize:
- Error message content
- File pattern matching
- Documentation requirements
- Bypass instructions
### Adding New Validation Rules
Extend the guard script to check for:
- Specific file content patterns
- Required documentation sections
- Commit message formats
- Branch naming conventions
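As one example, a commit-message format rule could be enforced alongside the guard; a rough sketch, assuming the `type: subject` prefixes used in the commit examples above (this is not the project's actual `.husky/commit-msg`):
```bash
# Illustrative commit-msg rule (sketch only); Git passes the message file path as $1
MSG_FILE="$1"
if ! grep -qE '^(feat|fix|docs|config|chore|test)(\(.+\))?: ' "$MSG_FILE"; then
  echo "❌ Commit message should start with a type prefix, e.g. 'feat: add new build script'" >&2
  exit 1
fi
```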
## 📚 **Integration with PR Template**
The `pull_request_template.md` works with this system by:
- **Guiding developers** through required documentation
- **Ensuring consistency** across all build changes
- **Providing checklist** for comprehensive updates
- **Supporting L1/L2/L3** change classification
## 🎯 **Success Metrics**
Track the effectiveness of your Build Architecture Guard:
- **Hook execution rate** - How often hooks run successfully
- **Bypass frequency** - How often `--no-verify` is used
- **Documentation quality** - BUILDING.md stays current
- **Build failures** - Fewer issues from undocumented changes
- **Team adoption** - Developers follow the process
---
**Status**: Active protection system
**Architecture**: Client-side Git hooks only
**Dependencies**: Husky, Git, Bash
**Maintainer**: Development team
**Related**: `pull_request_template.md`, `scripts/build-arch-guard.sh`

82
README-PR-TEMPLATE.md

@@ -0,0 +1,82 @@
# Pull Request Template
## Location
The Build Architecture Guard PR template is located at:
- **`pull_request_template.md`** (root directory)
## Usage
When creating a pull request in Gitea, this template will automatically populate the PR description with the required checklist.
## Template Features
### Change Level Classification
- **L1**: Minor changes, documentation updates
- **L2**: Moderate changes, new features, environment changes
- **L3**: Major changes, architecture changes, new platforms
### Required Fields for All Levels
- Change level selection
- Scope and impact description
- Commands executed and their output
- Documentation updates (BUILDING.md)
- Rollback verification steps
### Additional Requirements for L3
- **ADR link**: Must provide URL to Architectural Decision Record
- **Artifacts with SHA256**: Must list artifacts with cryptographic hashes
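The hashes themselves can be generated with the standard `sha256sum` tool (`shasum -a 256` on macOS); the artifact path below is a placeholder:
```bash
# Produce the name + sha256 lines to paste into the Artifacts section
sha256sum dist/*.tar.gz
```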
## Integration
This template works with:
- **Gitea Actions**: `.gitea/workflows/build-guard.yml`
- **Client-side hooks**: `.husky/` pre-commit and pre-push hooks
- **Guard script**: `scripts/build-arch-guard.sh`
## Example Usage
```markdown
### Change Level
- [x] Level: **L2**
**Why:** Adding new build script for Docker deployment
### Scope & Impact
- [x] Files & platforms touched: scripts/build-docker.sh,
BUILDING.md
- [x] Risk triggers: Docker build process changes
- [x] Mitigations/validation done: Tested on local Docker environment
### Commands Run
- [x] Web: `npm run build:web:docker`
- [x] Docker: `docker build -t test-image .`
### Artifacts
- [x] Names + **sha256** of artifacts/installers:
Artifacts:
```text
test-image.tar a1b2c3d4e5f6...
```
### Docs
- [x] **BUILDING.md** updated (sections): Docker deployment
- [x] Troubleshooting updated: Added Docker troubleshooting section
### Rollback
- [x] Verified steps to restore previous behavior:
1. `git revert HEAD`
2. `docker rmi test-image`
3. Restore previous BUILDING.md
```
---
**Note**: This template is enforced by the Build Architecture Guard
system. Complete all required fields to ensure your PR can be merged.

279
README.md

@@ -1,243 +1,118 @@
# TimeSafari.app - Crowd-Funder for Time - PWA

[Time Safari](https://timesafari.org/) allows people to ease into collaboration: start with expressions of gratitude
and expand to crowd-fund with time & money, then record and see the impact of contributions.

## Roadmap

See [ClickUp](https://sharing.clickup.com/9014278710/l/h/8cmnyhp-174/10573fec74e2ba0) for current priorities.

## Setup & Building

Quick start:

* For setup, we recommend [pkgx](https://pkgx.dev), which installs what you need (either automatically or with the `dev` command). Core dependencies are typescript & npm; when building for other platforms, you'll need other things such as those in the pkgx.yaml & BUILDING.md files.

```bash
npm install
npm run build:web:serve -- --test
```

To be able to make submissions: go to "profile" (bottom left), go to the bottom and expand "Show Advanced Settings", go to the bottom and to the "Test Page", and finally "Become User 0" to see all the functionality.
See [BUILDING.md](BUILDING.md) for comprehensive build instructions for all platforms (Web, Electron, iOS, Android, Docker).

## Development Database Clearing

TimeSafari provides a simple script-based approach to clear the local database (not the claim server) for development purposes.

## Logging Configuration

TimeSafari supports configurable logging levels via the `VITE_LOG_LEVEL` environment variable. This allows developers to control console output verbosity without modifying code.

### Quick Usage

```bash
# Show only errors
VITE_LOG_LEVEL=error npm run dev
# Show warnings and errors
VITE_LOG_LEVEL=warn npm run dev
# Show info, warnings, and errors (default)
VITE_LOG_LEVEL=info npm run dev
# Show all log levels including debug
VITE_LOG_LEVEL=debug npm run dev
```

### Available Levels

- **`error`**: Critical errors only
- **`warn`**: Warnings and errors (default for production web)
- **`info`**: Info, warnings, and errors (default for development/capacitor)
- **`debug`**: All log levels including verbose debugging

See [Logging Configuration Guide](doc/logging-configuration.md) for complete details.

### Quick Usage

```bash
# Run the database clearing script
./scripts/clear-database.sh
# Then restart your development server
npm run build:electron:dev # For Electron
npm run build:web:dev # For Web
```

### What It Does

#### **Electron (Desktop App)**

- Automatically finds and clears the SQLite database files
- Works on Linux, macOS, and Windows
- Clears all data and forces fresh migrations on next startup

#### **Web Browser**

- Provides instructions for using custom browser data directories
- Shows manual clearing via browser DevTools
- Ensures reliable database clearing without browser complications

### Safety Features

- ✅ **Interactive Script**: Guides you through the process
- ✅ **Platform Detection**: Automatically detects your OS
- ✅ **Clear Instructions**: Step-by-step guidance for each platform
- ✅ **Safe Paths**: Only clears TimeSafari-specific data

### Manual Commands (if needed)

#### **Electron Database Location**

```bash
# Linux
rm -rf ~/.config/TimeSafari/*
# macOS
rm -rf ~/Library/Application\ Support/TimeSafari/*
# Windows
rmdir /s /q %APPDATA%\TimeSafari
```

#### **Web Browser (Custom Data Directory)**

```bash
# Create isolated browser profile
mkdir ~/timesafari-dev-data
```

## Domain Configuration

TimeSafari uses a centralized domain configuration system to ensure consistent
URL generation across all environments. This prevents localhost URLs from
appearing in shared links during development.

### Key Features

- ✅ **Production URLs for Sharing**: All copy link buttons use production domain
- ✅ **Environment-Specific Internal URLs**: Internal operations use appropriate environment URLs
- ✅ **Single Point of Control**: Change domain in one place for entire app
- ✅ **Type-Safe Configuration**: Full TypeScript support

### Quick Reference

```typescript
// For sharing functionality (environment-specific)
import { APP_SERVER } from "@/constants/app";
const shareLink = `${APP_SERVER}/deep-link/claim/123`;
// For internal operations (environment-specific)
import { APP_SERVER } from "@/constants/app";
const apiUrl = `${APP_SERVER}/api/claim/123`;
```

### Documentation

- [Constants and Configuration](src/constants/app.ts) - Core constants

## Tests

See [TESTING.md](test-playwright/TESTING.md) for detailed test instructions.

## Asset Management

TimeSafari uses a standardized asset configuration system for consistent
icon and splash screen generation across all platforms.

### Asset Sources

- **Single source of truth**: `resources/` directory (Capacitor default)
- **Source files**: `icon.png`, `splash.png`, `splash_dark.png`
- **Format**: PNG or SVG files for optimal quality

### Asset Generation

- **Configuration**: `config/assets/capacitor-assets.config.json`
- **Schema validation**: `config/assets/schema.json`
- **Build-time generation**: Platform assets generated via `capacitor-assets`
- **No VCS commits**: Generated assets are never committed to version control

### Development Commands

```bash
# Generate/update asset configurations
npm run assets:config
# Validate asset configurations
npm run assets:validate
# Clean generated platform assets (local dev only)
npm run assets:clean
# Build with asset generation
npm run build:native
```

### Platform Support

- **Android**: Adaptive icons with foreground/background, monochrome support
- **iOS**: LaunchScreen storyboard preferred, splash assets when needed
- **Web**: PWA icons generated during build to `dist/` (not committed)

### Font Awesome Icons

To add a Font Awesome icon, add to `fontawesome.ts` and reference with
`font-awesome` element and `icon` attribute with the hyphenated name.

## Other

### Reference Material

* Notifications can be type of `toast` (self-dismiss), `info`, `success`, `warning`, and `danger`.
They are done via [notiwind](https://www.npmjs.com/package/notiwind) and set up in App.vue.
* [Customize Vue configuration](https://cli.vuejs.org/config/).
* If you are deploying in a subdirectory, add it to `publicPath` in vue.config.js, eg: `publicPath: "/app/time-tracker/",`

### Code Organization

The project uses a centralized approach to type definitions and interfaces:

* `src/interfaces/` - Contains all TypeScript interfaces and type definitions
* `deepLinks.ts` - Deep linking type system and Zod validation schemas
* `give.ts` - Give-related interfaces and type definitions
* `claims.ts` - Claim-related interfaces and verifiable credentials
* `common.ts` - Shared interfaces and utility types
* Other domain-specific interface files

Key principles:

- All interfaces and types are defined in the interfaces folder
- Zod schemas are used for runtime validation and type generation
- Domain-specific interfaces are separated into their own files
- Common interfaces are shared through `common.ts`
- Type definitions are generated from Zod schemas where possible

### Database Architecture

The application uses a platform-agnostic database layer with Vue mixins for service access:

* `src/services/PlatformService.ts` - Database interface definition
* `src/services/PlatformServiceFactory.ts` - Platform-specific service factory
* `src/services/AbsurdSqlDatabaseService.ts` - SQLite implementation
* `src/utils/PlatformServiceMixin.ts` - Vue mixin for database access with caching
* `src/db/` - Legacy Dexie database (migration in progress)

**Development Guidelines**:

- Always use `PlatformServiceMixin` for database operations in components
- Test with PlatformServiceMixin for new features
- Use migration tools for data transfer between systems
- Leverage mixin's ultra-concise methods: `$db()`, `$exec()`, `$one()`, `$contacts()`, `$settings()`

**Architecture Decision**: The project uses Vue mixins over Composition API composables for platform service access. See [Architecture Decisions](doc/architecture-decisions.md) for detailed rationale.

### Kudos

Gifts make the world go 'round!

* [WebStorm by JetBrains](https://www.jetbrains.com/webstorm/) for the free open-source license
* [Máximo Fernández](https://medium.com/@maxfarenas) for the 3D [code](https://github.com/maxfer03/vue-three-ns) and [explanatory post](https://medium.com/nicasource/building-an-interactive-web-portfolio-with-vue-three-js-part-three-implementing-three-js-452cb375ef80)
* [Many tools & libraries](https://gitea.anomalistdesign.com/trent_larson/crowd-funder-for-time-pwa/src/branch/master/package.json#L10) such as Nodejs.org, IntelliJ Idea, Veramo.io, Vuejs.org, threejs.org
* [Bush 3D model](https://sketchfab.com/3d-models/lupine-plant-bf30f1110c174d4baedda0ed63778439)
* [Forest floor image](https://www.goodfreephotos.com/albums/textures/leafy-autumn-forest-floor.jpg)
* Time Safari logo assisted by [DALL-E in ChatGPT](https://chat.openai.com/g/g-2fkFE8rbu-dall-e)
* [DiceBear](https://www.dicebear.com/licenses/) and [Avataaars](https://www.dicebear.com/styles/avataaars/#details) for human-looking identicons
* Some gratitude prompts thanks to [Develop Good Habits](https://www.developgoodhabits.com/gratitude-journal-prompts/)

# Time Safari Application

**Author**: Matthew Raymer
**Version**: 1.0.8-beta
**Description**: Time Safari Application

## 🛡️ Build Architecture Guard

This project uses **Husky Git hooks** to protect the build system
architecture. When you modify build-critical files, the system
automatically blocks commits until you update `BUILDING.md`.

### Quick Setup

```bash
npm run guard:setup # Install and activate the guard
```

### How It Works

- **Pre-commit**: Blocks commits if build files changed without BUILDING.md updates
- **Pre-push**: Blocks pushes if commits contain undocumented build changes
- **Protected paths**: `scripts/`, `vite.config.*`, `electron/`, `android/`, `ios/`, etc.

### Usage

```bash
# Test the guard manually
npm run guard:test
# Emergency bypass (use sparingly)
git commit --no-verify
git push --no-verify
```

**📚 Full documentation**: See `README-BUILD-GUARD.md`

## 🚀 Quick Start

### Prerequisites

- Node.js 18+
- npm, yarn, or pnpm
- Git

### Installation

```bash
npm install
npm run guard:setup # Sets up Build Architecture Guard
```

### Development

```bash
npm run build:web:dev # Build web version
npm run build:ios:test # Build iOS test version
npm run build:android:test # Build Android test version
npm run build:electron:dev # Build Electron dev version
```

### Testing

```bash
npm run test:web # Run web tests
npm run test:mobile # Run mobile tests
npm run test:all # Run all tests
```

## 📁 Project Structure

```text
timesafari/
├── 📁 src/ # Source code
├── 📁 scripts/ # Build and automation scripts
├── 📁 electron/ # Electron configuration
├── 📁 android/ # Android configuration
├── 📁 ios/ # iOS configuration
├── 📁 .husky/ # Git hooks (Build Architecture Guard)
├── 📄 BUILDING.md # Build system documentation
├── 📄 pull_request_template.md # PR template
└── 📄 README-BUILD-GUARD.md # Guard system documentation
```

## 🔧 Build System

This project supports multiple platforms:

- **Web**: Vite-based build with service worker support
- **Mobile**: Capacitor-based iOS and Android builds
- **Desktop**: Electron-based cross-platform desktop app
- **Docker**: Containerized deployment options

## 📚 Documentation

- **`BUILDING.md`** - Complete build system guide
- **`README-BUILD-GUARD.md`** - Build Architecture Guard documentation
- **`pull_request_template.md`** - PR template for build changes

## 🤝 Contributing

1. **Follow the Build Architecture Guard** - Update BUILDING.md when modifying build files
2. **Use the PR template** - Complete the checklist for build-related changes
3. **Test your changes** - Ensure builds work on affected platforms
4. **Document updates** - Keep BUILDING.md current and accurate

## 📄 License

[Add your license information here]

---

**Note**: The Build Architecture Guard is active and will block
commits/pushes that modify build files without proper documentation
updates. See `README-BUILD-GUARD.md` for complete details.

8
TASK_storage.md

@@ -1,7 +1,6 @@
# What to do about storage for native apps?
## Problem
We can't trust iOS IndexedDB to persist. I want to start delivering an app to people now, in preparation for presentations mid-June: Rotary on June 12 and Porcfest on June 17.
@@ -14,7 +13,6 @@ We can't trust iOS IndexedDB to persist. I want to start delivering an app to pe
Also, with sensitive data, the accounts info should be encrypted.
# Options
* There is a community [SQLite plugin for Capacitor](https://github.com/capacitor-community/sqlite) with encryption by [SQLCipher](https://github.com/sqlcipher/sqlcipher).
@@ -29,16 +27,12 @@ Also, with sensitive data, the accounts info should be encrypted.
* Not an option yet: Dexie may support SQLite in [a future version](https://dexie.org/roadmap/dexie5.0).
# Current Plan
* Implement SQLite for Capacitor & web, with encryption. That will allow us to test quickly and keep the same interface for native & web, but we don't deal with migrations for current web users.
* After that is delivered, write a migration for current web users from IndexedDB to SQLite.
# Current method calls
... which is not 100% complete because the AI that generated thus claimed no usage of 'temp' DB.
@@ -80,5 +74,3 @@ Logs operations:
db.logs.get(todayKey) - Gets logs for a specific day
db.logs.update(todayKey, { message: fullMessage }) - Updates logs
db.logs.clear() - Clears all logs

4
android/app/build.gradle

@@ -31,8 +31,8 @@ android {
applicationId "app.timesafari.app"
minSdkVersion rootProject.ext.minSdkVersion
targetSdkVersion rootProject.ext.targetSdkVersion
versionCode 39
versionCode 40
versionName "1.0.6"
versionName "1.0.7"
testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
aaptOptions {
// Files and dirs to omit from the packaged assets dir, modified to accommodate modern web apps.

2
android/build.gradle

@@ -7,7 +7,7 @@ buildscript {
mavenCentral()
}
dependencies {
classpath 'com.android.tools.build:gradle:8.12.0'
classpath 'com.android.tools.build:gradle:8.12.1'
classpath 'com.google.gms:google-services:4.4.0'
// NOTE: Do not place your application dependencies here; they belong

3
doc/DEEP_LINKS.md

@@ -47,6 +47,7 @@ type ClaimParams = z.infer<typeof claimSchema>;
### Type Safety Layers
1. **Schema Definition**
```typescript
// src/interfaces/deepLinks.ts
export const deepLinkSchemas = {
@@ -59,6 +60,7 @@ type ClaimParams = z.infer<typeof claimSchema>;
```
2. **Type Generation**
```typescript
// Types are automatically generated from schemas
export type DeepLinkParams = {
@@ -67,6 +69,7 @@ type ClaimParams = z.infer<typeof claimSchema>;
```
3. **Runtime Validation**
```typescript
// In DeepLinkHandler
const result = deepLinkSchemas.claim.safeParse(params);

3
doc/README.md

@@ -54,7 +54,7 @@ sudo tlmgr install sourceserifpro
The following guide was adapted to this project except that we install with Brew and have a few more packages.
Guide: https://daniel.feldroy.com/posts/setting-up-latex-on-mac-os-x
Guide: <https://daniel.feldroy.com/posts/setting-up-latex-on-mac-os-x>
@@ -71,6 +71,7 @@ open usage-guide.pdf
```
Or use this one-liner
```bash
pandoc usage-guide.md -o usage-guide.pdf && open usage-guide.pdf
```

1
doc/asset-migration-plan.md

@@ -103,6 +103,7 @@ scripts/
### Configuration Schema
The schema enforces:
- Source files must be in `resources/` directory
- Required fields for icon and splash sections
- Android adaptive icon support (foreground/background/monochrome)

8
doc/build-modernization-context.md

@@ -3,11 +3,13 @@
**Author:** Matthew Raymer
## Motivation
- Eliminate manual hacks and post-build scripts for Electron builds
- Ensure maintainability, reproducibility, and security of build outputs
- Unify build, test, and deployment scripts for developer experience and CI/CD
## Key Technical Decisions
- **Vite is the single source of truth for build output**
- All Electron build output (main process, preload, renderer HTML/CSS/JS) is managed by `vite.config.electron.mts`
- **CSS injection for Electron is handled by a Vite plugin**
@@ -21,6 +23,7 @@
- Renderer assets: `dist-electron/www/` (HTML, CSS, JS)
## Security & Maintenance Checklist
- [x] All scripts and configs are committed and documented
- [x] No manual file hacks remain
- [x] All build output is deterministic and reproducible
@@ -28,21 +31,26 @@
- [x] Documentation (`BUILDING.md`) is up to date
## How to Build Electron
1. Run:
```bash
./scripts/build-electron.sh
```
2. Output will be in `dist-electron/`:
- `main.js`, `preload.js` in root
- `www/` contains all renderer assets
3. No manual post-processing is required
## Customization
- **Vite config:** All build output and asset handling is controlled in `vite.config.electron.mts`
- **CSS/HTML injection:** Use Vite plugins (see `electron-css-injection` in the config) for further customization
- **Build scripts:** All orchestration is in `scripts/` and documented in `BUILDING.md`
## For Future Developers
- Always use Vite plugins/config for build output changes
- Never manually edit built files or inject assets post-build
- Keep documentation and scripts in sync with the build process

17
doc/circular-dependency-analysis.md

@@ -13,23 +13,27 @@ The codebase currently has **no active circular dependencies** that are causing
### 🔍 **Resolved Dependency Patterns**
#### 1. **Logger → PlatformServiceFactory → Logger** (RESOLVED)
- **Status**: ✅ **RESOLVED**
- **Previous Issue**: Logger imported `logToDb` from databaseUtil, which imported logger
- **Solution**: Logger now uses direct database access via PlatformServiceFactory
- **Implementation**: Self-contained `logToDatabase()` function in logger.ts
#### 2. **PlatformServiceMixin → databaseUtil → logger → PlatformServiceMixin** (RESOLVED)
- **Status**: ✅ **RESOLVED**
- **Previous Issue**: PlatformServiceMixin imported `memoryLogs` from databaseUtil
- **Solution**: Created self-contained `_memoryLogs` array in PlatformServiceMixin
- **Implementation**: Self-contained memory logs implementation
#### 3. **databaseUtil → logger → PlatformServiceFactory → databaseUtil** (RESOLVED)
- **Status**: ✅ **RESOLVED**
- **Previous Issue**: databaseUtil imported logger, which could create loops
- **Solution**: Logger is now self-contained and doesn't import from databaseUtil
#### 4. **Utility Files → databaseUtil → PlatformServiceMixin** (RESOLVED)
- **Status**: ✅ **RESOLVED**
- **Previous Issue**: `src/libs/util.ts` and `src/services/deepLinks.ts` imported from databaseUtil
- **Solution**: Replaced with self-contained implementations and PlatformServiceFactory usage
@@ -43,18 +47,21 @@ The codebase currently has **no active circular dependencies** that are causing
### ✅ **All Critical Dependencies Resolved**
#### PlatformServiceMixin Independence
- **Status**: ✅ **COMPLETE**
- **Achievement**: PlatformServiceMixin has no external dependencies on databaseUtil
- **Implementation**: Self-contained memory logs and utility functions
- **Impact**: Enables complete migration of databaseUtil functions to PlatformServiceMixin
#### Logger Independence
- **Status**: ✅ **COMPLETE**
- **Achievement**: Logger is completely self-contained
- **Implementation**: Direct database access via PlatformServiceFactory
- **Impact**: Eliminates all circular dependency risks
#### Utility Files Independence
- **Status**: ✅ **COMPLETE**
- **Achievement**: All utility files no longer depend on databaseUtil
- **Implementation**: Self-contained functions and direct platform service access
@@ -63,6 +70,7 @@ The codebase currently has **no active circular dependencies** that are causing
### 🎯 **Migration Readiness Status**
#### Files Ready for Migration (52 files)
1. **Components** (15 files):
- `PhotoDialog.vue`
- `FeedFilters.vue`
@@ -98,6 +106,7 @@ The codebase currently has **no active circular dependencies** that are causing
### 🟢 **Healthy Dependencies**
#### Logger Usage (80+ files)
- **Status**: ✅ **HEALTHY**
- **Pattern**: All files import logger from `@/utils/logger`
- **Impact**: No circular dependencies, logger is self-contained
@@ -106,21 +115,25 @@ The codebase currently has **no active circular dependencies** that are causing
## Resolution Strategy - COMPLETED
### ✅ **Phase 1: Complete PlatformServiceMixin Independence (COMPLETE)**
1. **Removed memoryLogs import** from PlatformServiceMixin ✅
2. **Created self-contained memoryLogs** implementation ✅
3. **Added missing utility methods** to PlatformServiceMixin ✅
### ✅ **Phase 2: Utility Files Migration (COMPLETE)**
1. **Migrated deepLinks.ts** - Replaced databaseUtil logging with console logging ✅
2. **Migrated util.ts** - Replaced databaseUtil functions with self-contained implementations ✅
3. **Updated all PlatformServiceFactory calls** to use async pattern ✅
### 🎯 **Phase 3: File-by-File Migration (READY TO START)**
1. **High-usage files first** (views, core components)
2. **Replace databaseUtil imports** with PlatformServiceMixin
3. **Update function calls** to use mixin methods
### 🎯 **Phase 4: Cleanup (FUTURE)**
1. **Remove unused databaseUtil functions**
2. **Update TypeScript interfaces**
3. **Remove databaseUtil imports** from all files
@@ -128,6 +141,7 @@ The codebase currently has **no active circular dependencies** that are causing
## Current Status Summary
### ✅ **Resolved Issues**
1. **Logger circular dependency** - Fixed with self-contained implementation
2. **PlatformServiceMixin circular dependency** - Fixed with self-contained memoryLogs
3. **Utility files circular dependency** - Fixed with self-contained implementations
@@ -135,6 +149,7 @@ The codebase currently has **no active circular dependencies** that are causing
5. **Runtime stability** - No circular dependency crashes
### 🎯 **Ready for Next Phase**
1. **52 files** ready for databaseUtil migration
2. **PlatformServiceMixin** fully independent and functional
3. **Clear migration path** - Well-defined targets and strategy
@@ -142,6 +157,7 @@ The codebase currently has **no active circular dependencies** that are causing
## Benefits of Current State
### ✅ **Achieved**
1. **No runtime circular dependencies** - Application runs without crashes
2. **Self-contained logger** - No more logger/databaseUtil loops
3. **PlatformServiceMixin ready** - All methods implemented and independent
@@ -149,6 +165,7 @@ The codebase currently has **no active circular dependencies** that are causing
5. **Clear migration path** - Well-defined targets and strategy
### 🎯 **Expected After Migration**
1. **Complete databaseUtil migration** - Single source of truth
2. **Eliminated circular dependencies** - Clean architecture
3. **Improved performance** - Caching and optimization

14
doc/component-communication-guide.md

@@ -93,6 +93,7 @@ export default class FormComponent extends Vue {
When generating component templates, follow these patterns:
#### Function Props Template
```vue
<template>
<div class="component-name">
@@ -124,6 +125,7 @@ export default class ComponentName extends Vue {
```
#### $emit Template (for DOM events)
```vue
<template>
<div class="component-name">
@@ -155,12 +157,14 @@ export default class ComponentName extends Vue {
### Code Generation Rules
#### 1. Function Props for Business Logic
- **Data operations**: Save, delete, update, validate
- **Navigation**: Route changes, modal opening/closing
- **State management**: Store actions, state updates
- **API calls**: Data fetching, form submissions
#### 2. $emit for User Interactions
- **Click events**: Button clicks, link navigation
- **Form events**: Input changes, form submissions
- **Lifecycle events**: Component mounting, unmounting
@@ -169,6 +173,7 @@ export default class ComponentName extends Vue {
#### 3. Naming Conventions
**Function Props:**
```typescript
// Action-oriented names
onSave: (data: SaveData) => Promise<void>
@@ -179,6 +184,7 @@ onNavigate: (route: string) => void
```
**$emit Events:**
```typescript
// Event-oriented names
@click: (event: MouseEvent) => void
@@ -191,6 +197,7 @@ onNavigate: (route: string) => void
### TypeScript Integration
#### Function Prop Types
```typescript
// Define reusable function types
interface SaveHandler {
@@ -207,6 +214,7 @@ interface ValidationHandler {
```
#### Event Types
```typescript
// Define event payload types
interface ClickEvent {
@@ -226,6 +234,7 @@ handleClick(): ClickEvent {
## Testing Guidelines
### Function Props Testing
```typescript
// Easy to mock and test
const mockOnSave = jest.fn();
@@ -240,6 +249,7 @@ expect(mockOnSave).toHaveBeenCalledWith(expectedData);
```
### $emit Testing
```typescript
// Requires event simulation
const wrapper = mount(MyComponent);
@@ -260,6 +270,7 @@ expect(wrapper.emitted('click')).toBeTruthy();
### Example Migration
**Before ($emit):**
```typescript
@Emit("save")
handleSave() {
@@ -268,6 +279,7 @@ handleSave() {
```
**After (Function Props):**
```typescript
@Prop({ required: true }) onSave!: (data: FormData) => void;
@@ -288,6 +300,7 @@ handleSave() {
## Code Generation Templates
### Component Generator Input
```typescript
interface ComponentSpec {
name: string;
@@ -306,6 +319,7 @@ interface ComponentSpec {
```
### Generated Output
```typescript
// Generator should automatically choose function props vs $emit
// based on the nature of the interaction (business logic vs DOM event)

14
doc/cors-disabled-for-universal-images.md

@@ -7,10 +7,12 @@ CORS headers have been **disabled** to support Time Safari's core mission: enabl
## What Changed
### ❌ Removed CORS Headers
- `Cross-Origin-Opener-Policy: same-origin`
- `Cross-Origin-Embedder-Policy: require-corp`
### ✅ Results
- Images from **any domain** now work in development and production
- No proxy configuration needed
- No whitelist of supported image hosts
@@ -19,11 +21,13 @@ CORS headers have been **disabled** to support Time Safari's core mission: enabl
## Technical Tradeoffs
### 🔻 Lost: SharedArrayBuffer Performance
- **Before**: Fast SQLite operations via SharedArrayBuffer
- **After**: Slightly slower IndexedDB fallback mode
- **Impact**: Minimal for typical usage - absurd-sql automatically falls back
### 🔺 Gained: Universal Image Support
- **Before**: Only specific domains worked (TimeSafari, Flickr, Imgur, etc.)
- **After**: Any image URL works immediately
- **Impact**: Massive improvement for user experience
@@ -31,6 +35,7 @@ CORS headers have been **disabled** to support Time Safari's core mission: enabl
## Architecture Impact
### Database Operations
```typescript
// absurd-sql automatically detects SharedArrayBuffer availability
if (typeof SharedArrayBuffer === "undefined") {
@@ -43,6 +48,7 @@ if (typeof SharedArrayBuffer === "undefined") {
```
### Image Loading
```typescript
// All images load directly now
export function transformImageUrlForCors(imageUrl: string): string {
@@ -53,11 +59,13 @@ export function transformImageUrlForCors(imageUrl: string): string {
## Why This Was The Right Choice
### Time Safari's Use Case
- **Community platform** where users share content from anywhere
- **User-generated content** includes images from arbitrary websites
- **Flexibility** is more important than marginal performance gains
### Alternative Would Require
- Pre-configuring proxies for every possible image hosting service
- Constantly updating proxy list as users find new sources
- Poor user experience when images fail to load
@@ -66,11 +74,13 @@ export function transformImageUrlForCors(imageUrl: string): string {
## Performance Comparison
### Database Operations
- **SharedArrayBuffer**: ~2x faster for large operations
- **IndexedDB**: Still very fast for typical Time Safari usage
- **Real Impact**: Negligible for typical user operations
### Image Loading
- **With CORS**: Many images failed to load in development
- **Without CORS**: All images load immediately
- **Real Impact**: Massive improvement in user experience
@@ -87,11 +97,13 @@ export function transformImageUrlForCors(imageUrl: string): string {
## Migration Notes
### For Developers
- No code changes needed
- `transformImageUrlForCors()` still exists but returns original URL
- All existing image references work without modification
### For Users
- Images from any website now work immediately
- No more "image failed to load" issues in development
- Consistent behavior between development and production
@@ -99,12 +111,14 @@ export function transformImageUrlForCors(imageUrl: string): string {
## Future Considerations
### If Performance Becomes Critical
1. **Selective CORS**: Enable only for specific operations
2. **Service Worker**: Handle image proxying at service worker level
3. **Build-time Processing**: Pre-process images during build
4. **User Education**: Guide users toward optimized image hosting
### Monitoring
- Track database operation performance
- Monitor for any user-reported slowness
- Consider re-enabling SharedArrayBuffer if usage patterns change

26
doc/cors-image-loading-solution.md

@ -7,6 +7,7 @@ This document describes the implementation of a comprehensive image loading solu
## Problem Statement ## Problem Statement
When using SharedArrayBuffer (required for absurd-sql), browsers enforce a cross-origin isolated environment with these headers: When using SharedArrayBuffer (required for absurd-sql), browsers enforce a cross-origin isolated environment with these headers:
- `Cross-Origin-Opener-Policy: same-origin` - `Cross-Origin-Opener-Policy: same-origin`
- `Cross-Origin-Embedder-Policy: require-corp` - `Cross-Origin-Embedder-Policy: require-corp`
@ -19,6 +20,7 @@ This isolation prevents loading external resources (including images) unless the
The solution uses a multi-tier approach to handle images from various sources: The solution uses a multi-tier approach to handle images from various sources:
#### Tier 1: Specific Domain Proxies (Development Only) #### Tier 1: Specific Domain Proxies (Development Only)
- **TimeSafari Images**: `/image-proxy/``https://image.timesafari.app/` - **TimeSafari Images**: `/image-proxy/``https://image.timesafari.app/`
- **Flickr Images**: `/flickr-proxy/``https://live.staticflickr.com/` - **Flickr Images**: `/flickr-proxy/``https://live.staticflickr.com/`
- **Imgur Images**: `/imgur-proxy/``https://i.imgur.com/` - **Imgur Images**: `/imgur-proxy/``https://i.imgur.com/`
@ -26,14 +28,17 @@ The solution uses a multi-tier approach to handle images from various sources:
- **Unsplash**: `/unsplash-proxy/``https://images.unsplash.com/` - **Unsplash**: `/unsplash-proxy/``https://images.unsplash.com/`
#### Tier 2: Universal CORS Proxy (Development Only) #### Tier 2: Universal CORS Proxy (Development Only)
- **Any External Domain**: Uses `https://api.allorigins.win/raw?url=` for arbitrary domains - **Any External Domain**: Uses `https://api.allorigins.win/raw?url=` for arbitrary domains
#### Tier 3: Direct Loading (Production) #### Tier 3: Direct Loading (Production)
- **Production Mode**: All images load directly without proxying - **Production Mode**: All images load directly without proxying
### 2. Smart URL Transformation ### 2. Smart URL Transformation
The `transformImageUrlForCors` function automatically: The `transformImageUrlForCors` function automatically:
- Detects the image source domain - Detects the image source domain
- Routes through appropriate proxy in development - Routes through appropriate proxy in development
- Preserves original URLs in production - Preserves original URLs in production
@ -44,6 +49,7 @@ The `transformImageUrlForCors` function automatically:
### Configuration Files ### Configuration Files
#### `vite.config.common.mts` #### `vite.config.common.mts`
```typescript ```typescript
server: { server: {
headers: { headers: {
@ -63,6 +69,7 @@ server: {
``` ```
#### `src/libs/util.ts` #### `src/libs/util.ts`
```typescript ```typescript
export function transformImageUrlForCors(imageUrl: string): string { export function transformImageUrlForCors(imageUrl: string): string {
// Development mode: Transform URLs to use proxies // Development mode: Transform URLs to use proxies
@ -93,21 +100,25 @@ const imageUrl = transformImageUrlForCors(originalImageUrl);
## Benefits ## Benefits
### ✅ SharedArrayBuffer Support ### ✅ SharedArrayBuffer Support
- Maintains cross-origin isolation required for SharedArrayBuffer - Maintains cross-origin isolation required for SharedArrayBuffer
- Enables fast SQLite database operations via absurd-sql - Enables fast SQLite database operations via absurd-sql
- Provides better performance than IndexedDB fallback - Provides better performance than IndexedDB fallback
### ✅ Universal Image Support ### ✅ Universal Image Support
- Handles images from any domain - Handles images from any domain
- No need to pre-configure every possible image source - No need to pre-configure every possible image source
- Graceful fallback for unknown domains - Graceful fallback for unknown domains
### ✅ Development/Production Flexibility ### ✅ Development/Production Flexibility
- Proxy system only active in development - Proxy system only active in development
- Production uses direct URLs for maximum performance - Production uses direct URLs for maximum performance
- No proxy server required in production - No proxy server required in production
### ✅ Automatic Detection ### ✅ Automatic Detection
- Smart URL transformation based on domain patterns - Smart URL transformation based on domain patterns
- Preserves relative URLs and data URLs - Preserves relative URLs and data URLs
- Handles edge cases gracefully - Handles edge cases gracefully
@ -115,6 +126,7 @@ const imageUrl = transformImageUrlForCors(originalImageUrl);
## Testing ## Testing
### Automated Testing ### Automated Testing
Run the test suite to verify URL transformation: Run the test suite to verify URL transformation:
```typescript ```typescript
@ -125,6 +137,7 @@ testCorsImageTransformation();
``` ```
### Visual Testing
Create test image elements to verify loading:
```typescript
@@ -135,6 +148,7 @@ createTestImageElements();
```
### Manual Testing
1. Start development server: `npm run dev`
2. Open browser console to see transformation logs
3. Check Network tab for proxy requests
@@ -143,16 +157,19 @@ createTestImageElements();
## Security Considerations
### Development Environment
- CORS proxies are only used in development
- External proxy services (allorigins.win) are used for testing
- No sensitive data is exposed through proxies
### Production Environment
- All images load directly without proxying
- No dependency on external proxy services
- Original security model maintained
### Privacy
- Image URLs are not logged or stored by proxy services
- Proxy requests are only made during development
- No tracking or analytics in proxy chain
@@ -160,11 +177,13 @@ createTestImageElements();
## Performance Impact
### Development
- Slight latency from proxy requests
- Additional network hops for external domains
- More verbose logging for debugging
### Production
- No performance impact
- Direct image loading as before
- No proxy overhead
@@ -174,17 +193,20 @@ createTestImageElements();
### Common Issues
#### Images Not Loading in Development
1. Check console for proxy errors
2. Verify CORS headers are set
3. Test with different image URLs
4. Check network connectivity to proxy services
#### SharedArrayBuffer Not Available
1. Verify CORS headers are set in server configuration
2. Check that site is served over HTTPS (or localhost)
3. Ensure browser supports SharedArrayBuffer
#### Proxy Service Unavailable
1. Check if allorigins.win is accessible
2. Consider using alternative CORS proxy services
3. Temporarily disable CORS headers for testing
@@ -207,12 +229,14 @@ testCorsImageTransformation();
## Migration Guide
### From Previous Implementation
1. CORS headers are now required for SharedArrayBuffer
2. Image URLs are automatically transformed in development
3. No changes needed to existing image loading code
4. Test thoroughly in both development and production
### Adding New Image Sources
1. Add a specific proxy for frequently used domains
2. Update the `transformImageUrlForCors` function
3. Add CORS headers to the proxy configuration
@@ -221,6 +245,7 @@ testCorsImageTransformation();
## Future Enhancements
### Possible Improvements
1. **Local Proxy Server**: Run dedicated proxy server for development
2. **Caching**: Cache proxy responses for better performance
3. **Fallback Chain**: Multiple proxy services for reliability
@@ -228,6 +253,7 @@ testCorsImageTransformation();
5. **Analytics**: Track image loading success/failure rates
### Alternative Approaches
1. **Service Worker**: Intercept image requests at service worker level
2. **Build-time Processing**: Pre-process images during build
3. **CDN Integration**: Use CDN with proper CORS headers

1
doc/database-migration-guide.md

@@ -294,6 +294,7 @@ const result = await this.$db("SELECT * FROM contacts WHERE did = ?", [accountDi
This provides:
- **Caching**: Automatic caching for performance
- **Error Handling**: Consistent error handling
- **Type Safety**: Enhanced TypeScript integration

187
doc/debug-hook-guide.md

@@ -0,0 +1,187 @@
# TimeSafari Debug Hook Guide
**Complete Guide for Team Members**
**Date**: 2025-01-27
**Author**: Matthew Raymer
**Status**: ✅ **ACTIVE** - Ready for production use
## 🎯 Overview
This guide describes a pre-commit hook that automatically detects debug code and prevents it from reaching protected branches (master, main, production, release, stable). It keeps production code clean while allowing free development on feature branches.
## 🚀 Quick Installation
**From within the TimeSafari repository:**
```bash
./scripts/install-debug-hook.sh
```
Running this script installs, updates, and verifies the hook in your current
repository. **Note**: the hook is not active by default - each developer must run
this script deliberately to enable debug code checking.
## 🔧 Manual Installation
**Copy files manually:**
```bash
cp scripts/git-hooks/pre-commit /path/to/your/repo/.git/hooks/
cp scripts/git-hooks/debug-checker.config /path/to/your/repo/.git/hooks/
chmod +x /path/to/your/repo/.git/hooks/pre-commit
```
## 📋 What Gets Installed
- **`pre-commit`** - Main hook script (executable)
- **`debug-checker.config`** - Configuration file
- **`README.md`** - Documentation and troubleshooting
**Note**: Hooks are stored in `scripts/git-hooks/` and must be deliberately
installed by each developer. They are not automatically active.
## 🎯 How It Works
1. **Deliberate Installation**: Hooks must be explicitly installed by each
developer
2. **Branch Detection**: Only runs on protected branches
3. **File Filtering**: Automatically skips tests, scripts, and documentation
4. **Pattern Matching**: Detects debug code using regex patterns
5. **Commit Prevention**: Blocks commits containing debug code
## 🔒 Installation Philosophy
**Why deliberate installation?**
- **Developer choice**: Each developer decides whether to use the hook
- **No forced behavior**: Hooks don't interfere with existing workflows
- **Local control**: Hooks are installed locally, not globally
- **Easy removal**: Can be uninstalled at any time
- **Team flexibility**: Some developers may prefer different tools
## 🌿 Branch Behavior
- **Protected branches** (master, main, production, release, stable): Hook runs automatically
- **Feature branches**: Hook is skipped, allowing free development with debug code
## 🔍 Debug Patterns Detected
- **Console statements**: `console.log`, `console.debug`, `console.error`
- **Template debug**: `Debug:`, `debug:` in Vue templates
- **Debug constants**: `DEBUG_`, `debug_` variables
- **HTML debug**: `<!-- debug` comments
- **Debug attributes**: `debug="true"` attributes
- **Vue debug**: `v-if="debug"`, `v-show="debug"`
- **Debug TODOs**: `TODO debug`, `FIXME debug`
## 📁 Files Automatically Skipped
- Test files: `*.test.js`, `*.spec.ts`, `*.test.vue`
- Scripts: `scripts/` directory
- Test directories: `test-*` directories
- Documentation: `docs/`, `*.md`, `*.txt`
- Config files: `*.json`, `*.yml`, `*.yaml`
- IDE files: `.cursor/` directory
## ✅ Verification
**After installation, verify it's working:**
```bash
# Check if files exist
ls -la .git/hooks/pre-commit
ls -la .git/hooks/debug-checker.config
# Test the hook manually
.git/hooks/pre-commit
# Test with actual commit
echo "console.log('test')" > test.vue
git add test.vue
git commit -m "test" # Should be blocked
```
## 📊 Example Output
```
❌ Debug code detected in staged files!
Branch: master
Files checked: 1
Errors found: 3
🚨 AccountViewView.vue: Found debug pattern 'console\.'
🚨 AccountViewView.vue: Found debug pattern 'Debug:'
🚨 AccountViewView.vue: Found debug pattern 'DEBUG_'
💡 Please remove debug code before committing to master
```
## ⚙️ Configuration
Edit `.git/hooks/debug-checker.config` to customize:
- **Protected branches**: Add/remove branches as needed
- **Debug patterns**: Customize what gets detected
- **Skip patterns**: Adjust file filtering rules
## 🚨 Emergency Bypass
If you absolutely need to commit debug code to a protected branch:
```bash
git commit --no-verify -m "emergency: debug code needed"
```
⚠️ **Warning**: This bypasses all pre-commit hooks. Use sparingly.
## 🔄 Updates
When the hook is updated in the main repository:
```bash
./scripts/install-debug-hook.sh
```
## 🚨 Troubleshooting
| Issue | Solution |
|-------|----------|
| Hook not running | Check if on protected branch, verify permissions |
| Permission denied | Run `chmod +x .git/hooks/pre-commit` |
| Files not found | Ensure you're copying from TimeSafari repo |
| False positives | Edit `debug-checker.config` to customize patterns |
## 🧪 Testing
A test script is available at `scripts/test-debug-hook.sh` to verify the hook works correctly.
## 💡 Best Practices
1. **Use feature branches** for development with debug code
2. **Use proper logging** instead of console statements (`logger.info`, `logger.debug`)
3. **Test thoroughly** before merging to protected branches
4. **Review commits** to ensure no debug code slips through
5. **Keep hooks updated** across all repositories
## 📚 Additional Resources
- **Hook documentation**: `scripts/git-hooks/README.md`
- **Configuration**: `scripts/git-hooks/debug-checker.config`
- **Test script**: `scripts/test-debug-hook.sh`
- **Installation script**: `scripts/install-debug-hook.sh`
## 🎯 Team Workflow
**Recommended setup:**
1. **Repository setup**: Include hook files in `.githooks/` directory
2. **Team onboarding**: Run installation script in each repo
3. **Updates**: Re-run installation script when hooks are updated
4. **Documentation**: Keep this guide updated
---
**Status**: Active and enforced
**Last Updated**: 2025-01-27
**Maintainer**: Matthew Raymer

6
doc/electron-cleanup-summary.md

@@ -7,18 +7,22 @@ This document summarizes the comprehensive cleanup and improvements made to the
## Key Issues Resolved
### 1. Platform Detection Problems
- **Before**: `PlatformServiceFactory` only supported "capacitor" and "web" platforms
- **After**: Added proper "electron" platform support with dedicated `ElectronPlatformService`
### 2. Build Configuration Confusion
- **Before**: Electron builds used `VITE_PLATFORM=capacitor`, causing confusion
- **After**: Electron builds now properly use `VITE_PLATFORM=electron`
### 3. Missing Platform Service Methods
- **Before**: Platform services lacked proper `isElectron()`, `isCapacitor()`, `isWeb()` methods
- **After**: All platform services implement complete interface with proper detection
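The detection surface described in item 3 looks roughly like this; it is a sketch, and the real `PlatformService` interface has many more members.

```typescript
// Sketch only: the real PlatformService interface is larger than this.
export interface PlatformDetection {
  isElectron(): boolean;
  isCapacitor(): boolean;
  isWeb(): boolean;
}

// Example: branching on the detected platform.
export function platformName(
  service: PlatformDetection,
): "electron" | "capacitor" | "web" {
  if (service.isElectron()) return "electron";
  if (service.isCapacitor()) return "capacitor";
  return "web";
}
```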
### 4. Inconsistent Build Scripts
- **Before**: Mixed platform settings in build scripts
- **After**: Clean, consistent electron-specific build process
@@ -215,11 +219,13 @@ if (capabilities.hasFileDownload) {
## File Structure Changes
### New Files
- `vite.config.electron.mts` - Electron-specific Vite configuration
- `src/main.electron.ts` - Electron main entry point
- `doc/electron-cleanup-summary.md` - This documentation
### Modified Files
- `src/services/PlatformServiceFactory.ts` - Added electron platform support
- `src/services/PlatformService.ts` - Added platform detection methods
- `src/services/platforms/CapacitorPlatformService.ts` - Added missing interface methods

21
doc/electron-console-cleanup.md

@@ -7,18 +7,22 @@ This document summarizes the comprehensive changes made to reduce excessive cons
## Issues Addressed
### 1. Excessive Database Logging (Major Issue - 90% Reduction)
**Problem:** Every database operation was logging detailed parameter information, creating hundreds of lines of console output.
**Solution:** Modified `src/services/platforms/CapacitorPlatformService.ts`:
- Changed `logger.warn` to `logger.debug` for routine SQL operations
- Reduced migration logging verbosity
- Made database integrity checks use debug-level logging
- Kept error and completion messages at appropriate log levels
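For illustration, the kind of change this describes is sketched below with a placeholder call site; the real code paths in `CapacitorPlatformService.ts` differ, and the logger export name is assumed.

```typescript
// Sketch only: a placeholder call site showing the warn -> debug demotion.
import { logger } from "@/utils/logger"; // module per this document; export name assumed

function logRoutineQuery(sql: string, params: unknown[]): void {
  // Previously logged with logger.warn, which flooded the console.
  logger.debug(`[CapacitorPlatformService] dbQuery: ${sql}`, params);
}
```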
### 2. Enhanced Logger Configuration
**Problem:** No platform-specific logging controls, causing noise in Electron.
**Solution:** Updated `src/utils/logger.ts`:
- Added platform detection for Electron vs Web
- Suppressed debug and verbose logs for Electron
- Filtered out routine database operations from database logging
@@ -26,28 +30,35 @@ This document summarizes the comprehensive changes made to reduce excessive cons
- Added intelligent filtering for CapacitorPlatformService messages
### 3. API Configuration Issues (Major Fix)
**Problem:** Electron was trying to use local development endpoints (localhost:3000) from saved user settings, which don't exist in the desktop environment, causing:
- 400 status errors from missing local development servers
- JSON parsing errors (HTML error pages instead of JSON responses)
**Solution:**
- Updated `src/constants/app.ts` to provide Electron-specific API endpoints
- **Critical Fix:** Modified `src/db/databaseUtil.ts` in `retrieveSettingsForActiveAccount()` to force Electron to use production API endpoints regardless of saved user settings
- This ensures Electron never uses localhost development servers that users might have saved
### 4. SharedArrayBuffer Logging Noise
**Problem:** Web-specific SharedArrayBuffer detection was running in Electron, creating unnecessary debug output.
**Solution:** Modified `src/main.web.ts`:
- Made SharedArrayBuffer logging conditional on web platform only
- Converted console.log statements to logger.debug
- Only show in development mode for web platform
- Reduced platform detection noise
### 5. Missing Source Maps Warnings
**Problem:** Electron DevTools was complaining about missing source maps for external dependencies.
**Solution:** Updated `vite.config.electron.mts`:
- Disabled source maps for Electron builds (`sourcemap: false`)
- Added build configuration to suppress external dependency warnings
- Prevents DevTools from looking for non-existent source map files
@@ -87,14 +98,16 @@ This document summarizes the comprehensive changes made to reduce excessive cons
## Impact
### Before Cleanup
- 500+ lines of console output per minute
- Detailed SQL parameter logging for every operation
- API connection errors every few seconds (400 status, JSON parsing errors)
- SharedArrayBuffer warnings on every startup
- DevTools source map warnings
### After Cleanup
- **~95% reduction** in console output
- Only errors and important status messages visible
- **No API connection errors** - Electron uses proper production endpoints
@@ -106,6 +119,7 @@ This document summarizes the comprehensive changes made to reduce excessive cons
## Technical Details
### API Configuration Fix
The most critical fix was in `src/db/databaseUtil.ts` where we added:
```typescript
@@ -122,6 +136,7 @@ if (process.env.VITE_PLATFORM === "electron") {
```
This ensures that even if users have localhost development endpoints saved in their settings, Electron will override them with production endpoints.
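The shape of that override is roughly as follows. This is a sketch with placeholder names; the real constants live in `src/constants/app.ts`, the logic in `retrieveSettingsForActiveAccount()`, and the endpoint value shown is an assumption.

```typescript
// Sketch only: PRODUCTION_API_SERVER is a placeholder constant name, and the
// settings shape is reduced to the single field relevant here.
const PRODUCTION_API_SERVER = "https://api.endorser.ch"; // assumed production endpoint

interface SettingsLike {
  apiServer?: string;
}

function forceProductionEndpointsForElectron(settings: SettingsLike): SettingsLike {
  // Electron must never use saved localhost development endpoints.
  if (process.env.VITE_PLATFORM === "electron") {
    return { ...settings, apiServer: PRODUCTION_API_SERVER };
  }
  return settings;
}
```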
### Logger Enhancement
Enhanced the logger with platform-specific behavior:
```typescript
@@ -135,6 +150,7 @@ if (!isElectron || !message.includes("[CapacitorPlatformService]")) {
```
## Testing
The changes were tested with:
- `npm run lint-fix` - 0 errors, warnings only (pre-existing)
- Electron development environment
- Web platform (unchanged functionality)
@@ -150,6 +166,7 @@ The changes were tested with:
## Backward Compatibility
All changes maintain backward compatibility:
- Web platform logging unchanged
- Capacitor platform logging unchanged
- Error handling preserved

13
doc/error-diagnostics-log.md

@@ -5,6 +5,7 @@ This file tracks console errors observed during development for future investiga
## 2025-07-07 08:56 UTC - ProjectsView.vue Migration Session
### Migration Context
- **Current Work**: Completed ProjectsView.vue Triple Migration Pattern
- **Migration Status**: 21 complete, 4 appropriately incomplete components
- **Recent Changes**:
@@ -15,42 +16,50 @@ This file tracks console errors observed during development for future investiga
### Observed Errors
#### 1. HomeView.vue API Rate Limit Errors
```
GET https://api.endorser.ch/api/report/rateLimits 400 (Bad Request)
Source: endorserServer.ts:1494, HomeView.vue:593, HomeView.vue:742
```
**Analysis**:
- API server returning 400 for rate limit checks
- Occurs during identity initialization and registration status checks
- **Migration Impact**: None - HomeView.vue was migrated and tested earlier
- **Likely Cause**: Server-side authentication or API configuration issue
**Action Items**:
- [ ] Check endorser.ch API documentation for rate limit endpoint changes
- [ ] Verify authentication headers being sent correctly
- [ ] Consider fallback handling for rate limit API failures
#### 2. ProjectViewView.vue Project Not Found Error
```
GET https://api.endorser.ch/api/claim/byHandle/...01JY2Q5D90E8P267ABB963S71D 404 (Not Found)
Source: ProjectViewView.vue:830 loadProject() method
```
**Analysis**:
- Attempting to load project ID: `01JY2Q5D90E8P267ABB963S71D`
- **Migration Impact**: None - error handling working correctly
- **Likely Cause**: User navigated to non-existent project or stale link
**Action Items**:
- [ ] Consider adding better user messaging for missing projects
- [ ] Investigate if project IDs are being generated/stored correctly
- [ ] Add breadcrumb or "return to projects" option on 404s
#### 3. Axios Request Stack Traces
Multiple stack traces showing Vue router navigation and component mounting cycles.
**Analysis**:
- Normal Vue.js lifecycle and routing behavior
- No obvious memory leaks or infinite loops
- **Migration Impact**: None - expected framework behavior
@@ -58,22 +67,26 @@ Multiple stack traces showing Vue router navigation and component mounting cycle
### System Health Indicators
#### ✅ Working Correctly
- Database migrations: `Migration process complete! Summary: 0 applied, 2 skipped`
- Platform service factory initialization: `Creating singleton instance for platform: development`
- SQL worker loading: `Worker loaded, ready to receive messages`
- Database connection: `Opened!`
#### 🔄 For Investigation
- API authentication/authorization with endorser.ch
- Project ID validation and error handling
- Rate limiting strategy
### Migration Validation
- **ProjectsView.vue**: Appropriately incomplete (3 helpers + 1 complex modal)
- **Error Handling**: Migrated components showing proper error handling
- **No Migration-Related Errors**: All errors appear to be infrastructure/data issues
### Next Steps
1. Continue migration slog with next component
2. Monitor these same error patterns in future sessions
3. Address API/server issues in separate debugging session

21
doc/image-hosting-guide.md

@@ -25,6 +25,7 @@
## Why This Happens
In development mode, we enable SharedArrayBuffer for fast SQLite operations, which requires:
- `Cross-Origin-Opener-Policy: same-origin`
- `Cross-Origin-Embedder-Policy: require-corp`
@@ -35,6 +36,7 @@ These headers create a **cross-origin isolated environment** that blocks resourc
### 1. Use Supported Image Hosting Services
**Recommended services that work well:**
- **Imgur**: Free, no registration required, direct links
- **GitHub**: If you have images in repositories
- **Unsplash**: For stock photos
@@ -45,6 +47,7 @@ These headers create a **cross-origin isolated environment** that blocks resourc
If you frequently use images from a specific domain, add a proxy:
#### Step 1: Add Proxy to `vite.config.common.mts`
```typescript
'/yourservice-proxy': {
  target: 'https://yourservice.com',
@@ -63,6 +66,7 @@ If you frequently use images from a specific domain, add a proxy:
```
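Filled out, a Step 1 proxy entry typically looks like the sketch below; `/yourservice-proxy` and `yourservice.com` are placeholders, so adapt the prefix and target to your service.

```typescript
// Sketch only: placeholder proxy name and target domain.
import { defineConfig } from "vite";

export default defineConfig({
  server: {
    proxy: {
      "/yourservice-proxy": {
        target: "https://yourservice.com",
        changeOrigin: true,
        // Strip the local prefix before forwarding to the real host.
        rewrite: (path) => path.replace(/^\/yourservice-proxy/, ""),
      },
    },
  },
});
```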
#### Step 2: Update Transform Function in `src/libs/util.ts`
```typescript
// Transform YourService URLs to use proxy
if (imageUrl.startsWith("https://yourservice.com/")) {
@@ -74,6 +78,7 @@ if (imageUrl.startsWith("https://yourservice.com/")) {
```
### 3. Use Alternative Image Sources
For frequently failing domains, consider:
- Upload images to Imgur or GitHub
- Use a CDN with proper CORS headers
- Host images on your own domain with CORS enabled
@@ -81,11 +86,13 @@ For frequently failing domains, consider:
## Development vs Production
### Development Mode
- Images from supported services work through proxies
- Unsupported images may fail to load
- Console warnings show which images have issues
### Production Mode
- All images load directly without proxies
- No CORS restrictions in production
- Better performance without proxy overhead
@@ -93,6 +100,7 @@ For frequently failing domains, consider:
## Testing Image Sources
### Check if an Image Source Works
```javascript
// Test in browser console:
fetch('https://example.com/image.jpg', { mode: 'cors' })
@@ -101,6 +109,7 @@ fetch('https://example.com/image.jpg', { mode: 'cors' })
```
### Visual Testing
```typescript
import { createTestImageElements } from './libs/test-cors-images';
createTestImageElements(); // Creates visual test panel
@@ -109,30 +118,36 @@ createTestImageElements(); // Creates visual test panel
```
## Common Error Messages
### `ERR_BLOCKED_BY_RESPONSE.NotSameOriginAfterDefaultedToSameOriginByCoep`
**Cause**: Image source doesn't send required CORS headers
**Solution**: Use a supported image hosting service or add a proxy
### `ERR_NETWORK` or `ERR_INTERNET_DISCONNECTED`
**Cause**: Proxy service is unavailable
**Solution**: Check internet connection or use alternative image source
### Images Load in Production but Not Development
**Cause**: Normal behavior - development has stricter CORS requirements
**Solution**: Use supported image sources for development testing
## Best Practices
### For New Projects
1. Use supported image hosting services from the start
2. Upload user images to Imgur or similar service
3. Host critical images on your own domain with CORS enabled
### For Existing Projects
1. Identify frequently used image domains in console warnings
2. Add proxies for the most common domains
3. Gradually migrate to supported image hosting services
### For User-Generated Content
1. Provide upload functionality to supported services
2. Validate image URLs against supported domains
3. Show helpful error messages for unsupported sources
@@ -140,17 +155,20 @@ createTestImageElements(); // Creates visual test panel
## Troubleshooting
### Image Not Loading?
1. Check browser console for error messages
2. Verify the domain is in the supported list
3. Test if the image loads in production mode
4. Consider adding a proxy for that domain
### Proxy Not Working?
1. Check if the target service allows proxying
2. Verify CORS headers are being set correctly
3. Test with a simpler image URL from the same domain
### Performance Issues?
1. Proxies add latency in development only
2. Production uses direct image loading
3. Consider using a local image cache for development
@@ -158,6 +176,7 @@ createTestImageElements(); // Creates visual test panel
## Quick Fixes
### For Immediate Issues
```typescript
// Temporary fallback: disable CORS headers for testing
// In vite.config.common.mts, comment out:
@@ -166,9 +185,11 @@ createTestImageElements(); // Creates visual test panel
// 'Cross-Origin-Embedder-Policy': 'require-corp'
// },
```
**Note**: This disables SharedArrayBuffer performance benefits.
### For Long-term Solution
- Use supported image hosting services
- Add proxies for frequently used domains
- Migrate critical images to your own CORS-enabled CDN

2
doc/logging-configuration.md

@@ -101,6 +101,7 @@ Database logging continues to work regardless of console log level settings. All
### No Logs Appearing
Check your `VITE_LOG_LEVEL` setting:
```bash
echo $VITE_LOG_LEVEL
```
@@ -108,6 +109,7 @@ echo $VITE_LOG_LEVEL
### Too Many Logs
Reduce verbosity by setting a lower log level:
```bash
VITE_LOG_LEVEL=warn
```

21
doc/migration-fence-definition.md

@@ -9,6 +9,7 @@ This document defines the **migration fence** - the boundary between the legacy
## Current Migration Status
### ✅ Completed Components
- **SQLite Database Service**: Fully implemented with absurd-sql
- **Platform Service Layer**: Unified database interface across platforms
- **PlatformServiceMixin**: Centralized database access with caching and utilities
@@ -17,12 +18,14 @@ This document defines the **migration fence** - the boundary between the legacy
- **Data Export/Import**: Backup and restore functionality
### 🔄 Active Migration Components
- **Settings Migration**: Core user settings transferred
- **Account Migration**: Identity and key management
- **Contact Migration**: User contact data (via import interface)
- **DatabaseUtil Migration**: Moving functions to PlatformServiceMixin
### ❌ Legacy Components (Fence Boundary)
- **Dexie Database**: Legacy IndexedDB storage (disabled by default)
- **Dexie-Specific Code**: Direct database access patterns
- **Legacy Migration Paths**: Old data transfer methods
@@ -45,6 +48,7 @@ export const PlatformServiceMixin = {
**Fence Rule**: All database operations must use:
- `this.$db()` for read operations
- `this.$exec()` for write operations
- `this.$settings()` for settings access
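To make the rule concrete, a component inside the fence accesses data roughly as sketched below. The sketch uses the Options API so it is self-contained; the component, query, and column names are illustrative, while the mixin methods are the ones required above.

```typescript
// Sketch only: illustrative component; the mixin method names are the required ones.
import { defineComponent } from "vue";
import { PlatformServiceMixin } from "@/utils/PlatformServiceMixin";

export default defineComponent({
  name: "ContactListSketch",
  mixins: [PlatformServiceMixin],
  methods: {
    async loadContacts(accountDid: string) {
      await this.$settings(); // settings access
      const contacts = await this.$db(
        "SELECT * FROM contacts WHERE did = ?",
        [accountDid],
      ); // read
      await this.$exec(
        "UPDATE contacts SET lastViewed = ? WHERE did = ?",
        [Date.now(), accountDid],
      ); // write (column name is illustrative)
      return contacts;
    },
  },
});
```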
@@ -64,6 +68,7 @@ export class PlatformServiceFactory {
**Fence Rule**: All database operations must use:
- `PlatformService.dbQuery()` for read operations
- `PlatformService.dbExec()` for write operations
- No direct `db.` or `accountsDBPromise` access in application code
@@ -71,6 +76,7 @@ export class PlatformServiceFactory {
### 3. Data Access Patterns
#### ✅ Allowed (Inside Fence)
```typescript
// Use PlatformServiceMixin for all database operations
const contacts = await this.$contacts();
@@ -79,6 +85,7 @@ const result = await this.$db("SELECT * FROM contacts WHERE did = ?", [accountDi
```
#### ❌ Forbidden (Outside Fence)
```typescript
// Direct Dexie access (legacy pattern)
const contacts = await db.contacts.where('did').equals(accountDid).toArray();
@@ -98,6 +105,7 @@ export async function compareDatabases(): Promise<DataComparison> {
```
**Fence Rule**: Migration tools are the exclusive interface between:
- Legacy Dexie database
- New SQLite database
- Data comparison and transfer operations
@@ -107,11 +115,13 @@ export async function compareDatabases(): Promise<DataComparison> {
### 1. Code Development Rules
#### New Feature Development
- **Always** use `PlatformServiceMixin` for database operations
- **Never** import or reference Dexie directly
- **Always** use mixin methods like `this.$settings()`, `this.$contacts()`
#### Legacy Code Maintenance
- **Only** modify Dexie code for migration purposes
- **Always** add migration tests for schema changes
- **Never** add new Dexie-specific features
@@ -119,11 +129,13 @@ export async function compareDatabases(): Promise<DataComparison> {
### 2. Data Integrity Rules
#### Migration Safety
- **Always** create backups before migration
- **Always** verify data integrity after migration
- **Never** delete legacy data until verified
#### Rollback Strategy
- **Always** maintain ability to rollback to Dexie
- **Always** preserve migration logs
- **Never** assume migration is irreversible
@@ -131,6 +143,7 @@ export async function compareDatabases(): Promise<DataComparison> {
### 3. Testing Requirements
#### Migration Testing
```typescript
// Required test pattern for migration
describe('Database Migration', () => {
@@ -144,6 +157,7 @@ describe('Database Migration', () => {
```
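Filled in, such a test might read as follows. This is a sketch: the test runner import, module path, and the shape of the comparison result are assumptions; only `compareDatabases()` is named elsewhere in this document.

```typescript
// Sketch only: runner, module path, and result shape are assumed.
import { describe, it, expect } from "vitest";
import { compareDatabases } from "@/services/migrationService";

describe("Database Migration", () => {
  it("reports matching data after transfer", async () => {
    const comparison = await compareDatabases();

    // Field names below are illustrative; assert on the real DataComparison shape.
    expect(comparison.contacts.missingInSqlite).toHaveLength(0);
    expect(comparison.settings.missingInSqlite).toHaveLength(0);
  });
});
```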
#### Application Testing
```typescript
// Required test pattern for application features
describe('Feature with Database', () => {
@@ -159,6 +173,7 @@ describe('Feature with Database', () => {
```
### 1. Static Analysis
#### ESLint Rules
```json
{
  "rules": {
@@ -178,6 +193,7 @@ describe('Feature with Database', () => {
```
#### TypeScript Rules
```json
{
  "compilerOptions": {
@@ -190,6 +206,7 @@ describe('Feature with Database', () => {
```
### 2. Runtime Checks
#### Development Mode Validation
```typescript
// Development-only fence validation
if (import.meta.env.DEV) {
@@ -198,6 +215,7 @@ if (import.meta.env.DEV) {
```
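One possible form of such a check is sketched below; it is entirely illustrative, and `validateMigrationFence()` is a hypothetical helper rather than the project's actual code.

```typescript
// Sketch only: validateMigrationFence() is a hypothetical helper, not project code.
function validateMigrationFence(): void {
  // Warn if a legacy Dexie database object is still reachable from application code.
  const legacyDb = (globalThis as Record<string, unknown>).db;
  if (legacyDb !== undefined) {
    console.warn(
      "[migration-fence] Legacy Dexie database detected; use PlatformServiceMixin instead.",
    );
  }
}

if (import.meta.env.DEV) {
  validateMigrationFence();
}
```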
#### Production Safety
```typescript
// Production fence enforcement
if (import.meta.env.PROD) {
@@ -209,6 +227,7 @@ if (import.meta.env.PROD) {
```
## Migration Status Checklist
### ✅ Completed
- [x] PlatformServiceMixin implementation
- [x] SQLite database service
- [x] Migration tools
@@ -217,11 +236,13 @@ if (import.meta.env.PROD) {
- [x] ActiveDid migration
### 🔄 In Progress
- [ ] Contact migration
- [ ] DatabaseUtil to PlatformServiceMixin migration
- [ ] File-by-file migration
### ❌ Not Started
- [ ] Legacy Dexie removal
- [ ] Final cleanup and validation

69
doc/migration-progress-tracker.md

@@ -3,6 +3,7 @@
## Per-File Migration Workflow (MANDATORY)
For each file migrated:
1. **First**, migrate to PlatformServiceMixin (replace all databaseUtil usage, etc.).
2. **Immediately after**, standardize notify helper usage (property + created() pattern) and fix any related linter/type errors.
@@ -25,22 +26,26 @@ This document tracks the progress of the 2-day sprint to complete PlatformServic
## ✅ **DAY 1: PlatformServiceMixin Completion (COMPLETE)**
### **Phase 1: Remove Circular Dependency (COMPLETE)**
**Status**: ✅ **COMPLETE**
**Issue**: PlatformServiceMixin imports `memoryLogs` from databaseUtil
**Solution**: Create self-contained memoryLogs implementation
#### **Tasks**
- [x] **Step 1.1**: Remove `memoryLogs` import from PlatformServiceMixin.ts ✅
- [x] **Step 1.2**: Add self-contained `_memoryLogs` array to PlatformServiceMixin ✅
- [x] **Step 1.3**: Add `$appendToMemoryLogs()` method to PlatformServiceMixin ✅
- [x] **Step 1.4**: Update logger.ts to use self-contained memoryLogs ✅
- [x] **Step 1.5**: Test memoryLogs functionality ✅
#### **Files Modified**
- `src/utils/PlatformServiceMixin.ts`
- `src/utils/logger.ts`
#### **Validation**
- [x] No circular dependency errors ✅
- [x] memoryLogs functionality works correctly ✅
- [x] Linting passes ✅
@@ -48,20 +53,24 @@ This document tracks the progress of the 2-day sprint to complete PlatformServic
---
### **Phase 2: Add Missing Utility Functions (COMPLETE)**
**Status**: ✅ **COMPLETE**
**Missing Functions**: `generateInsertStatement`, `generateUpdateStatement`
#### **Tasks**
- [x] **Step 2.1**: Add `_generateInsertStatement()` private method to PlatformServiceMixin ✅
- [x] **Step 2.2**: Add `_generateUpdateStatement()` private method to PlatformServiceMixin ✅
- [x] **Step 2.3**: Add `$generateInsertStatement()` public wrapper method ✅
- [x] **Step 2.4**: Add `$generateUpdateStatement()` public wrapper method ✅
- [x] **Step 2.5**: Test both utility functions ✅
#### **Files Modified**
- `src/utils/PlatformServiceMixin.ts`
#### **Validation**
- [x] Both functions generate correct SQL ✅
- [x] Parameter handling works correctly ✅
- [x] Type safety maintained ✅
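For reference, one plausible shape for the insert-statement helper added in this phase is sketched below; the real private method in `PlatformServiceMixin.ts` may differ.

```typescript
// Sketch only: illustrates what the generated SQL and parameter list look like.
function generateInsertStatement(
  table: string,
  record: Record<string, unknown>,
): { sql: string; params: unknown[] } {
  const columns = Object.keys(record);
  const placeholders = columns.map(() => "?").join(", ");
  return {
    sql: `INSERT INTO ${table} (${columns.join(", ")}) VALUES (${placeholders})`,
    params: Object.values(record),
  };
}

// generateInsertStatement("contacts", { did: "did:example:123", name: "Alice" })
// -> { sql: "INSERT INTO contacts (did, name) VALUES (?, ?)", params: ["did:example:123", "Alice"] }
```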
@@ -69,18 +78,22 @@ This document tracks the progress of the 2-day sprint to complete PlatformServic
---
### **Phase 3: Update Type Definitions (COMPLETE)**
**Status**: ✅ **COMPLETE**
**Goal**: Add new methods to TypeScript interfaces
#### **Tasks**
- [x] **Step 3.1**: Add new methods to `IPlatformServiceMixin` interface ✅
- [x] **Step 3.2**: Add new methods to `ComponentCustomProperties` interface ✅
- [x] **Step 3.3**: Verify TypeScript compilation ✅
#### **Files Modified**
- `src/utils/PlatformServiceMixin.ts` (interface definitions) ✅
#### **Validation**
- [x] TypeScript compilation passes ✅
- [x] All new methods properly typed ✅
- [x] No type errors in existing code ✅
@@ -88,17 +101,20 @@ This document tracks the progress of the 2-day sprint to complete PlatformServic
---
### **Phase 4: Testing & Validation (COMPLETE)**
**Status**: ✅ **COMPLETE**
**Goal**: Ensure PlatformServiceMixin is fully functional
#### **Tasks**
- [x] **Step 4.1**: Create test component to verify all methods ✅
- [x] **Step 4.2**: Run comprehensive linting ✅
- [x] **Step 4.3**: Run TypeScript type checking ✅
- [x] **Step 4.4**: Test caching functionality ✅
- [x] **Step 4.5**: Test database operations ✅
#### **Validation**
- [x] All tests pass ✅
- [x] No linting errors ✅
- [x] No TypeScript errors ✅
@@ -108,10 +124,12 @@ This document tracks the progress of the 2-day sprint to complete PlatformServic
---
### **Phase 5: Utility Files Migration (COMPLETE)**
**Status**: ✅ **COMPLETE**
**Goal**: Remove all remaining databaseUtil imports from utility files
#### **Tasks**
- [x] **Step 5.1**: Migrate `src/services/deepLinks.ts`
  - Replaced `logConsoleAndDb` with `console.error`
  - Removed databaseUtil import
@@ -121,7 +139,8 @@ This document tracks the progress of the 2-day sprint to complete PlatformServic
  - Updated all async calls to use proper async pattern
- [x] **Step 5.3**: Verify no remaining databaseUtil imports ✅
#### **Validation**
- [x] No databaseUtil imports in any TypeScript files ✅
- [x] No databaseUtil imports in any Vue files ✅
- [x] All functions work correctly ✅
@@ -131,13 +150,16 @@ This document tracks the progress of the 2-day sprint to complete PlatformServic
## 🎯 **DAY 2: Migrate All 52 Files (READY TO START)**
### **Migration Strategy**
**Priority Order**:
1. **Views** (25 files) - User-facing components
2. **Components** (15 files) - Reusable UI components
3. **Services** (8 files) - Business logic
4. **Utils** (4 files) - Utility functions
### **Migration Pattern for Each File**
```typescript
// 1. Add PlatformServiceMixin
import { PlatformServiceMixin } from "@/utils/PlatformServiceMixin";
@@ -155,6 +177,7 @@ export default class ComponentName extends Vue {
```
### **Common Replacements**
- `generateInsertStatement` → `this.$generateInsertStatement`
- `generateUpdateStatement` → `this.$generateUpdateStatement`
- `parseJsonField` → `this._parseJsonField`
@@ -168,6 +191,7 @@ export default class ComponentName extends Vue {
## 📋 **File Migration Checklist**
### **Views (25 files) - Priority 1**
**Progress**: 6/25 (24%)
- [ ] QuickActionBvcEndView.vue
@@ -209,6 +233,7 @@ export default class ComponentName extends Vue {
- [ ] UserProfileView.vue
### **Components (15 files) - Priority 2**
**Progress**: 9/15 (60%)
- [x] UserNameDialog.vue ✅ **MIGRATED**
@@ -233,6 +258,7 @@ export default class ComponentName extends Vue {
- [x] IconRenderer.vue ✅ MIGRATED & HUMAN TESTED 2024-12-19 (0 min, no migration needed - already compliant)
### **Services (8 files) - Priority 3**
**Progress**: 2/8 (25%)
- [x] api.ts ✅ MIGRATED 2024-12-19 (0 min, no migration needed - already compliant)
@@ -241,6 +267,7 @@ export default class ComponentName extends Vue {
- [ ] deepLinks.ts
### **Utils (4 files) - Priority 4**
**Progress**: 1/4 (25%)
- [ ] LogCollector.ts
@@ -253,6 +280,7 @@ export default class ComponentName extends Vue {
## 🛠️ **Migration Tools**
### **Migration Helper Script**
```bash
# Track progress
./scripts/migration-helper.sh progress
@@ -277,6 +305,7 @@ export default class ComponentName extends Vue {
```
### **Validation Commands**
```bash
# Check for remaining databaseUtil imports
find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil"
@@ -296,12 +325,14 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
```
## 📊 **Progress Tracking**
### **Day 1 Progress**
- [ ] Phase 1: Circular dependency resolved
- [ ] Phase 2: Utility functions added
- [ ] Phase 3: Type definitions updated
- [ ] Phase 4: Testing completed
### **Day 2 Progress**
- [ ] Views migrated (0/25)
- [ ] Components migrated (0/15)
- [ ] Services migrated (0/8)
@@ -309,6 +340,7 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
- [ ] Validation completed
### **Overall Progress** ### **Overall Progress**
- **Total files to migrate**: 52 - **Total files to migrate**: 52
- **Files migrated**: 3 - **Files migrated**: 3
- **Progress**: 6% - **Progress**: 6%
@ -318,6 +350,7 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
## 🎯 **Success Criteria** ## 🎯 **Success Criteria**
### **Day 1 Success Criteria** ### **Day 1 Success Criteria**
- [ ] PlatformServiceMixin has no circular dependencies - [ ] PlatformServiceMixin has no circular dependencies
- [ ] All utility functions implemented and tested - [ ] All utility functions implemented and tested
- [ ] Type definitions complete and accurate - [ ] Type definitions complete and accurate
@ -325,6 +358,7 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
- [ ] TypeScript compilation passes - [ ] TypeScript compilation passes
### **Day 2 Success Criteria** ### **Day 2 Success Criteria**
- [ ] 0 files importing databaseUtil - [ ] 0 files importing databaseUtil
- [ ] All 52 files migrated to PlatformServiceMixin - [ ] All 52 files migrated to PlatformServiceMixin
- [ ] No runtime errors in migrated components - [ ] No runtime errors in migrated components
@ -332,6 +366,7 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
- [ ] Performance maintained or improved - [ ] Performance maintained or improved
### **Overall Success Criteria** ### **Overall Success Criteria**
- [ ] Complete elimination of databaseUtil dependency - [ ] Complete elimination of databaseUtil dependency
- [ ] PlatformServiceMixin is the single source of truth for database operations - [ ] PlatformServiceMixin is the single source of truth for database operations
- [ ] Migration fence is fully implemented - [ ] Migration fence is fully implemented
@ -354,14 +389,17 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
## 📝 **Notes & Issues** ## 📝 **Notes & Issues**
### **Current Issues** ### **Current Issues**
- None identified yet - None identified yet
### **Decisions Made** ### **Decisions Made**
- PlatformServiceMixin approach chosen over USE_DEXIE_DB constant - PlatformServiceMixin approach chosen over USE_DEXIE_DB constant
- Self-contained utility functions preferred over imports - Self-contained utility functions preferred over imports
- Priority order: Views → Components → Services → Utils - Priority order: Views → Components → Services → Utils
### **Lessons Learned** ### **Lessons Learned**
- To be filled as migration progresses - To be filled as migration progresses
--- ---
@ -369,6 +407,7 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
## 🔄 **Daily Updates** ## 🔄 **Daily Updates**
### **Day 1 Updates** ### **Day 1 Updates**
- [ ] Start time: _____ - [ ] Start time: _____
- [ ] Phase 1 completion: _____ - [ ] Phase 1 completion: _____
- [ ] Phase 2 completion: _____ - [ ] Phase 2 completion: _____
@ -377,6 +416,7 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
- [ ] End time: _____ - [ ] End time: _____
### **Day 2 Updates** ### **Day 2 Updates**
- [ ] Start time: _____ - [ ] Start time: _____
- [ ] Views migration completion: _____ - [ ] Views migration completion: _____
- [ ] Components migration completion: _____ - [ ] Components migration completion: _____
@ -390,16 +430,19 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
## 🆘 **Contingency Plans** ## 🆘 **Contingency Plans**
### **If Day 1 Takes Longer** ### **If Day 1 Takes Longer**
- Focus on core functionality first - Focus on core functionality first
- Defer advanced utility functions to Day 2 - Defer advanced utility functions to Day 2
- Prioritize circular dependency resolution - Prioritize circular dependency resolution
### **If Day 2 Takes Longer** ### **If Day 2 Takes Longer**
- Focus on high-impact views first - Focus on high-impact views first
- Batch similar components together - Batch similar components together
- Use automated scripts for common patterns - Use automated scripts for common patterns
### **If Issues Arise** ### **If Issues Arise**
- Document specific problems in Notes section - Document specific problems in Notes section
- Create targeted fixes - Create targeted fixes
- Maintain backward compatibility during transition - Maintain backward compatibility during transition

1
doc/migration-quick-reference.md

@ -63,6 +63,7 @@ export default class ComponentName extends Vue {
## ✅ **Validation Checklist**
After each file migration:
- [ ] No databaseUtil imports
- [ ] PlatformServiceMixin added
- [ ] Method calls updated
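A quick per-file spot check can back up this checklist, for example (the file path is a placeholder):

```bash
# Verify a single migrated file (placeholder path)
grep -n "databaseUtil" src/views/ExampleView.vue && echo "FAIL: still references databaseUtil"
grep -q "PlatformServiceMixin" src/views/ExampleView.vue && echo "OK: mixin present"
```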

31
doc/migration-readiness-summary.md

@ -11,11 +11,14 @@
## 🎯 **Migration Overview**
### **Goal**
Complete the TimeSafari database migration from Dexie to SQLite by:
1. **Day 1**: Finish PlatformServiceMixin implementation (4-6 hours)
2. **Day 2**: Migrate all 52 files to PlatformServiceMixin (6-8 hours)
### **Current Status**
- ✅ **PlatformServiceMixin**: 95% complete (1,301 lines)
- ✅ **Migration Tools**: Ready and tested
- ✅ **Documentation**: Complete and cross-machine accessible
@ -27,22 +30,30 @@ Complete the TimeSafari database migration from Dexie to SQLite by:
## 📊 **File Breakdown**
### **Views (42 files) - Priority 1**
User-facing components that need immediate attention:
- 25 files from original list
- 17 additional files identified by migration helper
### **Components (9 files) - Priority 2**
Reusable UI components:
- FeedFilters.vue, GiftedDialog.vue, GiftedPrompts.vue
- ImageMethodDialog.vue, OfferDialog.vue, OnboardingDialog.vue
- PhotoDialog.vue, PushNotificationPermission.vue, UserNameDialog.vue
### **Services (1 file) - Priority 3**
Business logic:
- deepLinks.ts
### **Utils (3 files) - Priority 4**
Utility functions:
- util.ts, test/index.ts, PlatformServiceMixin.ts (circular dependency fix)
---
@ -50,17 +61,21 @@ Utility functions:
## 🛠️ **Available Tools**
### **Migration Helper Script**
```bash
./scripts/migration-helper.sh [command]
```
**Commands**: progress, files, patterns, template, validate, next, all
### **Progress Tracking**
- **Main Tracker**: `doc/migration-progress-tracker.md`
- **Quick Reference**: `doc/migration-quick-reference.md`
- **Completion Plan**: `doc/platformservicemixin-completion-plan.md`
### **Validation Commands**
```bash
# Check progress
./scripts/migration-helper.sh progress
@ -77,6 +92,7 @@ find src -name "*.vue" -o -name "*.ts" | xargs grep -l "import.*databaseUtil" |
## 🔄 **Migration Pattern**
### **Standard Template**
```typescript
// 1. Add import
import { PlatformServiceMixin } from "@/utils/PlatformServiceMixin";
@ -94,6 +110,7 @@ export default class ComponentName extends Vue {
```
### **Common Replacements**
| Old | New |
|-----|-----|
| `generateInsertStatement` | `this.$generateInsertStatement` |
@ -109,19 +126,23 @@ export default class ComponentName extends Vue {
## 🎯 **Day 1 Plan: PlatformServiceMixin Completion**
### **Phase 1: Remove Circular Dependency (30 min)**
- Remove `memoryLogs` import from PlatformServiceMixin
- Add self-contained memoryLogs implementation
- Update logger.ts
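A minimal sketch of what the self-contained replacement could look like (the buffer cap and method shape are assumptions; the real code lives in the mixin's methods block):

```typescript
// PlatformServiceMixin.ts (sketch): module-level buffer replaces the
// `memoryLogs` import from databaseUtil
const memoryLogs: string[] = [];

// ...inside the mixin's methods block:
$appendToMemoryLogs(message: string): void {
  memoryLogs.push(`${new Date().toISOString()} ${message}`);
  if (memoryLogs.length > 1000) {
    memoryLogs.shift(); // assumption: cap retained entries at 1,000
  }
},
```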
### **Phase 2: Add Missing Functions (1 hour)**
- Add `generateInsertStatement` and `generateUpdateStatement`
- Test both utility functions
### **Phase 3: Update Types (30 min)**
- Add new methods to TypeScript interfaces
- Verify compilation
### **Phase 4: Testing (1 hour)**
- Comprehensive testing and validation
- Ensure no circular dependencies
@ -130,17 +151,20 @@ export default class ComponentName extends Vue {
## 🎯 **Day 2 Plan: File Migration**
### **Strategy**
1. **Views First** (42 files) - High impact, user-facing
2. **Components** (9 files) - Reusable UI elements
3. **Services** (1 file) - Business logic
4. **Utils** (3 files) - Utility functions
### **Batch Processing**
- Process similar files together
- Use automated scripts for common patterns
- Validate after each batch
### **Success Criteria**
- 0 files importing databaseUtil
- All tests passing
- No runtime errors
@ -151,12 +175,14 @@ export default class ComponentName extends Vue {
## 🚀 **Expected Benefits**
### **Immediate Benefits**
- **80% reduction** in database boilerplate code
- **Eliminated circular dependencies**
- **Centralized caching** for performance
- **Type-safe** database operations
### **Long-term Benefits**
- **Simplified testing** with mockable mixin
- **Consistent error handling** across components
- **Ready for SQLite-only mode**
@ -167,18 +193,21 @@ export default class ComponentName extends Vue {
## 📋 **Pre-Migration Checklist**
### **Environment Ready**
- [x] Migration helper script tested and working
- [x] Progress tracking system operational
- [x] Documentation complete and accessible
- [x] Validation commands working
### **Tools Available**
- [x] Automated progress tracking
- [x] Migration pattern templates
- [x] Validation scripts
- [x] Cross-machine documentation
### **Knowledge Base**
- [x] Common replacement patterns documented
- [x] Migration templates ready
- [x] Troubleshooting guides available
@ -191,12 +220,14 @@ export default class ComponentName extends Vue {
**All systems are ready for the 2-day migration sprint.**
### **Next Steps**
1. **Start Day 1**: Complete PlatformServiceMixin
2. **Use tracking tools**: Monitor progress with helper script
3. **Follow documentation**: Use provided templates and patterns
4. **Validate frequently**: Run checks after each phase
### **Success Metrics**
- **Day 1**: PlatformServiceMixin 100% complete, no circular dependencies
- **Day 2**: 0 files importing databaseUtil, all tests passing
- **Overall**: Ready for Phase 3 cleanup and optimization

31
doc/migration-roadmap-next-steps.md

@ -7,6 +7,7 @@ This document outlines the immediate next steps for completing the TimeSafari da
## Current Status Summary
### ✅ **Completed Achievements**
1. **Circular Dependencies Resolved** - No active circular dependencies blocking development
2. **PlatformServiceMixin Implemented** - Core functionality with caching and utilities
3. **Migration Tools Ready** - Data comparison and transfer utilities functional
@ -14,6 +15,7 @@ This document outlines the immediate next steps for completing the TimeSafari da
5. **Documentation Updated** - All docs reflect current PlatformServiceMixin approach
### 🔄 **Current Phase: Phase 2 - Active Migration**
- **DatabaseUtil Migration**: 52 files still importing databaseUtil
- **Contact Migration**: Framework ready, implementation in progress
- **File-by-File Migration**: Ready to begin systematic migration
@ -23,6 +25,7 @@ This document outlines the immediate next steps for completing the TimeSafari da
### 🔴 **Priority 1: Complete PlatformServiceMixin Independence**
#### **Step 1.1: Remove memoryLogs Dependency**
```typescript
// Current: PlatformServiceMixin imports from databaseUtil
import { memoryLogs } from "@/db/databaseUtil";
@ -32,12 +35,15 @@ const memoryLogs: string[] = [];
```
**Files to modify**:
- `src/utils/PlatformServiceMixin.ts` - Remove import, add self-contained implementation
**Estimated time**: 30 minutes
#### **Step 1.2: Add Missing Utility Methods**
Add these methods to PlatformServiceMixin:
- `$parseJson()` - Self-contained JSON parsing
- `$generateInsertStatement()` - SQL generation
- `$generateUpdateStatement()` - SQL generation
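As a sketch of the first of these, a self-contained `$parseJson()` could look roughly like this (the fallback-on-error behavior is an assumption):

```typescript
// Sketch for the mixin's methods block: parse JSON safely without databaseUtil
$parseJson<T>(value: string | null | undefined, defaultValue: T): T {
  if (!value) return defaultValue;
  try {
    return JSON.parse(value) as T;
  } catch {
    return defaultValue; // assumption: fall back instead of throwing
  }
},
```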
@ -48,6 +54,7 @@ Add these methods to PlatformServiceMixin:
### 🟡 **Priority 2: Start File-by-File Migration**
#### **Step 2.1: Migrate Critical Files First**
Based on the migration plan, start with these high-priority files:
1. **`src/App.vue`** - Main application (highest impact)
@ -57,6 +64,7 @@ Based on the migration plan, start with these high-priority files:
5. **`src/services/deepLinks.ts`** - Service layer
**Migration pattern for each file**:
```typescript
// 1. Remove databaseUtil import
// Remove: import * as databaseUtil from "../db/databaseUtil";
@ -82,7 +90,9 @@ Based on the migration plan, start with these high-priority files:
### 🟡 **Priority 3: Systematic File Migration**
#### **Step 3.1: Migrate High-Usage Components (15 files)**
Target components with databaseUtil imports:
- `PhotoDialog.vue`
- `FeedFilters.vue`
- `UserNameDialog.vue`
@ -97,7 +107,9 @@ Target components with databaseUtil imports:
**Estimated time**: 15-30 hours
#### **Step 3.2: Migrate High-Usage Views (20 files)**
Target views with databaseUtil imports:
- `IdentitySwitcherView.vue`
- `ContactEditView.vue`
- `ContactGiftingView.vue`
@ -113,6 +125,7 @@ Target views with databaseUtil imports:
**Estimated time**: 20-40 hours
#### **Step 3.3: Migrate Remaining Files (27 files)**
Complete migration of all remaining files with databaseUtil imports.
**Estimated time**: 27-54 hours
@ -120,6 +133,7 @@ Complete migration of all remaining files with databaseUtil imports.
### 🟢 **Priority 4: Contact Migration Completion**
#### **Step 4.1: Complete Contact Migration Framework**
- Implement contact import/export functionality
- Add contact validation and error handling
- Test contact migration with real data
@ -127,6 +141,7 @@ Complete migration of all remaining files with databaseUtil imports.
**Estimated time**: 4-8 hours
#### **Step 4.2: User Testing and Validation**
- Test migration with various data scenarios
- Validate data integrity after migration
- Performance testing with large datasets
@ -138,7 +153,9 @@ Complete migration of all remaining files with databaseUtil imports.
### 🔵 **Priority 5: Cleanup and Optimization**
#### **Step 5.1: Remove Unused databaseUtil Functions**
After all files are migrated:
- Remove unused functions from databaseUtil.ts
- Update TypeScript interfaces
- Clean up legacy code
@ -146,6 +163,7 @@ After all files are migrated:
**Estimated time**: 4-8 hours
#### **Step 5.2: Performance Optimization**
- Optimize PlatformServiceMixin caching
- Add performance monitoring
- Implement database query optimization
@ -153,6 +171,7 @@ After all files are migrated:
**Estimated time**: 8-16 hours
#### **Step 5.3: Legacy Dexie Removal**
- Remove Dexie dependencies
- Clean up migration tools
- Update build configurations
@ -162,6 +181,7 @@ After all files are migrated:
## Migration Commands and Tools
### **Automated Migration Script**
Create a script to help with bulk migrations:
```bash
@ -193,6 +213,7 @@ echo "Please review and test the changes"
```
### **Migration Testing Commands**
```bash
# Test individual file migration
npm run test -- --grep "ComponentName"
@ -213,18 +234,21 @@ npx tsc --noEmit
## Risk Mitigation
### **Incremental Migration Strategy**
1. **One file at a time** - Minimize risk of breaking changes
2. **Comprehensive testing** - Test each migration thoroughly
3. **Rollback capability** - Keep databaseUtil.ts until migration complete
4. **Documentation updates** - Update docs as methods are migrated
### **Testing Strategy**
1. **Unit tests** - Test individual component functionality
2. **Integration tests** - Test database operations
3. **End-to-end tests** - Test complete user workflows
4. **Performance tests** - Ensure no performance regression
### **Rollback Plan**
1. **Git branches** - Each migration in separate branch
2. **Backup files** - Keep original files until migration verified
3. **Feature flags** - Ability to switch back to databaseUtil if needed
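One way to put the branch-per-migration idea into practice (branch and file names are placeholders; the lint and type-check commands are the same ones used elsewhere in this plan):

```bash
# Sketch: one branch per migrated file, validated before commit
git checkout -b migrate/contacts-view
# ...migrate src/views/ContactsView.vue to PlatformServiceMixin...
npm run lint
npx tsc --noEmit
git commit -am "Migrate ContactsView.vue to PlatformServiceMixin"
```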
@ -233,18 +257,21 @@ npx tsc --noEmit
## Success Metrics
### **Short-Term (This Week)**
- [ ] PlatformServiceMixin completely independent
- [ ] 5 critical files migrated
- [ ] No new circular dependencies
- [ ] All tests passing
### **Medium-Term (Next 2 Weeks)**
- [ ] 35+ files migrated (70% completion)
- [ ] Contact migration framework complete
- [ ] Performance maintained or improved
- [ ] User testing completed
### **Long-Term (Next Month)**
- [ ] All 52 files migrated (100% completion)
- [ ] databaseUtil.ts removed or minimal
- [ ] Legacy Dexie code removed
@ -253,12 +280,14 @@ npx tsc --noEmit
## Resource Requirements
### **Development Time**
- **Immediate (This Week)**: 8-12 hours
- **Medium-Term (Next 2 Weeks)**: 35-70 hours
- **Long-Term (Next Month)**: 16-32 hours
- **Total Estimated**: 59-114 hours
### **Testing Time**
- **Unit Testing**: 20-30 hours
- **Integration Testing**: 10-15 hours
- **User Testing**: 8-12 hours
@ -266,6 +295,7 @@ npx tsc --noEmit
- **Total Testing**: 43-65 hours
### **Total Project Time**
- **Development**: 59-114 hours
- **Testing**: 43-65 hours
- **Documentation**: 5-10 hours
@ -274,6 +304,7 @@ npx tsc --noEmit
## Conclusion
The migration is well-positioned for completion with:
- ✅ **No blocking circular dependencies**
- ✅ **PlatformServiceMixin mostly complete**
- ✅ **Clear migration path defined**

29
doc/migration-to-wa-sqlite.md

@ -29,12 +29,15 @@ This document outlines the migration process from Dexie.js to absurd-sql for the
## Migration Architecture
### Migration Fence
The migration fence is now defined by the **PlatformServiceMixin** in `src/utils/PlatformServiceMixin.ts`:
- **PlatformServiceMixin**: Centralized database access with caching and utilities
- **Migration Tools**: Exclusive interface between legacy and new databases
- **Service Layer**: All database operations go through PlatformService
### Migration Order
The migration follows a specific order to maintain data integrity:
1. **Accounts** (foundational - contains DIDs)
@ -45,9 +48,11 @@ The migration follows a specific order to maintain data integrity:
## ActiveDid Migration ⭐ **NEW FEATURE**
### Problem Solved
Previously, the `activeDid` setting was not migrated from Dexie to SQLite, causing users to lose their active identity after migration.
### Solution Implemented
The migration now includes a dedicated step for migrating the `activeDid`:
1. **Detection**: Identifies the `activeDid` from Dexie master settings
@ -58,6 +63,7 @@ The migration now includes a dedicated step for migrating the `activeDid`:
### Implementation Details
#### New Function: `migrateActiveDid()`
```typescript
export async function migrateActiveDid(): Promise<MigrationResult> {
// 1. Get Dexie settings to find the activeDid
@ -76,13 +82,17 @@ export async function migrateActiveDid(): Promise<MigrationResult> {
```
#### Enhanced `migrateSettings()` Function
The settings migration now includes activeDid handling:
- Extracts `activeDid` from Dexie master settings
- Validates account existence in SQLite
- Updates SQLite master settings with the `activeDid`
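Roughly, that flow looks like the sketch below; the helpers passed in are hypothetical stand-ins for the real migration-service calls, not the actual API:

```typescript
import { logger } from "../utils/logger";

// Sketch only — the injected helpers are placeholders for the real service calls
async function migrateActiveDidSetting(deps: {
  getDexieMasterSettings: () => Promise<{ activeDid?: string } | undefined>;
  sqliteAccountExists: (did: string) => Promise<boolean>;
  updateSqliteMasterSettings: (s: { activeDid: string }) => Promise<void>;
}): Promise<void> {
  const dexieSettings = await deps.getDexieMasterSettings();
  const activeDid = dexieSettings?.activeDid;
  if (!activeDid) return; // nothing to migrate

  // Only set an activeDid that actually exists as a SQLite account
  if (!(await deps.sqliteAccountExists(activeDid))) {
    logger.warn("[Migration] activeDid not found in SQLite accounts; skipping");
    return;
  }

  await deps.updateSqliteMasterSettings({ activeDid });
}
```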
#### Updated `migrateAll()` Function
The complete migration now includes a dedicated step for activeDid:
```typescript
// Step 3: Migrate ActiveDid (depends on accounts and settings)
logger.info("[MigrationService] Step 3: Migrating activeDid...");
@ -90,6 +100,7 @@ const activeDidResult = await migrateActiveDid();
```
### Benefits
- ✅ **User Identity Preservation**: Users maintain their active identity
- ✅ **Seamless Experience**: No need to manually select identity after migration
- ✅ **Data Consistency**: Ensures all identity-related settings are preserved
@ -98,17 +109,20 @@ const activeDidResult = await migrateActiveDid();
## Migration Process
### Phase 1: Preparation ✅
- [x] PlatformServiceMixin implementation
- [x] Implement data comparison tools
- [x] Create migration service structure
### Phase 2: Core Migration ✅
- [x] Account migration with `importFromMnemonic`
- [x] Settings migration (excluding activeDid)
- [x] **ActiveDid migration** ⭐ **COMPLETED**
- [x] Contact migration framework
### Phase 3: Validation and Cleanup 🔄
- [ ] Comprehensive data validation
- [ ] Performance testing
- [ ] User acceptance testing
@ -117,6 +131,7 @@ const activeDidResult = await migrateActiveDid();
## Usage
### Manual Migration
```typescript
import { migrateAll, migrateActiveDid } from '../services/indexedDBMigrationService';
@ -128,6 +143,7 @@ const activeDidResult = await migrateActiveDid();
```
### Migration Verification
```typescript
import { compareDatabases } from '../services/indexedDBMigrationService';
@ -136,7 +152,9 @@ console.log('Migration differences:', comparison.differences);
```
### PlatformServiceMixin Integration
After migration, use the mixin for all database operations:
```typescript
// Use mixin methods for database access
const contacts = await this.$contacts();
@ -147,11 +165,13 @@ const result = await this.$db("SELECT * FROM contacts WHERE did = ?", [accountDi
## Error Handling
### ActiveDid Migration Errors
- **Missing Account**: If the `activeDid` from Dexie doesn't exist in SQLite accounts
- **Database Errors**: Connection or query failures
- **Settings Update Failures**: Issues updating SQLite master settings
### Recovery Strategies
1. **Automatic Recovery**: Migration continues even if activeDid migration fails
2. **Manual Recovery**: Users can manually select their identity after migration
3. **Fallback**: System creates new identity if none exists
@ -159,11 +179,13 @@ const result = await this.$db("SELECT * FROM contacts WHERE did = ?", [accountDi
## Security Considerations
### Data Protection
- All sensitive data (mnemonics, private keys) are encrypted
- Migration preserves encryption standards
- No plaintext data exposure during migration
### Identity Verification
- ActiveDid migration validates account existence
- Prevents setting non-existent identities as active
- Maintains cryptographic integrity
@ -171,6 +193,7 @@ const result = await this.$db("SELECT * FROM contacts WHERE did = ?", [accountDi
## Testing
### Migration Testing
```bash
# Run migration
npm run migrate
@ -180,6 +203,7 @@ npm run test:migration
```
### ActiveDid Testing
```typescript
// Test activeDid migration specifically
const result = await migrateActiveDid();
@ -188,6 +212,7 @@ expect(result.warnings).toContain('Successfully migrated activeDid');
```
### PlatformServiceMixin Testing
```typescript
// Test mixin integration
describe('PlatformServiceMixin', () => {
@ -224,6 +249,7 @@ describe('PlatformServiceMixin', () => {
- Verify caching and error handling work correctly
### Debugging
```typescript
// Debug migration process
import { logger } from '../utils/logger';
@ -245,6 +271,7 @@ logger.debug('[Migration] Migration completed:', result);
## Migration Status Checklist
### ✅ Completed
- [x] PlatformServiceMixin implementation
- [x] SQLite database service
- [x] Migration tools
@ -253,11 +280,13 @@ logger.debug('[Migration] Migration completed:', result);
- [x] ActiveDid migration
### 🔄 In Progress
- [ ] Contact migration
- [ ] DatabaseUtil to PlatformServiceMixin migration
- [ ] File-by-file migration
### ❌ Not Started
- [ ] Legacy Dexie removal
- [ ] Final cleanup and validation

29
doc/platformservicemixin-completion-plan.md

@ -7,6 +7,7 @@ This document outlines the complete plan to finish PlatformServiceMixin implemen
## Current Status
### ✅ **PlatformServiceMixin - 95% Complete**
- **Core functionality**: ✅ Implemented
- **Caching system**: ✅ Implemented
- **Database methods**: ✅ Implemented
@ -14,6 +15,7 @@ This document outlines the complete plan to finish PlatformServiceMixin implemen
- **Type definitions**: ✅ Implemented
### ⚠️ **Remaining Issues**
1. **Single circular dependency**: `memoryLogs` import from databaseUtil
2. **Missing utility functions**: `generateInsertStatement`, `generateUpdateStatement`
3. **52 files** still importing databaseUtil
@ -25,6 +27,7 @@ This document outlines the complete plan to finish PlatformServiceMixin implemen
### **Phase 1: Remove Circular Dependency (30 minutes)**
#### **Step 1.1: Create Self-Contained memoryLogs**
```typescript
// In PlatformServiceMixin.ts - Replace line 50:
// Remove: import { memoryLogs } from "@/db/databaseUtil";
@ -48,6 +51,7 @@ $appendToMemoryLogs(message: string): void {
```
#### **Step 1.2: Update logger.ts**
```typescript
// In logger.ts - Replace memoryLogs usage:
// Remove: import { memoryLogs } from "@/db/databaseUtil";
@ -70,6 +74,7 @@ export function getMemoryLogs(): string[] {
### **Phase 2: Add Missing Utility Functions (1 hour)**
#### **Step 2.1: Add generateInsertStatement to PlatformServiceMixin**
```typescript
// Add to PlatformServiceMixin methods:
_generateInsertStatement(
@ -95,6 +100,7 @@ _generateInsertStatement(
```
#### **Step 2.2: Add generateUpdateStatement to PlatformServiceMixin**
```typescript
// Add to PlatformServiceMixin methods:
_generateUpdateStatement(
@ -129,6 +135,7 @@ _generateUpdateStatement(
```
#### **Step 2.3: Add Public Wrapper Methods**
```typescript
// Add to PlatformServiceMixin methods:
$generateInsertStatement(
@ -151,6 +158,7 @@ $generateUpdateStatement(
### **Phase 3: Update Type Definitions (30 minutes)**
#### **Step 3.1: Update IPlatformServiceMixin Interface**
```typescript
// Add to IPlatformServiceMixin interface:
$generateInsertStatement(
@ -167,6 +175,7 @@ $appendToMemoryLogs(message: string): void;
```
#### **Step 3.2: Update ComponentCustomProperties**
```typescript
// Add to ComponentCustomProperties interface:
$generateInsertStatement(
@ -185,12 +194,14 @@ $appendToMemoryLogs(message: string): void;
### **Phase 4: Test PlatformServiceMixin (1 hour)**
#### **Step 4.1: Create Test Component**
```typescript
// Create test file: src/test/PlatformServiceMixin.test.ts
// Test all methods including new utility functions
```
#### **Step 4.2: Run Linting and Type Checking**
```bash
npm run lint
npx tsc --noEmit
@ -203,6 +214,7 @@ npx tsc --noEmit
### **Migration Strategy**
#### **Priority Order:**
1. **Views** (25 files) - User-facing components
2. **Components** (15 files) - Reusable UI components
3. **Services** (8 files) - Business logic
@ -211,6 +223,7 @@ npx tsc --noEmit
#### **Migration Pattern for Each File:**
**Step 1: Add PlatformServiceMixin**
```typescript
// Add to component imports:
import { PlatformServiceMixin } from "@/utils/PlatformServiceMixin";
@ -223,6 +236,7 @@ export default class ComponentName extends Vue {
```
**Step 2: Replace databaseUtil Imports**
```typescript
// Remove:
import {
@ -244,6 +258,7 @@ import {
```
**Step 3: Update Method Calls**
```typescript
// Before:
const { sql, params } = generateInsertStatement(contact, 'contacts');
@ -255,6 +270,7 @@ const { sql, params } = this.$generateInsertStatement(contact, 'contacts');
### **File Migration Checklist**
#### **Views (25 files) - Priority 1**
- [ ] QuickActionBvcEndView.vue
- [ ] ProjectsView.vue
- [ ] ClaimReportCertificateView.vue
@ -278,6 +294,7 @@ const { sql, params } = this.$generateInsertStatement(contact, 'contacts');
- [ ] [5 more view files]
#### **Components (15 files) - Priority 2**
- [ ] ActivityListItem.vue
- [ ] AmountInput.vue
- [ ] ChoiceButtonDialog.vue
@ -295,18 +312,21 @@ const { sql, params } = this.$generateInsertStatement(contact, 'contacts');
- [ ] IconRenderer.vue
#### **Services (8 files) - Priority 3**
- [ ] api.ts
- [ ] endorserServer.ts
- [ ] partnerServer.ts
- [ ] [5 more service files]
#### **Utils (4 files) - Priority 4**
- [ ] LogCollector.ts
- [ ] [3 more util files]
### **Migration Tools**
#### **Automated Script for Common Patterns**
```bash
#!/bin/bash
# migration-helper.sh
@ -326,6 +346,7 @@ echo "logConsoleAndDb → this.\$logAndConsole"
```
#### **Validation Script**
```bash
#!/bin/bash
# validate-migration.sh
@ -350,6 +371,7 @@ echo "Migration validation complete!"
## 🎯 **Success Criteria**
### **Day 1 Success Criteria:**
- [ ] PlatformServiceMixin has no circular dependencies
- [ ] All utility functions implemented and tested
- [ ] Type definitions complete and accurate
@ -357,6 +379,7 @@ echo "Migration validation complete!"
- [ ] TypeScript compilation passes
### **Day 2 Success Criteria:**
- [ ] 0 files importing databaseUtil
- [ ] All 52 files migrated to PlatformServiceMixin
- [ ] No runtime errors in migrated components
@ -364,6 +387,7 @@ echo "Migration validation complete!"
- [ ] Performance maintained or improved
### **Overall Success Criteria:**
- [ ] Complete elimination of databaseUtil dependency
- [ ] PlatformServiceMixin is the single source of truth for database operations
- [ ] Migration fence is fully implemented
@ -386,12 +410,14 @@ echo "Migration validation complete!"
## 📋 **Daily Progress Tracking**
### **Day 1 Progress:**
- [ ] Phase 1: Circular dependency resolved
- [ ] Phase 2: Utility functions added
- [ ] Phase 3: Type definitions updated
- [ ] Phase 4: Testing completed
### **Day 2 Progress:**
- [ ] Views migrated (0/25)
- [ ] Components migrated (0/15)
- [ ] Services migrated (0/8)
@ -403,16 +429,19 @@ echo "Migration validation complete!"
## 🆘 **Contingency Plans**
### **If Day 1 Takes Longer:**
- Focus on core functionality first
- Defer advanced utility functions to Day 2
- Prioritize circular dependency resolution
### **If Day 2 Takes Longer:**
- Focus on high-impact views first
- Batch similar components together
- Use automated scripts for common patterns
### **If Issues Arise:**
- Document specific problems
- Create targeted fixes
- Maintain backward compatibility during transition

28
doc/qr-code-implementation-guide.md

@ -7,6 +7,7 @@ This document describes the QR code scanning and generation implementation in th
## Architecture
### Directory Structure
```
src/
├── services/
@ -74,6 +75,7 @@ interface QRScannerOptions {
### Platform-Specific Implementations
#### Mobile (Capacitor)
- Uses `@capacitor-mlkit/barcode-scanning`
- Native camera access through platform APIs
- Optimized for mobile performance
@ -82,6 +84,7 @@ interface QRScannerOptions {
- Back camera preferred for scanning
Configuration:
```typescript
// capacitor.config.ts
const config: CapacitorConfig = {
@ -105,6 +108,7 @@ const config: CapacitorConfig = {
```
#### Web
- Uses browser's MediaDevices API
- Vue.js components for UI
- EventEmitter for stream management
@ -116,6 +120,7 @@ const config: CapacitorConfig = {
### View Components
#### ContactQRScanView
- Dedicated view for scanning QR codes
- Full-screen camera interface
- Simple UI focused on scanning
@ -123,6 +128,7 @@ const config: CapacitorConfig = {
- Streamlined scanning experience
#### ContactQRScanShowView
- Combined view for QR code display and scanning
- Shows user's own QR code
- Handles user registration status
@ -160,6 +166,7 @@ const config: CapacitorConfig = {
## Build Configuration
### Common Vite Configuration
```typescript
// vite.config.common.mts
export async function createBuildConfig(mode: string) {
@ -183,6 +190,7 @@ export async function createBuildConfig(mode: string) {
```
### Platform-Specific Builds
```json
{
"scripts": {
@ -196,6 +204,7 @@ export async function createBuildConfig(mode: string) {
## Error Handling
### Common Error Scenarios
1. No camera found
2. Permission denied
3. Camera in use by another application
@ -207,6 +216,7 @@ export async function createBuildConfig(mode: string) {
9. Network connectivity issues
### Error Response
- User-friendly error messages
- Troubleshooting tips
- Clear instructions for resolution
@ -215,6 +225,7 @@ export async function createBuildConfig(mode: string) {
## Security Considerations
### QR Code Security
- Encryption of contact data
- Timestamp validation
- Version checking
@ -222,6 +233,7 @@ export async function createBuildConfig(mode: string) {
- Rate limiting for scans
### Data Protection
- Secure transmission of contact data
- Validation of QR code authenticity
- Prevention of duplicate scans
@ -231,6 +243,7 @@ export async function createBuildConfig(mode: string) {
## Best Practices
### Camera Access
1. Always check for camera availability
2. Request permissions explicitly
3. Handle all error conditions
@ -238,6 +251,7 @@ export async function createBuildConfig(mode: string) {
5. Implement proper cleanup
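A minimal web-platform illustration of these points, using the browser MediaDevices API (a generic sketch, not the app's scanner service implementation):

```typescript
// Generic sketch: check availability, request permission, and clean up
async function startScanPreview(video: HTMLVideoElement): Promise<MediaStream | null> {
  // 1. Check that a camera is available before prompting
  const devices = await navigator.mediaDevices.enumerateDevices();
  if (!devices.some((d) => d.kind === "videoinput")) {
    return null; // no camera found
  }

  try {
    // 2. Request permission explicitly, preferring the back camera for scanning
    const stream = await navigator.mediaDevices.getUserMedia({
      video: { facingMode: "environment" },
    });
    video.srcObject = stream;
    await video.play();
    return stream;
  } catch (err) {
    // 3. Surface permission-denied / camera-in-use errors to the user
    console.error("Camera access failed:", err);
    return null;
  }
}

// 5. Clean up: stop all tracks when the scanner view is torn down
function stopScanPreview(stream: MediaStream): void {
  stream.getTracks().forEach((track) => track.stop());
}
```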
### Performance ### Performance
1. Optimize camera resolution 1. Optimize camera resolution
2. Implement proper resource cleanup 2. Implement proper resource cleanup
3. Handle camera switching efficiently 3. Handle camera switching efficiently
@ -245,6 +259,7 @@ export async function createBuildConfig(mode: string) {
5. Battery usage optimization 5. Battery usage optimization
### User Experience ### User Experience
1. Clear visual feedback
2. Camera preview
3. Scanning status indicators
@@ -257,6 +272,7 @@ export async function createBuildConfig(mode: string) {
## Testing
### Test Scenarios
1. Permission handling
2. Camera switching
3. Error conditions
@@ -267,6 +283,7 @@ export async function createBuildConfig(mode: string) {
8. Security validation
### Test Environment
- Multiple browsers
- iOS and Android devices
- Various network conditions
@@ -275,6 +292,7 @@ export async function createBuildConfig(mode: string) {
## Dependencies
### Key Packages
- `@capacitor-mlkit/barcode-scanning`
- `qrcode-stream`
- `vue-qrcode-reader`
@@ -283,12 +301,14 @@ export async function createBuildConfig(mode: string) {
## Maintenance
### Regular Updates
- Keep dependencies updated
- Monitor platform changes
- Update documentation
- Review security patches
### Performance Monitoring
- Track memory usage
- Monitor camera performance
- Check error rates
@@ -436,6 +456,7 @@ The camera switching implementation includes comprehensive error handling:
- Camera switch timeout
2. **Error Response**
```typescript
private async handleCameraSwitch(deviceId: string): Promise<void> {
try {
@@ -460,6 +481,7 @@ The camera switching implementation includes comprehensive error handling:
The camera system maintains several states:
1. **Camera States**
```typescript
type CameraState =
| "initializing" // Camera is being initialized
@@ -529,6 +551,7 @@ The camera system maintains several states:
#### MLKit Barcode Scanner Configuration
1. **Plugin Setup**
```typescript
// capacitor.config.ts
const config: CapacitorConfig = {
@@ -552,6 +575,7 @@ The camera system maintains several states:
```
2. **Camera Management**
```typescript
// CapacitorQRScanner.ts
export class CapacitorQRScanner implements QRScannerService {
@@ -603,6 +627,7 @@ The camera system maintains several states:
```
3. **Camera State Management**
```typescript
// CapacitorQRScanner.ts
private async handleCameraState(): Promise<void> {
@@ -645,6 +670,7 @@ The camera system maintains several states:
```
4. **Error Handling**
```typescript
// CapacitorQRScanner.ts
private async handleCameraError(error: Error): Promise<void> {
@@ -737,6 +763,7 @@ The camera system maintains several states:
#### Performance Optimization
1. **Battery Usage**
```typescript
// CapacitorQRScanner.ts
private optimizeBatteryUsage(): void {
@@ -759,6 +786,7 @@ The camera system maintains several states:
```
2. **Memory Management**
```typescript
// CapacitorQRScanner.ts
private async cleanupResources(): Promise<void> {
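
// The excerpt above shows only the opening lines of these handlers. As a
// rough illustration of the switch/teardown pattern they describe, a minimal
// sketch could look like the following. This is an assumption-based example
// built on the plain getUserMedia API, not the project's actual
// CapacitorQRScanner implementation.

type SketchCameraState = "initializing" | "active" | "error" | "off";

class CameraSketch {
  private state: SketchCameraState = "off";
  private stream: MediaStream | null = null;

  /** Switch cameras: tear down the old stream before opening the new one. */
  async switchCamera(deviceId: string): Promise<void> {
    this.state = "initializing";
    try {
      await this.cleanup();
      this.stream = await navigator.mediaDevices.getUserMedia({
        video: { deviceId: { exact: deviceId } },
      });
      this.state = "active";
    } catch (error) {
      this.state = "error";
      throw error; // Surface the failure to the caller / UI layer.
    }
  }

  /** Release camera tracks so the device can power the sensor down. */
  async cleanup(): Promise<void> {
    this.stream?.getTracks().forEach((track) => track.stop());
    this.stream = null;
    this.state = "off";
  }
}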

9
doc/secure-storage-implementation.md

@@ -111,6 +111,7 @@ export class AbsurdSqlDatabaseService implements PlatformService {
```
Key features:
- Uses absurd-sql for SQLite in the browser
- Implements operation queuing for thread safety
- Handles initialization and connection management
@@ -143,6 +144,7 @@ async function getAccount(did: string): Promise<Account | undefined> {
When converting from Dexie.js to SQL-based implementation, follow these patterns:
1. **Database Access Pattern**
```typescript
// Before (Dexie)
const result = await db.table.where("field").equals(value).first();
@@ -161,6 +163,7 @@ When converting from Dexie.js to SQL-based implementation, follow these patterns
```
2. **Update Operations**
```typescript
// Before (Dexie)
await db.table.where("id").equals(id).modify(changes);
@@ -184,6 +187,7 @@ When converting from Dexie.js to SQL-based implementation, follow these patterns
```
3. **Insert Operations**
```typescript
// Before (Dexie)
await db.table.add(item);
@@ -202,6 +206,7 @@ When converting from Dexie.js to SQL-based implementation, follow these patterns
```
4. **Delete Operations**
```typescript
// Before (Dexie)
await db.table.where("id").equals(id).delete();
@@ -216,6 +221,7 @@ When converting from Dexie.js to SQL-based implementation, follow these patterns
```
5. **Result Processing**
```typescript
// Before (Dexie)
const items = await db.table.toArray();
@@ -247,6 +253,7 @@ await databaseUtil.logConsoleAndDb(message, showInConsole);
```
Key Considerations:
- Always use `databaseUtil.mapQueryResultToValues()` to process SQL query results
- Use utility methods from `db/index.ts` when available instead of direct SQL
- Keep Dexie fallbacks wrapped in migration period checks
@@ -254,6 +261,7 @@ Key Considerations:
- For updates/inserts/deletes, execute both SQL and Dexie operations during migration period
Example Migration:
```typescript
// Before (Dexie)
export async function updateSettings(settings: Settings): Promise<void> {
@@ -274,6 +282,7 @@ export async function updateSettings(settings: Settings): Promise<void> {
```
Remember to:
- Create database access code to use the platform service, putting it in front of the Dexie version
- Instead of removing Dexie-specific code, keep it.
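
Only the "before" (Dexie) half of each pattern is visible in this excerpt. As a hedged sketch of the dual-write idea described above, where the SQL path runs first and the Dexie path is kept as a migration-period fallback, the shape is roughly the following; the interfaces and SQL here are illustrative assumptions, not the project's actual platform-service API.

```typescript
// Illustrative sketch only; interface shapes and SQL are assumptions.
interface SqlRunner {
  run(query: string, params: unknown[]): Promise<void>;
}

interface DexieLikeTable<T> {
  update(id: number, changes: Partial<T>): Promise<number>;
}

interface Settings {
  id: number;
  theme: string;
}

async function updateSettingsDualWrite(
  sql: SqlRunner,
  legacyTable: DexieLikeTable<Settings>,
  settings: Settings,
  migrationPeriodActive: boolean,
): Promise<void> {
  // 1. Write through the SQL-based platform service first.
  await sql.run("UPDATE settings SET theme = ? WHERE id = ?", [
    settings.theme,
    settings.id,
  ]);

  // 2. During the migration period, mirror the write into Dexie so the
  //    legacy path stays consistent until it is removed.
  if (migrationPeriodActive) {
    await legacyTable.update(settings.id, { theme: settings.theme });
  }
}
```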

14
doc/sharebufferarray_spectre_security.md

@@ -4,11 +4,13 @@
## 1. Introduction to SharedArrayBuffer
### Overview
- `SharedArrayBuffer` is a JavaScript object that enables **shared memory** access between the main thread and Web Workers.
- Unlike `ArrayBuffer`, the memory is **not copied** between threads—allowing **true parallelism**.
- Paired with `Atomics`, it allows low-level memory synchronization (e.g., locks, waits).
### Example Use
```js
const sab = new SharedArrayBuffer(1024);
const sharedArray = new Uint8Array(sab);
@@ -18,6 +20,7 @@ sharedArray[0] = 42;
## 2. Browser Security Requirements
### Security Headers Required to Use SharedArrayBuffer
Modern browsers **restrict access** to `SharedArrayBuffer` due to Spectre-class vulnerabilities.
The following **HTTP headers must be set** to enable it:
@@ -28,23 +31,28 @@ Cross-Origin-Embedder-Policy: require-corp
```
### HTTPS Requirement
- Must be served over **HTTPS** (except `localhost` for dev).
- These headers enforce **cross-origin isolation**.
### Role of CORS
- CORS **alone is not sufficient**.
- However, embedded resources (like scripts and iframes) must still include proper CORS headers if they are to be loaded in a cross-origin isolated context.
## 3. Spectre Vulnerability
### What is Spectre?
- A class of **side-channel attacks** exploiting **speculative execution** in CPUs.
- Allows an attacker to read arbitrary memory from the same address space.
### Affected Architectures
- Intel, AMD, ARM — essentially **all modern processors**.
### Why It's Still a Concern
- It's a **hardware flaw**, not just a software bug.
- Can't be fully fixed in software without performance penalties.
- New Spectre **variants** (e.g., v2, RSB, BranchScope) continue to emerge.
@@ -52,16 +60,19 @@ Cross-Origin-Embedder-Policy: require-corp
## 4. Mitigations and Current Limitations
### Browser Mitigations
- **Restricted precision** for `performance.now()`.
- **Disabled or gated** access to `SharedArrayBuffer`.
- **Reduced or removed** fine-grained timers.
### OS/Hardware Mitigations
- **Kernel Page Table Isolation (KPTI)**
- **Microcode updates**
- **Retpoline** compiler mitigations
### Developer Responsibilities
- Avoid sharing sensitive data across threads unless necessary.
- Use **constant-time cryptographic functions**.
- Assume timing attacks are **still possible**.
@@ -70,10 +81,12 @@ Cross-Origin-Embedder-Policy: require-corp
## 5. Practical Development Notes
### Using SharedArrayBuffer Safely
- Ensure the site is **cross-origin isolated**:
- Serve all resources with appropriate **CORS policies** (`Cross-Origin-Resource-Policy`, `Access-Control-Allow-Origin`)
- Set the required **COOP/COEP headers**
- Validate support using:
```js
if (window.crossOriginIsolated) {
// Safe to use SharedArrayBuffer
@@ -81,6 +94,7 @@ if (window.crossOriginIsolated) {
```
### Testing and Fallback
- Provide fallbacks to `ArrayBuffer` if isolation is not available.
- Document use cases clearly (e.g., high-performance WebAssembly applications or real-time audio/video processing).
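
Tying the isolation check and the fallback together, a small helper along these lines is one way to express it; the buffer size and usage below are arbitrary examples, not taken from the project.

```typescript
// Sketch: choose shared memory only when cross-origin isolation is in effect.
function allocateBuffer(byteLength: number): ArrayBufferLike {
  // crossOriginIsolated is true only when the COOP/COEP headers above are set.
  if (typeof SharedArrayBuffer !== "undefined" && globalThis.crossOriginIsolated) {
    return new SharedArrayBuffer(byteLength);
  }
  // Fallback: a regular ArrayBuffer (copied, not shared, when posted to a worker).
  return new ArrayBuffer(byteLength);
}

const view = new Uint8Array(allocateBuffer(1024));
view[0] = 42;
```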

29
doc/storage-implementation-checklist.md

@@ -3,6 +3,7 @@
## Core Services
### 1. Storage Service Layer
- [x] Create base `PlatformService` interface
- [x] Define common methods for all platforms
- [x] Add platform-specific method signatures
@@ -25,6 +26,7 @@
- [ ] File system access
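
For orientation, the `PlatformService` interface checked off above presumably exposes a small set of database primitives shared by all platforms; the sketch below is a guess at a plausible shape based on the checklist items, not the project's actual interface.

```typescript
// Hypothetical shape only; the real PlatformService interface may differ.
interface QueryResult {
  columns: string[];
  values: unknown[][];
}

interface PlatformService {
  /** Read query returning raw rows for later mapping. */
  dbQuery(sql: string, params?: unknown[]): Promise<QueryResult>;
  /** Write statement (INSERT / UPDATE / DELETE). */
  dbExec(sql: string, params?: unknown[]): Promise<void>;
  /** Platform capability flags, e.g. whether file-system access is available. */
  capabilities(): { hasFileSystem: boolean };
}
```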
### 2. Migration Services
- [x] Implement basic migration support
- [x] Dual-storage pattern (SQLite + Dexie)
- [x] Basic data verification
@@ -37,6 +39,7 @@
- [ ] Manual triggers
### 3. Security Layer
- [x] Basic data integrity
- [ ] Implement `EncryptionService` (planned)
- [ ] Key management
@@ -50,14 +53,17 @@
## Platform-Specific Implementation
### Web Platform
- [x] Setup absurd-sql
- [x] Install dependencies
```json
{
"@jlongster/sql.js": "^1.8.0",
"absurd-sql": "^1.8.0"
}
```
- [x] Configure VFS with IndexedDB backend
- [x] Setup worker threads
- [x] Implement operation queuing
@@ -83,6 +89,7 @@
- [x] Implement atomic operations
### iOS Platform (Planned)
- [ ] Setup SQLCipher
- [ ] Install pod dependencies
- [ ] Configure encryption
@@ -96,6 +103,7 @@
- [ ] Setup app groups
### Android Platform (Planned)
- [ ] Setup SQLCipher
- [ ] Add Gradle dependencies
- [ ] Configure encryption
@@ -109,6 +117,7 @@
- [ ] Setup file provider
### Electron Platform (Planned)
- [ ] Setup Node SQLite
- [ ] Install dependencies
- [ ] Configure IPC
@@ -124,6 +133,7 @@
## Data Models and Types
### 1. Database Schema
- [x] Define tables
```sql
@@ -166,6 +176,7 @@
### 2. Type Definitions
- [x] Create interfaces
```typescript
interface Account {
did: string;
@@ -197,6 +208,7 @@
## UI Components
### 1. Migration UI (Planned)
- [ ] Create components
- [ ] `MigrationProgress.vue`
- [ ] `MigrationError.vue`
@@ -204,6 +216,7 @@
- [ ] `MigrationStatus.vue`
### 2. Settings UI (Planned)
- [ ] Update components
- [ ] Add storage settings
- [ ] Add migration controls
@@ -211,6 +224,7 @@
- [ ] Add security settings
### 3. Error Handling UI (Planned)
- [ ] Create components
- [ ] `StorageError.vue`
- [ ] `QuotaExceeded.vue`
@@ -220,6 +234,7 @@
## Testing
### 1. Unit Tests
- [x] Basic service tests
- [x] Platform service tests
- [x] Database operation tests
@@ -227,6 +242,7 @@
- [ ] Platform detection tests (planned)
### 2. Integration Tests (Planned)
- [ ] Test migrations
- [ ] Web platform tests
- [ ] iOS platform tests
@@ -234,6 +250,7 @@
- [ ] Electron platform tests
### 3. E2E Tests (Planned)
- [ ] Test workflows
- [ ] Account management
- [ ] Settings management
@@ -243,12 +260,14 @@
## Documentation
### 1. Technical Documentation
- [x] Update architecture docs
- [x] Add API documentation
- [ ] Create migration guides (planned)
- [ ] Document security measures (planned)
### 2. User Documentation (Planned)
- [ ] Update user guides
- [ ] Add troubleshooting guides
- [ ] Create FAQ
@@ -257,12 +276,14 @@
## Deployment
### 1. Build Process
- [x] Update build scripts
- [x] Add platform-specific builds
- [ ] Configure CI/CD (planned)
- [ ] Setup automated testing (planned)
### 2. Release Process (Planned)
- [ ] Create release checklist
- [ ] Add version management
- [ ] Setup rollback procedures
@@ -271,12 +292,14 @@
## Monitoring and Analytics (Planned)
### 1. Error Tracking
- [ ] Setup error logging
- [ ] Add performance monitoring
- [ ] Configure alerts
- [ ] Create dashboards
### 2. Usage Analytics
- [ ] Add storage metrics
- [ ] Track migration success
- [ ] Monitor performance
@@ -285,12 +308,14 @@
## Security Audit (Planned)
### 1. Code Review
- [ ] Review encryption
- [ ] Check access controls
- [ ] Verify data handling
- [ ] Audit dependencies
### 2. Penetration Testing
- [ ] Test data access
- [ ] Verify encryption
- [ ] Check authentication
@@ -299,6 +324,7 @@
## Success Criteria
### 1. Performance
- [x] Query response time < 100ms
- [x] Operation queuing for thread safety
- [x] Proper initialization handling
@@ -307,6 +333,7 @@
- [ ] Memory usage < 50MB (planned)
### 2. Reliability
- [x] Basic data integrity
- [x] Operation queuing
- [ ] Automatic recovery (planned)
@@ -315,6 +342,7 @@
- [ ] Data consistency (planned)
### 3. Security
- [x] Basic data integrity
- [ ] AES-256 encryption (planned)
- [ ] Secure key storage (planned)
@@ -322,6 +350,7 @@
- [ ] Audit logging (planned)
### 4. User Experience
- [x] Basic database operations
- [ ] Smooth migration (planned)
- [ ] Clear error messages (planned)

35
doc/usage-guide.md

@@ -53,7 +53,6 @@ header-includes:
\clearpage
# Purpose of Document
Both end-users and development team members need to know how to use TimeSafari.
@@ -90,14 +89,16 @@ development environment. This section will guide you through the process.
## Prerequisites
1. Have the following installed on your local machine:
- Node.js and NPM
- A web browser. For this guide, we will use Google Chrome.
- Git
- A code editor
2. Create an API key on Infura. This is necessary for the Endorser API to connect to the Ethereum
blockchain.
- You can create an account on Infura [here](https://infura.io/).\
Click "CREATE NEW API KEY" and label the key. Then click "API Keys" in the top menu bar to
be taken back to the list of keys.
@@ -105,23 +106,23 @@ development environment. This section will guide you through the process.
![](images/01_infura-api-keys.png){ width=550px }
- Go to the key detail page. Then click "MANAGE API KEY".
![](images/02-infura-key-detail.png){ width=550px }
- Click the copy and paste button next to the string of alphanumeric characters.\
This is your API key, also known as your project ID.
![](images/03-infura-api-key-id.png){width=550px }
- Save this for later during the Endorser API setup. This will go in your `INFURA_PROJECT_ID`
environment variable.
## Setup steps
### 1. Clone the following repositories from their respective Git hosts
- [TimeSafari Frontend](https://gitea.anomalistdesign.com/trent_larson/crowd-funder-for-time-pwa)\
This is a Progressive Web App (PWA) built with VueJS and TypeScript.
Note that the clone command here is different from the one you would use for GitHub.
@@ -130,7 +131,7 @@ git clone
git clone \
ssh://git@gitea.anomalistdesign.com:222/trent_larson/crowd-funder-for-time-pwa.git
```
- [TimeSafari Backend - Endorser API](https://github.com/trentlarson/endorser-ch)\
This is a NodeJS service providing the backend for TimeSafari.
```bash
@@ -157,21 +158,25 @@ second user to the app.
1. Install dependencies and environment variables.\
In endorser-ch install dependencies and set up environment variables to allow starting it up in
development mode.
```bash
cd endorser-ch
npm clean-install # or npm ci
cp .env.local .env
```
Edit the .env file's INFURA_PROJECT_ID with the value you saved earlier in the
prerequisites.\
Then create the SQLite database by running `npm run flyway migrate` with environment variables
set correctly to select the default SQLite development user as follows.
```bash
export NODE_ENV=dev
export DBUSER=sa
export DBPASS=sasa
npm run flyway migrate
```
The first run of flyway migrate may take some time to complete because the entire Flyway
distribution must be downloaded prior to executing migrations.
@@ -254,7 +259,7 @@ A Flyway report has been generated here: /Users/kbull/code/timesafari/endorser-c
In our case this DID is:\
`did:ethr:0xe4B783c74c8B0e229524e44d0cD898D272E02CD6`
- Add that DID to the following echoed SQL statement where it says `YOUR_DID`
```bash
echo "INSERT INTO registration (did, maxClaims, maxRegs, epoch)
@@ -268,7 +273,7 @@ A Flyway report has been generated here: /Users/kbull/code/timesafari/endorser-c
`endorser-ch` creates the SQLite database it depends on in the parent directory
of `endorser-ch`.
- You can verify with an SQL browser tool that your record has been added to the `registration`
table.
![](images/08-endorser-sqlite-row-added.png){width=350px}

32
docker/README.md

@@ -155,6 +155,7 @@ VITE_PASSKEYS_ENABLED=true
## Build Modes
### Development Mode
- **Target**: `development`
- **Features**: Hot reloading, development server
- **Port**: 5173
@@ -168,6 +169,7 @@ docker build --target development -t timesafari:dev .
```
### Staging Mode
- **Target**: `staging`
- **Features**: Production build with relaxed caching
- **Port**: 8080 (mapped from 80)
@@ -181,6 +183,7 @@ docker build --build-arg BUILD_MODE=staging -t timesafari:staging .
```
### Production Mode
- **Target**: `production`
- **Features**: Optimized production build
- **Port**: 80
@@ -194,6 +197,7 @@ docker build -t timesafari:latest .
```
### Custom Mode
- **Target**: Configurable via `BUILD_TARGET`
- **Features**: Fully configurable
- **Port**: Configurable via `CUSTOM_PORT`
@@ -250,6 +254,7 @@ docker-compose up staging
## Security Features
### Built-in Security
- **Non-root user execution**: All containers run as non-root users
- **Security headers**: XSS protection, content type options, frame options
- **Rate limiting**: API request rate limiting
@@ -257,6 +262,7 @@ docker-compose up staging
- **Minimal attack surface**: Alpine Linux base images
### Security Headers
- `X-Frame-Options: SAMEORIGIN`
- `X-Content-Type-Options: nosniff`
- `X-XSS-Protection: 1; mode=block`
@@ -266,17 +272,20 @@ docker-compose up staging
## Performance Optimizations
### Caching Strategy
- **Static assets**: 1 year cache with immutable flag (production)
- **HTML files**: 1 hour cache (production) / no cache (staging)
- **Service worker**: No cache
- **Manifest**: 1 day cache (production) / 1 hour cache (staging)
### Compression
- **Gzip compression**: Enabled for text-based files
- **Compression level**: 6 (balanced)
- **Minimum size**: 1024 bytes
### Nginx Optimizations
- **Sendfile**: Enabled for efficient file serving
- **TCP optimizations**: nopush and nodelay enabled
- **Keepalive**: 65 second timeout
@@ -285,19 +294,23 @@ docker-compose up staging
## Health Checks
### Built-in Health Checks
All services include health checks that:
- Check every 30 seconds
- Timeout after 10 seconds
- Retry 3 times before marking unhealthy
- Start checking after 40 seconds
### Health Check Endpoints
- **Production/Staging**: `http://localhost/health`
- **Development**: `http://localhost:5173`
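
Expressed as an external probe, the parameters above (10-second timeout, 3 retries) look roughly like this; the real checks run inside the containers, so this is only an illustration.

```typescript
// Illustration of the health-check parameters; not the compose file's actual check.
async function probeHealth(
  url = "http://localhost/health", // production/staging endpoint from above
  retries = 3,                     // attempts before reporting unhealthy
  timeoutMs = 10_000,              // per-attempt timeout
): Promise<boolean> {
  for (let attempt = 1; attempt <= retries; attempt++) {
    try {
      const response = await fetch(url, { signal: AbortSignal.timeout(timeoutMs) });
      if (response.ok) return true;
    } catch {
      // Timeout or network error; try again.
    }
  }
  return false;
}
```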
## SSL/HTTPS Setup
### SSL Certificates
For SSL deployment, create an `ssl` directory with certificates:
```bash
@@ -308,6 +321,7 @@ cp your-key.pem ssl/
```
### SSL Configuration
Use the `production-ssl` service in docker-compose:
```bash
@@ -317,10 +331,12 @@ docker-compose up production-ssl
## Monitoring and Logging
### Log Locations
- **Access logs**: `/var/log/nginx/access.log`
- **Error logs**: `/var/log/nginx/error.log`
### Log Format
```
$remote_addr - $remote_user [$time_local] "$request"
$status $body_bytes_sent "$http_referer"
@@ -328,6 +344,7 @@ $status $body_bytes_sent "$http_referer"
```
### Log Levels
- **Production**: `warn` level
- **Staging**: `debug` level
- **Development**: Full logging
@@ -337,6 +354,7 @@ $status $body_bytes_sent "$http_referer"
### Common Issues
#### Build Failures
```bash
# Check build logs
docker build -t timesafari:latest . 2>&1 | tee build.log
@@ -349,6 +367,7 @@ docker run --rm timesafari:latest npm list --depth=0
```
#### Container Won't Start
```bash
# Check container logs
docker logs <container_id>
@@ -361,6 +380,7 @@ netstat -tulpn | grep :80
```
#### Environment Variables Not Set
```bash
# Check environment in container
docker exec <container_id> env | grep VITE_
@@ -373,6 +393,7 @@ cat .env.production
```
#### Performance Issues
```bash
# Check container resources
docker stats <container_id>
@@ -387,6 +408,7 @@ docker exec <container_id> tail -f /var/log/nginx/access.log
### Debug Commands
#### Container Debugging
```bash
# Enter running container
docker exec -it <container_id> /bin/sh
@@ -399,6 +421,7 @@ docker exec <container_id> ls -la /usr/share/nginx/html
```
#### Network Debugging
```bash
# Check container network
docker network inspect bridge
@@ -413,6 +436,7 @@ docker exec <container_id> nslookup google.com
## Production Deployment
### Recommended Production Setup
1. **Use specific version tags**: `timesafari:1.0.0`
2. **Implement health checks**: Already included
3. **Configure proper logging**: Use external log aggregation
@@ -420,6 +444,7 @@ docker exec <container_id> nslookup google.com
5. **Use Docker secrets**: For sensitive data
### Production Commands
```bash
# Build with specific version
docker build -t timesafari:1.0.0 .
@@ -442,6 +467,7 @@ docker run -d --name timesafari -p 80:80 --restart unless-stopped --env-file .en
## Development Workflow
### Local Development
```bash
# Start development environment
./docker/run.sh dev
@@ -454,6 +480,7 @@ docker-compose down dev
```
### Testing Changes
```bash
# Build and test staging
./docker/run.sh staging
@@ -463,6 +490,7 @@ docker-compose down dev
```
### Continuous Integration
```bash
# Build and test in CI
docker build -t timesafari:test .
@@ -479,6 +507,7 @@ docker rm timesafari-test
## Best Practices
### Security
- Always use non-root users
- Keep base images updated
- Scan images for vulnerabilities
@@ -486,6 +515,7 @@ docker rm timesafari-test
- Implement proper access controls
### Performance
- Use multi-stage builds
- Optimize layer caching
- Minimize image size
@@ -493,6 +523,7 @@ docker rm timesafari-test
- Implement proper caching
### Monitoring
- Use health checks
- Monitor resource usage
- Set up log aggregation
@@ -500,6 +531,7 @@ docker rm timesafari-test
- Use proper error handling
### Maintenance
- Regular security updates
- Monitor for vulnerabilities
- Keep dependencies updated

29
electron/README-BUILDING.md

@@ -18,6 +18,7 @@ This guide covers building and running the TimeSafari Electron application for d
## Quick Start
### Development Mode
```bash
# Start development server
npm run build:electron:dev
@@ -28,6 +29,7 @@ npm run electron:start
```
### Production Builds
```bash
# Build for current platform
npm run build:electron:prod
@@ -48,16 +50,19 @@ npm run build:electron:deb # Linux DEB package
The Electron app enforces single instance operation to prevent database conflicts and resource contention:
### Implementation
- Uses Electron's built-in `app.requestSingleInstanceLock()`
- Second instances exit immediately with user-friendly message
- Existing instance focuses and shows informational dialog
### Behavior
- **First instance**: Starts normally and acquires lock
- **Second instance**: Detects existing instance, exits immediately
- **User experience**: Clear messaging about single instance requirement
### Benefits
- Prevents database corruption from concurrent access
- Avoids resource conflicts
- Maintains data integrity
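
For reference, a minimal main-process sketch of the `app.requestSingleInstanceLock()` flow described above could look like this; the dialog text and window handling are illustrative rather than the app's exact implementation.

```typescript
// Minimal sketch of single-instance enforcement; dialog text is illustrative.
import { app, BrowserWindow, dialog } from "electron";

const gotLock = app.requestSingleInstanceLock();

if (!gotLock) {
  // A second instance: exit immediately so it never touches the database.
  app.quit();
} else {
  let mainWindow: BrowserWindow | null = null;

  app.on("second-instance", () => {
    // Focus the existing window and explain why only one instance runs.
    if (mainWindow) {
      if (mainWindow.isMinimized()) mainWindow.restore();
      mainWindow.focus();
      dialog.showMessageBox(mainWindow, {
        message: "TimeSafari is already running in another window.",
      });
    }
  });

  app.whenReady().then(() => {
    mainWindow = new BrowserWindow({ width: 1200, height: 800 });
    mainWindow.loadFile("index.html");
  });
}
```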
@@ -66,6 +71,7 @@ The Electron app enforces single instance operation to prevent database conflict
## Build Configuration
### Environment Modes
```bash
# Development (default)
npm run build:electron:dev
@@ -78,6 +84,7 @@ npm run build:electron:prod
```
### Platform-Specific Builds
```bash
# Windows
npm run build:electron:windows:dev
@@ -96,6 +103,7 @@ npm run build:electron:linux:prod
```
### Package Types
```bash
# Linux AppImage
npm run build:electron:appimage:dev
@@ -116,26 +124,31 @@ npm run build:electron:deb:prod
## Platform-Specific Requirements
### Windows
- Windows 10+ (64-bit)
- Visual Studio Build Tools (for native modules)
### macOS
- macOS 10.15+ (Catalina)
- Xcode Command Line Tools
- Code signing certificate (for distribution)
### Linux
- Ubuntu 18.04+ / Debian 10+ / CentOS 7+
- Development headers for native modules
## Database Configuration
### SQLite Integration
- Uses native Node.js SQLite3 for Electron
- Database stored in user's app data directory
- Automatic migration from IndexedDB (if applicable)
### Single Instance Protection
- File-based locking prevents concurrent database access
- Automatic cleanup on app exit
- Graceful handling of lock conflicts
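
A rough sketch of file-based locking with cleanup on exit is shown below; the lock-file name and location are assumptions, not the app's actual paths.

```typescript
// Hypothetical sketch; the real lock-file name and location may differ.
import { app } from "electron";
import * as fs from "fs";
import * as path from "path";

const lockPath = path.join(app.getPath("userData"), "db.lock");

function acquireDbLock(): boolean {
  try {
    // 'wx' fails if the file already exists, i.e. another instance holds the lock.
    const fd = fs.openSync(lockPath, "wx");
    fs.writeSync(fd, String(process.pid));
    fs.closeSync(fd);
    return true;
  } catch {
    return false;
  }
}

function releaseDbLock(): void {
  try {
    fs.unlinkSync(lockPath);
  } catch {
    // Lock already removed; nothing to clean up.
  }
}

app.on("will-quit", releaseDbLock);
```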
@@ -143,11 +156,13 @@ npm run build:electron:deb:prod
## Security Features
### Content Security Policy
- Strict CSP in production builds
- Development mode allows localhost connections
- Automatic configuration based on build mode
### Auto-Updater
- Disabled in development mode
- Production builds check for updates automatically
- AppImage builds skip update checks
@@ -157,6 +172,7 @@ npm run build:electron:deb:prod
### Common Issues
#### Build Failures
```bash
# Clean and rebuild
npm run clean:electron
@@ -164,6 +180,7 @@ npm run build:electron:dev
```
#### Native Module Issues
```bash
# Rebuild native modules
cd electron
@@ -171,16 +188,19 @@ npm run electron:rebuild
```
#### Single Instance Conflicts
- Ensure no other TimeSafari instances are running
- Check for orphaned processes: `ps aux | grep electron`
- Restart system if necessary
#### Database Issues
- Check app data directory permissions
- Verify SQLite database integrity
- Clear app data if corrupted
### Debug Mode
```bash
# Enable debug logging
DEBUG=* npm run build:electron:dev
@@ -203,6 +223,7 @@ electron/
## Development Workflow
1. **Start Development**
```bash
npm run build:electron:dev
```
@@ -212,11 +233,13 @@ electron/
- Changes auto-reload in development
3. **Test Build**
```bash
npm run build:electron:test
```
4. **Production Build**
```bash
npm run build:electron:prod
```
@@ -224,16 +247,19 @@ electron/
## Performance Considerations
### Memory Usage
- Monitor renderer process memory
- Implement proper cleanup in components
- Use efficient data structures
### Startup Time
- Lazy load non-critical modules
- Optimize database initialization
- Minimize synchronous operations
### Database Performance
- Use transactions for bulk operations
- Implement proper indexing
- Monitor query performance
@@ -251,16 +277,19 @@ electron/
## Deployment
### Distribution
- Windows: `.exe` installer
- macOS: `.dmg` disk image
- Linux: `.AppImage` or `.deb` package
### Code Signing
- Windows: Authenticode certificate
- macOS: Developer ID certificate
- Linux: GPG signing (optional)
### Auto-Updates
- Configured for production builds
- Disabled for development and AppImage
- Handles update failures gracefully

32
electron/README.md

@@ -56,21 +56,25 @@ npm run build:electron:dmg:prod
```
**Stage 1: Web Build**
- Vite builds web assets with Electron configuration
- Environment variables loaded based on build mode
- Assets optimized for desktop application
**Stage 2: Capacitor Sync**
- Copies web assets to Electron app directory
- Syncs Capacitor configuration and plugins
- Prepares native module bindings
**Stage 3: TypeScript Compile**
- Compiles Electron main process TypeScript
- Rebuilds native modules for target platform
- Generates production-ready JavaScript
**Stage 4: Package Creation**
- Creates platform-specific installers
- Generates distribution packages
- Signs applications (when configured)
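
Stage 1's "environment variables loaded based on build mode" is the kind of thing Vite's `loadEnv` handles. A minimal sketch, assuming a standard Vite setup rather than the project's actual `vite.config`, is shown below; the variable name matches the Environment Variables section later in this README.

```typescript
// Minimal illustration, not the project's actual Vite configuration.
import { defineConfig, loadEnv } from "vite";

export default defineConfig(({ mode }) => {
  // Reads .env, .env.[mode], and .env.[mode].local for the selected mode
  // (development, test, production) and keeps only VITE_-prefixed keys.
  const env = loadEnv(mode, process.cwd(), "VITE_");

  return {
    define: {
      // Example: expose a mode-specific value at build time.
      __API_URL__: JSON.stringify(env.VITE_API_URL),
    },
  };
});
```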
@@ -82,6 +86,7 @@ npm run build:electron:dmg:prod
**Purpose**: Local development and testing
**Command**: `npm run build:electron:dev`
**Features**:
- Hot reload enabled
- Debug tools available
- Development logging
@@ -92,6 +97,7 @@ npm run build:electron:dmg:prod
**Purpose**: Staging and testing environments
**Command**: `npm run build:electron -- --mode test`
**Features**:
- Test API endpoints
- Staging configurations
- Optimized for testing
@@ -102,6 +108,7 @@ npm run build:electron:dmg:prod
**Purpose**: Production deployment
**Command**: `npm run build:electron -- --mode production`
**Features**:
- Production optimizations
- Code minification
- Security hardening
@@ -116,6 +123,7 @@ npm run build:electron:dmg:prod
**Command**: `npm run build:electron:windows:prod`
**Features**:
- NSIS installer with custom options
- Desktop and Start Menu shortcuts
- Elevation permissions for installation
@@ -128,6 +136,7 @@ npm run build:electron:dmg:prod
**Command**: `npm run build:electron:mac:prod`
**Features**:
- Universal binary (x64 + arm64)
- DMG installer with custom branding
- App Store compliance (when configured)
@@ -140,6 +149,7 @@ npm run build:electron:dmg:prod
**Command**: `npm run build:electron:linux:prod`
**Features**:
- AppImage for universal distribution
- DEB package for Debian-based systems
- RPM package for Red Hat-based systems
@@ -152,6 +162,7 @@ npm run build:electron:dmg:prod
**Format**: Self-contained Linux executable
**Command**: `npm run build:electron:appimage:prod`
**Features**:
- Single file distribution
- No installation required
- Portable across Linux distributions
@@ -162,6 +173,7 @@ npm run build:electron:dmg:prod
**Format**: Debian package installer
**Command**: `npm run build:electron:deb:prod`
**Features**:
- Native package management
- Dependency resolution
- System integration
@@ -172,6 +184,7 @@ npm run build:electron:dmg:prod
**Format**: macOS disk image
**Command**: `npm run build:electron:dmg:prod`
**Features**:
- Native macOS installer
- Custom branding and layout
- Drag-and-drop installation
@@ -293,6 +306,7 @@ Local Electron scripts for building:
### Environment Variables
**Development**:
```bash
VITE_API_URL=http://localhost:3000
VITE_DEBUG=true
@@ -301,6 +315,7 @@ VITE_ENABLE_DEV_TOOLS=true
```
**Testing**:
```bash
VITE_API_URL=https://test-api.timesafari.com
VITE_DEBUG=false
@@ -309,6 +324,7 @@ VITE_ENABLE_DEV_TOOLS=false
```
**Production**:
```bash
VITE_API_URL=https://api.timesafari.com
VITE_DEBUG=false
@@ -347,6 +363,7 @@ electron/
### Common Issues
**TypeScript Compilation Errors**:
```bash
# Clean and rebuild
npm run clean:electron
@@ -354,18 +371,21 @@ cd electron && npm run build
```
**Native Module Issues**:
```bash
# Rebuild native modules
cd electron && npm run build
```
**Asset Copy Issues**:
```bash
# Verify Capacitor sync
npx cap sync electron
```
**Package Creation Failures**:
```bash
# Check electron-builder configuration
# Verify platform-specific requirements
@@ -375,16 +395,19 @@ npx cap sync electron
### Platform-Specific Issues
**Windows**:
- Ensure Windows Build Tools installed
- Check NSIS installation
- Verify code signing certificates
**macOS**:
- Install Xcode Command Line Tools
- Configure code signing certificates
- Check app notarization requirements
**Linux**:
- Install required packages (rpm-tools, etc.)
- Check AppImage dependencies
- Verify desktop integration
@@ -394,11 +417,13 @@ npx cap sync electron
### Build Performance
**Parallel Builds**:
- Use concurrent TypeScript compilation
- Optimize asset copying
- Minimize file system operations
**Caching Strategies**:
- Cache node_modules between builds
- Cache compiled TypeScript
- Cache web assets when unchanged
@@ -406,11 +431,13 @@ npx cap sync electron
### Runtime Performance
**Application Startup**:
- Optimize main process initialization
- Minimize startup dependencies
- Use lazy loading for features
**Memory Management**:
- Monitor memory usage
- Implement proper cleanup
- Optimize asset loading
@@ -420,16 +447,19 @@ npx cap sync electron
### Code Signing
**Windows**:
- Authenticode code signing
- EV certificate for SmartScreen
- Timestamp server configuration
**macOS**:
- Developer ID code signing
- App notarization
- Hardened runtime
**Linux**:
- GPG signing for packages
- AppImage signing
- Package verification
@@ -437,12 +467,14 @@ npx cap sync electron
### Security Hardening
**Production Builds**:
- Disable developer tools
- Remove debug information
- Enable security policies
- Implement sandboxing
**Update Security**:
- Secure update channels
- Package integrity verification
- Rollback capabilities

2
index.html

@@ -11,6 +11,6 @@
</head>
<body>
<div id="app"></div>
- <script type="module" src="/src/main.web.ts"></script>
+ <script type="module" src="/src/main.ts"></script>
</body>
</html>

8
ios/App/App.xcodeproj/project.pbxproj

@@ -403,7 +403,7 @@
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CODE_SIGN_STYLE = Automatic;
- CURRENT_PROJECT_VERSION = 39;
+ CURRENT_PROJECT_VERSION = 40;
DEVELOPMENT_TEAM = GM3FS5JQPH;
ENABLE_APP_SANDBOX = NO;
ENABLE_USER_SCRIPT_SANDBOXING = NO;
@@ -413,7 +413,7 @@
"$(inherited)",
"@executable_path/Frameworks",
);
- MARKETING_VERSION = 1.0.6;
+ MARKETING_VERSION = 1.0.7;
OTHER_SWIFT_FLAGS = "$(inherited) \"-D\" \"COCOAPODS\" \"-DDEBUG\"";
PRODUCT_BUNDLE_IDENTIFIER = app.timesafari;
PRODUCT_NAME = "$(TARGET_NAME)";
@@ -430,7 +430,7 @@
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CODE_SIGN_STYLE = Automatic;
- CURRENT_PROJECT_VERSION = 39;
+ CURRENT_PROJECT_VERSION = 40;
DEVELOPMENT_TEAM = GM3FS5JQPH;
ENABLE_APP_SANDBOX = NO;
ENABLE_USER_SCRIPT_SANDBOXING = NO;
@@ -440,7 +440,7 @@
"$(inherited)",
"@executable_path/Frameworks",
);
- MARKETING_VERSION = 1.0.6;
+ MARKETING_VERSION = 1.0.7;
PRODUCT_BUNDLE_IDENTIFIER = app.timesafari;
PRODUCT_NAME = "$(TARGET_NAME)";
SWIFT_ACTIVE_COMPILATION_CONDITIONS = "";

15
ios/App/app_privacy_manifest_fixer/CHANGELOG.md

@@ -1,30 +1,38 @@
## 1.4.1
- Fix macOS app re-signing issue.
- Automatically enable Hardened Runtime in macOS codesign.
- Add clean script.
## 1.4.0
- Support for macOS app ([#9](https://github.com/crasowas/app_privacy_manifest_fixer/issues/9)).
## 1.3.11
- Fix install issue by skipping `PBXAggregateTarget` ([#4](https://github.com/crasowas/app_privacy_manifest_fixer/issues/4)).
## 1.3.10
- Fix app re-signing issue.
- Enhance Build Phases script robustness.
## 1.3.9
- Add log file output.
## 1.3.8
- Add version info to privacy access report.
- Remove empty tables from privacy access report.
## 1.3.7
- Enhance API symbols analysis with strings tool.
- Improve performance of API usage analysis.
## 1.3.5
- Fix issue with inaccurate privacy manifest search.
- Disable dependency analysis to force the script to run on every build.
- Add placeholder for privacy access report.
@@ -32,27 +40,34 @@
- Add examples for privacy access report.
## 1.3.0
- Add privacy access report generation.
## 1.2.3
- Fix issue with relative path parameter.
- Add support for all application targets.
## 1.2.1
- Fix backup issue with empty user templates directory.
## 1.2.0
- Add uninstall script.
## 1.1.2
- Remove `Templates/.gitignore` to track `UserTemplates`.
- Fix incorrect use of `App.xcprivacy` template in `App.framework`.
## 1.1.0
- Add logs for latest release fetch failure.
- Fix issue with converting published time to local time.
- Disable showing environment variables in the build log.
- Add `--install-builds-only` command line option.
## 1.0.0
- Initial version.

7
ios/App/app_privacy_manifest_fixer/README.md

@@ -150,6 +150,7 @@ The privacy manifest templates are stored in the [`Templates`](https://github.co
### Template Types
The templates are categorized as follows:
- **AppTemplate.xcprivacy**: A privacy manifest template for the app.
- **FrameworkTemplate.xcprivacy**: A generic privacy manifest template for frameworks.
- **FrameworkName.xcprivacy**: A privacy manifest template for a specific framework, available only in the `Templates/UserTemplates` directory.
@@ -157,20 +158,24 @@ The templates are categorized as follows:
### Template Priority
For an app, the priority of privacy manifest templates is as follows:
- `Templates/UserTemplates/AppTemplate.xcprivacy` > `Templates/AppTemplate.xcprivacy`
For a specific framework, the priority of privacy manifest templates is as follows:
- `Templates/UserTemplates/FrameworkName.xcprivacy` > `Templates/UserTemplates/FrameworkTemplate.xcprivacy` > `Templates/FrameworkTemplate.xcprivacy`
### Default Templates
The default templates are located in the `Templates` root directory and currently include the following templates:
- `Templates/AppTemplate.xcprivacy`
- `Templates/FrameworkTemplate.xcprivacy`
These templates will be modified based on the API usage analysis results, especially the `NSPrivacyAccessedAPIType` entries, to generate new privacy manifests for fixes, ensuring compliance with App Store requirements.
**If adjustments to the privacy manifest template are needed, such as in the following scenarios, avoid directly modifying the default templates. Instead, use a custom template. If a custom template with the same name exists, it will take precedence over the default template for fixes.**
- Generating a non-compliant privacy manifest due to inaccurate API usage analysis.
- Modifying the reason declared in the template.
- Adding declarations for collected data.
@@ -198,6 +203,7 @@ The privacy access API categories and their associated declared reasons in `Fram
### Custom Templates
To create custom templates, place them in the `Templates/UserTemplates` directory with the following structure:
- `Templates/UserTemplates/AppTemplate.xcprivacy`
- `Templates/UserTemplates/FrameworkTemplate.xcprivacy`
- `Templates/UserTemplates/FrameworkName.xcprivacy`
@@ -205,6 +211,7 @@ To create custom templates, place them in the `Templates/UserTemplates` director
Among these templates, only `FrameworkTemplate.xcprivacy` will be modified based on the API usage analysis results to adjust the `NSPrivacyAccessedAPIType` entries, thereby generating a new privacy manifest for framework fixes. The other templates will remain unchanged and will be directly used for fixes.
**Important Notes:**
- The template for a specific framework must follow the naming convention `FrameworkName.xcprivacy`, where `FrameworkName` should match the name of the framework. For example, the template for `Flutter.framework` should be named `Flutter.xcprivacy`.
- For macOS frameworks, the naming convention should be `FrameworkName.Version.xcprivacy`, where the version name is added to distinguish different versions. For a single version macOS framework, the `Version` is typically `A`.
- The name of an SDK may not exactly match the name of the framework. To determine the correct framework name, check the `Frameworks` directory in the application bundle after building the project.
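A quick way to perform the framework-name check from the last bullet (the output path and `App.app` bundle name are assumptions; use wherever the built `.app` actually lands):

```bash
# After a build, list the bundled frameworks; each entry is the FrameworkName
# to use when naming a Templates/UserTemplates/FrameworkName.xcprivacy override.
ls -1 "ios/App/output/App.app/Frameworks"
```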

7
ios/App/app_privacy_manifest_fixer/README.zh-CN.md

@@ -150,6 +150,7 @@ sh clean.sh
### 模板类型
模板分为以下几类:
- **AppTemplate.xcprivacy**:App 的隐私清单模板。
- **FrameworkTemplate.xcprivacy**:通用的 Framework 隐私清单模板。
- **FrameworkName.xcprivacy**:特定的 Framework 隐私清单模板,仅在`Templates/UserTemplates`目录有效。
@@ -157,20 +158,24 @@ sh clean.sh
### 模板优先级
对于 App,隐私清单模板的优先级如下:
- `Templates/UserTemplates/AppTemplate.xcprivacy` > `Templates/AppTemplate.xcprivacy`
对于特定的 Framework,隐私清单模板的优先级如下:
- `Templates/UserTemplates/FrameworkName.xcprivacy` > `Templates/UserTemplates/FrameworkTemplate.xcprivacy` > `Templates/FrameworkTemplate.xcprivacy`
### 默认模板
默认模板位于`Templates`根目录,目前包括以下模板:
- `Templates/AppTemplate.xcprivacy`
- `Templates/FrameworkTemplate.xcprivacy`
这些模板将根据 API 使用分析结果进行修改,特别是`NSPrivacyAccessedAPIType`条目将被调整,以生成新的隐私清单用于修复,确保符合 App Store 要求。
**如果需要调整隐私清单模板,例如以下场景,请避免直接修改默认模板,而是使用自定义模板。如果存在相同名称的自定义模板,它将优先于默认模板用于修复。**
- 由于 API 使用分析结果不准确,生成了不合规的隐私清单。
- 需要修改模板中声明的理由。
- 需要声明收集的数据。
@@ -198,6 +203,7 @@ sh clean.sh
### 自定义模板
要创建自定义模板,请将其放在`Templates/UserTemplates`目录,结构如下:
- `Templates/UserTemplates/AppTemplate.xcprivacy`
- `Templates/UserTemplates/FrameworkTemplate.xcprivacy`
- `Templates/UserTemplates/FrameworkName.xcprivacy`
@@ -205,6 +211,7 @@ sh clean.sh
在这些模板中,只有`FrameworkTemplate.xcprivacy`会根据 API 使用分析结果对`NSPrivacyAccessedAPIType`条目进行调整,以生成新的隐私清单用于 Framework 修复。其他模板保持不变,将直接用于修复。
**重要说明:**
- 特定的 Framework 模板必须遵循命名规范`FrameworkName.xcprivacy`,其中`FrameworkName`需与 Framework 的名称匹配。例如`Flutter.framework`的模板应命名为`Flutter.xcprivacy`。
- 对于 macOS Framework,应遵循命名规范`FrameworkName.Version.xcprivacy`,额外增加版本名称用于区分不同的版本。对于单一版本的 macOS Framework,`Version`通常为`A`。
- SDK 的名称可能与 Framework 的名称不完全一致。要确定正确的 Framework 名称,请在构建项目后检查 App 包中的`Frameworks`目录。

3247
package-lock.json

File diff suppressed because it is too large

31
package.json

@@ -1,6 +1,6 @@
{
  "name": "timesafari",
-  "version": "1.0.7-beta",
+  "version": "1.0.8-beta",
  "description": "Time Safari Application",
  "author": {
    "name": "Time Safari Team"
@@ -12,6 +12,8 @@
    "type-check": "tsc --noEmit",
    "prebuild": "eslint --ext .js,.ts,.vue --ignore-path .gitignore src && node sw_combine.js && node scripts/copy-wasm.js",
    "test:prerequisites": "node scripts/check-prerequisites.js",
+    "check:dependencies": "./scripts/check-dependencies.sh",
+    "test:all": "npm run lint && tsc && npm run test:web && npm run test:mobile && ./scripts/test-safety-check.sh && echo '\n\n\nGotta add the performance tests'",
    "test:web": "npx playwright test -c playwright.config-local.ts --trace on",
    "test:mobile": "./scripts/test-mobile.sh",
    "test:android": "node scripts/test-android.js",
@@ -27,8 +29,8 @@
    "build:capacitor": "VITE_GIT_HASH=`git log -1 --pretty=format:%h` vite build --mode capacitor --config vite.config.capacitor.mts",
    "build:capacitor:sync": "npm run build:capacitor && npx cap sync",
    "build:native": "vite build && npx cap sync && npx capacitor-assets generate",
-    "assets:config": "tsx scripts/assets-config.ts",
-    "assets:validate": "tsx scripts/assets-validator.ts",
+    "assets:config": "npx tsx scripts/assets-config.ts",
+    "assets:validate": "npx tsx scripts/assets-validator.ts",
    "assets:clean": "rimraf android/app/src/main/res/mipmap-* ios/App/App/Assets.xcassets/**/AppIcon*.png ios/App/App/Assets.xcassets/**/Splash*.png || true",
    "build:ios": "./scripts/build-ios.sh",
    "build:ios:dev": "./scripts/build-ios.sh --dev",
@@ -96,7 +98,14 @@
    "build:electron:dmg:dev": "./scripts/build-electron.sh --dev --dmg",
    "build:electron:dmg:test": "./scripts/build-electron.sh --test --dmg",
    "build:electron:dmg:prod": "./scripts/build-electron.sh --prod --dmg",
-    "clean:android": "adb uninstall app.timesafari.app || true",
+    "markdown:fix": "./scripts/fix-markdown.sh",
+    "markdown:check": "./scripts/validate-markdown.sh",
+    "markdown:setup": "./scripts/setup-markdown-hooks.sh",
+    "prepare": "husky",
+    "guard": "bash ./scripts/build-arch-guard.sh",
+    "guard:test": "bash ./scripts/build-arch-guard.sh --staged",
+    "guard:setup": "npm run prepare && echo '✅ Build Architecture Guard is now active!'",
+    "clean:android": "./scripts/clean-android.sh",
    "clean:ios": "rm -rf ios/App/build ios/App/Pods ios/App/output ios/App/App/public ios/DerivedData ios/capacitor-cordova-ios-plugins ios/App/App/capacitor.config.json ios/App/App/config.xml || true",
    "clean:electron": "./scripts/build-electron.sh --clean",
    "clean:all": "npm run clean:ios && npm run clean:android && npm run clean:electron",
@@ -122,6 +131,12 @@
    "build:android:dev:run:custom": "./scripts/build-android.sh --dev --api-ip --auto-run",
    "build:android:test:run:custom": "./scripts/build-android.sh --test --api-ip --auto-run"
  },
+  "lint-staged": {
+    "*.{js,ts,vue,css,md,json,yml,yaml}": "eslint --fix || true"
+  },
+  "commitlint": {
+    "extends": ["@commitlint/config-conventional"]
+  },
  "dependencies": {
    "@capacitor-community/electron": "^5.0.1",
    "@capacitor-community/sqlite": "6.0.2",
@@ -200,9 +215,9 @@
    "three": "^0.156.1",
    "ua-parser-js": "^1.0.37",
    "uint8arrays": "^5.0.0",
-    "vue": "^3.5.13",
+    "vue": "3.5.13",
    "vue-axios": "^3.5.2",
-    "vue-facing-decorator": "^3.0.4",
+    "vue-facing-decorator": "3.0.4",
    "vue-picture-cropper": "^0.7.0",
    "vue-qrcode-reader": "^5.5.3",
    "vue-router": "^4.5.0",
@@ -241,6 +256,10 @@
    "jest": "^30.0.4",
    "markdownlint": "^0.37.4",
    "markdownlint-cli": "^0.44.0",
+    "husky": "^9.0.11",
+    "lint-staged": "^15.2.2",
+    "@commitlint/cli": "^18.6.1",
+    "@commitlint/config-conventional": "^18.6.2",
    "npm-check-updates": "^17.1.13",
    "path-browserify": "^1.0.1",
    "postcss": "^8.4.38",

47
pull_request_template.md

@@ -0,0 +1,47 @@
# Build Architecture Guard PR Template
## Change Level
- [ ] Level: **L1** / **L2** / **L3** (pick one)
**Why:** …
## Scope & Impact
- [ ] Files & platforms touched: …
- [ ] Risk triggers (env / script flow / packaging / SW+WASM /
Docker / signing): …
- [ ] Mitigations/validation done: …
## Commands Run (paste exact logs/snips)
- [ ] Web: `npm run build:web` / `:prod`
- [ ] Electron: `npm run build:electron:dev` / package step
- [ ] Mobile: `npm run build:android:test` / iOS equivalent
- [ ] Clean/auto-run impacted scripts
## Artifacts
- [ ] Names + **sha256** of artifacts/installers:
Artifacts:
```text
<name-1> <sha256-1>
<name-2> <sha256-2>
```
## Docs
- [ ] **BUILDING.md** updated (sections): …
- [ ] Troubleshooting updated (if applicable)
## Rollback
- [ ] Verified steps (1–3 cmds) to restore previous behavior
## L3 only
- [ ] ADR link:
ADR: https://…
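For the artifact checklist above, the name/sha256 pairs can be generated straight from the build outputs; a small sketch (the artifact paths are illustrative):

```bash
# sha256sum prints "<sha256>  <path>"; swap the columns to match the "<name> <sha256>" layout above
sha256sum dist/*.AppImage android/app/build/outputs/apk/release/*.apk 2>/dev/null | awk '{ print $2, $1 }'
```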

3
resources/README.md

@@ -27,12 +27,14 @@ resources/
## Asset Requirements
### Icon Requirements
- **Format**: PNG
- **Size**: 1024x1024 pixels minimum
- **Background**: Transparent or solid color
- **Content**: App logo/icon
### Splash Screen Requirements
- **Format**: PNG
- **Size**: 1242x2688 pixels (iPhone 11 Pro Max size)
- **Background**: Solid color or gradient
@@ -70,6 +72,7 @@ Asset generation is configured in `capacitor-assets.config.json` at the project
## Build Integration
Assets are automatically generated as part of the build process:
- `npm run build:android` - Generates Android assets
- `npm run build:ios` - Generates iOS assets
- `npm run build:web` - Generates web assets

7
scripts/README.md

@@ -31,6 +31,7 @@ All scripts automatically handle environment variables for different build types
#### Automatic Environment Setup
Each script automatically:
1. **Sets platform-specific variables** based on build type
2. **Gets git hash** for versioning (`VITE_GIT_HASH`)
3. **Creates application directories** (`~/.local/share/TimeSafari/timesafari`)
@@ -104,6 +105,7 @@ exit 0
## Benefits of Unification
### Before (Redundant)
```bash
# Each script had 50+ lines of duplicate code:
readonly RED='\033[0;31m'
@@ -121,6 +123,7 @@ export VITE_PWA_ENABLED=false
```
### After (Unified)
```bash
# Each script is now ~20 lines of focused logic:
source "$(dirname "$0")/common.sh"
@@ -133,6 +136,7 @@ print_footer "Script Title"
## Usage Examples
### Running Tests
```bash
# Run all tests
./scripts/test-all.sh
@@ -189,6 +193,7 @@ export NODE_ENV=production
```
### .env File Support
Scripts automatically load variables from `.env` files if they exist:
```bash
@@ -199,6 +204,7 @@ CUSTOM_VAR=value
```
### Environment Validation
Required environment variables can be validated:
```bash
@@ -207,6 +213,7 @@ validate_env_vars "VITE_API_URL" "VITE_DEBUG" || exit 1
```
### Environment Inspection
View current environment variables with the `--env` flag:
```bash

34
scripts/build-android.sh

@@ -49,6 +49,31 @@ set -e
# Source common utilities
source "$(dirname "$0")/common.sh"
+# Function to validate critical dependencies
+validate_dependencies() {
+    log_info "Validating critical dependencies..."
+    # Check if node_modules exists
+    if [ ! -d "node_modules" ]; then
+        log_error "node_modules directory not found. Please run 'npm install' first."
+        exit 1
+    fi
+    # Check if tsx is available
+    if [ ! -f "node_modules/.bin/tsx" ]; then
+        log_error "tsx dependency not found. Please run 'npm install' first."
+        exit 1
+    fi
+    # Check if capacitor-assets is available
+    if [ ! -f "node_modules/.bin/capacitor-assets" ]; then
+        log_error "capacitor-assets dependency not found. Please run 'npm install' first."
+        exit 1
+    fi
+    log_success "All critical dependencies validated successfully"
+}
# Default values
BUILD_MODE="development"
BUILD_TYPE="debug"
@@ -179,6 +204,11 @@ parse_android_args "$@"
# Print build header
print_header "TimeSafari Android Build Process"
+# Validate dependencies before proceeding
+validate_dependencies
+# Log build start
log_info "Starting Android build process at $(date)"
log_info "Build mode: $BUILD_MODE"
log_info "Build type: $BUILD_TYPE"
@@ -257,6 +287,7 @@ fi
# Step 1: Validate asset configuration
safe_execute "Validating asset configuration" "npm run assets:validate" || {
    log_warn "Asset validation found issues, but continuing with build..."
+    log_info "If you encounter build failures, please run 'npm install' first to ensure all dependencies are available."
}
# Step 2: Clean Android app
@@ -337,6 +368,9 @@ if [ "$OPEN_STUDIO" = true ]; then
    log_info "Android Studio: opened"
fi
+# Reminder about dependency management
+log_info "💡 Tip: If you encounter dependency issues, run 'npm install' to ensure all packages are up to date."
print_footer "Android Build"
# Exit with success

187
scripts/build-arch-guard.sh

@@ -0,0 +1,187 @@
#!/usr/bin/env bash
#
# Build Architecture Guard Script
#
# Author: Matthew Raymer
# Date: 2025-08-20
# Purpose: Protects build-critical files by requiring BUILDING.md updates
#
# Usage:
# ./scripts/build-arch-guard.sh --staged # Check staged files (pre-commit)
# ./scripts/build-arch-guard.sh --range # Check range (pre-push)
# ./scripts/build-arch-guard.sh # Check working directory
#
set -euo pipefail
# Sensitive paths that require BUILDING.md updates when modified
SENSITIVE=(
"vite.config.*"
"scripts/**"
"electron/**"
"android/**"
"ios/**"
"sw_scripts/**"
"sw_combine.js"
"Dockerfile"
"docker/**"
"capacitor.config.ts"
"package.json"
"package-lock.json"
"yarn.lock"
"pnpm-lock.yaml"
)
# Documentation files that must be updated alongside sensitive changes
DOCS_REQUIRED=("BUILDING.md")
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
log_info() {
echo -e "${BLUE}[guard]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[guard]${NC} $1"
}
log_error() {
echo -e "${RED}[guard]${NC} $1"
}
log_success() {
echo -e "${GREEN}[guard]${NC} $1"
}
# Collect files based on mode
collect_files() {
if [[ "${1:-}" == "--staged" ]]; then
# Pre-commit: check staged files
git diff --name-only --cached
elif [[ "${1:-}" == "--range" ]]; then
# Pre-push: check commits being pushed
RANGE="${2:-HEAD~1..HEAD}"
git diff --name-only "$RANGE"
else
# Default: check working directory changes
git diff --name-only HEAD
fi
}
# Check if a file matches any sensitive pattern
matches_sensitive() {
local f="$1"
for pat in "${SENSITIVE[@]}"; do
# Convert glob pattern to regex
local rx="^${pat//\./\.}$"
rx="${rx//\*\*/.*}"
rx="${rx//\*/[^/]*}"
if [[ "$f" =~ $rx ]]; then
return 0
fi
done
return 1
}
# Check if documentation was updated
check_docs_updated() {
local changed_files=("$@")
for changed_file in "${changed_files[@]}"; do
for required_doc in "${DOCS_REQUIRED[@]}"; do
if [[ "$changed_file" == "$required_doc" ]]; then
return 0
fi
done
done
return 1
}
# Main guard logic
main() {
local mode="${1:-}"
local arg="${2:-}"
log_info "Running Build Architecture Guard..."
# Collect changed files
mapfile -t changed_files < <(collect_files "$mode" "$arg")
if [[ ${#changed_files[@]} -eq 0 ]]; then
log_info "No files changed, guard check passed"
exit 0
fi
log_info "Checking ${#changed_files[@]} changed files..."
# Find sensitive files that were touched
sensitive_touched=()
for file in "${changed_files[@]}"; do
if matches_sensitive "$file"; then
sensitive_touched+=("$file")
fi
done
# If no sensitive files were touched, allow the change
if [[ ${#sensitive_touched[@]} -eq 0 ]]; then
log_success "No build-sensitive files changed, guard check passed"
exit 0
fi
# Sensitive files were touched, log them
log_warn "Build-sensitive paths changed:"
for file in "${sensitive_touched[@]}"; do
echo " - $file"
done
# Check if required documentation was updated
if check_docs_updated "${changed_files[@]}"; then
log_success "BUILDING.md updated alongside build changes, guard check passed"
exit 0
else
log_error "Build-sensitive files changed but BUILDING.md was not updated!"
echo
echo "The following build-sensitive files were modified:"
for file in "${sensitive_touched[@]}"; do
echo " - $file"
done
echo
echo "When modifying build-critical files, you must also update BUILDING.md"
echo "to document any changes to the build process."
echo
echo "Please:"
echo " 1. Update BUILDING.md with relevant changes"
echo " 2. Stage the BUILDING.md changes: git add BUILDING.md"
echo " 3. Retry your commit/push"
echo
exit 2
fi
}
# Handle help flag
if [[ "${1:-}" =~ ^(-h|--help)$ ]]; then
echo "Build Architecture Guard Script"
echo
echo "Usage:"
echo " $0 [--staged|--range [RANGE]]"
echo
echo "Options:"
echo " --staged Check staged files (for pre-commit hook)"
echo " --range [RANGE] Check git range (for pre-push hook)"
echo " Default range: HEAD~1..HEAD"
echo " (no args) Check working directory changes"
echo
echo "Examples:"
echo " $0 --staged # Pre-commit check"
echo " $0 --range origin/main..HEAD # Pre-push check"
echo " $0 # Working directory check"
exit 0
fi
main "$@"
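The same checks are exposed through the npm wrappers added in package.json above; for a local dry run before pushing, something like the following works (the `origin/master..HEAD` range is just an example):

```bash
npm run guard:test                                                  # staged files only (pre-commit style)
bash ./scripts/build-arch-guard.sh --range "origin/master..HEAD"    # commits a push would carry
```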

8
scripts/build-ios.sh

@@ -173,20 +173,20 @@ check_ios_resources() {
    # Check for required assets
    if [ ! -f "assets/icon.png" ]; then
-        log_warning "App icon not found at assets/icon.png"
+        log_warn "App icon not found at assets/icon.png"
    fi
    if [ ! -f "assets/splash.png" ]; then
-        log_warning "Splash screen not found at assets/splash.png"
+        log_warn "Splash screen not found at assets/splash.png"
    fi
    # Check for iOS-specific files
    if [ ! -f "ios/App/App/Info.plist" ]; then
-        log_warning "Info.plist not found"
+        log_warn "Info.plist not found"
    fi
    if [ ! -f "ios/App/App/AppDelegate.swift" ]; then
-        log_warning "AppDelegate.swift not found"
+        log_warn "AppDelegate.swift not found"
    fi
    log_success "iOS resource check completed"

110
scripts/check-dependencies.sh

@@ -0,0 +1,110 @@
#!/bin/bash
# check-dependencies.sh
# Author: Matthew Raymer
# Date: 2025-08-19
# Description: Dependency validation script for TimeSafari development environment
# This script checks for critical dependencies required for building the application.
# Exit on any error
set -e
# Source common utilities
source "$(dirname "$0")/common.sh"
print_header "TimeSafari Dependency Validation"
log_info "Checking development environment dependencies..."
# Check Node.js version
if command -v node &> /dev/null; then
NODE_VERSION=$(node --version)
log_info "Node.js version: $NODE_VERSION"
# Extract major version number
MAJOR_VERSION=$(echo $NODE_VERSION | sed 's/v\([0-9]*\)\..*/\1/')
if [ "$MAJOR_VERSION" -lt 18 ]; then
log_error "Node.js version $NODE_VERSION is too old. Please upgrade to Node.js 18 or later."
exit 1
fi
else
log_error "Node.js is not installed. Please install Node.js 18 or later."
exit 1
fi
# Check npm version
if command -v npm &> /dev/null; then
NPM_VERSION=$(npm --version)
log_info "npm version: $NPM_VERSION"
else
log_error "npm is not installed. Please install npm."
exit 1
fi
# Check if node_modules exists
if [ ! -d "node_modules" ]; then
log_error "node_modules directory not found."
log_info "Please run: npm install"
exit 1
fi
# Check critical dependencies
log_info "Validating critical packages..."
CRITICAL_DEPS=("tsx" "capacitor-assets" "vite")
for dep in "${CRITICAL_DEPS[@]}"; do
if [ -f "node_modules/.bin/$dep" ]; then
log_success "$dep found"
else
log_error "$dep not found in node_modules/.bin"
log_info "This usually means the package wasn't installed properly."
log_info "Try running: npm install"
exit 1
fi
done
# Check TypeScript via npx
if npx tsc --version &> /dev/null; then
TSC_VERSION=$(npx tsc --version)
log_success "✓ TypeScript found: $TSC_VERSION"
else
log_error "✗ TypeScript not accessible via npx"
log_info "Try running: npm install"
exit 1
fi
# Check Capacitor CLI
if command -v npx &> /dev/null; then
if npx cap --version &> /dev/null; then
CAP_VERSION=$(npx cap --version)
log_success "✓ Capacitor CLI version: $CAP_VERSION"
else
log_error "✗ Capacitor CLI not accessible via npx"
log_info "Try running: npm install @capacitor/cli"
exit 1
fi
else
log_error "npx is not available. Please ensure npm is properly installed."
exit 1
fi
# Check Android development tools
if command -v adb &> /dev/null; then
log_success "✓ Android Debug Bridge (adb) found"
else
log_warn "⚠ Android Debug Bridge (adb) not found"
log_info "This is only needed for Android development and testing."
fi
if command -v gradle &> /dev/null; then
GRADLE_VERSION=$(gradle --version | head -n 1)
log_success "✓ Gradle found: $GRADLE_VERSION"
else
log_warn "⚠ Gradle not found in PATH"
log_info "This is only needed if building outside of Android Studio."
fi
log_success "Dependency validation completed successfully!"
log_info "Your development environment is ready for TimeSafari development."
print_footer "Dependency Validation"
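The script is wired up as the `check:dependencies` entry shown in the package.json hunk above, so either form works:

```bash
npm run check:dependencies        # via the new package.json script
./scripts/check-dependencies.sh   # direct invocation
```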

62
scripts/clean-android.sh

@@ -0,0 +1,62 @@
#!/bin/bash
# clean-android.sh
# Author: Matthew Raymer
# Date: 2025-08-19
# Description: Clean Android app with timeout protection to prevent hanging
# This script safely uninstalls the TimeSafari app from connected Android devices
# with a 30-second timeout to prevent indefinite hanging.
# Exit on any error
set -e
# Source common utilities
source "$(dirname "$0")/common.sh"
# Function to implement timeout for systems without timeout command
timeout_command() {
local timeout_seconds="$1"
shift
# Check if timeout command exists
if command -v timeout &> /dev/null; then
timeout "$timeout_seconds" "$@"
else
# Fallback for systems without timeout (like macOS)
# Use perl to implement timeout
perl -e '
eval {
local $SIG{ALRM} = sub { die "timeout" };
alarm shift;
system @ARGV;
alarm 0;
};
if ($@) { exit 1; }
' "$timeout_seconds" "$@"
fi
}
log_info "Starting Android cleanup process..."
# Check if adb is available
if ! command -v adb &> /dev/null; then
log_error "adb command not found. Please install Android SDK Platform Tools."
exit 1
fi
# Check for connected devices
log_info "Checking for connected Android devices..."
if adb devices | grep -q 'device$'; then
log_info "Android device(s) found. Attempting to uninstall app..."
# Try to uninstall with timeout
if timeout_command 30 adb uninstall app.timesafari.app; then
log_success "Successfully uninstalled TimeSafari app"
else
log_warn "Uninstall failed or timed out after 30 seconds"
log_info "This is normal if the app wasn't installed or device is unresponsive"
fi
else
log_info "No Android devices connected. Skipping uninstall."
fi
log_success "Android cleanup process completed"

19
scripts/fix-markdown.sh

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -euo pipefail
echo "🔧 Auto-fixing markdown formatting..."
# Check if markdownlint is available
if ! command -v npx &> /dev/null; then
echo "❌ npx not found. Please install Node.js and npm first."
exit 1
fi
# Run markdownlint with auto-fix on project markdown files (exclude node_modules)
echo "📝 Fixing project markdown files..."
npx markdownlint "*.md" "*.mdc" "scripts/**/*.md" "src/**/*.md" "test-playwright/**/*.md" "resources/**/*.md" --config .markdownlint.json --fix 2>/dev/null || {
echo "⚠️ Some issues could not be auto-fixed. Check manually."
}
echo "✅ Markdown auto-fix complete!"
echo "💡 Run 'npm run markdown:check' to verify all issues are resolved."

124
scripts/git-hooks/README.md

@@ -0,0 +1,124 @@
# TimeSafari Git Hooks
This directory contains custom Git hooks for the TimeSafari project.
## Debug Code Checker Hook
### Overview
The `pre-commit` hook automatically checks for debug code when committing to protected branches (master, main, production, release). This prevents debug statements from accidentally reaching production code.
### How It Works
1. **Branch Detection**: Only runs on protected branches (configurable)
2. **File Filtering**: Automatically skips test files, scripts, and documentation
3. **Pattern Matching**: Detects common debug patterns using regex
4. **Commit Prevention**: Blocks commits containing debug code
### Protected Branches (Default)
- `master`
- `main`
- `production`
- `release`
- `stable`
### Debug Patterns Detected
- **Console statements**: `console.log`, `console.debug`, `console.error`
- **Template debug**: `Debug:`, `debug:` in Vue templates
- **Debug constants**: `DEBUG_`, `debug_` variables
- **HTML debug**: `<!-- debug` comments
- **Debug attributes**: `debug="true"` attributes
- **Vue debug**: `v-if="debug"`, `v-show="debug"`
- **Debug TODOs**: `TODO debug`, `FIXME debug`
### Files Automatically Skipped
- Test files: `*.test.js`, `*.spec.ts`, `*.test.vue`
- Scripts: `scripts/` directory
- Test directories: `test-*` directories
- Documentation: `docs/`, `*.md`, `*.txt`
- Config files: `*.json`, `*.yml`, `*.yaml`
- IDE files: `.cursor/` directory
### Configuration
Edit `.git/hooks/debug-checker.config` to customize:
- Protected branches
- Debug patterns
- Skip patterns
- Logging level
### Testing the Hook
Run the test script to verify the hook works:
```bash
./scripts/test-debug-hook.sh
```
### Manual Testing
1. Make changes to a file with debug code
2. Stage the file: `git add <filename>`
3. Try to commit: `git commit -m 'test'`
4. Hook should prevent commit if debug code is found
### Bypassing the Hook (Emergency)
If you absolutely need to commit debug code to a protected branch:
```bash
git commit --no-verify -m "emergency: debug code needed"
```
⚠️ **Warning**: This bypasses all pre-commit hooks. Use sparingly and only in emergencies.
### Troubleshooting
#### Hook not running
- Ensure the hook is executable: `chmod +x .git/hooks/pre-commit`
- Check if you're on a protected branch
- Verify the hook file exists and has correct permissions
#### False positives
- Add legitimate debug patterns to skip patterns in config
- Use proper logging levels (`logger.info`, `logger.debug`) instead of console
- Move debug code to feature branches first
#### Hook too strict
- Modify debug patterns in config file
- Add more file types to skip patterns
- Adjust protected branch list
### Best Practices
1. **Use feature branches** for development with debug code
2. **Use proper logging** instead of console statements
3. **Test thoroughly** before merging to protected branches
4. **Review commits** to ensure no debug code slips through
5. **Keep config updated** as project needs change
### Integration with CI/CD
This hook works locally. For CI/CD pipelines, consider:
- Running the same checks in your build process
- Adding ESLint rules for console statements
- Using TypeScript strict mode
- Adding debug code detection to PR checks
### Support
If you encounter issues:
1. Check the hook output for specific error messages
2. Verify your branch is in the protected list
3. Review the configuration file
4. Test with the provided test script
5. Check file permissions and git setup

86
scripts/git-hooks/debug-checker.config

@@ -0,0 +1,86 @@
# TimeSafari Debug Checker Configuration
# Edit this file to customize protected branches and debug patterns
# Protected branches where debug code checking is enforced
# Add or remove branches as needed
PROTECTED_BRANCHES=(
"master"
"main"
"production"
"release"
"stable"
)
# Debug patterns to detect (regex patterns)
# Add or remove patterns as needed
DEBUG_PATTERNS=(
# Console statements
"console\."
# Template debug text
"Debug:"
"debug:"
# Debug constants and variables
"DEBUG_"
"debug_"
# HTML debug comments
"<!-- debug"
# Debug attributes
"debug.*="
# Vue debug patterns
"v-if.*debug"
"v-show.*debug"
# Common debug text
"TODO.*debug"
"FIXME.*debug"
# Debug imports (uncomment if you want to catch these)
# "import.*debug"
# "require.*debug"
)
# Files and directories to skip during checking
# Add patterns to exclude from debug checking
SKIP_PATTERNS=(
"\.(test|spec)\.(js|ts|vue)$" # Test files (must have .test. or .spec.)
"^scripts/" # Scripts directory
"^test-.*/" # Test directories (must end with /)
"^\.git/" # Git directory
"^node_modules/" # Dependencies
"^docs/" # Documentation
"^\.cursor/" # Cursor IDE files
"\.md$" # Markdown files
"\.txt$" # Text files
"\.json$" # JSON config files
"\.yml$" # YAML config files
"\.yaml$" # YAML config files
)
# Files that are whitelisted for console statements
# These files may contain intentional console.log statements that are
# properly whitelisted with eslint-disable-next-line no-console comments
WHITELIST_FILES=(
"src/services/platforms/WebPlatformService.ts" # Worker context logging
"src/services/platforms/CapacitorPlatformService.ts" # Platform-specific logging
"src/services/platforms/ElectronPlatformService.ts" # Electron-specific logging
"src/services/QRScanner/.*" # QR Scanner services
"src/utils/logger.ts" # Logger utility itself
"src/utils/LogCollector.ts" # Log collection utilities
"scripts/.*" # Build and utility scripts
"test-.*/.*" # Test directories
".*\.test\..*" # Test files
".*\.spec\..*" # Spec files
)
# Logging level (debug, info, warn, error)
LOG_LEVEL="info"
# Exit codes
EXIT_SUCCESS=0
EXIT_DEBUG_FOUND=1
EXIT_ERROR=2

252
scripts/git-hooks/pre-commit

@@ -0,0 +1,252 @@
#!/bin/bash
# TimeSafari Pre-commit Hook - Debug Code Checker
# Only runs on master or specified branches to catch debug code before it reaches production
# Hook directory
HOOK_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONFIG_FILE="$HOOK_DIR/debug-checker.config"
# Default configuration (fallback if config file is missing)
DEFAULT_PROTECTED_BRANCHES=("master" "main" "production" "release")
DEFAULT_DEBUG_PATTERNS=(
"console\."
"Debug:"
"debug:"
"DEBUG_"
"debug_"
"<!-- debug"
"debug.*="
)
DEFAULT_WHITELIST_FILES=(
"src/services/platforms/WebPlatformService.ts"
"src/services/platforms/CapacitorPlatformService.ts"
"src/services/platforms/ElectronPlatformService.ts"
)
# Load configuration from file if it exists
load_config() {
if [[ -f "$CONFIG_FILE" ]]; then
# Source the config file to load variables
# We'll use a safer approach by reading and parsing
PROTECTED_BRANCHES=()
DEBUG_PATTERNS=()
SKIP_PATTERNS=()
WHITELIST_FILES=()
# Read protected branches
while IFS= read -r line; do
if [[ "$line" =~ ^PROTECTED_BRANCHES=\( ]]; then
# Start reading array
while IFS= read -r line; do
if [[ "$line" =~ ^\)$ ]]; then
break
fi
if [[ "$line" =~ \"([^\"]+)\" ]]; then
PROTECTED_BRANCHES+=("${BASH_REMATCH[1]}")
fi
done
fi
done < "$CONFIG_FILE"
# Read debug patterns
while IFS= read -r line; do
if [[ "$line" =~ ^DEBUG_PATTERNS=\( ]]; then
while IFS= read -r line; do
if [[ "$line" =~ ^\)$ ]]; then
break
fi
if [[ "$line" =~ \"([^\"]+)\" ]]; then
DEBUG_PATTERNS+=("${BASH_REMATCH[1]}")
fi
done
fi
done < "$CONFIG_FILE"
# Read skip patterns
while IFS= read -r line; do
if [[ "$line" =~ ^SKIP_PATTERNS=\( ]]; then
while IFS= read -r line; do
if [[ "$line" =~ ^\)$ ]]; then
break
fi
if [[ "$line" =~ \"([^\"]+)\" ]]; then
SKIP_PATTERNS+=("${BASH_REMATCH[1]}")
fi
done
fi
done < "$CONFIG_FILE"
# Read whitelist files
while IFS= read -r line; do
if [[ "$line" =~ ^WHITELIST_FILES=\( ]]; then
while IFS= read -r line; do
if [[ "$line" =~ ^\)$ ]]; then
break
fi
if [[ "$line" =~ \"([^\"]+)\" ]]; then
WHITELIST_FILES+=("${BASH_REMATCH[1]}")
fi
done
fi
done < "$CONFIG_FILE"
fi
# Use defaults if config loading failed
if [[ ${#PROTECTED_BRANCHES[@]} -eq 0 ]]; then
PROTECTED_BRANCHES=("${DEFAULT_PROTECTED_BRANCHES[@]}")
fi
if [[ ${#DEBUG_PATTERNS[@]} -eq 0 ]]; then
DEBUG_PATTERNS=("${DEFAULT_DEBUG_PATTERNS[@]}")
fi
if [[ ${#SKIP_PATTERNS[@]} -eq 0 ]]; then
SKIP_PATTERNS=("${DEFAULT_SKIP_PATTERNS[@]}")
fi
if [[ ${#WHITELIST_FILES[@]} -eq 0 ]]; then
WHITELIST_FILES=("${DEFAULT_WHITELIST_FILES[@]}")
fi
}
# Check if current branch is protected
is_protected_branch() {
local branch="$1"
for protected in "${PROTECTED_BRANCHES[@]}"; do
if [[ "$branch" == "$protected" ]]; then
return 0
fi
done
return 1
}
# Check if file should be skipped
should_skip_file() {
local file="$1"
for pattern in "${SKIP_PATTERNS[@]}"; do
if [[ "$file" =~ $pattern ]]; then
return 0
fi
done
return 1
}
# Check if file is whitelisted for console statements
is_whitelisted_file() {
local file="$1"
for whitelisted in "${WHITELIST_FILES[@]}"; do
if [[ "$file" =~ $whitelisted ]]; then
return 0
fi
done
return 1
}
# Main execution
main() {
# Load configuration
load_config
# Get current branch name
CURRENT_BRANCH=$(git symbolic-ref --short HEAD 2>/dev/null)
if [[ -z "$CURRENT_BRANCH" ]]; then
echo "⚠️ Could not determine current branch, skipping debug check"
exit 0
fi
# Check if we should run the hook
if ! is_protected_branch "$CURRENT_BRANCH"; then
echo "🔒 Pre-commit hook skipped - not on protected branch ($CURRENT_BRANCH)"
echo " Protected branches: ${PROTECTED_BRANCHES[*]}"
exit 0
fi
echo "🔍 Running debug code check on protected branch: $CURRENT_BRANCH"
echo " Using config: $CONFIG_FILE"
# Get all staged files (modified, added, copied, merged)
ALL_STAGED_FILES=$(git diff --cached --name-only)
if [ -z "$ALL_STAGED_FILES" ]; then
echo "✅ No staged files to check"
exit 0
fi
# Initialize error tracking
ERRORS_FOUND=0
ERROR_MESSAGES=()
FILES_CHECKED=0
# Check each staged file for debug patterns
for file in $ALL_STAGED_FILES; do
# Skip files that should be ignored
if should_skip_file "$file"; then
continue
fi
FILES_CHECKED=$((FILES_CHECKED + 1))
# Check for debug patterns in the file
for pattern in "${DEBUG_PATTERNS[@]}"; do
# Skip console pattern checks for whitelisted files
if [[ "$pattern" == "console\." ]] && is_whitelisted_file "$file"; then
continue
fi
# For new files, check the file content directly
# For modified files, check the staged diff
if [[ -f "$file" ]]; then
# New file - check content directly
if grep -E "$pattern" "$file" > /dev/null; then
ERRORS_FOUND=$((ERRORS_FOUND + 1))
ERROR_MESSAGES+=("🚨 $file: Found debug pattern '$pattern'")
fi
else
# Modified file - check staged diff
if git diff --cached "$file" | grep -E "$pattern" > /dev/null; then
ERRORS_FOUND=$((ERRORS_FOUND + 1))
ERROR_MESSAGES+=("🚨 $file: Found debug pattern '$pattern'")
fi
fi
done
done
# Report results
if [ $ERRORS_FOUND -gt 0 ]; then
echo ""
echo "❌ Debug code detected in staged files!"
echo " Branch: $CURRENT_BRANCH"
echo " Files checked: $FILES_CHECKED"
echo " Errors found: $ERRORS_FOUND"
echo ""
for msg in "${ERROR_MESSAGES[@]}"; do
echo " $msg"
done
echo ""
echo "💡 Please remove debug code before committing to $CURRENT_BRANCH"
echo " Common debug patterns to check:"
echo " - console.log, console.debug, console.error"
echo " - Debug: or debug: in templates"
echo " - DEBUG_ constants"
echo " - HTML comments with debug"
echo ""
echo " If debug code is intentional, consider:"
echo " - Moving to a feature branch first"
echo " - Using proper logging levels (logger.info, logger.debug)"
echo " - Adding debug code to .gitignore or .debugignore"
echo ""
echo " Configuration file: $CONFIG_FILE"
exit 1
else
echo "✅ No debug code found in $FILES_CHECKED staged files"
exit 0
fi
}
# Run main function
main "$@"

171
scripts/install-debug-hook.sh

@@ -0,0 +1,171 @@
#!/bin/bash
# TimeSafari Debug Hook Installer
# Run this script in any repository to install the debug pre-commit hook
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}🔧 TimeSafari Debug Hook Installer${NC}"
echo "============================================="
# Check if we're in a git repository
if ! git rev-parse --git-dir > /dev/null 2>&1; then
echo -e "${RED}❌ Error: Not in a git repository${NC}"
echo "Please run this script from within a git repository"
exit 1
fi
# Get repository root
REPO_ROOT=$(git rev-parse --show-toplevel)
HOOKS_DIR="$REPO_ROOT/.git/hooks"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo -e "${BLUE}Repository:${NC} $REPO_ROOT"
echo -e "${BLUE}Hooks directory:${NC} $HOOKS_DIR"
echo -e "${BLUE}Script directory:${NC} $SCRIPT_DIR"
# Check if hooks directory exists
if [[ ! -d "$HOOKS_DIR" ]]; then
echo -e "${RED}❌ Error: Hooks directory not found${NC}"
echo "This repository may not be properly initialized"
exit 1
fi
# Check if we have the hook files in the repository
HOOK_SCRIPT="$SCRIPT_DIR/git-hooks/pre-commit"
CONFIG_FILE="$SCRIPT_DIR/git-hooks/debug-checker.config"
if [[ ! -f "$HOOK_SCRIPT" ]]; then
echo -e "${RED}❌ Error: Pre-commit hook script not found${NC}"
echo "Expected location: $HOOK_SCRIPT"
echo "Make sure you're running this from the TimeSafari repository"
exit 1
fi
if [[ ! -f "$CONFIG_FILE" ]]; then
echo -e "${RED}❌ Error: Debug checker config not found${NC}"
echo "Expected location: $CONFIG_FILE"
echo "Make sure you're running this from the TimeSafari repository"
exit 1
fi
# Check if already installed
if [[ -f "$HOOKS_DIR/pre-commit" && -f "$HOOKS_DIR/debug-checker.config" ]]; then
echo -e "${YELLOW}⚠️ Debug hook already appears to be installed${NC}"
echo -e " Checking if update is needed..."
# Check if files are different
if diff "$HOOK_SCRIPT" "$HOOKS_DIR/pre-commit" > /dev/null 2>&1; then
echo -e " ${GREEN}${NC} Hook script is up to date"
HOOK_UP_TO_DATE=true
else
echo -e " ${YELLOW}⚠️ Hook script differs - will update${NC}"
HOOK_UP_TO_DATE=false
fi
if diff "$CONFIG_FILE" "$HOOKS_DIR/debug-checker.config" > /dev/null 2>&1; then
echo -e " ${GREEN}${NC} Config file is up to date"
CONFIG_UP_TO_DATE=true
else
echo -e " ${YELLOW}⚠️ Config file differs - will update${NC}"
CONFIG_UP_TO_DATE=false
fi
if [[ "$HOOK_UP_TO_DATE" == true && "$CONFIG_UP_TO_DATE" == true ]]; then
echo -e "\n${GREEN}✅ Debug hook is already up to date!${NC}"
echo -e " No installation needed"
else
echo -e "\n${BLUE}Updating existing installation...${NC}"
fi
else
echo -e "\n${BLUE}Installing debug hook...${NC}"
fi
# Copy/update the hook script if needed
if [[ "$HOOK_UP_TO_DATE" != true ]]; then
cp "$HOOK_SCRIPT" "$HOOKS_DIR/pre-commit"
chmod +x "$HOOKS_DIR/pre-commit"
echo -e " ${GREEN}${NC} Pre-commit hook installed/updated"
fi
# Copy/update the config file if needed
if [[ "$CONFIG_UP_TO_DATE" != true ]]; then
cp "$CONFIG_FILE" "$HOOKS_DIR/debug-checker.config"
echo -e " ${GREEN}${NC} Configuration file installed/updated"
fi
# Copy/update the README if needed
README_FILE="$SCRIPT_DIR/git-hooks/README.md"
if [[ -f "$README_FILE" ]]; then
if [[ ! -f "$HOOKS_DIR/README.md" ]] || ! diff "$README_FILE" "$HOOKS_DIR/README.md" > /dev/null 2>&1; then
cp "$README_FILE" "$HOOKS_DIR/README.md"
echo -e " ${GREEN}${NC} Documentation installed/updated"
else
echo -e " ${GREEN}${NC} Documentation is up to date"
fi
fi
echo -e "\n${GREEN}🎉 Debug hook installation complete!${NC}"
# Test the installation
echo -e "\n${BLUE}Testing installation...${NC}"
if [[ -x "$HOOKS_DIR/pre-commit" ]]; then
echo -e " ${GREEN}${NC} Hook is executable"
else
echo -e " ${RED}${NC} Hook is not executable"
fi
if [[ -f "$HOOKS_DIR/debug-checker.config" ]]; then
echo -e " ${GREEN}${NC} Config file exists"
else
echo -e " ${RED}${NC} Config file missing"
fi
# Show current branch status
CURRENT_BRANCH=$(git symbolic-ref --short HEAD 2>/dev/null || echo "detached")
echo -e "\n${BLUE}Current branch:${NC} $CURRENT_BRANCH"
# Check if this is a protected branch
PROTECTED_BRANCHES=("master" "main" "production" "release" "stable")
IS_PROTECTED=false
for branch in "${PROTECTED_BRANCHES[@]}"; do
if [[ "$CURRENT_BRANCH" == "$branch" ]]; then
IS_PROTECTED=true
break
fi
done
if [[ "$IS_PROTECTED" == true ]]; then
echo -e "${YELLOW}⚠️ You're on a protected branch ($CURRENT_BRANCH)${NC}"
echo -e " The debug hook will now run on all commits to this branch"
echo -e " Consider switching to a feature branch for development"
else
echo -e "${GREEN}✅ You're on a feature branch ($CURRENT_BRANCH)${NC}"
echo -e " The debug hook will be skipped on this branch"
echo -e " You can develop with debug code freely"
fi
echo -e "\n${BLUE}Next steps:${NC}"
echo "1. The hook will now run automatically on protected branches"
echo "2. Test it by trying to commit a file with debug code"
echo "3. Use feature branches for development with debug code"
echo "4. Check the README.md in .git/hooks/ for more information"
echo -e "\n${BLUE}To test the hook:${NC}"
echo "1. Create a test file with debug code (e.g., console.log('test'))"
echo "2. Stage it: git add <filename>"
echo "3. Try to commit: git commit -m 'test'"
echo "4. The hook should prevent the commit if debug code is found"
echo -e "\n${BLUE}To uninstall:${NC}"
echo "rm $HOOKS_DIR/pre-commit"
echo "rm $HOOKS_DIR/debug-checker.config"
echo "rm $HOOKS_DIR/README.md"

214
scripts/setup-markdown-hooks.sh

@@ -0,0 +1,214 @@
#!/bin/bash
# Setup Markdown Pre-commit Hooks
# This script installs pre-commit hooks that automatically fix markdown formatting
set -e
echo "🔧 Setting up Markdown Pre-commit Hooks..."
# Check if pre-commit is installed
if ! command -v pre-commit &> /dev/null; then
echo "📦 Installing pre-commit..."
pip install pre-commit
else
echo "✅ pre-commit already installed"
fi
# Create .pre-commit-config.yaml if it doesn't exist
if [ ! -f .pre-commit-config.yaml ]; then
echo "📝 Creating .pre-commit-config.yaml..."
cat > .pre-commit-config.yaml << 'EOF'
repos:
- repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.38.0
hooks:
- id: markdownlint
args: [--fix, --config, .markdownlint.json]
files: \.(md|mdc)$
description: "Auto-fix markdown formatting issues"
stages: [commit]
additional_dependencies: [markdownlint-cli]
- repo: local
hooks:
- id: markdown-format-check
name: Markdown Format Validation
entry: bash -c 'echo "Checking markdown files..." && npx markdownlint --config .markdownlint.json "$@"'
language: system
files: \.(md|mdc)$
stages: [commit]
description: "Validate markdown formatting"
pass_filenames: true
- repo: local
hooks:
- id: markdown-line-length
name: Markdown Line Length Check
entry: bash -c '
for file in "$@"; do
if [[ "$file" =~ \.(md|mdc)$ ]]; then
echo "Checking line length in $file..."
if grep -q ".\{81,\}" "$file"; then
echo "❌ Line length violations found in $file"
echo "Lines exceeding 80 characters:"
grep -n ".\{81,\}" "$file" | head -5
exit 1
fi
fi
done
'
language: system
files: \.(md|mdc)$
stages: [commit]
description: "Check markdown line length (80 chars max)"
pass_filenames: true
- repo: local
hooks:
- id: markdown-blank-lines
name: Markdown Blank Line Validation
entry: bash -c '
for file in "$@"; do
if [[ "$file" =~ \.(md|mdc)$ ]]; then
echo "Checking blank lines in $file..."
# Check for multiple consecutive blank lines
if grep -q "^$" "$file" && grep -A1 "^$" "$file" | grep -q "^$"; then
echo "❌ Multiple consecutive blank lines found in $file"
exit 1
fi
# Check for missing blank lines around headings
if grep -B1 "^##" "$file" | grep -v "^##" | grep -v "^$" | grep -v "^--"; then
echo "❌ Missing blank line before heading in $file"
exit 1
fi
fi
done
'
language: system
files: \.(md|mdc)$
stages: [commit]
description: "Validate markdown blank line formatting"
pass_filenames: true
EOF
echo "✅ Created .pre-commit-config.yaml"
else
echo "✅ .pre-commit-config.yaml already exists"
fi
# Install the pre-commit hooks
echo "🔗 Installing pre-commit hooks..."
pre-commit install
# Install markdownlint if not present
if ! command -v npx &> /dev/null; then
echo "📦 Installing Node.js dependencies..."
npm install --save-dev markdownlint-cli
else
if ! npx markdownlint --version &> /dev/null; then
echo "📦 Installing markdownlint-cli..."
npm install --save-dev markdownlint-cli
else
echo "✅ markdownlint-cli already available"
fi
fi
# Create a markdown auto-fix script
echo "📝 Creating markdown auto-fix script..."
cat > scripts/fix-markdown.sh << 'EOF'
#!/bin/bash
# Auto-fix markdown formatting issues
# Usage: ./scripts/fix-markdown.sh [file_or_directory]
set -e
FIX_MARKDOWN() {
local target="$1"
if [ -f "$target" ]; then
# Fix single file
if [[ "$target" =~ \.(md|mdc)$ ]]; then
echo "🔧 Fixing markdown formatting in $target..."
npx markdownlint --fix "$target" || true
fi
elif [ -d "$target" ]; then
# Fix all markdown files in directory
echo "🔧 Fixing markdown formatting in $target..."
find "$target" -name "*.md" -o -name "*.mdc" | while read -r file; do
echo " Processing $file..."
npx markdownlint --fix "$file" || true
done
else
echo "❌ Target $target not found"
exit 1
fi
}
# Default to current directory if no target specified
TARGET="${1:-.}"
FIX_MARKDOWN "$TARGET"
echo "✅ Markdown formatting fixes applied!"
echo "💡 Run 'git diff' to see what was changed"
EOF
chmod +x scripts/fix-markdown.sh
# Create a markdown validation script
echo "📝 Creating markdown validation script..."
cat > scripts/validate-markdown.sh << 'EOF'
#!/bin/bash
# Validate markdown formatting without auto-fixing
# Usage: ./scripts/validate-markdown.sh [file_or_directory]
set -e
VALIDATE_MARKDOWN() {
local target="$1"
if [ -f "$target" ]; then
# Validate single file
if [[ "$target" =~ \.(md|mdc)$ ]]; then
echo "🔍 Validating markdown formatting in $target..."
npx markdownlint "$target"
fi
elif [ -d "$target" ]; then
# Validate all markdown files in directory
echo "🔍 Validating markdown formatting in $target..."
find "$target" -name "*.md" -o -name "*.mdc" | while read -r file; do
echo " Checking $file..."
npx markdownlint "$file" || true
done
else
echo "❌ Target $target not found"
exit 1
fi
}
# Default to current directory if no target specified
TARGET="${1:-.}"
VALIDATE_MARKDOWN "$TARGET"
echo "✅ Markdown validation complete!"
EOF
chmod +x scripts/validate-markdown.sh
echo ""
echo "🎉 Markdown Pre-commit Hooks Setup Complete!"
echo ""
echo "📋 What was installed:"
echo " ✅ pre-commit hooks for automatic markdown formatting"
echo " ✅ .pre-commit-config.yaml with markdown rules"
echo " ✅ scripts/fix-markdown.sh for manual fixes"
echo " ✅ scripts/validate-markdown.sh for validation"
echo ""
echo "🚀 Usage:"
echo " • Hooks run automatically on commit"
echo " • Manual fix: ./scripts/fix-markdown.sh [file/dir]"
echo " • Manual check: ./scripts/validate-markdown.sh [file/dir]"
echo " • Test hooks: pre-commit run --all-files"
echo ""
echo "💡 The hooks will now automatically fix markdown issues before commits!"

117
scripts/test-debug-hook.sh

@@ -0,0 +1,117 @@
#!/bin/bash
# Test script for the debug pre-commit hook
# This script helps verify that the hook is working correctly
set -e
echo "🧪 Testing TimeSafari Debug Pre-commit Hook"
echo "============================================="
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test directory
TEST_DIR="$(mktemp -d)"
echo -e "${BLUE}Created test directory: $TEST_DIR${NC}"
# Function to cleanup
cleanup() {
echo -e "${YELLOW}Cleaning up test directory...${NC}"
rm -rf "$TEST_DIR"
}
# Set trap to cleanup on exit
trap cleanup EXIT
# Function to run test
run_test() {
local test_name="$1"
local test_file="$2"
local expected_exit="$3"
echo -e "\n${BLUE}Running test: $test_name${NC}"
# Create test file
echo "$test_file" > "$TEST_DIR/test.vue"
# Stage the file
cd "$TEST_DIR"
git init > /dev/null 2>&1
git add test.vue > /dev/null 2>&1
# Run the hook
if bash ../../.git/hooks/pre-commit > hook_output.txt 2>&1; then
exit_code=0
else
exit_code=$?
fi
# Check result
if [[ $exit_code -eq $expected_exit ]]; then
echo -e " ${GREEN}✅ PASS${NC} - Exit code: $exit_code (expected: $expected_exit)"
else
echo -e " ${RED}❌ FAIL${NC} - Exit code: $exit_code (expected: $expected_exit)"
echo -e " ${YELLOW}Hook output:${NC}"
cat hook_output.txt
fi
# Cleanup git
rm -rf .git
rm -f hook_output.txt
}
# Test cases
echo -e "\n${BLUE}Test Case 1: Clean file (should pass)${NC}"
run_test "Clean file" "// No debug code here" 0
echo -e "\n${BLUE}Test Case 2: Console statement (should fail)${NC}"
run_test "Console statement" "console.log('debug info')" 1
echo -e "\n${BLUE}Test Case 3: Debug template (should fail)${NC}"
run_test "Debug template" "Debug: {{ isMapReady ? 'Map Ready' : 'Map Loading' }}" 1
echo -e "\n${BLUE}Test Case 4: Debug constant (should fail)${NC}"
run_test "Debug constant" "const DEBUG_MODE = true" 1
echo -e "\n${BLUE}Test Case 5: Mixed content (should fail)${NC}"
run_test "Mixed content" "// Some normal code\nconsole.debug('test')\n// More normal code" 1
echo -e "\n${BLUE}Test Case 6: HTML debug comment (should fail)${NC}"
run_test "HTML debug comment" "<!-- debug: this is debug info -->" 1
echo -e "\n${BLUE}Test Case 7: Debug attribute (should fail)${NC}"
run_test "Debug attribute" "<div debug='true'>content</div>" 1
echo -e "\n${BLUE}Test Case 8: Test file (should be skipped)${NC}"
run_test "Test file" "console.log('this should be skipped')" 0
# Test branch detection
echo -e "\n${BLUE}Testing branch detection...${NC}"
cd "$TEST_DIR"
git init > /dev/null 2>&1
git checkout -b feature-branch > /dev/null 2>&1
echo "console.log('debug')" > test.vue
git add test.vue > /dev/null 2>&1
if bash ../../.git/hooks/pre-commit > hook_output.txt 2>&1; then
echo -e " ${GREEN}✅ PASS${NC} - Hook skipped on feature branch"
else
echo -e " ${RED}❌ FAIL${NC} - Hook should have been skipped on feature branch"
echo -e " ${YELLOW}Hook output:${NC}"
cat hook_output.txt
fi
rm -rf .git
rm -f hook_output.txt
echo -e "\n${GREEN}🎉 All tests completed!${NC}"
echo -e "\n${BLUE}To test manually:${NC}"
echo "1. Make changes to a file with debug code"
echo "2. Stage the file: git add <filename>"
echo "3. Try to commit: git commit -m 'test'"
echo "4. The hook should prevent the commit if debug code is found"

19
scripts/validate-markdown.sh

@ -0,0 +1,19 @@
#!/usr/bin/env bash
set -euo pipefail
echo "🔍 Validating markdown formatting..."
# Check if markdownlint is available
if ! command -v npx &> /dev/null; then
echo "❌ npx not found. Please install Node.js and npm first."
exit 1
fi
# Run markdownlint on project markdown files (exclude node_modules)
echo "📝 Checking project markdown files..."
npx markdownlint "*.md" "*.mdc" "scripts/**/*.md" "src/**/*.md" "test-playwright/**/*.md" "resources/**/*.md" --config .markdownlint.json 2>/dev/null || {
echo "❌ Markdown validation failed. Run 'npm run markdown:fix' to auto-fix issues."
exit 1
}
echo "✅ All markdown files pass validation!"

24
src/components/FeedFilters.vue

@ -101,6 +101,7 @@ import {
 import { Router } from "vue-router";
 import { PlatformServiceMixin } from "@/utils/PlatformServiceMixin";
+import { logger } from "@/utils/logger";
 @Component({
   components: {
@ -119,11 +120,13 @@ export default class FeedFilters extends Vue {
   isNearby = false;
   settingChanged = false;
   visible = false;
+  activeDid = "";
-  async open(onCloseIfChanged: () => void) {
+  async open(onCloseIfChanged: () => void, activeDid: string) {
     this.onCloseIfChanged = onCloseIfChanged;
+    this.activeDid = activeDid;
-    const settings = await this.$settings();
+    const settings = await this.$accountSettings(activeDid);
     this.hasVisibleDid = !!settings.filterFeedByVisible;
     this.isNearby = !!settings.filterFeedByNearby;
     if (settings.searchBoxes && settings.searchBoxes.length > 0) {
@ -137,6 +140,7 @@ export default class FeedFilters extends Vue {
   async toggleHasVisibleDid() {
     this.settingChanged = true;
     this.hasVisibleDid = !this.hasVisibleDid;
     await this.$updateSettings({
       filterFeedByVisible: this.hasVisibleDid,
     });
@ -145,9 +149,18 @@ export default class FeedFilters extends Vue {
   async toggleNearby() {
     this.settingChanged = true;
     this.isNearby = !this.isNearby;
+    logger.debug("[FeedFilters] 🔄 Toggling nearby filter:", {
+      newValue: this.isNearby,
+      settingChanged: this.settingChanged,
+      activeDid: this.activeDid,
+    });
     await this.$updateSettings({
       filterFeedByNearby: this.isNearby,
     });
+    logger.debug("[FeedFilters] ✅ Nearby filter updated in settings");
   }
   async clearAll() {
@ -179,13 +192,20 @@ export default class FeedFilters extends Vue {
   }
   close() {
+    logger.debug("[FeedFilters] 🚪 Closing dialog:", {
+      settingChanged: this.settingChanged,
+      hasCallback: !!this.onCloseIfChanged,
+    });
     if (this.settingChanged) {
+      logger.debug("[FeedFilters] 🔄 Settings changed, calling callback");
       this.onCloseIfChanged();
     }
     this.visible = false;
   }
   done() {
+    logger.debug("[FeedFilters] ✅ Done button clicked");
     this.close();
   }
 }

6
src/interfaces/common.ts

@ -60,9 +60,13 @@ export interface AxiosErrorResponse {
       [key: string]: unknown;
     };
     status?: number;
+    statusText?: string;
     config?: unknown;
   };
-  config?: unknown;
+  config?: {
+    url?: string;
+    [key: string]: unknown;
+  };
   [key: string]: unknown;
 }
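
With `statusText` and a typed `config.url` on `AxiosErrorResponse`, callers can read the failing request URL without `as any` casts. A minimal consumer sketch; the `@/interfaces/common` import path is assumed from the project's `@/` alias:

```typescript
import type { AxiosErrorResponse } from "@/interfaces/common";

// Hypothetical helper: report which request failed, using only typed fields.
function failedRequestUrl(err: AxiosErrorResponse): string {
  return err.config?.url ?? "unknown URL";
}
```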

28
src/interfaces/deepLinks.ts

@ -28,7 +28,7 @@
 import { z } from "zod";
 // Parameter validation schemas for each route type
-export const deepLinkSchemas = {
+export const deepLinkPathSchemas = {
   claim: z.object({
     id: z.string(),
   }),
@ -60,7 +60,7 @@ export const deepLinkSchemas = {
     jwt: z.string().optional(),
   }),
   "onboard-meeting-members": z.object({
-    id: z.string(),
+    groupId: z.string(),
   }),
   project: z.object({
     id: z.string(),
@ -70,6 +70,17 @@ export const deepLinkSchemas = {
   }),
 };
+export const deepLinkQuerySchemas = {
+  "onboard-meeting-members": z.object({
+    password: z.string(),
+  }),
+};
+// Add a union type of all valid route paths
+export const VALID_DEEP_LINK_ROUTES = Object.keys(
+  deepLinkPathSchemas,
+) as readonly (keyof typeof deepLinkPathSchemas)[];
 // Create a type from the array
 export type DeepLinkRoute = (typeof VALID_DEEP_LINK_ROUTES)[number];
@ -80,14 +91,13 @@ export const baseUrlSchema = z.object({
   queryParams: z.record(z.string()).optional(),
 });
-// Add a union type of all valid route paths
-export const VALID_DEEP_LINK_ROUTES = Object.keys(
-  deepLinkSchemas,
-) as readonly (keyof typeof deepLinkSchemas)[];
-export type DeepLinkParams = {
-  [K in keyof typeof deepLinkSchemas]: z.infer<(typeof deepLinkSchemas)[K]>;
-};
+// export type DeepLinkPathParams = {
+//   [K in keyof typeof deepLinkPathSchemas]: z.infer<(typeof deepLinkPathSchemas)[K]>;
+// };
+// export type DeepLinkQueryParams = {
+//   [K in keyof typeof deepLinkQuerySchemas]: z.infer<(typeof deepLinkQuerySchemas)[K]>;
+// };
 export interface DeepLinkError extends Error {
   code: string;
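
Splitting `deepLinkPathSchemas` from `deepLinkQuerySchemas` lets a route validate its path segment and its query string independently. A minimal sketch for `timesafari://onboard-meeting-members/abc123?password=s3cret`, assuming the schemas are imported via the `@/interfaces/deepLinks` alias:

```typescript
import {
  deepLinkPathSchemas,
  deepLinkQuerySchemas,
} from "@/interfaces/deepLinks";

// Values parsed out of timesafari://onboard-meeting-members/abc123?password=s3cret
const pathResult = deepLinkPathSchemas["onboard-meeting-members"].safeParse({
  groupId: "abc123",
});
const queryResult = deepLinkQuerySchemas["onboard-meeting-members"].safeParse({
  password: "s3cret",
});

if (!pathResult.success || !queryResult.success) {
  // Either half failing should send the user to the deep-link-error route.
  throw new Error("Invalid onboard-meeting-members link");
}
```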

120
src/main.capacitor.ts

@ -29,14 +29,14 @@
  */
 import { initializeApp } from "./main.common";
-import { App } from "./libs/capacitor/app";
+import { App as CapacitorApp } from "@capacitor/app";
 import router from "./router";
 import { handleApiError } from "./services/api";
 import { AxiosError } from "axios";
 import { DeepLinkHandler } from "./services/deepLinks";
 import { logger, safeStringify } from "./utils/logger";
-logger.log("[Capacitor] Starting initialization");
+logger.log("[Capacitor] 🚀 Starting initialization");
 logger.log("[Capacitor] Platform:", process.env.VITE_PLATFORM);
 const app = initializeApp();
@ -67,23 +67,123 @@ const deepLinkHandler = new DeepLinkHandler(router);
  * @throws {Error} If URL format is invalid
  */
 const handleDeepLink = async (data: { url: string }) => {
+  const { url } = data;
+  logger.info(`[Main] 🌐 Deeplink received from Capacitor: ${url}`);
   try {
+    // Wait for router to be ready
+    logger.info(`[Main] ⏳ Waiting for router to be ready...`);
     await router.isReady();
-    await deepLinkHandler.handleDeepLink(data.url);
+    logger.info(`[Main] ✅ Router is ready, processing deeplink`);
+    // Process the deeplink
+    logger.info(`[Main] 🚀 Starting deeplink processing`);
+    await deepLinkHandler.handleDeepLink(url);
+    logger.info(`[Main] ✅ Deeplink processed successfully`);
   } catch (error) {
-    logger.error("[DeepLink] Error handling deep link: ", error);
+    logger.error(`[Main] ❌ Deeplink processing failed:`, {
+      url,
+      error: error instanceof Error ? error.message : String(error),
+      stack: error instanceof Error ? error.stack : undefined,
+      timestamp: new Date().toISOString(),
+    });
+    // Log additional context for debugging
+    logger.error(`[Main] 🔍 Debug context:`, {
+      routerReady: router.isReady(),
+      currentRoute: router.currentRoute.value,
+      appMounted: app._instance?.isMounted,
+      timestamp: new Date().toISOString(),
+    });
+    // Fallback to original error handling
     let message: string =
       error instanceof Error ? error.message : safeStringify(error);
-    if (data.url) {
-      message += `\nURL: ${data.url}`;
+    if (url) {
+      message += `\nURL: ${url}`;
     }
     handleApiError({ message } as AxiosError, "deep-link");
   }
 };
-// Register deep link handler with Capacitor
-App.addListener("appUrlOpen", handleDeepLink);
-logger.log("[Capacitor] Mounting app");
+// Function to register the deeplink listener
+const registerDeepLinkListener = async () => {
+  try {
+    logger.info(
+      `[Main] 🔗 Attempting to register deeplink handler with Capacitor`,
+    );
+    // Check if Capacitor App plugin is available
+    logger.info(`[Main] 🔍 Checking Capacitor App plugin availability...`);
+    if (!CapacitorApp) {
+      throw new Error("Capacitor App plugin not available");
+    }
+    logger.info(`[Main] ✅ Capacitor App plugin is available`);
+    // Check available methods on CapacitorApp
+    logger.info(
+      `[Main] 🔍 Capacitor App plugin methods:`,
+      Object.getOwnPropertyNames(CapacitorApp),
+    );
+    logger.info(
+      `[Main] 🔍 Capacitor App plugin addListener method:`,
+      typeof CapacitorApp.addListener,
+    );
+    // Wait for router to be ready first
+    await router.isReady();
+    logger.info(
+      `[Main] ✅ Router is ready, proceeding with listener registration`,
+    );
+    // Try to register the listener
+    logger.info(`[Main] 🧪 Attempting to register appUrlOpen listener...`);
+    const listenerHandle = await CapacitorApp.addListener(
+      "appUrlOpen",
+      handleDeepLink,
+    );
+    logger.info(
+      `[Main] ✅ appUrlOpen listener registered successfully with handle:`,
+      listenerHandle,
+    );
+    // Test the listener registration by checking if it's actually registered
+    logger.info(`[Main] 🧪 Verifying listener registration...`);
+    return listenerHandle;
+  } catch (error) {
+    logger.error(`[Main] ❌ Failed to register deeplink listener:`, {
+      error: error instanceof Error ? error.message : String(error),
+      stack: error instanceof Error ? error.stack : undefined,
+      timestamp: new Date().toISOString(),
+    });
+    throw error;
+  }
+};
+logger.log("[Capacitor] 🚀 Mounting app");
 app.mount("#app");
-logger.log("[Capacitor] App mounted");
+logger.info(`[Main] ✅ App mounted successfully`);
+// Register deeplink listener after app is mounted
+setTimeout(async () => {
+  try {
+    logger.info(
+      `[Main] ⏳ Delaying listener registration to ensure Capacitor is ready...`,
+    );
+    await registerDeepLinkListener();
+    logger.info(`[Main] 🎉 Deep link system fully initialized!`);
+  } catch (error) {
+    logger.error(`[Main] ❌ Deep link system initialization failed:`, error);
+  }
+}, 2000); // 2 second delay to ensure Capacitor is fully ready
+// Log app initialization status
+setTimeout(() => {
+  logger.info(`[Main] 📊 App initialization status:`, {
+    routerReady: router.isReady(),
+    currentRoute: router.currentRoute.value,
+    appMounted: app._instance?.isMounted,
+    timestamp: new Date().toISOString(),
+  });
+}, 1000);
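
Because `registerDeepLinkListener` returns the handle from `CapacitorApp.addListener`, a caller inside `main.capacitor.ts` could keep it for teardown. A sketch assuming Capacitor's standard `PluginListenerHandle.remove()` API; this wiring is not part of the diff:

```typescript
import type { PluginListenerHandle } from "@capacitor/core";

let appUrlOpenHandle: PluginListenerHandle | undefined;

async function initDeepLinks(): Promise<void> {
  appUrlOpenHandle = await registerDeepLinkListener();
}

async function teardownDeepLinks(): Promise<void> {
  // Detach the appUrlOpen listener, e.g. before re-registering after a reload.
  await appUrlOpenHandle?.remove();
  appUrlOpenHandle = undefined;
}
```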

26
src/main.ts

@ -0,0 +1,26 @@
/**
* @file Dynamic Main Entry Point
* @author Matthew Raymer
*
* This file dynamically loads the appropriate platform-specific main entry point
* based on the current environment and build configuration.
*/
import { logger } from "./utils/logger";
// Check the platform from environment variables
const platform = process.env.VITE_PLATFORM || "web";
logger.info(`[Main] 🚀 Loading TimeSafari for platform: ${platform}`);
// Dynamically import the appropriate main entry point
if (platform === "capacitor") {
logger.info(`[Main] 📱 Loading Capacitor-specific entry point`);
import("./main.capacitor");
} else if (platform === "electron") {
logger.info(`[Main] 💻 Loading Electron-specific entry point`);
import("./main.electron");
} else {
logger.info(`[Main] 🌐 Loading Web-specific entry point`);
import("./main.web");
}
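
The dynamic imports above are fire-and-forget, so a failed chunk load only surfaces in the browser console. A hypothetical variant (not part of this commit) that awaits the platform entry point and logs load failures through the same logger:

```typescript
import { logger } from "./utils/logger";

async function loadPlatformEntry(platform: string): Promise<void> {
  // Map each supported platform to its entry-point module.
  const entries: Record<string, () => Promise<unknown>> = {
    capacitor: () => import("./main.capacitor"),
    electron: () => import("./main.electron"),
    web: () => import("./main.web"),
  };
  const load = entries[platform] ?? entries.web;
  try {
    await load();
  } catch (error) {
    logger.error(`[Main] Failed to load entry point for ${platform}:`, error);
  }
}

void loadPlatformEntry(process.env.VITE_PLATFORM || "web");
```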

72
src/router/index.ts

@ -321,24 +321,21 @@ const errorHandler = (
 router.onError(errorHandler); // Assign the error handler to the router instance
 /**
- * Global navigation guard to ensure user identity exists
- *
- * This guard checks if the user has any identities before navigating to most routes.
- * If no identity exists, it automatically creates one using the default seed-based method.
- *
- * Routes that are excluded from this check:
- * - /start - Manual identity creation selection
- * - /new-identifier - Manual seed-based creation
- * - /import-account - Manual import flow
- * - /import-derive - Manual derivation flow
- * - /database-migration - Migration utilities
- * - /deep-link-error - Error page
- *
+ * Navigation guard to ensure user has an identity before accessing protected routes
  * @param to - Target route
- * @param from - Source route
+ * @param _from - Source route (unused)
  * @param next - Navigation function
  */
 router.beforeEach(async (to, _from, next) => {
+  logger.info(`[Router] 🧭 Navigation guard triggered:`, {
+    from: _from?.path || "none",
+    to: to.path,
+    name: to.name,
+    params: to.params,
+    query: to.query,
+    timestamp: new Date().toISOString(),
+  });
   try {
     // Skip identity check for routes that handle identity creation manually
     const skipIdentityRoutes = [
@ -351,32 +348,67 @@ router.beforeEach(async (to, _from, next) => {
     ];
     if (skipIdentityRoutes.includes(to.path)) {
+      logger.debug(`[Router] ⏭️ Skipping identity check for route: ${to.path}`);
       return next();
     }
+    logger.info(`[Router] 🔍 Checking user identity for route: ${to.path}`);
     // Check if user has any identities
     const allMyDids = await retrieveAccountDids();
+    logger.info(`[Router] 📋 Found ${allMyDids.length} user identities`);
     if (allMyDids.length === 0) {
-      logger.info("[Router] No identities found, creating default identity");
+      logger.info("[Router] ⚠️ No identities found, creating default identity");
       // Create identity automatically using seed-based method
       await generateSaveAndActivateIdentity();
-      logger.info("[Router] Default identity created successfully");
+      logger.info("[Router] ✅ Default identity created successfully");
+    } else {
+      logger.info(
+        `[Router] ✅ User has ${allMyDids.length} identities, proceeding`,
+      );
     }
+    logger.info(`[Router] ✅ Navigation guard passed for: ${to.path}`);
     next();
   } catch (error) {
-    logger.error(
-      "[Router] Identity creation failed in navigation guard:",
-      error,
-    );
+    logger.error("[Router] ❌ Identity creation failed in navigation guard:", {
+      error: error instanceof Error ? error.message : String(error),
+      stack: error instanceof Error ? error.stack : undefined,
+      route: to.path,
+      timestamp: new Date().toISOString(),
+    });
     // Redirect to start page if identity creation fails
     // This allows users to manually create an identity or troubleshoot
+    logger.info(
+      `[Router] 🔄 Redirecting to /start due to identity creation failure`,
+    );
     next("/start");
   }
 });
+// Add navigation success logging
+router.afterEach((to, from) => {
+  logger.info(`[Router] ✅ Navigation completed:`, {
+    from: from?.path || "none",
+    to: to.path,
+    name: to.name,
+    params: to.params,
+    query: to.query,
+    timestamp: new Date().toISOString(),
+  });
+});
+// Add error logging
+router.onError((error) => {
+  logger.error(`[Router] ❌ Navigation error:`, {
+    error: error instanceof Error ? error.message : String(error),
+    stack: error instanceof Error ? error.stack : undefined,
+    timestamp: new Date().toISOString(),
+  });
+});
 export default router;

77
src/services/ProfileService.ts

@ -124,34 +124,30 @@ export class ProfileService {
   async deleteProfile(activeDid: string): Promise<boolean> {
     try {
       const headers = await getHeaders(activeDid);
-      logger.debug("Attempting to delete profile for DID:", activeDid);
-      logger.debug("Using partner API server:", this.partnerApiServer);
-      logger.debug("Request headers:", headers);
       const url = `${this.partnerApiServer}/api/partner/userProfile`;
-      logger.debug("DELETE request URL:", url);
       const response = await this.axios.delete(url, { headers });
-      if (response.status === 200 || response.status === 204) {
-        logger.debug("Profile deleted successfully");
+      if (response.status === 204 || response.status === 200) {
+        logger.info("Profile deleted successfully");
         return true;
       } else {
         logger.error("Unexpected response status when deleting profile:", {
           status: response.status,
           statusText: response.statusText,
-          data: response.data
+          data: response.data,
         });
-        throw new Error(`Profile not deleted - HTTP ${response.status}: ${response.statusText}`);
+        throw new Error(
+          `Profile not deleted - HTTP ${response.status}: ${response.statusText}`,
+        );
       }
     } catch (error) {
       if (this.isApiError(error) && error.response) {
-        const response = error.response as any; // Type assertion for error response
+        const response = error.response;
         logger.error("API error deleting profile:", {
           status: response.status,
           statusText: response.statusText,
           data: response.data,
-          url: (error as any).config?.url
+          url: this.getErrorUrl(error),
         });
         // Handle specific HTTP status codes
@ -163,7 +159,11 @@ export class ProfileService {
           return true; // Consider this a success if profile doesn't exist
         } else if (response.status === 400) {
           logger.error("Bad request when deleting profile:", response.data);
-          throw new Error(`Profile deletion failed: ${response.data?.message || 'Bad request'}`);
+          const errorMessage =
+            typeof response.data === "string"
+              ? response.data
+              : response.data?.message || "Bad request";
+          throw new Error(`Profile deletion failed: ${errorMessage}`);
         } else if (response.status === 401) {
           logger.error("Unauthorized to delete profile");
           throw new Error("You are not authorized to delete this profile");
@ -242,13 +242,56 @@ export class ProfileService {
   }
   /**
-   * Type guard for API errors
+   * Type guard for API errors with proper typing
    */
-  private isApiError(
-    error: unknown,
-  ): error is { response?: { status?: number } } {
+  private isApiError(error: unknown): error is {
+    response?: {
+      status?: number;
+      statusText?: string;
+      data?: { message?: string } | string;
+    };
+  } {
     return typeof error === "object" && error !== null && "response" in error;
   }
+  /**
+   * Extract error URL safely from error object
+   */
+  private getErrorUrl(error: unknown): string | undefined {
+    if (this.isAxiosError(error)) {
+      return error.config?.url;
+    }
+    if (this.isApiError(error) && this.hasConfigProperty(error)) {
+      const config = this.getConfigProperty(error);
+      return config?.url;
+    }
+    return undefined;
+  }
+  /**
+   * Type guard to check if error has config property
+   */
+  private hasConfigProperty(
+    error: unknown,
+  ): error is { config?: { url?: string } } {
+    return typeof error === "object" && error !== null && "config" in error;
+  }
+  /**
+   * Safely extract config property from error
+   */
+  private getConfigProperty(error: {
+    config?: { url?: string };
+  }): { url?: string } | undefined {
+    return error.config;
+  }
+  /**
+   * Type guard for AxiosError
+   */
+  private isAxiosError(error: unknown): error is AxiosError {
+    return error instanceof AxiosError;
+  }
 }
 /**
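
A minimal call-site sketch for the reworked `deleteProfile`; how the `ProfileService` instance is constructed is outside this hunk, and the import paths are assumed from the `@/` alias:

```typescript
import type { ProfileService } from "@/services/ProfileService";
import { logger } from "@/utils/logger";

async function removeUserProfile(
  service: ProfileService,
  activeDid: string,
): Promise<boolean> {
  try {
    // Resolves true on 200/204, and also on 404 (nothing left to delete).
    return await service.deleteProfile(activeDid);
  } catch (error) {
    // 400/401 and other statuses are re-thrown with user-facing messages.
    logger.error("Could not delete profile:", error);
    return false;
  }
}
```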

352
src/services/deepLinks.ts

@ -1,56 +1,22 @@
 /**
- * @file Deep Link Handler Service
- * @author Matthew Raymer
- *
- * This service handles the processing and routing of deep links in the TimeSafari app.
- * It provides a type-safe interface between the raw deep links and the application router.
- *
- * Architecture:
- * 1. DeepLinkHandler class encapsulates all deep link processing logic
- * 2. Uses Zod schemas from interfaces/deepLinks for parameter validation
- * 3. Provides consistent error handling and logging
- * 4. Maps validated parameters to Vue router calls
- *
- * Error Handling Strategy:
- * - All errors are wrapped in DeepLinkError interface
- * - Errors include error codes for systematic handling
- * - Detailed error information is logged for debugging
- * - Errors are propagated to the global error handler
- *
- * Validation Strategy:
- * - URL structure validation
- * - Route-specific parameter validation using Zod schemas
- * - Query parameter validation and sanitization
- * - Type-safe parameter passing to router
- *
- * Deep Link Format:
- * timesafari://<route>[/<param>][?queryParam1=value1&queryParam2=value2]
- *
- * Supported Routes:
- * - claim: View claim
- * - claim-add-raw: Add raw claim
- * - claim-cert: View claim certificate
- * - confirm-gift
- * - contact-import: Import contacts
- * - did: View DID
- * - invite-one-accept: Accept invitation
- * - onboard-meeting-members
- * - project: View project details
- * - user-profile: View user profile
- *
- * @example
- * const handler = new DeepLinkHandler(router);
- * await handler.handleDeepLink("timesafari://claim/123?view=details");
+ * DeepLinks Service
+ *
+ * Handles deep link processing and routing for the TimeSafari application.
+ * Supports both path parameters and query parameters with comprehensive validation.
+ *
+ * @author Matthew Raymer
+ * @version 2.0.0
+ * @since 2025-01-25
  */
 import { Router } from "vue-router";
 import { z } from "zod";
 import {
-  deepLinkSchemas,
-  baseUrlSchema,
+  deepLinkPathSchemas,
   routeSchema,
   DeepLinkRoute,
+  deepLinkQuerySchemas,
 } from "../interfaces/deepLinks";
 import type { DeepLinkError } from "../interfaces/deepLinks";
 import { logger } from "../utils/logger";
@ -74,7 +40,7 @@ function getFirstKeyFromZodObject(
  * because "router.replace" expects the right parameter name for the route.
  */
 export const ROUTE_MAP: Record<string, { name: string; paramKey?: string }> =
-  Object.entries(deepLinkSchemas).reduce(
+  Object.entries(deepLinkPathSchemas).reduce(
     (acc, [routeName, schema]) => {
       // eslint-disable-next-line @typescript-eslint/no-explicit-any
       const paramKey = getFirstKeyFromZodObject(schema as z.ZodObject<any>);
@ -103,83 +69,152 @@ export class DeepLinkHandler {
   }
   /**
-   * Parses deep link URL into path, params and query components.
-   * Validates URL structure using Zod schemas.
-   *
-   * @param url - The deep link URL to parse (format: scheme://path[?query])
-   * @throws {DeepLinkError} If URL format is invalid
-   * @returns Parsed URL components (path: string, params: {KEY: string}, query: {KEY: string})
+   * Main entry point for processing deep links
+   * @param url - The deep link URL to process
+   * @throws {DeepLinkError} If validation fails or route is invalid
    */
-  private parseDeepLink(url: string) {
-    const parts = url.split("://");
-    if (parts.length !== 2) {
-      throw { code: "INVALID_URL", message: "Invalid URL format" };
-    }
-    // Validate base URL structure
-    baseUrlSchema.parse({
-      scheme: parts[0],
-      path: parts[1],
-      queryParams: {}, // Will be populated below
-    });
-    const [path, queryString] = parts[1].split("?");
-    const [routePath, ...pathParams] = path.split("/");
-    // Validate route exists before proceeding
-    if (!ROUTE_MAP[routePath]) {
-      throw {
-        code: "INVALID_ROUTE",
-        message: `Invalid route path: ${routePath}`,
-        details: { routePath },
-      };
-    }
-    const query: Record<string, string> = {};
-    if (queryString) {
-      new URLSearchParams(queryString).forEach((value, key) => {
-        query[key] = value;
-      });
-    }
-    const params: Record<string, string> = {};
-    if (pathParams) {
-      // Now we know routePath exists in ROUTE_MAP
-      const routeConfig = ROUTE_MAP[routePath];
-      params[routeConfig.paramKey ?? "id"] = pathParams.join("/");
-    }
-    // logConsoleAndDb(
-    //   `[DeepLink] Debug: Route Path: ${routePath} Path Params: ${JSON.stringify(params)} Query String: ${JSON.stringify(query)}`,
-    //   false,
-    // );
-    return { path: routePath, params, query };
+  async handleDeepLink(url: string): Promise<void> {
+    logger.info(`[DeepLink] 🚀 Starting deeplink processing for URL: ${url}`);
+    try {
+      logger.info(`[DeepLink] 📍 Parsing URL: ${url}`);
+      const { path, params, query } = this.parseDeepLink(url);
+      logger.info(`[DeepLink] ✅ URL parsed successfully:`, {
+        path,
+        params: Object.keys(params),
+        query: Object.keys(query),
+        fullParams: params,
+        fullQuery: query,
+      });
+      // Sanitize parameters (remove undefined values)
+      const sanitizedParams = Object.fromEntries(
+        Object.entries(params).map(([key, value]) => [key, value ?? ""]),
+      );
+      logger.info(`[DeepLink] 🧹 Parameters sanitized:`, sanitizedParams);
+      await this.validateAndRoute(path, sanitizedParams, query);
+      logger.info(`[DeepLink] 🎯 Deeplink processing completed successfully`);
+    } catch (error) {
+      logger.error(`[DeepLink] ❌ Deeplink processing failed:`, {
+        url,
+        error: error instanceof Error ? error.message : String(error),
+        stack: error instanceof Error ? error.stack : undefined,
+      });
+      const deepLinkError = error as DeepLinkError;
+      throw deepLinkError;
+    }
+  }
+  /**
+   * Parse a deep link URL into its components
+   * @param url - The deep link URL
+   * @returns Parsed components
+   */
+  private parseDeepLink(url: string): {
+    path: string;
+    params: Record<string, string>;
+    query: Record<string, string>;
+  } {
+    logger.debug(`[DeepLink] 🔍 Parsing deep link: ${url}`);
+    try {
+      const parts = url.split("://");
+      if (parts.length !== 2) {
+        throw new Error("Invalid URL format");
+      }
+      const [path, queryString] = parts[1].split("?");
+      const [routePath, ...pathParams] = path.split("/");
+      // Parse path parameters using route-specific configuration
+      const params: Record<string, string> = {};
+      if (pathParams.length > 0) {
+        // Get the correct parameter key for this route
+        const routeConfig = ROUTE_MAP[routePath];
+        if (routeConfig?.paramKey) {
+          params[routeConfig.paramKey] = pathParams[0];
+          logger.debug(
+            `[DeepLink] 📍 Path parameter extracted: ${routeConfig.paramKey}=${pathParams[0]}`,
+          );
+        } else {
+          // Fallback to 'id' for backward compatibility
+          params.id = pathParams[0];
+          logger.debug(
+            `[DeepLink] 📍 Path parameter extracted: id=${pathParams[0]} (fallback)`,
          );
+        }
+      }
+      // Parse query parameters
+      const query: Record<string, string> = {};
+      if (queryString) {
+        const queryParams = new URLSearchParams(queryString);
+        for (const [key, value] of queryParams.entries()) {
+          query[key] = value;
+        }
+        logger.debug(`[DeepLink] 🔗 Query parameters extracted:`, query);
+      }
+      logger.info(`[DeepLink] ✅ Parse completed:`, {
+        routePath,
+        pathParams: pathParams.length,
+        queryParams: Object.keys(query).length,
+      });
+      return { path: routePath, params, query };
+    } catch (error) {
+      logger.error(`[DeepLink] ❌ Parse failed:`, {
+        url,
+        error: error instanceof Error ? error.message : String(error),
+      });
+      throw error;
+    }
   }
   /**
-   * Routes the deep link to appropriate view with validated parameters.
-   * Validates route and parameters using Zod schemas before routing.
-   *
-   * @param path - The route path from the deep link
-   * @param params - URL parameters
-   * @param query - Query string parameters
-   * @throws {DeepLinkError} If validation fails or route is invalid
+   * Validate and route the deep link
+   * @param path - The route path
+   * @param params - Path parameters
+   * @param query - Query parameters
    */
   private async validateAndRoute(
     path: string,
     params: Record<string, string>,
     query: Record<string, string>,
   ): Promise<void> {
+    logger.info(
+      `[DeepLink] 🎯 Starting validation and routing for path: ${path}`,
+    );
     // First try to validate the route path
     let routeName: string;
     try {
+      logger.debug(`[DeepLink] 🔍 Validating route path: ${path}`);
       // Validate route exists
       const validRoute = routeSchema.parse(path) as DeepLinkRoute;
-      routeName = ROUTE_MAP[validRoute].name;
+      logger.info(`[DeepLink] ✅ Route validation passed: ${validRoute}`);
+      // Get route configuration
+      const routeConfig = ROUTE_MAP[validRoute];
+      logger.info(`[DeepLink] 📋 Route config retrieved:`, routeConfig);
+      if (!routeConfig) {
+        logger.error(`[DeepLink] ❌ No route config found for: ${validRoute}`);
+        throw new Error(`Route configuration missing for: ${validRoute}`);
+      }
+      routeName = routeConfig.name;
+      logger.info(`[DeepLink] 🎯 Route name resolved: ${routeName}`);
     } catch (error) {
-      logger.error(`[DeepLink] Invalid route path: ${path}`);
+      logger.error(`[DeepLink] ❌ Route validation failed:`, {
+        path,
+        error: error instanceof Error ? error.message : String(error),
+      });
      // Redirect to error page with information about the invalid link
      await this.router.replace({
@ -193,21 +228,66 @@
        },
      });
-      // This previously threw an error but we're redirecting so there's no need.
+      logger.info(
+        `[DeepLink] 🔄 Redirected to error page for invalid route: ${path}`,
+      );
      return;
    }
-    // Continue with parameter validation as before...
-    const schema = deepLinkSchemas[path as keyof typeof deepLinkSchemas];
-    let validatedParams;
+    // Continue with parameter validation
+    logger.info(
+      `[DeepLink] 🔍 Starting parameter validation for route: ${routeName}`,
+    );
+    const pathSchema =
+      deepLinkPathSchemas[path as keyof typeof deepLinkPathSchemas];
+    const querySchema =
+      deepLinkQuerySchemas[path as keyof typeof deepLinkQuerySchemas];
+    logger.debug(`[DeepLink] 📋 Schemas found:`, {
+      hasPathSchema: !!pathSchema,
+      hasQuerySchema: !!querySchema,
+      pathSchemaType: pathSchema ? typeof pathSchema : "none",
+      querySchemaType: querySchema ? typeof querySchema : "none",
+    });
+    let validatedPathParams: Record<string, string> = {};
+    let validatedQueryParams: Record<string, string> = {};
    try {
-      validatedParams = await schema.parseAsync(params);
+      if (pathSchema) {
+        logger.debug(`[DeepLink] 🔍 Validating path parameters:`, params);
+        validatedPathParams = await pathSchema.parseAsync(params);
+        logger.info(
+          `[DeepLink] ✅ Path parameters validated:`,
+          validatedPathParams,
+        );
+      } else {
+        logger.debug(`[DeepLink] ⚠️ No path schema found for: ${path}`);
+        validatedPathParams = params;
+      }
+      if (querySchema) {
+        logger.debug(`[DeepLink] 🔍 Validating query parameters:`, query);
+        validatedQueryParams = await querySchema.parseAsync(query);
+        logger.info(
+          `[DeepLink] ✅ Query parameters validated:`,
+          validatedQueryParams,
+        );
+      } else {
+        logger.debug(`[DeepLink] ⚠️ No query schema found for: ${path}`);
+        validatedQueryParams = query;
+      }
    } catch (error) {
-      // For parameter validation errors, provide specific error feedback
-      logger.error(
-        `[DeepLink] Invalid parameters for route name ${routeName} for path: ${path}: ${JSON.stringify(error)} ... with params: ${JSON.stringify(params)} ... and query: ${JSON.stringify(query)}`,
-      );
+      logger.error(`[DeepLink] ❌ Parameter validation failed:`, {
+        routeName,
+        path,
+        params,
+        query,
+        error: error instanceof Error ? error.message : String(error),
+        errorDetails: JSON.stringify(error),
+      });
      await this.router.replace({
        name: "deep-link-error",
        params,
@ -219,58 +299,52 @@
        },
      });
-      // This previously threw an error but we're redirecting so there's no need.
+      logger.info(
+        `[DeepLink] 🔄 Redirected to error page for invalid parameters`,
+      );
      return;
    }
+    // Attempt navigation
    try {
+      logger.info(`[DeepLink] 🚀 Attempting navigation:`, {
+        routeName,
+        pathParams: validatedPathParams,
+        queryParams: validatedQueryParams,
+      });
      await this.router.replace({
        name: routeName,
-        params: validatedParams,
+        params: validatedPathParams,
+        query: validatedQueryParams,
      });
+      logger.info(`[DeepLink] ✅ Navigation successful to: ${routeName}`);
    } catch (error) {
-      logger.error(
-        `[DeepLink] Error routing to route name ${routeName} for path: ${path}: ${JSON.stringify(error)} ... with validated params: ${JSON.stringify(validatedParams)}`,
-      );
-      // For parameter validation errors, provide specific error feedback
+      logger.error(`[DeepLink] ❌ Navigation failed:`, {
+        routeName,
+        path,
+        validatedPathParams,
+        validatedQueryParams,
+        error: error instanceof Error ? error.message : String(error),
+        errorDetails: JSON.stringify(error),
+      });
+      // Redirect to error page for navigation failures
      await this.router.replace({
        name: "deep-link-error",
-        params: validatedParams,
+        params: validatedPathParams,
        query: {
          originalPath: path,
          errorCode: "ROUTING_ERROR",
-          errorMessage: `Error routing to ${routeName}: ${JSON.stringify(error)}`,
+          errorMessage: `Error routing to ${routeName}: ${(error as Error).message}`,
+          ...validatedQueryParams,
        },
      });
-    }
-  }
-  /**
-   * Processes incoming deep links and routes them appropriately.
-   * Handles validation, error handling, and routing to the correct view.
-   *
-   * @param url - The deep link URL to process
-   * @throws {DeepLinkError} If URL processing fails
-   */
-  async handleDeepLink(url: string): Promise<void> {
-    try {
-      const { path, params, query } = this.parseDeepLink(url);
-      // Ensure params is always a Record<string,string> by converting undefined to empty string
-      const sanitizedParams = Object.fromEntries(
-        Object.entries(params).map(([key, value]) => [key, value ?? ""]),
-      );
-      await this.validateAndRoute(path, sanitizedParams, query);
-    } catch (error) {
-      const deepLinkError = error as DeepLinkError;
-      logger.error(
-        `[DeepLink] Error (${deepLinkError.code}): ${deepLinkError.details}`,
-      );
-      throw {
-        code: deepLinkError.code || "UNKNOWN_ERROR",
-        message: deepLinkError.message,
-        details: deepLinkError.details,
-      };
-    }
-  }
+      logger.info(
+        `[DeepLink] 🔄 Redirected to error page for navigation failure`,
+      );
+    }
+  }
 }
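
The usage example from the removed header comment still applies to the reworked handler; the import paths below are the ones used in `main.capacitor.ts`:

```typescript
import { DeepLinkHandler } from "./services/deepLinks";
import router from "./router";

async function openClaimFromLink(): Promise<void> {
  const handler = new DeepLinkHandler(router);
  // Path parameter `id` plus a `view` query parameter, as in the old @example.
  await handler.handleDeepLink("timesafari://claim/123?view=details");
}
```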

48
src/views/AccountViewView.vue

@ -174,16 +174,15 @@
       :aria-busy="loadingProfile || savingProfile"
     ></textarea>
     <div class="flex items-center mb-4">
       <input
         v-model="includeUserProfileLocation"
         type="checkbox"
         class="mr-2"
         @change="onLocationCheckboxChange"
       />
       <label for="includeUserProfileLocation">Include Location</label>
-      <span class="text-xs text-slate-400 ml-2">(Debug: {{ isMapReady ? 'Map Ready' : 'Map Loading' }})</span>
     </div>
     <div v-if="includeUserProfileLocation" class="mb-4 aspect-video">
       <p class="text-sm mb-2 text-slate-500">
         The location you choose will be shared with the world until you remove
@ -922,11 +921,17 @@ export default class AccountViewView extends Vue {
     // Fix Leaflet icon issues in modern bundlers
     // This prevents the "Cannot read properties of undefined (reading 'Default')" error
     if (L.Icon.Default) {
-      delete (L.Icon.Default.prototype as any)._getIconUrl;
+      // Type-safe way to handle Leaflet icon prototype
+      const iconDefault = L.Icon.Default.prototype as Record<string, unknown>;
+      if ("_getIconUrl" in iconDefault) {
+        delete iconDefault._getIconUrl;
+      }
       L.Icon.Default.mergeOptions({
-        iconRetinaUrl: 'https://unpkg.com/leaflet@1.7.1/dist/images/marker-icon-2x.png',
-        iconUrl: 'https://unpkg.com/leaflet@1.7.1/dist/images/marker-icon.png',
-        shadowUrl: 'https://unpkg.com/leaflet@1.7.1/dist/images/marker-shadow.png',
+        iconRetinaUrl:
+          "https://unpkg.com/leaflet@1.7.1/dist/images/marker-icon-2x.png",
+        iconUrl: "https://unpkg.com/leaflet@1.7.1/dist/images/marker-icon.png",
+        shadowUrl:
+          "https://unpkg.com/leaflet@1.7.1/dist/images/marker-shadow.png",
       });
     }
   }
@ -1543,12 +1548,18 @@ export default class AccountViewView extends Vue {
     try {
       logger.debug("Map ready event fired, map object:", map);
       // doing this here instead of on the l-map element avoids a recentering after a drag then zoom at startup
-      const zoom = this.userProfileLatitude && this.userProfileLongitude ? 12 : 2;
+      const zoom =
+        this.userProfileLatitude && this.userProfileLongitude ? 12 : 2;
       const lat = this.userProfileLatitude || 0;
       const lng = this.userProfileLongitude || 0;
       map.setView([lat, lng], zoom);
       this.isMapReady = true;
-      logger.debug("Map ready state set to true, coordinates:", [lat, lng], "zoom:", zoom);
+      logger.debug(
+        "Map ready state set to true, coordinates:",
+        [lat, lng],
+        "zoom:",
+        zoom,
+      );
     } catch (error) {
       logger.error("Error in onMapReady:", error);
       this.isMapReady = true; // Set to true even on error to prevent infinite loading
@ -1710,7 +1721,10 @@ export default class AccountViewView extends Vue {
   onLocationCheckboxChange(): void {
     try {
-      logger.debug("Location checkbox changed, new value:", this.includeUserProfileLocation);
+      logger.debug(
+        "Location checkbox changed, new value:",
+        this.includeUserProfileLocation,
+      );
       if (!this.includeUserProfileLocation) {
         // Location checkbox was unchecked, clean up map state
         this.isMapReady = false;

Some files were not shown because too many files changed in this diff
