Compare commits

...

22 Commits

Author SHA1 Message Date
Gitea Actions
e4d830ab90 ci: Bump version to 0.14.2 [skip ci] 2026-02-13 23:35:46 +05:00
b6a62a036f be specific about pm2 processes
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 3m31s
2026-02-13 10:19:28 -08:00
2d2cd52011 Massive Dependency Modernization Project
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 3m58s
2026-02-13 00:34:22 -08:00
379b8bf532 fix tour / whats new collision
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Has been cancelled
2026-02-12 11:05:47 -08:00
Gitea Actions
d06a1952a0 ci: Bump version to 0.14.1 [skip ci] 2026-02-12 17:37:36 +05:00
4d323a51ca fix tour / whats new collision
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 49m39s
2026-02-12 04:29:43 -08:00
Gitea Actions
ee15c67429 ci: Bump version to 0.14.0 for production release [skip ci] 2026-02-12 16:16:16 +05:00
Gitea Actions
9956d07480 ci: Bump version to 0.13.0 for production release [skip ci] 2026-02-12 16:08:44 +05:00
Gitea Actions
5bc8f6a42b ci: Bump version to 0.12.25 [skip ci] 2026-01-31 03:35:28 +05:00
4fd5e900af minor test fixes
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 25m22s
2026-01-30 14:29:45 -08:00
Gitea Actions
39ab773b82 ci: Bump version to 0.12.24 [skip ci] 2026-01-30 06:23:37 +05:00
75406cd924 typescript fix
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 25m7s
2026-01-29 17:21:55 -08:00
Gitea Actions
8fb0a57f02 ci: Bump version to 0.12.23 [skip ci] 2026-01-30 05:24:50 +05:00
c78323275b more unit tests - done for now
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 2m28s
2026-01-29 16:21:48 -08:00
Gitea Actions
5fe537b93d ci: Bump version to 0.12.22 [skip ci] 2026-01-29 12:26:33 +05:00
61f24305fb ADR-024 Feature Flagging Strategy
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 22m13s
2026-01-28 23:23:45 -08:00
Gitea Actions
de3f0cf26e ci: Bump version to 0.12.21 [skip ci] 2026-01-29 05:37:59 +05:00
45ac4fccf5 comprehensive documentation review + test fixes
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 2m15s
2026-01-28 16:35:38 -08:00
Gitea Actions
b6c3ca9abe ci: Bump version to 0.12.20 [skip ci] 2026-01-29 04:36:43 +05:00
4f06698dfd test fixes and doc work
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 2m50s
2026-01-28 15:33:48 -08:00
Gitea Actions
e548d1b0cc ci: Bump version to 0.12.19 [skip ci] 2026-01-28 23:03:57 +05:00
771f59d009 more api versioning work -whee
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 22m47s
2026-01-28 09:58:28 -08:00
203 changed files with 50746 additions and 9716 deletions

View File

@@ -128,3 +128,35 @@ GENERATE_SOURCE_MAPS=true
SENTRY_AUTH_TOKEN=
# URL of your Bugsink instance (for source map uploads)
SENTRY_URL=https://bugsink.projectium.com
# ===================
# Feature Flags (ADR-024)
# ===================
# Feature flags control the availability of features at runtime.
# All flags default to disabled. A flag is enabled only when its variable is set to exactly 'true';
# any other value (or an unset variable) is treated as false.
# Set to 'true' to enable a feature.
#
# Backend flags use: FEATURE_SNAKE_CASE
# Frontend flags use: VITE_FEATURE_SNAKE_CASE (VITE_ prefix required for client-side access)
#
# Lifecycle:
# 1. Add flag with default false
# 2. Enable via env var when ready for testing/rollout
# 3. Remove conditional code when feature is fully rolled out
# 4. Remove flag from config within 3 months of full rollout
#
# See: docs/adr/0024-feature-flagging-strategy.md
# Backend Feature Flags
# FEATURE_BUGSINK_SYNC=false # Enable Bugsink error sync integration
# FEATURE_ADVANCED_RBAC=false # Enable advanced RBAC features
# FEATURE_NEW_DASHBOARD=false # Enable new dashboard experience
# FEATURE_BETA_RECIPES=false # Enable beta recipe features
# FEATURE_EXPERIMENTAL_AI=false # Enable experimental AI features
# FEATURE_DEBUG_MODE=false # Enable debug mode for development
# Frontend Feature Flags (VITE_ prefix required)
# VITE_FEATURE_NEW_DASHBOARD=false # Enable new dashboard experience
# VITE_FEATURE_BETA_RECIPES=false # Enable beta recipe features
# VITE_FEATURE_EXPERIMENTAL_AI=false # Enable experimental AI features
# VITE_FEATURE_DEBUG_MODE=false # Enable debug mode for development

View File

@@ -166,8 +166,8 @@ jobs:
npm install --omit=dev
# --- Cleanup Errored Processes ---
echo "Cleaning up errored or stopped PM2 processes..."
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') { console.log('Deleting ' + p.pm2_env.status + ' process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id); } } }); } catch (e) { console.error('Error cleaning up processes:', e); }"
echo "Cleaning up errored or stopped PRODUCTION PM2 processes..."
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); const prodProcesses = ['flyer-crawler-api', 'flyer-crawler-worker', 'flyer-crawler-analytics-worker']; list.forEach(p => { if ((p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') && prodProcesses.includes(p.name)) { console.log('Deleting ' + p.pm2_env.status + ' production process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id); } } }); console.log('✅ Production process cleanup complete.'); } catch (e) { console.error('Error cleaning up processes:', e); }"
# --- Version Check Logic ---
# Get the version from the newly deployed package.json

View File

@@ -490,8 +490,8 @@ jobs:
npm install --omit=dev
# --- Cleanup Errored Processes ---
echo "Cleaning up errored or stopped PM2 processes..."
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') { console.log('Deleting ' + p.pm2_env.status + ' process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id); } } }); } catch (e) { console.error('Error cleaning up processes:', e); }"
echo "Cleaning up errored or stopped TEST PM2 processes..."
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if ((p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') && p.name && p.name.endsWith('-test')) { console.log('Deleting ' + p.pm2_env.status + ' test process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id); } } }); console.log('✅ Test process cleanup complete.'); } catch (e) { console.error('Error cleaning up processes:', e); }"
# Use `startOrReload` with the TEST ecosystem file. This starts test-specific processes
# (flyer-crawler-api-test, flyer-crawler-worker-test, flyer-crawler-analytics-worker-test)

View File

@@ -56,9 +56,9 @@ jobs:
- name: Step 1 - Stop Application Server
run: |
echo "Stopping all PM2 processes to release database connections..."
pm2 stop all || echo "PM2 processes were not running."
echo "✅ Application server stopped."
echo "Stopping PRODUCTION PM2 processes to release database connections..."
pm2 stop flyer-crawler-api flyer-crawler-worker flyer-crawler-analytics-worker || echo "Production PM2 processes were not running."
echo "✅ Production application server stopped."
- name: Step 2 - Drop and Recreate Database
run: |

View File

@@ -139,8 +139,8 @@ jobs:
npm install --omit=dev
# --- Cleanup Errored Processes ---
echo "Cleaning up errored or stopped PM2 processes..."
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') { console.log('Deleting ' + p.pm2_env.status + ' process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id); } } }); } catch (e) { console.error('Error cleaning up processes:', e); }"
echo "Cleaning up errored or stopped PRODUCTION PM2 processes..."
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); const prodProcesses = ['flyer-crawler-api', 'flyer-crawler-worker', 'flyer-crawler-analytics-worker']; list.forEach(p => { if ((p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') && prodProcesses.includes(p.name)) { console.log('Deleting ' + p.pm2_env.status + ' production process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id); } } }); console.log('✅ Production process cleanup complete.'); } catch (e) { console.error('Error cleaning up processes:', e); }"
# --- Version Check Logic ---
# Get the version from the newly deployed package.json

8
.gitignore vendored
View File

@@ -14,6 +14,10 @@ dist-ssr
.env
*.tsbuildinfo
# tsoa generated files (regenerated on build)
src/routes/tsoa-generated.ts
src/config/tsoa-spec.json
# Test coverage
coverage
.nyc_output
@@ -38,3 +42,7 @@ Thumbs.db
.claude/settings.local.json
nul
tmpclaude*
test.tmp

1
.nvmrc Normal file
View File

@@ -0,0 +1 @@
22

189
CLAUDE.md
View File

@@ -27,6 +27,66 @@ podman exec -it flyer-crawler-dev npm run type-check
Out-of-sync = test failures.
### Server Access: READ-ONLY (Production/Test Servers)
**CRITICAL**: The `claude-win10` user has **READ-ONLY** access to production and test servers.
| Capability | Status |
| ---------------------- | ---------------------- |
| Root/sudo access | NO |
| Write permissions | NO |
| PM2 restart, systemctl | NO - User must execute |
**Server Operations Workflow**: Diagnose → User executes → Analyze → Fix (1-3 commands) → User executes → Verify
**Rules**:
- Provide diagnostic commands first, wait for user to report results
- Maximum 3 fix commands at a time (errors may cascade)
- Always verify after fixes complete
### PM2 Process Isolation (Production/Test Servers)
**CRITICAL**: Production and test environments share the same PM2 daemon on the server.
| Environment | Processes | Config File |
| ----------- | -------------------------------------------------------------------------------------------- | --------------------------- |
| Production | `flyer-crawler-api`, `flyer-crawler-worker`, `flyer-crawler-analytics-worker` | `ecosystem.config.cjs` |
| Test | `flyer-crawler-api-test`, `flyer-crawler-worker-test`, `flyer-crawler-analytics-worker-test` | `ecosystem-test.config.cjs` |
| Development | `flyer-crawler-api-dev`, `flyer-crawler-worker-dev`, `flyer-crawler-vite-dev` | `ecosystem.dev.config.cjs` |
**Deployment Scripts MUST:**
- ✅ Filter PM2 commands by exact process names or name patterns (e.g., `endsWith('-test')`)
- ❌ NEVER use `pm2 stop all`, `pm2 delete all`, or `pm2 restart all`
- ❌ NEVER delete/stop processes based solely on status without name filtering
- ✅ Always verify process names match the target environment before any operation
**Examples:**
```bash
# ✅ CORRECT - Production cleanup (filter by name)
pm2 stop flyer-crawler-api flyer-crawler-worker flyer-crawler-analytics-worker
# ✅ CORRECT - Test cleanup (filter by name pattern)
# Only delete test processes that are errored/stopped
list.forEach(p => {
if ((p.pm2_env.status === 'errored' || p.pm2_env.status === 'stopped') &&
p.name && p.name.endsWith('-test')) {
exec('pm2 delete ' + p.pm2_env.pm_id);
}
});
# ❌ WRONG - Affects all environments
pm2 stop all
pm2 delete all
# ❌ WRONG - No name filtering (could delete test processes during prod deploy)
if (p.pm2_env.status === 'errored') {
exec('pm2 delete ' + p.pm2_env.pm_id);
}
```
### Communication Style
Ask before assuming. Never assume:
@@ -60,25 +120,27 @@ Ask before assuming. Never assume:
### Key Patterns (with file locations)
| Pattern | ADR | Implementation | File |
| ------------------ | ------- | ------------------------------------------------- | ----------------------------------- |
| Error Handling | ADR-001 | `handleDbError()`, throw `NotFoundError` | `src/services/db/errors.db.ts` |
| Repository Methods | ADR-034 | `get*` (throws), `find*` (null), `list*` (array) | `src/services/db/*.db.ts` |
| API Responses | ADR-028 | `sendSuccess()`, `sendPaginated()`, `sendError()` | `src/utils/apiResponse.ts` |
| Transactions | ADR-002 | `withTransaction(async (client) => {...})` | `src/services/db/transaction.db.ts` |
| Pattern | ADR | Implementation | File |
| ------------------ | ------- | ------------------------------------------------- | ------------------------------------- |
| Error Handling | ADR-001 | `handleDbError()`, throw `NotFoundError` | `src/services/db/errors.db.ts` |
| Repository Methods | ADR-034 | `get*` (throws), `find*` (null), `list*` (array) | `src/services/db/*.db.ts` |
| API Responses | ADR-028 | `sendSuccess()`, `sendPaginated()`, `sendError()` | `src/utils/apiResponse.ts` |
| Transactions | ADR-002 | `withTransaction(async (client) => {...})` | `src/services/db/connection.db.ts` |
| Feature Flags | ADR-024 | `isFeatureEnabled()`, `useFeatureFlag()` | `src/services/featureFlags.server.ts` |
### Key Files Quick Access
| Purpose | File |
| ----------------- | -------------------------------- |
| Express app | `server.ts` |
| Environment | `src/config/env.ts` |
| Routes | `src/routes/*.routes.ts` |
| Repositories | `src/services/db/*.db.ts` |
| Workers | `src/services/workers.server.ts` |
| Queues | `src/services/queues.server.ts` |
| PM2 Config (Dev) | `ecosystem.dev.config.cjs` |
| PM2 Config (Prod) | `ecosystem.config.cjs` |
| Purpose | File |
| ----------------- | ------------------------------------- |
| Express app | `server.ts` |
| Environment | `src/config/env.ts` |
| Routes | `src/routes/*.routes.ts` |
| Repositories | `src/services/db/*.db.ts` |
| Workers | `src/services/workers.server.ts` |
| Queues | `src/services/queues.server.ts` |
| Feature Flags | `src/services/featureFlags.server.ts` |
| PM2 Config (Dev) | `ecosystem.dev.config.cjs` |
| PM2 Config (Prod) | `ecosystem.config.cjs` |
---
@@ -121,7 +183,7 @@ The dev container now matches production by using PM2 for process management.
- `flyer-crawler-worker-dev` - Background job worker
- `flyer-crawler-vite-dev` - Vite frontend dev server (port 5173)
### Log Aggregation (ADR-050)
### Log Aggregation (ADR-015)
All logs flow to Bugsink via Logstash with 3-project routing:
@@ -204,7 +266,7 @@ All logs flow to Bugsink via Logstash with 3-project routing:
**Launch Pattern**:
```
```text
Use Task tool with subagent_type: "coder", "db-dev", "tester", etc.
```
@@ -285,8 +347,8 @@ podman cp "d:/path/file" container:/tmp/file
**Quick Access**:
- **Dev**: https://localhost:8443 (`admin@localhost`/`admin`)
- **Prod**: https://bugsink.projectium.com
- **Dev**: <https://localhost:8443> (`admin@localhost`/`admin`)
- **Prod**: <https://bugsink.projectium.com>
**Token Creation** (required for MCP):
@@ -294,15 +356,15 @@ podman cp "d:/path/file" container:/tmp/file
# Dev container
MSYS_NO_PATHCONV=1 podman exec -e DATABASE_URL=postgresql://bugsink:bugsink_dev_password@postgres:5432/bugsink -e SECRET_KEY=dev-bugsink-secret-key-minimum-50-characters-for-security flyer-crawler-dev sh -c 'cd /opt/bugsink/conf && DJANGO_SETTINGS_MODULE=bugsink_conf PYTHONPATH=/opt/bugsink/conf:/opt/bugsink/lib/python3.10/site-packages /opt/bugsink/bin/python -m django create_auth_token'
# Production (via SSH)
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
# Production (user executes on server)
cd /opt/bugsink && bugsink-manage create_auth_token
```
### Logstash
**See**: [docs/operations/LOGSTASH-QUICK-REF.md](docs/operations/LOGSTASH-QUICK-REF.md)
Log aggregation: PostgreSQL + PM2 + Redis + NGINX → Bugsink (ADR-050)
Log aggregation: PostgreSQL + PM2 + Redis + NGINX → Bugsink (ADR-015)
---
@@ -322,84 +384,3 @@ Log aggregation: PostgreSQL + PM2 + Redis + NGINX → Bugsink (ADR-050)
| **Logstash** | [LOGSTASH-QUICK-REF.md](docs/operations/LOGSTASH-QUICK-REF.md) |
| **ADRs** | [docs/adr/index.md](docs/adr/index.md) |
| **All Docs** | [docs/README.md](docs/README.md) |
---
## Appendix: Integration Test Issues (Full Details)
### 1. Vitest globalSetup Context Isolation
Vitest's `globalSetup` runs in separate Node.js context. Singletons, spies, mocks do NOT share instances with test files.
**Affected**: BullMQ worker service mocks (AI/DB failure tests)
**Solutions**: Mark `.todo()`, create test-only API endpoints, use Redis-based mock flags
```typescript
// DOES NOT WORK - different instances
const { flyerProcessingService } = await import('../../services/workers.server');
flyerProcessingService._getAiProcessor()._setExtractAndValidateData(mockFn);
```
### 2. Cleanup Queue Deletes Before Verification
Cleanup worker processes jobs in globalSetup context, ignoring test spies.
**Solution**: Drain and pause queue:
```typescript
const { cleanupQueue } = await import('../../services/queues.server');
await cleanupQueue.drain();
await cleanupQueue.pause();
// ... test ...
await cleanupQueue.resume();
```
### 3. Cache Stale After Direct SQL
Direct `pool.query()` inserts bypass cache invalidation.
**Solution**: `await cacheService.invalidateFlyers();` after inserts
### 4. Test Filename Collisions
Multer predictable filenames cause race conditions.
**Solution**: Use unique suffix: `${Date.now()}-${Math.round(Math.random() * 1e9)}`
### 5. Response Format Mismatches
API formats change: `data.jobId` vs `data.job.id`, nested vs flat, string vs number IDs.
**Solution**: Log response bodies, update assertions
### 6. External Service Availability
PM2/Redis health checks fail when unavailable.
**Solution**: try/catch with graceful degradation or mock
### 7. TZ Environment Variable Breaking Async Hooks
**Problem**: When `TZ=America/Los_Angeles` (or other timezone values) is set in the environment, Node.js async_hooks module can produce `RangeError: Invalid triggerAsyncId value: NaN`. This breaks React Testing Library's `render()` function which uses async hooks internally.
**Root Cause**: Setting `TZ` to certain timezone values interferes with Node.js's internal async tracking mechanism, causing invalid async IDs to be generated.
**Symptoms**:
```text
RangeError: Invalid triggerAsyncId value: NaN
process.env.NODE_ENV.queueSeveralMicrotasks node_modules/react/cjs/react.development.js:751:15
process.env.NODE_ENV.exports.act node_modules/react/cjs/react.development.js:886:11
node_modules/@testing-library/react/dist/act-compat.js:46:25
renderRoot node_modules/@testing-library/react/dist/pure.js:189:26
```
**Solution**: Explicitly unset `TZ` in all test scripts by adding `TZ=` (empty value) to cross-env:
```json
"test:unit": "cross-env NODE_ENV=test TZ= tsx ..."
"test:integration": "cross-env NODE_ENV=test TZ= tsx ..."
```
**Context**: This issue was introduced in commit `d03900c` which added `TZ: 'America/Los_Angeles'` to PM2 ecosystem configs for consistent log timestamps in production/dev environments. Tests must explicitly override this to prevent the async hooks error.

View File

@@ -0,0 +1,393 @@
# AI Documentation Index
Machine-optimized navigation for AI agents. Structured for vector retrieval and semantic search.
---
## Quick Lookup Table
| Task/Question | Primary Doc | Section/ADR |
| ----------------------- | --------------------------------------------------- | --------------------------------------- |
| Add new API endpoint | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | API Response Patterns, Input Validation |
| Add repository method | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Repository Patterns (get*/find*/list\*) |
| Fix failing test | [TESTING.md](development/TESTING.md) | Known Integration Test Issues |
| Run tests correctly | [TESTING.md](development/TESTING.md) | Test Execution Environment |
| Add database column | [DATABASE-GUIDE.md](subagents/DATABASE-GUIDE.md) | Schema sync required |
| Deploy to production | [DEPLOYMENT.md](operations/DEPLOYMENT.md) | Application Deployment |
| Debug container issue | [DEBUGGING.md](development/DEBUGGING.md) | Container Issues |
| Configure environment | [ENVIRONMENT.md](getting-started/ENVIRONMENT.md) | Configuration by Environment |
| Add background job | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Background Jobs |
| Handle errors correctly | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Error Handling |
| Use transactions | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Transaction Management |
| Add authentication | [AUTHENTICATION.md](architecture/AUTHENTICATION.md) | JWT Token Architecture |
| Cache data | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Caching |
| Check PM2 status | [DEV-CONTAINER.md](development/DEV-CONTAINER.md) | PM2 Process Management |
| View logs | [DEBUGGING.md](development/DEBUGGING.md) | PM2 Log Access |
| Understand architecture | [OVERVIEW.md](architecture/OVERVIEW.md) | System Architecture Diagram |
| Check ADR for decision | [adr/index.md](adr/index.md) | ADR by category |
| Use subagent | [subagents/OVERVIEW.md](subagents/OVERVIEW.md) | Available Subagents |
| API versioning | [API-VERSIONING.md](development/API-VERSIONING.md) | Phase 2 infrastructure |
---
## Documentation Tree
```
docs/
+-- AI-DOCUMENTATION-INDEX.md # THIS FILE - AI navigation index
+-- README.md # Human-readable doc hub
|
+-- adr/ # Architecture Decision Records (57 ADRs)
| +-- index.md # ADR index by category
| +-- 0001-*.md # Standardized error handling
| +-- 0002-*.md # Transaction management (withTransaction)
| +-- 0003-*.md # Input validation (Zod middleware)
| +-- 0008-*.md # API versioning (/api/v1/)
| +-- 0014-*.md # Platform: Linux only (CRITICAL)
| +-- 0028-*.md # API response (sendSuccess/sendError)
| +-- 0034-*.md # Repository pattern (get*/find*/list*)
| +-- 0035-*.md # Service layer architecture
| +-- 0050-*.md # PostgreSQL observability + Logstash
| +-- 0057-*.md # Test remediation post-API versioning
| +-- adr-implementation-tracker.md # Implementation status
|
+-- architecture/
| +-- OVERVIEW.md # System architecture, data flows, entities
| +-- DATABASE.md # Schema design, extensions, setup
| +-- AUTHENTICATION.md # OAuth, JWT, security features
| +-- WEBSOCKET_USAGE.md # Real-time communication patterns
| +-- api-versioning-infrastructure.md # Phase 2 versioning details
|
+-- development/
| +-- CODE-PATTERNS.md # Error handling, repos, API responses
| +-- TESTING.md # Unit/integration/E2E, known issues
| +-- DEBUGGING.md # Container, DB, API, PM2 debugging
| +-- DEV-CONTAINER.md # PM2, Logstash, container services
| +-- API-VERSIONING.md # API versioning workflows
| +-- DESIGN_TOKENS.md # Neo-Brutalism design system
| +-- ERROR-LOGGING-PATHS.md # req.originalUrl pattern
| +-- test-path-migration.md # Test file reorganization
|
+-- getting-started/
| +-- QUICKSTART.md # Quick setup instructions
| +-- INSTALL.md # Full installation guide
| +-- ENVIRONMENT.md # Environment variables reference
|
+-- operations/
| +-- DEPLOYMENT.md # Production deployment guide
| +-- BARE-METAL-SETUP.md # Server provisioning
| +-- MONITORING.md # Bugsink, health checks
| +-- LOGSTASH-QUICK-REF.md # Log aggregation reference
| +-- LOGSTASH-TROUBLESHOOTING.md # Logstash debugging
|
+-- subagents/
| +-- OVERVIEW.md # Subagent system introduction
| +-- CODER-GUIDE.md # Code development patterns
| +-- TESTER-GUIDE.md # Testing strategies
| +-- DATABASE-GUIDE.md # Database workflows
| +-- DEVOPS-GUIDE.md # Deployment/infrastructure
| +-- FRONTEND-GUIDE.md # UI/UX development
| +-- AI-USAGE-GUIDE.md # Gemini integration
| +-- DOCUMENTATION-GUIDE.md # Writing docs
| +-- SECURITY-DEBUG-GUIDE.md # Security and debugging
|
+-- tools/
| +-- MCP-CONFIGURATION.md # MCP servers setup
| +-- BUGSINK-SETUP.md # Error tracking setup
| +-- VSCODE-SETUP.md # Editor configuration
|
+-- archive/ # Historical docs, session notes
+-- sessions/ # Development session logs
+-- plans/ # Feature implementation plans
+-- research/ # Investigation notes
```
---
## Problem-to-Document Mapping
### Database Issues
| Problem | Documents |
| -------------------- | ----------------------------------------------------------------------------------------------- |
| Schema out of sync | [DATABASE-GUIDE.md](subagents/DATABASE-GUIDE.md), [CLAUDE.md](../CLAUDE.md) schema sync section |
| Migration needed | [DATABASE.md](architecture/DATABASE.md), ADR-013, ADR-023 |
| Query performance | [DEBUGGING.md](development/DEBUGGING.md) Query Performance Issues |
| Connection errors | [DEBUGGING.md](development/DEBUGGING.md) Database Issues |
| Transaction patterns | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Transaction Management, ADR-002 |
| Repository methods | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Repository Patterns, ADR-034 |
### Test Failures
| Problem | Documents |
| ---------------------------- | --------------------------------------------------------------------- |
| Tests fail in container | [TESTING.md](development/TESTING.md), ADR-014 |
| Vitest globalSetup isolation | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #1 |
| Cache stale after insert | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #3 |
| Queue interference | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #2 |
| API path mismatches | [TESTING.md](development/TESTING.md) API Versioning in Tests, ADR-057 |
| Type check failures | [DEBUGGING.md](development/DEBUGGING.md) Type Check Failures |
| TZ environment breaks async | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #7 |
### Deployment Issues
| Problem | Documents |
| --------------------- | ------------------------------------------------------------------------------------- |
| PM2 not starting | [DEBUGGING.md](development/DEBUGGING.md) PM2 Process Issues |
| NGINX configuration | [DEPLOYMENT.md](operations/DEPLOYMENT.md) NGINX Configuration |
| SSL certificates | [DEBUGGING.md](development/DEBUGGING.md) SSL Certificate Issues |
| CI/CD failures | [DEPLOYMENT.md](operations/DEPLOYMENT.md) CI/CD Pipeline, ADR-017 |
| Container won't start | [DEBUGGING.md](development/DEBUGGING.md) Container Issues |
| Bugsink not receiving | [BUGSINK-SETUP.md](tools/BUGSINK-SETUP.md), [MONITORING.md](operations/MONITORING.md) |
### Frontend/UI Changes
| Problem | Documents |
| ------------------ | --------------------------------------------------------------- |
| Component patterns | [FRONTEND-GUIDE.md](subagents/FRONTEND-GUIDE.md), ADR-044 |
| Design tokens | [DESIGN_TOKENS.md](development/DESIGN_TOKENS.md), ADR-012 |
| State management | ADR-005, [OVERVIEW.md](architecture/OVERVIEW.md) Frontend Stack |
| Hot reload broken | [DEBUGGING.md](development/DEBUGGING.md) Frontend Issues |
| CORS errors | [DEBUGGING.md](development/DEBUGGING.md) API Calls Failing |
### API Development
| Problem | Documents |
| ---------------- | ------------------------------------------------------------------------------- |
| Response format | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) API Response Patterns, ADR-028 |
| Input validation | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Input Validation, ADR-003 |
| Error handling | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Error Handling, ADR-001 |
| Rate limiting | ADR-032, [OVERVIEW.md](architecture/OVERVIEW.md) |
| API versioning | [API-VERSIONING.md](development/API-VERSIONING.md), ADR-008 |
| Authentication | [AUTHENTICATION.md](architecture/AUTHENTICATION.md), ADR-048 |
### Background Jobs
| Problem | Documents |
| ------------------- | ------------------------------------------------------------------------- |
| Jobs not processing | [DEBUGGING.md](development/DEBUGGING.md) Background Job Issues |
| Queue configuration | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Background Jobs, ADR-006 |
| Worker crashes | [DEBUGGING.md](development/DEBUGGING.md), ADR-053 |
| Scheduled jobs | ADR-037, [OVERVIEW.md](architecture/OVERVIEW.md) Scheduled Jobs |
---
## Document Priority Matrix
### CRITICAL (Read First)
| Document | Purpose | Key Content |
| --------------------------------------------------------------- | ----------------------- | ----------------------------- |
| [CLAUDE.md](../CLAUDE.md) | AI agent instructions | Rules, patterns, known issues |
| [ADR-014](adr/0014-containerization-and-deployment-strategy.md) | Platform requirement | Tests MUST run in container |
| [DEV-CONTAINER.md](development/DEV-CONTAINER.md) | Development environment | PM2, Logstash, services |
### HIGH (Core Development)
| Document | Purpose | Key Content |
| --------------------------------------------------- | ----------------- | ---------------------------- |
| [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Code templates | Error handling, repos, APIs |
| [TESTING.md](development/TESTING.md) | Test execution | Commands, known issues |
| [DATABASE.md](architecture/DATABASE.md) | Schema reference | Setup, extensions, users |
| [ADR-034](adr/0034-repository-pattern-standards.md) | Repository naming | get*/find*/list\* |
| [ADR-028](adr/0028-api-response-standardization.md) | API responses | sendSuccess/sendError |
| [ADR-001](adr/0001-standardized-error-handling.md) | Error handling | handleDbError, NotFoundError |
### MEDIUM (Specialized Tasks)
| Document | Purpose | Key Content |
| --------------------------------------------------- | --------------------- | ------------------------ |
| [subagents/OVERVIEW.md](subagents/OVERVIEW.md) | Subagent selection | When to delegate |
| [DEPLOYMENT.md](operations/DEPLOYMENT.md) | Production deployment | PM2, NGINX, CI/CD |
| [DEBUGGING.md](development/DEBUGGING.md) | Troubleshooting | Common issues, solutions |
| [ENVIRONMENT.md](getting-started/ENVIRONMENT.md) | Config reference | Variables by environment |
| [AUTHENTICATION.md](architecture/AUTHENTICATION.md) | Auth patterns | OAuth, JWT, security |
| [API-VERSIONING.md](development/API-VERSIONING.md) | Versioning | /api/v1/ prefix |
### LOW (Reference/Historical)
| Document | Purpose | Key Content |
| -------------------- | ------------------ | ------------------------- |
| [archive/](archive/) | Historical docs | Session notes, old plans |
| ADR-013, ADR-023 | Migration strategy | Proposed, not implemented |
ADR-024 | Feature flags | Implemented (see `src/services/featureFlags.server.ts`) |
| ADR-025 | i18n/l10n | Proposed |
---
## Cross-Reference Matrix
| Document | References | Referenced By |
| -------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------ |
| **CLAUDE.md** | ADR-001, ADR-002, ADR-008, ADR-014, ADR-015, ADR-024, ADR-028, ADR-034, ADR-035, ADR-057 | All development docs |
| **ADR-008** | ADR-028 | API-VERSIONING.md, TESTING.md, ADR-057 |
| **ADR-014** | - | CLAUDE.md, TESTING.md, DEPLOYMENT.md, DEV-CONTAINER.md |
| **ADR-028** | ADR-001 | CODE-PATTERNS.md, OVERVIEW.md |
| **ADR-034** | ADR-001 | CODE-PATTERNS.md, DATABASE-GUIDE.md |
| **ADR-057** | ADR-008, ADR-028 | TESTING.md |
| **CODE-PATTERNS.md** | ADR-001, ADR-002, ADR-003, ADR-028, ADR-034, ADR-036, ADR-048 | CODER-GUIDE.md |
| **TESTING.md** | ADR-014, ADR-057, CLAUDE.md | TESTER-GUIDE.md, DEBUGGING.md |
| **DEBUGGING.md** | DEV-CONTAINER.md, TESTING.md, MONITORING.md | DEVOPS-GUIDE.md |
| **DEV-CONTAINER.md** | ADR-014, ADR-050, ecosystem.dev.config.cjs | DEBUGGING.md, CLAUDE.md |
| **OVERVIEW.md** | ADR-001 through ADR-050+ | All architecture docs |
| **DATABASE.md** | ADR-002, ADR-013, ADR-055 | DATABASE-GUIDE.md |
---
## Navigation Patterns
### Adding a Feature
```
1. CLAUDE.md -> Project rules, patterns
2. CODE-PATTERNS.md -> Implementation templates
3. Relevant subagent guide -> Domain-specific patterns
4. Related ADRs -> Design decisions
5. TESTING.md -> Test requirements
```
### Fixing a Bug
```
1. DEBUGGING.md -> Common issues checklist
2. TESTING.md -> Run tests in container
3. Error logs (pm2/bugsink) -> Identify root cause
4. CODE-PATTERNS.md -> Correct pattern reference
5. Related ADR -> Architectural context
```
### Deploying
```
1. DEPLOYMENT.md -> Deployment procedures
2. ENVIRONMENT.md -> Required variables
3. MONITORING.md -> Health check verification
4. LOGSTASH-QUICK-REF.md -> Log aggregation check
```
### Database Changes
```
1. DATABASE-GUIDE.md -> Schema sync requirements (CRITICAL)
2. DATABASE.md -> Schema design patterns
3. ADR-002 -> Transaction patterns
4. ADR-034 -> Repository methods
5. ADR-055 -> Normalization rules
```
### Subagent Selection
| Task Type | Subagent | Guide |
| --------------------- | ------------------------- | ------------------------------------------------------------ |
| Write production code | `coder` | [CODER-GUIDE.md](subagents/CODER-GUIDE.md) |
| Database changes | `db-dev` | [DATABASE-GUIDE.md](subagents/DATABASE-GUIDE.md) |
| Create tests | `testwriter` | [TESTER-GUIDE.md](subagents/TESTER-GUIDE.md) |
| Fix failing tests | `tester` | [TESTER-GUIDE.md](subagents/TESTER-GUIDE.md) |
| Container/deployment | `devops` | [DEVOPS-GUIDE.md](subagents/DEVOPS-GUIDE.md) |
| UI components | `frontend-specialist` | [FRONTEND-GUIDE.md](subagents/FRONTEND-GUIDE.md) |
| External APIs | `integrations-specialist` | - |
| Security review | `security-engineer` | [SECURITY-DEBUG-GUIDE.md](subagents/SECURITY-DEBUG-GUIDE.md) |
| Production errors | `log-debug` | [SECURITY-DEBUG-GUIDE.md](subagents/SECURITY-DEBUG-GUIDE.md) |
| AI/Gemini issues | `ai-usage` | [AI-USAGE-GUIDE.md](subagents/AI-USAGE-GUIDE.md) |
---
## Key File Quick Reference
### Configuration
| File | Purpose |
| -------------------------- | ---------------------------- |
| `server.ts` | Express app setup |
| `src/config/env.ts` | Environment validation (Zod) |
| `ecosystem.dev.config.cjs` | PM2 dev config |
| `ecosystem.config.cjs` | PM2 prod config |
| `vite.config.ts` | Vite build config |
### Core Implementation
| File | Purpose |
| ----------------------------------- | ----------------------------------- |
| `src/routes/*.routes.ts` | API route handlers |
| `src/services/db/*.db.ts` | Repository layer |
| `src/services/*.server.ts` | Server-only services |
| `src/services/queues.server.ts` | BullMQ queue definitions |
| `src/services/workers.server.ts` | BullMQ workers |
| `src/utils/apiResponse.ts` | sendSuccess/sendError/sendPaginated |
| `src/services/db/errors.db.ts` | handleDbError, NotFoundError |
| `src/services/db/transaction.db.ts` | withTransaction |
### Database Schema
| File | Purpose |
| ------------------------------ | ----------------------------------- |
| `sql/master_schema_rollup.sql` | Test DB, complete reference |
| `sql/initial_schema.sql` | Fresh install (identical to rollup) |
| `sql/migrations/*.sql` | Production ALTER statements |
### Testing
| File | Purpose |
| ---------------------------------- | ----------------------- |
| `vitest.config.ts` | Unit test config |
| `vitest.config.integration.ts` | Integration test config |
| `vitest.config.e2e.ts` | E2E test config |
| `src/tests/utils/mockFactories.ts` | Mock data factories |
| `src/tests/utils/storeHelpers.ts` | Store test helpers |
---
## ADR Quick Reference
### By Implementation Status
**Implemented**: 001, 002, 003, 004, 006, 008, 009, 010, 016, 017, 020, 021, 028, 032, 033, 034, 035, 036, 037, 038, 040, 041, 043, 044, 045, 046, 050, 051, 052, 055, 057
**Partially Implemented**: 012, 014, 015, 048
**Proposed**: 011, 013, 022, 023, 024, 025, 029, 030, 031, 039, 047, 053, 054, 056
### By Category
| Category | ADRs |
| --------------------- | ------------------------------------------- |
| Core Infrastructure | 002, 007, 020, 030 |
| Data Management | 009, 013, 019, 023, 031, 055 |
| API & Integration | 003, 008, 018, 022, 028 |
| Security | 001, 011, 016, 029, 032, 033, 048 |
| Observability | 004, 015, 050, 051, 052, 056 |
| Deployment & Ops | 006, 014, 017, 024, 037, 038, 053, 054 |
| Frontend/UI | 005, 012, 025, 026, 044 |
| Dev Workflow | 010, 021, 027, 040, 045, 047, 057 |
| Architecture Patterns | 034, 035, 036, 039, 041, 042, 043, 046, 049 |
---
## Essential Commands
```bash
# Run all tests (MUST use container)
podman exec -it flyer-crawler-dev npm test
# Run unit tests
podman exec -it flyer-crawler-dev npm run test:unit
# Run type check
podman exec -it flyer-crawler-dev npm run type-check
# Run integration tests
podman exec -it flyer-crawler-dev npm run test:integration
# PM2 status
podman exec -it flyer-crawler-dev pm2 status
# PM2 logs
podman exec -it flyer-crawler-dev pm2 logs
# Restart all processes
podman exec -it flyer-crawler-dev pm2 restart all
```
---
_This index is optimized for AI agent consumption. Updated: 2026-01-28_

View File

@@ -1,5 +1,21 @@
# DevOps Subagent Reference
## Critical Rule: Server Access is READ-ONLY
**Claude Code has READ-ONLY access to production/test servers.** The `claude-win10` user cannot execute write operations directly.
When working with production/test servers:
1. **Provide commands** for the user to execute (do not attempt SSH)
2. **Wait for user** to report command output
3. **Provide fix commands** 1-3 at a time (errors may cascade)
4. **Verify success** with read-only commands after user executes fixes
5. **Document findings** in relevant documentation
Commands in this reference are for the **user to run on the server**, not for Claude to execute.
---
## Critical Rule: Git Bash Path Conversion
Git Bash on Windows auto-converts Unix paths, breaking container commands.
@@ -69,12 +85,11 @@ MSYS_NO_PATHCONV=1 podman exec -it flyer-crawler-dev psql -U postgres -d flyer_c
## PM2 Commands
### Production Server (via SSH)
### Production Server
> **Note**: These commands are for the **user to execute on the server**. Claude Code provides commands but cannot run them directly. See [Server Access is READ-ONLY](#critical-rule-server-access-is-read-only) above.
```bash
# SSH to server
ssh root@projectium.com
# List all apps
pm2 list
@@ -210,9 +225,10 @@ INFO
### Production
> **Note**: User executes these commands on the server.
```bash
# Via SSH
ssh root@projectium.com
# Access Redis CLI
redis-cli -a $REDIS_PASSWORD
# Flush cache (use with caution)
@@ -278,10 +294,9 @@ Trigger `manual-db-backup.yml` from Gitea Actions UI.
### Manual Backup
```bash
# SSH to server
ssh root@projectium.com
> **Note**: User executes these commands on the server.
```bash
# Backup
PGPASSWORD=$DB_PASSWORD pg_dump -h $DB_HOST -U $DB_USER $DB_NAME > backup_$(date +%Y%m%d).sql
@@ -301,8 +316,10 @@ MSYS_NO_PATHCONV=1 podman exec -e DATABASE_URL=postgresql://bugsink:bugsink_dev_
### Production Token Generation
> **Note**: User executes this command on the server.
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
cd /opt/bugsink && bugsink-manage create_auth_token
```
---

View File

@@ -316,6 +316,7 @@ app.use('/api/v1', (req, res, next) => {
- [ADR-018](./0018-api-documentation-strategy.md) - API Documentation Strategy (versioned OpenAPI specs)
- [ADR-028](./0028-api-response-standardization.md) - Response Standardization (envelope pattern applies to all versions)
- [ADR-016](./0016-api-security-hardening.md) - Security Hardening (applies to all versions)
- [ADR-057](./0057-test-remediation-post-api-versioning.md) - Test Remediation Post-API Versioning (documents test migration)
## Implementation Checklist

View File

@@ -363,6 +363,13 @@ The following files contain acknowledged code smell violations that are deferred
- `src/tests/utils/mockFactories.ts` - Mock factories (1553 lines)
- `src/tests/utils/testHelpers.ts` - Test utilities
## Related ADRs
- [ADR-014](./0014-containerization-and-deployment-strategy.md) - Containerization (tests must run in dev container)
- [ADR-040](./0040-testing-economics-and-priorities.md) - Testing Economics and Priorities
- [ADR-045](./0045-test-data-factories-and-fixtures.md) - Test Data Factories and Fixtures
- [ADR-057](./0057-test-remediation-post-api-versioning.md) - Test Remediation Post-API Versioning
## Future Enhancements
1. **Browser E2E Tests**: Consider adding Playwright for actual browser testing

View File

@@ -2,7 +2,9 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Superseded by [ADR-023](./0023-database-schema-migration-strategy.md)
**Note**: This ADR was an early draft. ADR-023 provides a more detailed specification for the same topic.
## Context

View File

@@ -2,9 +2,11 @@
**Date**: 2025-12-12
**Status**: Accepted
**Status**: Superseded
**Implemented**: 2026-01-11
**Superseded By**: This ADR was updated in February 2026 to reflect the migration from swagger-jsdoc to tsoa. The original approach using JSDoc annotations has been replaced with a decorator-based controller pattern.
**Implemented**: 2026-02-12
## Context
@@ -16,139 +18,296 @@ Key requirements:
2. **Code-Documentation Sync**: Documentation should stay in sync with the actual code to prevent drift.
3. **Low Maintenance Overhead**: The documentation approach should be "fast and lite" - minimal additional work for developers.
4. **Security**: Documentation should not expose sensitive information in production environments.
5. **Type Safety**: Documentation should be derived from TypeScript types to ensure accuracy.
### Why We Migrated from swagger-jsdoc to tsoa
The original implementation used `swagger-jsdoc` to generate OpenAPI specs from JSDoc comments. This approach had several limitations:
| Issue | Impact |
| --------------------------------------- | -------------------------------------------- |
| `swagger-jsdoc` unmaintained since 2022 | Security and compatibility risks |
| JSDoc duplication with TypeScript types | Maintenance burden, potential for drift |
| No runtime validation from schema | Validation logic separate from documentation |
| Manual type definitions in comments | Error-prone, no compiler verification |
## Decision
We will adopt **OpenAPI 3.0 (Swagger)** for API documentation using the following approach:
We adopt **tsoa** for API documentation using a decorator-based controller pattern:
1. **JSDoc Annotations**: Use `swagger-jsdoc` to generate OpenAPI specs from JSDoc comments in route files.
2. **Swagger UI**: Use `swagger-ui-express` to serve interactive documentation at `/docs/api-docs`.
3. **Environment Restriction**: Only expose the Swagger UI in development and test environments, not production.
4. **Incremental Adoption**: Start with key public routes and progressively add annotations to all endpoints.
1. **Controller Classes**: Use tsoa decorators (`@Route`, `@Get`, `@Post`, `@Security`, etc.) on controller classes.
2. **TypeScript-First**: OpenAPI specs are generated directly from TypeScript interfaces and types.
3. **Swagger UI**: Continue using `swagger-ui-express` to serve interactive documentation at `/docs/api-docs`.
4. **Environment Restriction**: Only expose the Swagger UI in development and test environments, not production.
5. **BaseController Pattern**: All controllers extend a base class providing response formatting utilities.
### Tooling Selection
| Tool | Purpose |
| -------------------- | ---------------------------------------------- |
| `swagger-jsdoc` | Generates OpenAPI 3.0 spec from JSDoc comments |
| `swagger-ui-express` | Serves interactive Swagger UI |
| Tool | Purpose |
| -------------------- | ----------------------------------------------------- |
| `tsoa` (6.6.0) | Generates OpenAPI 3.0 spec from decorators and routes |
| `swagger-ui-express` | Serves interactive Swagger UI |
**Why JSDoc over separate schema files?**
**Why tsoa over swagger-jsdoc?**
- Documentation lives with the code, reducing drift
- No separate files to maintain
- Developers see documentation when editing routes
- Lower learning curve for the team
- **Type-safe contracts**: Decorators derive types directly from TypeScript, eliminating duplicate definitions
- **Active maintenance**: tsoa has an active community and regular releases
- **Route generation**: tsoa generates Express routes automatically, reducing boilerplate
- **Validation integration**: Request body types serve as validation contracts
- **Reduced duplication**: No more parallel JSDoc + TypeScript type definitions
## Implementation Details
### OpenAPI Configuration
### tsoa Configuration
Located in `src/config/swagger.ts`:
Located in `tsoa.json`:
```typescript
import swaggerJsdoc from 'swagger-jsdoc';
const options: swaggerJsdoc.Options = {
definition: {
openapi: '3.0.0',
info: {
title: 'Flyer Crawler API',
version: '1.0.0',
description: 'API for the Flyer Crawler application',
contact: {
name: 'API Support',
},
},
servers: [
{
url: '/api',
description: 'API server',
},
],
components: {
securitySchemes: {
bearerAuth: {
type: 'http',
scheme: 'bearer',
bearerFormat: 'JWT',
},
},
```json
{
"entryFile": "server.ts",
"noImplicitAdditionalProperties": "throw-on-extras",
"controllerPathGlobs": ["src/controllers/**/*.controller.ts"],
"spec": {
"outputDirectory": "src/config",
"specVersion": 3,
"securityDefinitions": {
"bearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "JWT"
}
},
"basePath": "/api",
"specFileBaseName": "tsoa-spec",
"name": "Flyer Crawler API",
"version": "1.0.0"
},
apis: ['./src/routes/*.ts'],
};
export const swaggerSpec = swaggerJsdoc(options);
"routes": {
"routesDir": "src/routes",
"basePath": "/api",
"middleware": "express",
"routesFileName": "tsoa-generated.ts",
"esm": true,
"authenticationModule": "src/middleware/tsoaAuthentication.ts"
}
}
```
### JSDoc Annotation Pattern
### Controller Pattern
Each route handler should include OpenAPI annotations using the `@openapi` tag:
Each controller extends `BaseController` and uses tsoa decorators:
```typescript
/**
* @openapi
* /health/ping:
* get:
* summary: Simple ping endpoint
* description: Returns a pong response to verify server is responsive
* tags:
* - Health
* responses:
* 200:
* description: Server is responsive
* content:
* application/json:
* schema:
* type: object
* properties:
* success:
* type: boolean
* example: true
* data:
* type: object
* properties:
* message:
* type: string
* example: pong
*/
router.get('/ping', validateRequest(emptySchema), (_req: Request, res: Response) => {
return sendSuccess(res, { message: 'pong' });
});
import { Route, Tags, Get, Post, Body, Request, Security, SuccessResponse, Response } from 'tsoa';
import {
BaseController,
SuccessResponse as SuccessResponseType,
ErrorResponse,
} from './base.controller';
interface CreateUserRequest {
email: string;
password: string;
full_name?: string;
}
@Route('users')
@Tags('Users')
export class UserController extends BaseController {
/**
* Create a new user account.
* @summary Create user
* @param requestBody User creation data
* @returns Created user profile
*/
@Post()
@SuccessResponse(201, 'User created')
@Response<ErrorResponse>(400, 'Validation error')
@Response<ErrorResponse>(409, 'Email already exists')
public async createUser(
@Body() requestBody: CreateUserRequest,
): Promise<SuccessResponseType<UserProfileDto>> {
// Implementation
const user = await userService.createUser(requestBody);
return this.created(user);
}
/**
* Get current user's profile.
* @summary Get my profile
* @param request Express request with authenticated user
* @returns User profile
*/
@Get('me')
@Security('bearerAuth')
@SuccessResponse(200, 'Profile retrieved')
@Response<ErrorResponse>(401, 'Not authenticated')
public async getMyProfile(
@Request() request: Express.Request,
): Promise<SuccessResponseType<UserProfileDto>> {
const user = request.user as UserProfile;
return this.success(toUserProfileDto(user));
}
}
```
### Route Documentation Priority
### BaseController Helpers
Document routes in this order of priority:
The `BaseController` class provides standardized response formatting:
1. **Health Routes** - `/api/health/*` (public, critical for operations)
2. **Auth Routes** - `/api/auth/*` (public, essential for integration)
3. **Gamification Routes** - `/api/achievements/*` (simple, good example)
4. **Flyer Routes** - `/api/flyers/*` (core functionality)
5. **User Routes** - `/api/users/*` (common CRUD patterns)
6. **Remaining Routes** - Budget, Recipe, Admin, etc.
```typescript
export abstract class BaseController extends Controller {
// Success response with data
protected success<T>(data: T): SuccessResponse<T> {
return { success: true, data };
}
// Success with 201 Created status
protected created<T>(data: T): SuccessResponse<T> {
this.setStatus(201);
return this.success(data);
}
// Paginated response with metadata
protected paginated<T>(data: T[], pagination: PaginationInput): PaginatedResponse<T> {
return {
success: true,
data,
meta: { pagination: this.calculatePagination(pagination) },
};
}
// Message-only response
protected message(message: string): SuccessResponse<{ message: string }> {
return this.success({ message });
}
// No content response (204)
protected noContent(): void {
this.setStatus(204);
}
// Error response (prefer throwing errors instead)
protected error(code: string, message: string, details?: unknown): ErrorResponse {
return { success: false, error: { code, message, details } };
}
}
```
### Authentication with @Security
tsoa integrates with the existing passport-jwt strategy via a custom authentication module:
```typescript
// src/middleware/tsoaAuthentication.ts
export async function expressAuthentication(
request: Request,
securityName: string,
_scopes?: string[],
): Promise<UserProfile> {
if (securityName !== 'bearerAuth') {
throw new AuthenticationError(`Unknown security scheme: ${securityName}`);
}
const token = extractBearerToken(request);
const decoded = jwt.verify(token, process.env.JWT_SECRET!);
const userProfile = await userRepo.findUserProfileById(decoded.user_id);
if (!userProfile) {
throw new AuthenticationError('User not found');
}
request.user = userProfile;
return userProfile;
}
```
Usage in controllers:
```typescript
@Get('profile')
@Security('bearerAuth')
public async getProfile(@Request() req: Express.Request): Promise<...> {
const user = req.user as UserProfile;
// ...
}
```
### DTO Organization
Shared DTOs are defined in `src/dtos/common.dto.ts` to avoid duplicate type definitions across controllers:
```typescript
// src/dtos/common.dto.ts
/**
* Address with flattened coordinates (tsoa-compatible).
* GeoJSONPoint uses coordinates: [number, number] which tsoa cannot handle.
*/
export interface AddressDto {
address_id: number;
address_line_1: string;
city: string;
province_state: string;
postal_code: string;
country: string;
latitude?: number | null; // Flattened from GeoJSONPoint
longitude?: number | null; // Flattened from GeoJSONPoint
// ...
}
export interface UserDto {
user_id: string;
email: string;
created_at: string;
updated_at: string;
}
export interface UserProfileDto {
full_name?: string | null;
role: 'admin' | 'user';
points: number;
user: UserDto;
address?: AddressDto | null;
// ...
}
```
### Swagger UI Setup
In `server.ts`, add the Swagger UI middleware (development/test only):
In `server.ts`, the Swagger UI middleware serves the tsoa-generated spec:
```typescript
import swaggerUi from 'swagger-ui-express';
import { swaggerSpec } from './src/config/swagger';
import tsoaSpec from './src/config/tsoa-spec.json' with { type: 'json' };
// Only serve Swagger UI in non-production environments
if (process.env.NODE_ENV !== 'production') {
app.use('/docs/api-docs', swaggerUi.serve, swaggerUi.setup(swaggerSpec));
app.use('/docs/api-docs', swaggerUi.serve, swaggerUi.setup(tsoaSpec));
// Optionally expose raw JSON spec for tooling
// Raw JSON spec for tooling
app.get('/docs/api-docs.json', (_req, res) => {
res.setHeader('Content-Type', 'application/json');
res.send(swaggerSpec);
res.send(tsoaSpec);
});
}
```
### Build Integration
tsoa spec and route generation is integrated into the build pipeline:
```json
{
"scripts": {
"tsoa:spec": "tsoa spec",
"tsoa:routes": "tsoa routes",
"prebuild": "npm run tsoa:spec && npm run tsoa:routes",
"build": "tsc"
}
}
```
### Response Schema Standardization
All API responses follow the standardized format from [ADR-028](./0028-api-response-standardization.md):
@@ -160,107 +319,144 @@ All API responses follow the standardized format from [ADR-028](./0028-api-respo
"data": { ... }
}
// Paginated response
{
"success": true,
"data": [...],
"meta": {
"pagination": {
"page": 1,
"limit": 20,
"total": 100,
"totalPages": 5,
"hasNextPage": true,
"hasPrevPage": false
}
}
}
// Error response
{
"success": false,
"error": {
"code": "ERROR_CODE",
"message": "Human-readable message"
"code": "NOT_FOUND",
"message": "User not found"
}
}
```
Define reusable schema components for these patterns:
```typescript
/**
* @openapi
* components:
* schemas:
* SuccessResponse:
* type: object
* properties:
* success:
* type: boolean
* example: true
* data:
* type: object
* ErrorResponse:
* type: object
* properties:
* success:
* type: boolean
* example: false
* error:
* type: object
* properties:
* code:
* type: string
* message:
* type: string
*/
```
### Security Considerations
1. **Production Disabled**: Swagger UI is not available in production to prevent information disclosure.
2. **No Sensitive Data**: Never include actual secrets, tokens, or PII in example values.
3. **Authentication Documented**: Clearly document which endpoints require authentication.
## API Route Tags
Organize endpoints using consistent tags:
| Tag | Description | Routes |
| Tag | Description | Route Prefix |
| ------------ | ---------------------------------- | --------------------- |
| Health | Server health and readiness checks | `/api/health/*` |
| Auth | Authentication and authorization | `/api/auth/*` |
| Users | User profile management | `/api/users/*` |
| Flyers | Flyer uploads and retrieval | `/api/flyers/*` |
| Achievements | Gamification and leaderboards | `/api/achievements/*` |
| Budgets | Budget tracking | `/api/budgets/*` |
| Deals | Deal search and management | `/api/deals/*` |
| Stores | Store information | `/api/stores/*` |
| Recipes | Recipe management | `/api/recipes/*` |
| Budgets | Budget tracking | `/api/budgets/*` |
| Inventory | User inventory management | `/api/inventory/*` |
| Gamification | Achievements and leaderboards | `/api/achievements/*` |
| Admin | Administrative operations | `/api/admin/*` |
| System | System status and monitoring | `/api/system/*` |
## Controller Inventory
The following controllers have been migrated to tsoa:
| Controller | Endpoints | Description |
| ------------------------------- | --------- | ----------------------------------------- |
| `health.controller.ts` | 10 | Health checks, probes, service status |
| `auth.controller.ts` | 8 | Login, register, password reset, OAuth |
| `user.controller.ts` | 30 | User profiles, preferences, notifications |
| `admin.controller.ts` | 32 | System administration, user management |
| `ai.controller.ts` | 15 | AI-powered extraction and analysis |
| `flyer.controller.ts` | 12 | Flyer upload and management |
| `store.controller.ts` | 8 | Store information |
| `recipe.controller.ts` | 10 | Recipe CRUD and suggestions |
| `upc.controller.ts` | 6 | UPC barcode lookups |
| `inventory.controller.ts` | 8 | User inventory management |
| `receipt.controller.ts` | 6 | Receipt processing |
| `budget.controller.ts` | 8 | Budget tracking |
| `category.controller.ts` | 4 | Category management |
| `deals.controller.ts` | 8 | Deal search and discovery |
| `stats.controller.ts` | 6 | Usage statistics |
| `price.controller.ts` | 6 | Price history and tracking |
| `system.controller.ts` | 4 | System status |
| `gamification.controller.ts` | 10 | Achievements, leaderboards |
| `personalization.controller.ts` | 6 | User recommendations |
| `reactions.controller.ts` | 4 | Item reactions and ratings |
## Security Considerations
1. **Production Disabled**: Swagger UI is not available in production to prevent information disclosure.
2. **No Sensitive Data**: Never include actual secrets, tokens, or PII in example values.
3. **Authentication Documented**: Clearly document which endpoints require authentication.
4. **Rate Limiting**: Rate limiters are applied via `@Middlewares` decorator.
## Testing
Verify API documentation is correct by:
1. **Manual Review**: Navigate to `/docs/api-docs` and test each endpoint.
2. **Spec Validation**: Use OpenAPI validators to check the generated spec.
3. **Integration Tests**: Existing integration tests serve as implicit documentation verification.
3. **Controller Tests**: Each controller has comprehensive test coverage (369 controller tests total).
4. **Integration Tests**: 345 integration tests verify endpoint behavior.
## Consequences
### Positive
- **Single Source of Truth**: Documentation lives with the code and stays in sync.
- **Interactive Exploration**: Developers can try endpoints directly from the UI.
- **SDK Generation**: OpenAPI spec enables automatic client SDK generation.
- **Onboarding**: New developers can quickly understand the API surface.
- **Low Overhead**: JSDoc annotations are minimal additions to existing code.
- **Type-safe API contracts**: tsoa decorators derive types from TypeScript, eliminating duplicate definitions
- **Single Source of Truth**: Documentation lives with the code and stays in sync
- **Active Maintenance**: tsoa is actively maintained with regular releases
- **Interactive Exploration**: Developers can try endpoints directly from Swagger UI
- **SDK Generation**: OpenAPI spec enables automatic client SDK generation
- **Reduced Boilerplate**: tsoa generates Express routes automatically
### Negative
- **Maintenance Required**: Developers must update annotations when routes change.
- **Build Dependency**: Adds `swagger-jsdoc` and `swagger-ui-express` packages.
- **Initial Investment**: Existing routes need annotations added incrementally.
- **Learning Curve**: Decorator-based controller pattern differs from Express handlers
- **Generated Code**: `tsoa-generated.ts` must be regenerated when controllers change
- **Build Step**: Adds `tsoa spec && tsoa routes` to the build pipeline
### Mitigation
- Include documentation checks in code review process.
- Start with high-priority routes and expand coverage over time.
- Use TypeScript types to reduce documentation duplication where possible.
- **Migration Guide**: Created comprehensive TSOA-MIGRATION-GUIDE.md for developers
- **BaseController**: Provides familiar response helpers matching existing patterns
- **Incremental Adoption**: Existing Express routes continue to work alongside tsoa controllers
## Key Files
- `src/config/swagger.ts` - OpenAPI configuration
- `src/routes/*.ts` - Route files with JSDoc annotations
- `server.ts` - Swagger UI middleware setup
| File | Purpose |
| -------------------------------------- | --------------------------------------- |
| `tsoa.json` | tsoa configuration |
| `src/controllers/base.controller.ts` | Base controller with response utilities |
| `src/controllers/types.ts` | Shared controller type definitions |
| `src/controllers/*.controller.ts` | Individual domain controllers |
| `src/dtos/common.dto.ts` | Shared DTO definitions |
| `src/middleware/tsoaAuthentication.ts` | JWT authentication handler |
| `src/routes/tsoa-generated.ts` | tsoa-generated Express routes |
| `src/config/tsoa-spec.json` | Generated OpenAPI 3.0 spec |
| `server.ts` | Swagger UI middleware setup |
## Migration History
| Date | Change |
| ---------- | --------------------------------------------------------------- |
| 2025-12-12 | Initial ADR created with swagger-jsdoc approach |
| 2026-01-11 | Began implementation with swagger-jsdoc |
| 2026-02-12 | Completed migration to tsoa, superseding swagger-jsdoc approach |
## Related ADRs
- [ADR-059](./0059-dependency-modernization.md) - Dependency Modernization (tsoa migration plan)
- [ADR-003](./0003-standardized-input-validation-using-middleware.md) - Input Validation (Zod schemas)
- [ADR-028](./0028-api-response-standardization.md) - Response Standardization
- [ADR-001](./0001-standardized-error-handling.md) - Error Handling
- [ADR-016](./0016-api-security-hardening.md) - Security Hardening
- [ADR-048](./0048-authentication-strategy.md) - Authentication Strategy

View File

@@ -4,6 +4,8 @@
**Status**: Proposed
**Supersedes**: [ADR-013](./0013-database-schema-migration-strategy.md)
## Context
The `README.md` indicates that the database schema is managed by manually running a large `schema.sql.txt` file. This approach is highly error-prone, makes tracking changes difficult, and is not feasible for updating a live production database without downtime or data loss.

View File

@@ -1,18 +1,333 @@
# ADR-024: Feature Flagging Strategy
**Date**: 2025-12-12
**Status**: Accepted
**Implemented**: 2026-01-28
**Implementation Plan**: [2026-01-28-adr-024-feature-flags-implementation.md](../plans/2026-01-28-adr-024-feature-flags-implementation.md)
**Status**: Proposed
## Implementation Summary
Feature flag infrastructure fully implemented with 89 new tests (all passing). Total test suite: 3,616 tests passing.
**Backend**:
- Zod-validated schema in `src/config/env.ts` with 6 feature flags
- Service module `src/services/featureFlags.server.ts` with `isFeatureEnabled()`, `getFeatureFlags()`, `getEnabledFeatureFlags()`
- Admin endpoint `GET /api/v1/admin/feature-flags` (requires admin authentication)
- Convenience exports for direct boolean access
**Frontend**:
- Config section in `src/config.ts` with `VITE_FEATURE_*` environment variables
- Type declarations in `src/vite-env.d.ts`
- React hook `useFeatureFlag()` and `useAllFeatureFlags()` in `src/hooks/useFeatureFlag.ts`
- Declarative component `<FeatureFlag>` in `src/components/FeatureFlag.tsx`
**Current Flags**: `bugsinkSync`, `advancedRbac`, `newDashboard`, `betaRecipes`, `experimentalAi`, `debugMode`
---
## Context
As the application grows, there is no way to roll out new features to a subset of users (e.g., for beta testing) or to quickly disable a problematic feature in production without a full redeployment.
Application lacks controlled feature rollout capability. No mechanism for beta testing, quick production disablement, or gradual rollouts without full redeployment. Need type-safe, configuration-based system integrating with ADR-007 Zod validation.
## Decision
We will implement a feature flagging system. This could start with a simple configuration-based approach (defined in `ADR-007`) and evolve to use a dedicated service like **Flagsmith** or **LaunchDarkly**. This ADR will define how feature flags are created, managed, and checked in both the backend and frontend code.
Implement environment-variable-based feature flag system. Backend: Zod-validated schema in `src/config/env.ts` + dedicated service. Frontend: Vite env vars + React hook + declarative component. All flags default `false` (opt-in model). Future migration path to Flagsmith/LaunchDarkly preserved via abstraction layer.
## Consequences
**Positive**: Decouples feature releases from code deployments, reducing risk and allowing for more controlled, gradual rollouts and A/B testing. Enables easier experimentation and faster iteration.
**Negative**: Adds complexity to the codebase with conditional logic around features. Requires careful management of feature flag states to avoid technical debt.
- **Positive**: Decouples releases from deployments → reduced risk, gradual rollouts, A/B testing capability
- **Negative**: Conditional logic complexity → requires sunset policy (3-month max after full rollout)
- **Neutral**: Restart required for flag changes (acceptable for current scale, external service removes this constraint)
---
## Implementation Details
### Architecture Overview
```text
Environment Variables (FEATURE_*, VITE_FEATURE_*)
├── Backend ──► src/config/env.ts (Zod) ──► src/services/featureFlags.server.ts
│ │
│ ┌──────────┴──────────┐
│ │ │
│ isFeatureEnabled() getAllFeatureFlags()
│ │
│ Routes/Services
└── Frontend ─► src/config.ts ──► src/hooks/useFeatureFlag.ts
┌──────────────┼──────────────┐
│ │ │
useFeatureFlag() useAllFeatureFlags() <FeatureFlag>
│ Component
Components
```
### File Structure
| File | Purpose | Layer |
| ------------------------------------- | ------------------------ | ---------------- |
| `src/config/env.ts` | Zod schema + env loading | Backend config |
| `src/services/featureFlags.server.ts` | Flag access service | Backend runtime |
| `src/config.ts` | Vite env parsing | Frontend config |
| `src/vite-env.d.ts` | TypeScript declarations | Frontend types |
| `src/hooks/useFeatureFlag.ts` | React hook | Frontend runtime |
| `src/components/FeatureFlag.tsx` | Declarative wrapper | Frontend UI |
### Naming Convention
| Context | Pattern | Example |
| ------------------- | ------------------------- | ---------------------------------- |
| Backend env var | `FEATURE_SNAKE_CASE` | `FEATURE_NEW_DASHBOARD` |
| Frontend env var | `VITE_FEATURE_SNAKE_CASE` | `VITE_FEATURE_NEW_DASHBOARD` |
| Config property | `camelCase` | `config.featureFlags.newDashboard` |
| Hook/function param | `camelCase` literal | `isFeatureEnabled('newDashboard')` |
### Backend Implementation
#### Schema Definition (`src/config/env.ts`)
```typescript
/**
* Feature flags schema (ADR-024).
* All flags default false (disabled) for safety.
*/
const featureFlagsSchema = z.object({
newDashboard: booleanString(false), // FEATURE_NEW_DASHBOARD
betaRecipes: booleanString(false), // FEATURE_BETA_RECIPES
experimentalAi: booleanString(false), // FEATURE_EXPERIMENTAL_AI
debugMode: booleanString(false), // FEATURE_DEBUG_MODE
});
// In loadEnvVars():
featureFlags: {
newDashboard: process.env.FEATURE_NEW_DASHBOARD,
betaRecipes: process.env.FEATURE_BETA_RECIPES,
experimentalAi: process.env.FEATURE_EXPERIMENTAL_AI,
debugMode: process.env.FEATURE_DEBUG_MODE,
},
```
#### Service Module (`src/services/featureFlags.server.ts`)
```typescript
import { config, isDevelopment } from '../config/env';
import { logger } from './logger.server';
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Check feature flag state. Logs in development mode.
*/
export function isFeatureEnabled(flagName: FeatureFlagName): boolean {
const enabled = config.featureFlags[flagName];
if (isDevelopment) {
logger.debug({ flag: flagName, enabled }, 'Feature flag checked');
}
return enabled;
}
/**
* Get all flags (admin/debug endpoints).
*/
export function getAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return { ...config.featureFlags };
}
// Convenience exports (evaluated once at startup)
export const isNewDashboardEnabled = config.featureFlags.newDashboard;
export const isBetaRecipesEnabled = config.featureFlags.betaRecipes;
```
#### Usage in Routes
```typescript
import { isFeatureEnabled } from '../services/featureFlags.server';
router.get('/dashboard', async (req, res) => {
if (isFeatureEnabled('newDashboard')) {
return sendSuccess(res, { version: 'v2', data: await getNewDashboardData() });
}
return sendSuccess(res, { version: 'v1', data: await getLegacyDashboardData() });
});
```
### Frontend Implementation
#### Config (`src/config.ts`)
```typescript
const config = {
// ... existing sections ...
featureFlags: {
newDashboard: import.meta.env.VITE_FEATURE_NEW_DASHBOARD === 'true',
betaRecipes: import.meta.env.VITE_FEATURE_BETA_RECIPES === 'true',
experimentalAi: import.meta.env.VITE_FEATURE_EXPERIMENTAL_AI === 'true',
debugMode: import.meta.env.VITE_FEATURE_DEBUG_MODE === 'true',
},
};
```
#### Type Declarations (`src/vite-env.d.ts`)
```typescript
interface ImportMetaEnv {
readonly VITE_FEATURE_NEW_DASHBOARD?: string;
readonly VITE_FEATURE_BETA_RECIPES?: string;
readonly VITE_FEATURE_EXPERIMENTAL_AI?: string;
readonly VITE_FEATURE_DEBUG_MODE?: string;
}
```
#### React Hook (`src/hooks/useFeatureFlag.ts`)
```typescript
import { useMemo } from 'react';
import config from '../config';
export type FeatureFlagName = keyof typeof config.featureFlags;
export function useFeatureFlag(flagName: FeatureFlagName): boolean {
return useMemo(() => config.featureFlags[flagName], [flagName]);
}
export function useAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return useMemo(() => ({ ...config.featureFlags }), []);
}
```
#### Declarative Component (`src/components/FeatureFlag.tsx`)
```typescript
import { ReactNode } from 'react';
import { useFeatureFlag, FeatureFlagName } from '../hooks/useFeatureFlag';
interface FeatureFlagProps {
name: FeatureFlagName;
children: ReactNode;
fallback?: ReactNode;
}
export function FeatureFlag({ name, children, fallback = null }: FeatureFlagProps) {
const isEnabled = useFeatureFlag(name);
return <>{isEnabled ? children : fallback}</>;
}
```
#### Usage in Components
```tsx
// Declarative approach
<FeatureFlag name="newDashboard" fallback={<LegacyDashboard />}>
<NewDashboard />
</FeatureFlag>;
// Hook approach (for logic beyond rendering)
const isNewDashboard = useFeatureFlag('newDashboard');
useEffect(() => {
if (isNewDashboard) analytics.track('new_dashboard_viewed');
}, [isNewDashboard]);
```
### Testing Patterns
#### Backend Test Setup
```typescript
// Reset modules to test different flag states
beforeEach(() => {
vi.resetModules();
process.env.FEATURE_NEW_DASHBOARD = 'true';
});
// src/services/featureFlags.server.test.ts
describe('isFeatureEnabled', () => {
it('returns false for disabled flags', () => {
expect(isFeatureEnabled('newDashboard')).toBe(false);
});
});
```
#### Frontend Test Setup
```typescript
// Mock config module
vi.mock('../config', () => ({
default: {
featureFlags: {
newDashboard: true,
betaRecipes: false,
},
},
}));
// Component test
describe('FeatureFlag', () => {
it('renders fallback when disabled', () => {
render(
<FeatureFlag name="betaRecipes" fallback={<div>Old</div>}>
<div>New</div>
</FeatureFlag>
);
expect(screen.getByText('Old')).toBeInTheDocument();
});
});
```
### Flag Lifecycle
| Phase | Actions |
| ---------- | -------------------------------------------------------------------------------------------- |
| **Add** | 1. Add to both schemas (backend + frontend) 2. Default `false` 3. Document in `.env.example` |
| **Enable** | Set env var `='true'` → restart application |
| **Remove** | 1. Remove conditional code 2. Remove from schemas 3. Remove env vars |
| **Sunset** | Max 3 months after full rollout → remove flag |
### Admin Endpoint (Optional)
```typescript
// GET /api/v1/admin/feature-flags (admin-only)
router.get('/feature-flags', requireAdmin, async (req, res) => {
sendSuccess(res, { flags: getAllFeatureFlags() });
});
```
### Integration with ADR-007
Feature flags extend existing Zod configuration pattern:
- **Validation**: Same `booleanString()` transform used by other config
- **Loading**: Same `loadEnvVars()` function loads `FEATURE_*` vars
- **Type Safety**: `FeatureFlagName` type derived from config schema
- **Fail-Fast**: Invalid flag values fail at startup (Zod validation)
### Future Migration Path
Current implementation abstracts flag access via `isFeatureEnabled()` function and `useFeatureFlag()` hook. External service migration requires:
1. Replace implementation internals of these functions
2. Add API client for Flagsmith/LaunchDarkly
3. No changes to consuming code (routes/components)
### Explicitly Out of Scope
- External service integration (Flagsmith/LaunchDarkly)
- Database-stored flags
- Real-time flag updates (WebSocket/SSE)
- User-specific flags (A/B testing percentages)
- Flag inheritance/hierarchy
- Flag audit logging
### Key Files Reference
| Action | Files |
| --------------------- | ------------------------------------------------------------------------------------------------- |
| Add new flag | `src/config/env.ts`, `src/config.ts`, `src/vite-env.d.ts`, `.env.example` |
| Check flag (backend) | Import from `src/services/featureFlags.server.ts` |
| Check flag (frontend) | Import hook from `src/hooks/useFeatureFlag.ts` or component from `src/components/FeatureFlag.tsx` |
| Test flag behavior | Mock via `vi.resetModules()` (backend) or `vi.mock('../config')` (frontend) |

View File

@@ -195,6 +195,12 @@ Do NOT add tests:
- Coverage percentages may not satisfy external audits
- Requires judgment calls that may be inconsistent
## Related ADRs
- [ADR-010](./0010-testing-strategy-and-standards.md) - Testing Strategy and Standards (this ADR extends ADR-010)
- [ADR-045](./0045-test-data-factories-and-fixtures.md) - Test Data Factories and Fixtures
- [ADR-057](./0057-test-remediation-post-api-versioning.md) - Test Remediation Post-API Versioning
## Key Files
- `docs/adr/0010-testing-strategy-and-standards.md` - Testing mechanics

View File

@@ -4,7 +4,7 @@
**Status**: Accepted (Fully Implemented)
**Related**: [ADR-015](0015-application-performance-monitoring-and-error-tracking.md), [ADR-004](0004-standardized-application-wide-structured-logging.md)
**Related**: [ADR-015](0015-error-tracking-and-observability.md), [ADR-004](0004-standardized-application-wide-structured-logging.md)
## Context
@@ -335,7 +335,7 @@ SELECT award_achievement('user-uuid', 'Nonexistent Badge');
## References
- [ADR-015: Application Performance Monitoring](0015-application-performance-monitoring-and-error-tracking.md)
- [ADR-015: Error Tracking and Observability](0015-error-tracking-and-observability.md)
- [ADR-004: Standardized Structured Logging](0004-standardized-application-wide-structured-logging.md)
- [PostgreSQL RAISE Documentation](https://www.postgresql.org/docs/current/plpgsql-errors-and-messages.html)
- [PostgreSQL Logging Configuration](https://www.postgresql.org/docs/current/runtime-config-logging.html)

View File

@@ -332,6 +332,6 @@ Response:
## References
- [ADR-006: Background Job Processing](./0006-background-job-processing-and-task-queues.md)
- [ADR-015: Application Performance Monitoring](./0015-application-performance-monitoring-and-error-tracking.md)
- [ADR-015: Error Tracking and Observability](./0015-error-tracking-and-observability.md)
- [Bugsink API Documentation](https://bugsink.com/docs/api/)
- [Gitea API Documentation](https://docs.gitea.io/en-us/api-usage/)

View File

@@ -0,0 +1,367 @@
# ADR-057: Test Remediation Post-API Versioning and Frontend Rework
**Date**: 2026-01-28
**Status**: Accepted
**Context**: Major test remediation effort completed after ADR-008 API versioning implementation and frontend style rework
## Context
Following the completion of ADR-008 Phase 2 (API Versioning Strategy) and a concurrent frontend style/design rework, the test suite experienced 105 test failures across unit tests and E2E tests. This ADR documents the systematic remediation effort, root cause analysis, and lessons learned to prevent similar issues in future migrations.
### Scope of Failures
| Test Type | Failures | Total Tests | Pass Rate After Fix |
| ---------- | -------- | ----------- | ------------------- |
| Unit Tests | 69 | 3,392 | 100% |
| E2E Tests | 36 | 36 | 100% |
| **Total** | **105** | **3,428** | **100%** |
### Root Causes Identified
The failures were categorized into six distinct categories:
1. **API Versioning Path Mismatches** (71 failures)
- Test files using `/api/` instead of `/api/v1/`
- Environment variables not set for API base URL
- Integration and E2E tests calling unversioned endpoints
2. **Dark Mode Class Assertion Failures** (8 failures)
- Frontend rework changed Tailwind dark mode utility classes
- Test assertions checking for outdated class names
3. **Selected Item Styling Changes** (6 failures)
- Component styling refactored to new design tokens
- Test assertions expecting old CSS class combinations
4. **Admin-Only Component Visibility** (12 failures)
- MainLayout tests not properly mocking admin role
- ActivityLog component visibility tied to role-based access
5. **Mock Hoisting Issues** (5 failures)
- Queue mocks not available during module initialization
- Vitest's module hoisting order causing mock setup failures
6. **Error Log Path Hardcoding** (3 failures)
- Route handlers logging hardcoded paths like `/api/flyers`
- Test assertions expecting versioned paths `/api/v1/flyers`
## Decision
We implemented a systematic remediation approach addressing each failure category with targeted fixes while establishing patterns to prevent regression.
### 1. API Versioning Configuration Updates
**Files Modified**:
- `vite.config.ts`
- `vitest.config.e2e.ts`
- `vitest.config.integration.ts`
**Pattern Applied**: Centralize API base URL in Vitest environment variables
```typescript
// vite.config.ts - Unit test configuration
test: {
env: {
// ADR-008: Ensure API versioning is correctly set for unit tests
VITE_API_BASE_URL: '/api/v1',
},
// ...
}
// vitest.config.e2e.ts - E2E test configuration
test: {
env: {
// ADR-008: API versioning - all routes use /api/v1 prefix
VITE_API_BASE_URL: 'http://localhost:3098/api/v1',
},
// ...
}
// vitest.config.integration.ts - Integration test configuration
test: {
env: {
// ADR-008: API versioning - all routes use /api/v1 prefix
VITE_API_BASE_URL: 'http://localhost:3099/api/v1',
},
// ...
}
```
### 2. E2E Test URL Path Updates
**Files Modified** (7 files, 31 URL occurrences):
- `src/tests/e2e/budget-journey.e2e.test.ts`
- `src/tests/e2e/deals-journey.e2e.test.ts`
- `src/tests/e2e/flyer-upload.e2e.test.ts`
- `src/tests/e2e/inventory-journey.e2e.test.ts`
- `src/tests/e2e/receipt-journey.e2e.test.ts`
- `src/tests/e2e/upc-journey.e2e.test.ts`
- `src/tests/e2e/user-journey.e2e.test.ts`
**Pattern Applied**: Update all hardcoded API paths to versioned endpoints
```typescript
// Before
const response = await getRequest().post('/api/auth/register').send({...});
// After
const response = await getRequest().post('/api/v1/auth/register').send({...});
```
### 3. Unit Test Assertion Updates for UI Changes
**Files Modified**:
- `src/features/flyer/FlyerDisplay.test.tsx`
- `src/features/flyer/FlyerList.test.tsx`
**Pattern Applied**: Update CSS class assertions to match new design system
```typescript
// FlyerDisplay.test.tsx - Dark mode class update
// Before
expect(image).toHaveClass('dark:brightness-75');
// After
expect(image).toHaveClass('dark:brightness-90');
// FlyerList.test.tsx - Selected item styling update
// Before
expect(selectedItem).toHaveClass('ring-2', 'ring-brand-primary');
// After
expect(selectedItem).toHaveClass('border-brand-primary', 'bg-teal-50/50', 'dark:bg-teal-900/10');
```
### 4. Admin-Only Component Test Separation
**File Modified**: `src/layouts/MainLayout.test.tsx`
**Pattern Applied**: Separate test cases for admin vs. regular user visibility
```typescript
describe('for authenticated users', () => {
beforeEach(() => {
mockedUseAuth.mockReturnValue({
...defaultUseAuthReturn,
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ user: mockUser }),
});
});
it('renders auth-gated components for regular users (PriceHistoryChart, Leaderboard)', () => {
renderWithRouter(<MainLayout {...defaultProps} />);
expect(screen.getByTestId('price-history-chart')).toBeInTheDocument();
expect(screen.getByTestId('leaderboard')).toBeInTheDocument();
// ActivityLog is admin-only, should NOT be present for regular users
expect(screen.queryByTestId('activity-log')).not.toBeInTheDocument();
});
it('renders ActivityLog for admin users', () => {
mockedUseAuth.mockReturnValue({
...defaultUseAuthReturn,
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ user: mockUser, role: 'admin' }),
});
renderWithRouter(<MainLayout {...defaultProps} />);
expect(screen.getByTestId('activity-log')).toBeInTheDocument();
});
});
```
### 5. vi.hoisted() Pattern for Queue Mocks
**File Modified**: `src/routes/health.routes.test.ts`
**Pattern Applied**: Use `vi.hoisted()` to ensure mocks are available during module hoisting
```typescript
// Use vi.hoisted to create mock queue objects that are available during vi.mock hoisting.
// This ensures the mock objects exist when the factory function runs.
const { mockQueuesModule } = vi.hoisted(() => {
// Helper function to create a mock queue object with vi.fn()
const createMockQueue = () => ({
getJobCounts: vi.fn().mockResolvedValue({
waiting: 0,
active: 0,
failed: 0,
delayed: 0,
}),
});
return {
mockQueuesModule: {
flyerQueue: createMockQueue(),
emailQueue: createMockQueue(),
// ... additional queues
},
};
});
// Mock the queues.server module BEFORE the health router imports it.
vi.mock('../services/queues.server', () => mockQueuesModule);
// Import the router AFTER all mocks are defined.
import healthRouter from './health.routes';
```
### 6. Dynamic Error Log Paths
**Pattern Applied**: Use `req.originalUrl` instead of hardcoded paths in error handlers
```typescript
// Before (INCORRECT - hardcoded path)
req.log.error({ error }, 'Error in /api/flyers/:id:');
// After (CORRECT - dynamic path)
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
```
## Implementation Summary
### Files Modified (14 total)
| Category | Files | Changes |
| -------------------- | ----- | ------------------------------------------------- |
| Vitest Configuration | 3 | Added `VITE_API_BASE_URL` environment variables |
| E2E Tests | 7 | Updated 31 API endpoint URLs |
| Unit Tests | 4 | Updated assertions for UI, mocks, and admin roles |
### Verification Results
After remediation, all tests pass in the dev container environment:
```text
Unit Tests: 3,392 passing
E2E Tests: 36 passing
Integration: 345/348 passing (3 known issues, unrelated)
Type Check: Passing
```
## Consequences
### Positive
1. **Test Suite Stability**: All tests now pass consistently in the dev container
2. **API Versioning Compliance**: Tests enforce the `/api/v1/` path requirement
3. **Pattern Documentation**: Clear patterns established for future test maintenance
4. **Separation of Concerns**: Admin vs. user test cases properly separated
5. **Mock Reliability**: `vi.hoisted()` pattern prevents mock timing issues
### Negative
1. **Maintenance Overhead**: Future API version changes will require test updates
2. **Manual Migration**: No automated tool to update test paths during versioning
### Neutral
1. **Test Execution Time**: No significant impact on test execution duration
2. **Coverage Metrics**: Coverage percentages unchanged
## Best Practices Established
### 1. API Versioning in Tests
**Always use versioned API paths in tests**:
```typescript
// Good
const response = await request.get('/api/v1/users/profile');
// Bad
const response = await request.get('/api/users/profile');
```
**Configure environment variables centrally in Vitest configs** rather than in individual test files.
### 2. vi.hoisted() for Module-Level Mocks
When mocking modules that are imported at the top level of other modules:
```typescript
// Pattern: Define mocks with vi.hoisted() BEFORE vi.mock() calls
const { mockModule } = vi.hoisted(() => ({
mockModule: {
someFunction: vi.fn(),
},
}));
vi.mock('./some-module', () => mockModule);
// Import AFTER mocks
import { something } from './module-that-imports-some-module';
```
### 3. Testing Conditional Component Rendering
When testing components that render differently based on user role:
1. Create separate `describe` blocks for each role
2. Set up role-specific mocks in `beforeEach`
3. Explicitly test both presence AND absence of role-gated components
### 4. CSS Class Assertions After UI Refactors
After frontend style changes:
1. Review component implementation for new class names
2. Update test assertions to match actual CSS classes
3. Consider using partial matching for complex class combinations:
```typescript
// Flexible matching for Tailwind classes
expect(element).toHaveClass('border-brand-primary');
// vs exact matching
expect(element).toHaveClass('border-brand-primary', 'bg-teal-50/50', 'dark:bg-teal-900/10');
```
### 5. Error Logging Paths
**Always use dynamic paths in error logs**:
```typescript
// Pattern: Use req.originalUrl for request path logging
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
```
This ensures error logs reflect the actual request URL including version prefixes.
## Migration Checklist for Future API Version Changes
When implementing a new API version (e.g., v2), follow this checklist:
- [ ] Update `vite.config.ts` test environment `VITE_API_BASE_URL`
- [ ] Update `vitest.config.e2e.ts` test environment `VITE_API_BASE_URL`
- [ ] Update `vitest.config.integration.ts` test environment `VITE_API_BASE_URL`
- [ ] Search and replace `/api/v1/` with `/api/v2/` in E2E test files
- [ ] Search and replace `/api/v1/` with `/api/v2/` in integration test files
- [ ] Verify route handler error logs use `req.originalUrl`
- [ ] Run full test suite in dev container to verify
**Search command for finding hardcoded paths**:
```bash
grep -r "/api/v1/" src/tests/
grep -r "'/api/" src/routes/*.ts
```
## Related ADRs
- [ADR-008](./0008-api-versioning-strategy.md) - API Versioning Strategy
- [ADR-010](./0010-testing-strategy-and-standards.md) - Testing Strategy and Standards
- [ADR-014](./0014-containerization-and-deployment-strategy.md) - Platform: Linux Only
- [ADR-040](./0040-testing-economics-and-priorities.md) - Testing Economics and Priorities
- [ADR-012](./0012-frontend-component-library-and-design-system.md) - Frontend Component Library
## Key Files
| File | Purpose |
| ------------------------------ | -------------------------------------------- |
| `vite.config.ts` | Unit test environment configuration |
| `vitest.config.e2e.ts` | E2E test environment configuration |
| `vitest.config.integration.ts` | Integration test environment configuration |
| `src/tests/e2e/*.e2e.test.ts` | E2E test files with versioned API paths |
| `src/routes/*.routes.test.ts` | Route test files with `vi.hoisted()` pattern |
| `docs/development/TESTING.md` | Testing guide with best practices |

View File

@@ -0,0 +1,517 @@
# ADR-042: Browser Test Performance Optimization
**Status**: Accepted
**Date**: 2026-02-10
**Authors**: Claude Code AI Agent
## Context
### Current State
The stock-alert project has 59 Playwright browser tests across 5 spec files taking approximately 240 seconds (~4 minutes) to execute. Analysis reveals three major performance bottlenecks:
| Metric | Count | Impact |
| ----------------------------------- | ----- | -------------------------------------------- |
| Hardcoded `waitForTimeout()` calls | 66 | ~120s cumulative wait time |
| Redundant login calls per test | 43 | ~2-3s each = 86-129s overhead |
| Visual regression tests blocking CI | 4 | Cannot run in parallel with functional tests |
### Test Distribution
| File | Tests | `waitForTimeout` Calls | `login()` Calls |
| ------------------- | ------ | ---------------------- | ------------------------ |
| `dashboard.spec.js` | 10 | 8 | 10 |
| `alerts.spec.js` | 14 | 25 | 1 (beforeEach) |
| `gaps.spec.js` | 20 | 29 | 1 (beforeEach) |
| `login.spec.js` | 11 | 4 | 0 (tests login itself) |
| `visual.spec.js` | 4 | 0 | 4 (via navigateWithAuth) |
| **Total** | **59** | **66** | **16 patterns** |
### Root Causes
1. **Anti-Pattern: Hardcoded Timeouts**
- `waitForTimeout(2000)` used to "wait for data to load"
- Unnecessarily slow on fast systems, flaky on slow systems
- No correlation to actual page readiness
2. **Anti-Pattern: Per-Test Authentication**
- Each test navigates to `/login`, enters password, submits
- Session cookie persists across requests but not across tests
- `beforeEach` login adds 2-3 seconds per test
3. **Architecture: Mixed Test Types**
- Visual regression tests require different infrastructure (baseline images)
- Functional tests and visual tests compete for worker slots
- Cannot optimize CI parallelization
### Requirements
1. Reduce test suite runtime by 40-55%
2. Improve test determinism (eliminate flakiness)
3. Maintain test coverage and reliability
4. Enable parallel CI execution where possible
5. Document patterns for other projects
## Decision
Implement three optimization phases:
### Phase 1: Event-Based Wait Replacement (Primary Impact: ~50% of time savings)
Replace all 66 `waitForTimeout()` calls with Playwright's event-based waiting APIs.
**Replacement Patterns:**
| Current Pattern | Replacement | Rationale |
| --------------------------------------- | ------------------------------------------------- | ----------------------------- |
| `waitForTimeout(2000)` after navigation | `waitForLoadState('networkidle')` | Waits for network quiescence |
| `waitForTimeout(1000)` after click | `waitForSelector('.result')` | Waits for specific DOM change |
| `waitForTimeout(3000)` for charts | `waitForSelector('canvas', { state: 'visible' })` | Waits for chart render |
| `waitForTimeout(500)` for viewport | `waitForFunction(() => ...)` | Waits for layout reflow |
**Implementation Examples:**
```javascript
// BEFORE: Hardcoded timeout
await page.goto('/alerts');
await page.waitForTimeout(2000);
const rows = await page.locator('tbody tr').count();
// AFTER: Event-based wait
await page.goto('/alerts');
await page.waitForLoadState('networkidle');
await page.waitForSelector('tbody tr', { state: 'attached' });
const rows = await page.locator('tbody tr').count();
```
```javascript
// BEFORE: Hardcoded timeout after action
await page.click('#runCheckBtn');
await page.waitForTimeout(2000);
// AFTER: Wait for response
const [response] = await Promise.all([
page.waitForResponse((resp) => resp.url().includes('/api/check')),
page.click('#runCheckBtn'),
]);
```
**Helper Function Addition to `helpers.js`:**
```javascript
/**
* Waits for page to be fully loaded with data.
* Replaces hardcoded waitForTimeout calls.
*/
async function waitForPageReady(page, options = {}) {
const { dataSelector = null, networkIdle = true, minTime = 0 } = options;
const promises = [];
if (networkIdle) {
promises.push(page.waitForLoadState('networkidle'));
}
if (dataSelector) {
promises.push(page.waitForSelector(dataSelector, { state: 'visible' }));
}
if (minTime > 0) {
promises.push(page.waitForTimeout(minTime)); // Escape hatch for animations
}
await Promise.all(promises);
}
```
**Estimated Time Savings:** 60-80 seconds (eliminates ~120s of cumulative waits, but event waits have overhead)
### Phase 2: Global Authentication Setup (Primary Impact: ~35% of time savings)
Share authenticated session across all tests using Playwright's global setup feature.
**Architecture:**
```
┌──────────────────┐
│ global-setup.js │
│ │
│ 1. Login once │
│ 2. Save storage │
└────────┬─────────┘
┌──────────────────────┼──────────────────────┐
│ │ │
▼ ▼ ▼
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ dashboard.spec │ │ alerts.spec │ │ gaps.spec │
│ (reuses auth) │ │ (reuses auth) │ │ (reuses auth) │
└─────────────────┘ └─────────────────┘ └─────────────────┘
```
**Implementation Files:**
**`tests/browser/global-setup.js`:**
```javascript
const { chromium } = require('@playwright/test');
const path = require('path');
const authFile = path.join(__dirname, '.auth', 'user.json');
module.exports = async function globalSetup() {
const browser = await chromium.launch();
const page = await browser.newPage();
// Only perform login if authentication is enabled
if (process.env.DASHBOARD_PASSWORD) {
await page.goto(process.env.PLAYWRIGHT_BASE_URL || 'http://localhost:8980');
// Perform login
await page.goto('/login');
await page.fill('#password', process.env.DASHBOARD_PASSWORD);
await page.click('button[type="submit"]');
await page.waitForURL('/');
// Save authentication state
await page.context().storageState({ path: authFile });
}
await browser.close();
};
```
**`playwright.config.js` Updates:**
```javascript
module.exports = defineConfig({
// ... existing config ...
// Global setup runs once before all tests
globalSetup: require.resolve('./tests/browser/global-setup.js'),
projects: [
{
name: 'chromium',
use: {
...devices['Desktop Chrome'],
// Reuse authentication state from global setup
storageState: './tests/browser/.auth/user.json',
},
},
],
});
```
**Test File Updates:**
```javascript
// BEFORE: Login in beforeEach
test.beforeEach(async ({ page }) => {
page.consoleErrors = captureConsoleErrors(page);
if (isAuthEnabled()) {
await login(page);
}
});
// AFTER: Remove login (handled by global setup)
test.beforeEach(async ({ page }) => {
page.consoleErrors = captureConsoleErrors(page);
// Authentication already applied via storageState
});
```
**Estimated Time Savings:** 80-100 seconds (43 logins x ~2-3s each, minus 3s for global setup)
### Phase 3: Visual Test Separation (Primary Impact: CI parallelization)
Separate visual regression tests into a dedicated project for parallel CI execution.
**Project Configuration:**
```javascript
// playwright.config.js
module.exports = defineConfig({
projects: [
// Functional tests - fast, event-based
{
name: 'functional',
testMatch: /^(?!.*visual).*\.spec\.js$/,
use: {
...devices['Desktop Chrome'],
storageState: './tests/browser/.auth/user.json',
},
},
// Visual tests - separate baseline management
{
name: 'visual',
testMatch: '**/visual.spec.js',
use: {
...devices['Desktop Chrome'],
storageState: './tests/browser/.auth/user.json',
},
// Different snapshot handling
snapshotPathTemplate: '{testDir}/__screenshots__/{projectName}/{testFilePath}/{arg}{ext}',
},
],
});
```
**CI Pipeline Updates:**
```yaml
# .gitea/workflows/test.yml
jobs:
browser-functional:
runs-on: ubuntu-latest
steps:
- run: npx playwright test --project=functional
browser-visual:
runs-on: ubuntu-latest
steps:
- run: npx playwright test --project=visual
```
**Estimated Time Savings:** 30-45 seconds (parallel execution vs sequential)
## Implementation Schedule
### Critical Path (Estimated 8-12 hours)
```
Phase 1 (Event Waits) ████████████████ [4-6h]
Phase 2 (Global Auth) ████████ [2-3h]
Phase 3 (Visual Separation) ████ [2-3h]
```
### Effort Summary
| Phase | Min Hours | Max Hours | Expected Savings |
| ------------------------ | --------- | --------- | --------------------- |
| 1. Event-Based Waits | 4 | 6 | 60-80s (25-33%) |
| 2. Global Authentication | 2 | 3 | 80-100s (33-42%) |
| 3. Visual Separation | 2 | 3 | 30-45s (CI parallel) |
| **Total** | **8** | **12** | **170-225s (70-94%)** |
### Expected Results
| Metric | Before | After | Improvement |
| ------------------ | ------ | --------- | ------------- |
| Total Runtime | 240s | 110-140s | 42-54% faster |
| Flaky Test Rate | ~5% | <1% | 80% reduction |
| CI Parallelization | None | 2 workers | 2x throughput |
| Login Operations | 43 | 1 | 98% reduction |
| Hardcoded Waits | 66 | <5 | 92% reduction |
## Consequences
### Positive
1. **Performance**: 42-54% reduction in test runtime
2. **Reliability**: Event-based waits eliminate timing flakiness
3. **Scalability**: Global setup pattern scales to N tests with O(1) login cost
4. **CI Efficiency**: Parallel visual tests enable faster feedback loops
5. **Maintainability**: Centralized auth logic reduces code duplication
6. **Transferable Knowledge**: Patterns applicable to any Playwright project
### Negative
1. **Initial Migration Effort**: 8-12 hours of refactoring
2. **Learning Curve**: Team must understand Playwright wait APIs
3. **Global Setup Complexity**: Adds shared state between tests
4. **Debugging Harder**: Shared auth can mask test isolation issues
### Mitigations
| Risk | Mitigation |
| ------------------ | ------------------------------------------------------------- |
| Global setup fails | Add retry logic; fallback to per-test login |
| Event waits flaky | Keep small timeout buffer (100ms) as escape hatch |
| Visual tests drift | Separate baseline management per environment |
| Test isolation | Run `--project=functional` without auth for isolation testing |
### Neutral
- Test count unchanged (59 tests)
- Coverage unchanged
- Visual baselines unchanged (path changes only)
## Alternatives Considered
### Alternative 1: Reduce Test Count
**Rejected:** Sacrifices coverage for speed. Tests exist for a reason.
### Alternative 2: Increase Worker Parallelism
**Rejected:** Server cannot handle >2 concurrent sessions reliably; creates resource contention.
### Alternative 3: Use `page.waitForTimeout()` with Shorter Durations
**Rejected:** Addresses symptom, not root cause. Still creates timing-dependent tests.
### Alternative 4: Cookie Injection Instead of Login
**Rejected:** Requires reverse-engineering session format; brittle if auth changes.
### Alternative 5: HTTP API Authentication (No Browser)
**Rejected:** Bypasses the browser entirely, so it cannot validate real browser session behavior — and the login flow itself still requires browser-level test coverage.
## Implementation Details
### Wait Replacement Mapping
| File | Current Timeouts | Replacement Strategy |
| ------------------- | ---------------------- | ---------------------------------------------------------------------- |
| `dashboard.spec.js` | 1000ms, 2000ms, 3000ms | `waitForSelector` for charts, `waitForLoadState` for navigation |
| `alerts.spec.js` | 500ms, 1000ms, 2000ms | `waitForResponse` for API calls, `waitForSelector` for table rows |
| `gaps.spec.js` | 500ms, 1000ms, 2000ms | `waitForResponse` for `/api/gaps`, `waitForSelector` for summary cards |
| `login.spec.js` | 500ms, 2000ms | `waitForURL` for redirects, `waitForSelector` for error messages |
### Common Wait Patterns for This Codebase
| Scenario | Recommended Pattern | Example |
| --------------------- | ------------------------------------------------- | ------------------------- |
| After page navigation | `waitForLoadState('networkidle')` | Loading dashboard data |
| After button click | `waitForResponse()` + `waitForSelector()` | Run Check button |
| After filter change | `waitForResponse(/api\/.*/)` | Status filter dropdown |
| For chart rendering | `waitForSelector('canvas', { state: 'visible' })` | Chart cards |
| For modal appearance | `waitForSelector('.modal', { state: 'visible' })` | Confirmation dialogs |
| For layout change | `waitForFunction()` | Responsive viewport tests |
### Auth Storage Structure
```
tests/browser/
├── .auth/
│ └── user.json # Generated by global-setup, gitignored
├── global-setup.js # Creates user.json
├── dashboard.spec.js # Uses storageState
├── alerts.spec.js
├── gaps.spec.js
├── login.spec.js # Tests login itself, may need special handling
└── visual.spec.js
```
**`.gitignore` Addition:**
```
tests/browser/.auth/
```
### Login.spec.js Special Handling
`login.spec.js` tests the login flow itself and must NOT use the shared auth state:
```javascript
// playwright.config.js
projects: [
{
name: 'functional',
testMatch: /^(?!.*(?:login|visual)).*\.spec\.js$/, // exclude login AND visual specs (each has its own project)
use: { storageState: './tests/browser/.auth/user.json' },
},
{
name: 'login',
testMatch: '**/login.spec.js',
use: { storageState: undefined }, // No auth - tests login flow
},
];
```
## Testing the Optimization
### Baseline Measurement
```bash
# Before optimization: establish baseline
time npm run test:browser 2>&1 | tee baseline-timing.log
grep -E "passed|failed|skipped" baseline-timing.log
```
### Incremental Verification
```bash
# After Phase 1: verify wait replacement
npm run test:browser -- --reporter=list 2>&1 | grep -E "passed|failed|slow"
# After Phase 2: verify global auth
npm run test:browser -- --trace on
# Check trace for login occurrences (should be 1)
# After Phase 3: verify parallel execution
npm run test:browser -- --project=functional &
npm run test:browser -- --project=visual &
wait
```
### Success Criteria
| Metric | Target | Measurement |
| ---------------------- | ------ | -------------------------------------- |
| Total runtime | <150s | `time npm run test:browser` |
| Login count | 1 | Grep traces for `/login` navigation |
| Flaky rate | <2% | 50 consecutive CI runs |
| `waitForTimeout` count | <5 | `grep -r waitForTimeout tests/browser` |
## Lessons Learned / Patterns for Other Projects
### Pattern 1: Always Prefer Event-Based Waits
```javascript
// Bad
await page.click('#submit');
await page.waitForTimeout(2000);
expect(await page.title()).toBe('Success');
// Good
await page.click('#submit');
await page.waitForURL('**/success'); // waitForNavigation() is deprecated; waitForURL succeeds even if already navigated
expect(await page.title()).toBe('Success');
```
### Pattern 2: Global Setup for Authentication
Playwright's `storageState` feature should be the default for any authenticated app:
1. Create `global-setup.js` that performs login once
2. Save cookies/storage to JSON file
3. Configure `storageState` in `playwright.config.js`
4. Tests start authenticated with zero overhead
### Pattern 3: Separate Test Types by Execution Characteristics
| Test Type | Characteristics | Strategy |
| ---------- | ------------------------ | --------------------------------- |
| Functional | Fast, deterministic | Run first, gate deployment |
| Visual | Slow, baseline-dependent | Run in parallel, separate project |
| E2E | Cross-service, slow | Run nightly, separate workflow |
### Pattern 4: Measure Before and After
Always establish baseline metrics before optimization:
```bash
# Essential metrics to capture
time npm run test:browser # Total runtime
grep -c waitForTimeout *.js # Hardcoded wait count
grep -c 'await login' *.js # Login call count
```
## Related ADRs
- [ADR-0031](0031-quality-gates-eslint-playwright.md): Quality Gates - ESLint, Pre-commit Hooks, and Playwright Browser Testing
- [ADR-0035](0035-browser-test-selector-fixes.md): Browser Test Selector Fixes
- [ADR-0008](0008-testing-strategy.md): Testing Strategy
## References
- Playwright Best Practices: https://playwright.dev/docs/best-practices
- Playwright Authentication: https://playwright.dev/docs/auth
- Playwright Wait Strategies: https://playwright.dev/docs/actionability
- Test Files: `tests/browser/*.spec.js`
- Helper Module: `tests/browser/helpers.js`
- Configuration: `playwright.config.js`

View File

@@ -0,0 +1,308 @@
# ADR-059: Dependency Modernization Plan
**Status**: Accepted
**Date**: 2026-02-12
**Implemented**: 2026-02-12
## Context
NPM audit and security scanning identified deprecated dependencies requiring modernization:
| Dependency | Current | Issue | Replacement |
| --------------- | ------- | ----------------------- | --------------------------------------- |
| `swagger-jsdoc` | 6.2.8 | Unmaintained since 2022 | `tsoa` (decorator-based OpenAPI) |
| `rimraf` | 6.1.2 | Legacy cleanup utility | Node.js `fs.rm()` (native since v14.14) |
**Constraints**:
- Existing `@openapi` JSDoc annotations in 20 route files
- ADR-018 compliance (API documentation strategy)
- Zero-downtime migration (phased approach)
- Must maintain Express 5.x compatibility
## Decision
### 1. swagger-jsdoc → tsoa Migration
**Architecture**: tsoa controller classes + Express integration (no replacement of Express routing layer).
```text
Current: Route Files → JSDoc Annotations → swagger-jsdoc → OpenAPI Spec
Future: Controller Classes → @Route/@Get decorators → tsoa → OpenAPI Spec + Route Registration
```
**Controller Pattern**: Base controller providing common utilities:
```typescript
// src/controllers/base.controller.ts
export abstract class BaseController {
protected sendSuccess<T>(res: Response, data: T, status = 200) {
return sendSuccess(res, data, status);
}
protected sendError(
res: Response,
code: ErrorCode,
msg: string,
status: number,
details?: unknown,
) {
return sendError(res, code, msg, status, details);
}
}
```
**Express Integration Strategy**: tsoa generates routes.ts; wrap with Express middleware pipeline:
```typescript
// server.ts integration
import { RegisterRoutes } from './src/generated/routes';
RegisterRoutes(app); // tsoa registers routes with existing Express app
```
### 2. rimraf → fs.rm() Migration
**Change**: Replace `rimraf coverage .coverage` script with Node.js native API.
```json
// package.json (before)
"clean": "rimraf coverage .coverage"
// package.json (after)
"clean": "node -e \"import('fs/promises').then(fs => Promise.all([fs.rm('coverage', {recursive:true,force:true}), fs.rm('.coverage', {recursive:true,force:true})]))\""
```
**Alternative**: Create `scripts/clean.mjs` for maintainability:
```javascript
// scripts/clean.mjs
import { rm } from 'fs/promises';
await Promise.all([
rm('coverage', { recursive: true, force: true }),
rm('.coverage', { recursive: true, force: true }),
]);
```
## Implementation Plan
### Phase 1: Infrastructure (Tasks 1-4)
| Task | Description | Dependencies |
| ---- | ---------------------------------------------- | ------------ |
| 1 | Install tsoa, configure tsoa.json | None |
| 2 | Create BaseController with utility methods | Task 1 |
| 3 | Configure Express integration (RegisterRoutes) | Task 2 |
| 4 | Set up tsoa spec generation in build pipeline | Task 3 |
### Phase 2: Controller Migration (Tasks 5-14)
Priority order matches ADR-018:
| Task | Route File | Controller Class | Dependencies |
| ---- | ----------------------------------------------------------------------------------------------------------------- | ---------------------- | ------------ |
| 5 | health.routes.ts | HealthController | Task 4 |
| 6 | auth.routes.ts | AuthController | Task 4 |
| 7 | gamification.routes.ts | AchievementsController | Task 4 |
| 8 | flyer.routes.ts | FlyersController | Task 4 |
| 9 | user.routes.ts | UsersController | Task 4 |
| 10 | budget.routes.ts | BudgetController | Task 4 |
| 11 | recipe.routes.ts | RecipeController | Task 4 |
| 12 | store.routes.ts | StoreController | Task 4 |
| 13 | admin.routes.ts | AdminController | Task 4 |
| 14 | Remaining routes (deals, price, upc, inventory, ai, receipt, category, stats, personalization, reactions, system) | Various | Task 4 |
### Phase 3: Cleanup and rimraf (Tasks 15-18)
| Task | Description | Dependencies |
| ---- | -------------------------------- | ------------------- |
| 15 | Create scripts/clean.mjs | None |
| 16 | Update package.json clean script | Task 15 |
| 17 | Remove rimraf dependency | Task 16 |
| 18 | Remove swagger-jsdoc + types | Tasks 5-14 complete |
### Phase 4: Verification (Tasks 19-24)
| Task | Description | Dependencies |
| ---- | --------------------------------- | ------------ |
| 19 | Run type-check | Tasks 15-18 |
| 20 | Run unit tests | Task 19 |
| 21 | Run integration tests | Task 20 |
| 22 | Verify OpenAPI spec completeness | Task 21 |
| 23 | Update ADR-018 (reference tsoa) | Task 22 |
| 24 | Update CLAUDE.md (swagger → tsoa) | Task 23 |
### Task Dependency Graph
```text
[1: Install tsoa]
|
[2: BaseController]
|
[3: Express Integration]
|
[4: Build Pipeline]
|
+------------------+------------------+
| | | | |
[5] [6] [7] [8] [9-14]
Health Auth Gamif Flyer Others
| | | | |
+------------------+------------------+
|
[18: Remove swagger-jsdoc]
|
[15: clean.mjs] -----> [16: Update pkg.json]
|
[17: Remove rimraf]
|
[19: type-check]
|
[20: unit tests]
|
[21: integration tests]
|
[22: Verify OpenAPI]
|
[23: Update ADR-018]
|
[24: Update CLAUDE.md]
```
### Critical Path
**Minimum time to completion**: Tasks 1 → 2 → 3 → 4 → 5-14 (all controller migrations — Task 18 requires every one complete) → 18 → 19 → 20 → 21 → 22 → 23 → 24
**Parallelization opportunities**:
- Tasks 5-14 (all controller migrations) can run in parallel after Task 4
- Tasks 15-17 (rimraf removal) can run in parallel with controller migrations
## Technical Decisions
### tsoa Configuration
```json
// tsoa.json
{
"entryFile": "server.ts",
"noImplicitAdditionalProperties": "throw-on-extras",
"controllerPathGlobs": ["src/controllers/**/*.controller.ts"],
"spec": {
"outputDirectory": "src/generated",
"specVersion": 3,
"basePath": "/api/v1"
},
"routes": {
"routesDir": "src/generated",
"middleware": "express"
}
}
```
### Decorator Migration Example
**Before** (swagger-jsdoc):
```typescript
/**
* @openapi
* /health/ping:
* get:
* summary: Simple ping endpoint
* tags: [Health]
* responses:
* 200:
* description: Server is responsive
*/
router.get('/ping', validateRequest(emptySchema), handler);
```
**After** (tsoa):
```typescript
@Route('health')
@Tags('Health')
export class HealthController extends BaseController {
@Get('ping')
@SuccessResponse(200, 'Server is responsive')
public async ping(): Promise<{ message: string }> {
return { message: 'pong' };
}
}
```
### Zod Integration
tsoa uses its own validation. Options:
1. **Replace Zod with tsoa validation** - Use `@Body`, `@Query`, `@Path` decorators with TypeScript types
2. **Hybrid approach** - Keep Zod schemas, call `validateRequest()` within controller methods
3. **Custom template** - Generate tsoa routes that call Zod validation middleware
**Recommended**: Option 1 for new controllers; gradually migrate existing Zod schemas.
## Risk Mitigation
| Risk | Likelihood | Impact | Mitigation |
| --------------------------------------- | ---------- | ------ | ------------------------------------------- |
| tsoa/Express 5.x incompatibility | Medium | High | Test in dev container before migration |
| Missing OpenAPI coverage post-migration | Low | Medium | Compare generated specs before/after |
| Authentication middleware integration | Medium | Medium | Test @Security decorator with passport-jwt |
| Test regression from route changes | Low | High | Run full test suite after each controller |
| Build time increase (tsoa generation) | Low | Low | Add to npm run build; cache generated files |
## Consequences
### Positive
- **Type-safe API contracts**: tsoa decorators derive types from TypeScript
- **Reduced duplication**: No more parallel JSDoc + TypeScript type definitions
- **Modern tooling**: Active tsoa community (vs. unmaintained swagger-jsdoc)
- **Native Node.js**: fs.rm() is built-in, no external dependency
- **Smaller dependency tree**: Remove rimraf (5 transitive deps) + swagger-jsdoc (8 transitive deps)
### Negative
- **Learning curve**: Decorator-based controller pattern differs from Express handlers
- **Migration effort**: 20 route files require conversion
- **Generated code**: `src/generated/routes.ts` must be version-controlled or regenerated on build
### Neutral
- **Build step change**: Add `tsoa spec && tsoa routes` to build pipeline
- **Testing approach**: May need to adjust test structure for controller classes
## Alternatives Considered
### 1. Update swagger-jsdoc to fork/successor
**Rejected**: No active fork; community has moved to tsoa, fastify-swagger, or NestJS.
### 2. NestJS migration
**Rejected**: Full framework migration (Express → NestJS) is disproportionate to the problem scope.
### 3. fastify-swagger
**Rejected**: Requires Express → Fastify migration; out of scope.
### 4. Keep rimraf, accept deprecation warning
**Rejected**: Native fs.rm() is trivial replacement; no reason to maintain deprecated dependency.
## Key Files
| File | Purpose |
| ------------------------------------ | ------------------------------------- |
| `tsoa.json` | tsoa configuration |
| `src/controllers/base.controller.ts` | Base controller with utilities |
| `src/controllers/*.controller.ts` | Individual domain controllers |
| `src/generated/routes.ts` | tsoa-generated Express routes |
| `src/generated/swagger.json` | Generated OpenAPI 3.0 spec |
| `scripts/clean.mjs` | Native fs.rm() replacement for rimraf |
## Related ADRs
- [ADR-018](./0018-api-documentation-strategy.md) - API Documentation Strategy (will be updated)
- [ADR-003](./0003-standardized-input-validation-using-middleware.md) - Input Validation (Zod integration)
- [ADR-028](./0028-api-response-standardization.md) - Response Standardization (BaseController pattern)
- [ADR-001](./0001-standardized-error-handling.md) - Error Handling (error utilities in BaseController)

View File

@@ -0,0 +1,265 @@
# ADR-027: Standardized Application-Wide Structured Logging
**Date**: 2026-02-10
**Status**: Accepted
**Source**: Imported from flyer-crawler project (ADR-004)
**Related**: [ADR-017](ADR-017-structured-logging-with-pino.md), [ADR-028](ADR-028-client-side-structured-logging.md), [ADR-029](ADR-029-error-tracking-with-bugsink.md)
## Context
While ADR-017 established Pino as our logging framework, this ADR extends that foundation with application-wide standards for request tracing, context propagation, and structured log formats.
The implementation of logging can vary significantly across different modules. The error handler middleware may produce high-quality, structured JSON logs for errors, but logging within route handlers and service layers can become ad-hoc, using plain strings or inconsistent object structures.
This inconsistency leads to several problems:
- **Difficult Debugging**: It is hard to trace a single user request through the system or correlate events related to a specific operation
- **Ineffective Log Analysis**: Inconsistent log formats make it difficult to effectively query, filter, and create dashboards in log management systems (like Datadog, Splunk, or the ELK stack)
- **Security Risks**: There is no enforced standard for redacting sensitive information (like passwords or tokens) in logs outside of the error handler, increasing the risk of accidental data exposure
- **Missing Context**: Logs often lack crucial context, such as a unique request ID, the authenticated user's ID, or the source IP address, making them less useful for diagnosing issues
## Decision
We will adopt a standardized, application-wide structured logging policy. All log entries MUST be in JSON format and adhere to a consistent schema.
### 1. Request-Scoped Logger with Context
We will create a middleware that runs at the beginning of the request lifecycle. This middleware will:
- Generate a unique `request_id` for each incoming request
- Create a request-scoped logger instance (a "child logger") that automatically includes the `request_id`, `user_id` (if authenticated), and `ip_address` in every log message it generates
- Attach this child logger to the `req` object (e.g., `req.log`)
### 2. Mandatory Use of Request-Scoped Logger
All route handlers and any service functions called by them **MUST** use the request-scoped logger (`req.log`) instead of the global logger instance. This ensures all logs for a given request are automatically correlated.
### 3. Standardized Log Schema
All log messages should follow a base schema. The logger configuration will be updated to enforce this.
**Base Fields**: `level`, `timestamp`, `message`, `request_id`, `user_id`, `ip_address`
**Error Fields**: When logging an error, the log entry MUST include an `error` object with `name`, `message`, and `stack`.
### 4. Standardized Logging Practices
| Level | HTTP Status | Scenario |
| ----- | ----------- | -------------------------------------------------- |
| DEBUG | Any | Request incoming, internal state, development info |
| INFO | 2xx | Successful requests, business events |
| WARN | 4xx | Client errors, validation failures, not found |
| ERROR | 5xx | Server errors, unhandled exceptions |
## Implementation Details
### Logger Configuration
Located in `src/services/logger.server.ts`:
```typescript
import pino from 'pino';
const isProduction = process.env.NODE_ENV === 'production';
const isTest = process.env.NODE_ENV === 'test';
export const logger = pino({
level: isProduction ? 'info' : 'debug',
transport:
isProduction || isTest
? undefined
: {
target: 'pino-pretty',
options: {
colorize: true,
translateTime: 'SYS:standard',
ignore: 'pid,hostname',
},
},
redact: {
paths: [
'req.headers.authorization',
'req.headers.cookie',
'*.body.password',
'*.body.newPassword',
'*.body.currentPassword',
'*.body.confirmPassword',
'*.body.refreshToken',
'*.body.token',
],
censor: '[REDACTED]',
},
});
```
### Request Logger Middleware
Located in `server.ts`:
```typescript
import { randomUUID } from 'crypto';
import type { Request, Response, NextFunction } from 'express';
import { logger } from './services/logger.server';
const requestLogger = (req: Request, res: Response, next: NextFunction) => {
const requestId = randomUUID();
const user = req.user as UserProfile | undefined;
const start = process.hrtime();
// Create request-scoped logger
req.log = logger.child({
request_id: requestId,
user_id: user?.user.user_id,
ip_address: req.ip,
});
req.log.debug({ method: req.method, originalUrl: req.originalUrl }, 'INCOMING');
res.on('finish', () => {
const duration = getDurationInMilliseconds(start);
const { statusCode, statusMessage } = res;
const logDetails: Record<string, unknown> = {
user_id: (req.user as UserProfile | undefined)?.user.user_id,
method: req.method,
originalUrl: req.originalUrl,
statusCode,
statusMessage,
duration: duration.toFixed(2),
};
// Include request details for failed requests (for debugging)
if (statusCode >= 400) {
logDetails.req = { headers: req.headers, body: req.body };
}
if (statusCode >= 500) req.log.error(logDetails, 'Request completed with server error');
else if (statusCode >= 400) req.log.warn(logDetails, 'Request completed with client error');
else req.log.info(logDetails, 'Request completed successfully');
});
next();
};
app.use(requestLogger);
```
### TypeScript Support
The `req.log` property is typed via declaration merging in `src/types/express.d.ts`:
```typescript
import { Logger } from 'pino';
declare global {
namespace Express {
export interface Request {
log: Logger;
}
}
}
```
### Automatic Sensitive Data Redaction
The Pino logger automatically redacts sensitive fields:
```json
// Before redaction
{
"body": {
"email": "user@example.com",
"password": "secret123",
"newPassword": "newsecret456"
}
}
// After redaction (in logs)
{
"body": {
"email": "user@example.com",
"password": "[REDACTED]",
"newPassword": "[REDACTED]"
}
}
```
### Service Layer Logging
Services accept the request-scoped logger as an optional parameter:
```typescript
export async function registerUser(email: string, password: string, reqLog?: Logger) {
const log = reqLog || logger; // Fall back to global logger
log.info({ email }, 'Registering new user');
// ... implementation
log.debug({ userId: user.user_id }, 'User created successfully');
return user;
}
// In route handler
router.post('/register', async (req, res, next) => {
await authService.registerUser(req.body.email, req.body.password, req.log);
});
```
### Log Output Format
**Development** (pino-pretty):
```text
[2026-01-09 12:34:56.789] INFO (request_id=abc123): Request completed successfully
method: "GET"
originalUrl: "/api/users"
statusCode: 200
duration: "45.23"
```
**Production** (JSON):
```json
{
"level": 30,
"time": 1704812096789,
"request_id": "abc123",
"user_id": "user_456",
"ip_address": "192.168.1.1",
"method": "GET",
"originalUrl": "/api/users",
"statusCode": 200,
"duration": "45.23",
"msg": "Request completed successfully"
}
```
## Consequences
### Positive
- **Enhanced Observability**: Every log line from a single request can be instantly grouped and analyzed, dramatically speeding up debugging
- **Improved Security**: Centralizing the addition of context (like `user_id`) reduces the chance of developers manually logging sensitive data
- **Scalable Log Management**: Consistent JSON logs are easily ingested and indexed by any modern log aggregation tool
- **Clearer Code**: Removes the need to manually pass contextual information (like user ID) down to service functions just for logging purposes
### Negative
- **Refactoring Effort**: Requires adding the `requestLogger` middleware and refactoring all routes and services to use `req.log` instead of the global `logger`
- **Slight Performance Overhead**: Creating a child logger for every request adds a minor performance cost, though this is negligible for most modern logging libraries
## Key Files
- `src/services/logger.server.ts` - Pino logger configuration
- `src/services/logger.client.ts` - Client-side logger (for frontend)
- `src/types/express.d.ts` - TypeScript declaration for `req.log`
- `server.ts` - Request logger middleware
## References
- [ADR-017: Structured Logging with Pino](ADR-017-structured-logging-with-pino.md)
- [ADR-001: Standardized Error Handling](ADR-001-standardized-error-handling.md) - Error handler uses `req.log` for error logging
- [ADR-028: Client-Side Structured Logging](ADR-028-client-side-structured-logging.md) - Client-side logging strategy
- [Pino Documentation](https://getpino.io/#/)

View File

@@ -0,0 +1,242 @@
# ADR-028: Standardized Client-Side Structured Logging
**Date**: 2026-02-10
**Status**: Accepted
**Source**: Imported from flyer-crawler project (ADR-026)
**Related**: [ADR-027](ADR-027-application-wide-structured-logging.md), [ADR-029](ADR-029-error-tracking-with-bugsink.md)
## Context
Following the standardization of backend logging in ADR-027, it is clear that our frontend components also require a consistent logging strategy. Currently, components either use `console.log` directly or a simple wrapper, but without a formal standard, this can lead to inconsistent log formats and difficulty in debugging user-facing issues.
While the frontend does not have the concept of a "request-scoped" logger, the principles of structured, context-rich logging are equally important for:
1. **Effective Debugging**: Understanding the state of a component or the sequence of user interactions that led to an error
2. **Integration with Monitoring Tools**: Sending structured logs to services like Sentry/Bugsink or LogRocket allows for powerful analysis and error tracking in production
3. **Clean Test Outputs**: Uncontrolled logging can pollute test runner output, making it difficult to spot actual test failures
An existing client-side logger at `src/services/logger.client.ts` provides a simple, structured logging interface. This ADR formalizes its use as the application standard.
## Decision
We will adopt a standardized, application-wide structured logging policy for all client-side (React) code.
### 1. Mandatory Use of the Global Client Logger
All frontend components, hooks, and services **MUST** use the global logger singleton exported from `src/services/logger.client.ts`. Direct use of `console.log`, `console.error`, etc., is discouraged.
### 2. Pino-like API for Structured Logging
The client logger mimics the `pino` API, which is the standard on the backend. It supports two primary call signatures:
- `logger.info('A simple message');`
- `logger.info({ key: 'value' }, 'A message with a structured data payload');`
The second signature, which includes a data object as the first argument, is **strongly preferred**, especially for logging errors or complex state.
### 3. Mocking in Tests
All Jest/Vitest tests for components or hooks that use the logger **MUST** mock the `src/services/logger.client.ts` module. This prevents logs from appearing in test output and allows for assertions that the logger was called correctly.
## Implementation
### Client Logger Service
Located in `src/services/logger.client.ts`:
```typescript
type LogLevel = 'debug' | 'info' | 'warn' | 'error';
interface LoggerOptions {
level?: LogLevel;
enabled?: boolean;
}
const LOG_LEVELS: Record<LogLevel, number> = {
debug: 0,
info: 1,
warn: 2,
error: 3,
};
class ClientLogger {
private level: LogLevel;
private enabled: boolean;
constructor(options: LoggerOptions = {}) {
this.level = options.level ?? 'info';
this.enabled = options.enabled ?? import.meta.env.DEV;
}
private shouldLog(level: LogLevel): boolean {
return this.enabled && LOG_LEVELS[level] >= LOG_LEVELS[this.level];
}
private formatMessage(data: object | string, message?: string): string {
if (typeof data === 'string') {
return data;
}
const payload = JSON.stringify(data, null, 2);
return message ? `${message}\n${payload}` : payload;
}
debug(data: object | string, message?: string): void {
if (this.shouldLog('debug')) {
console.debug(`[DEBUG] ${this.formatMessage(data, message)}`);
}
}
info(data: object | string, message?: string): void {
if (this.shouldLog('info')) {
console.info(`[INFO] ${this.formatMessage(data, message)}`);
}
}
warn(data: object | string, message?: string): void {
if (this.shouldLog('warn')) {
console.warn(`[WARN] ${this.formatMessage(data, message)}`);
}
}
error(data: object | string, message?: string): void {
if (this.shouldLog('error')) {
console.error(`[ERROR] ${this.formatMessage(data, message)}`);
}
}
}
export const logger = new ClientLogger({
level: import.meta.env.DEV ? 'debug' : 'warn',
enabled: true,
});
```
### Example Usage
**Logging an Error in a Component:**
```typescript
// In a React component or hook
import { logger } from '../services/logger.client';
import { notifyError } from '../services/notificationService';
const fetchData = async () => {
try {
const data = await apiClient.getData();
return data;
} catch (err) {
// Log the full error object for context, along with a descriptive message.
logger.error({ err }, 'Failed to fetch component data');
notifyError('Something went wrong. Please try again.');
}
};
```
**Logging State Changes:**
```typescript
// In a Zustand store or state hook
import { logger } from '../services/logger.client';
// Auth store example: each login state transition is logged with minimal context.
const useAuthStore = create((set) => ({
login: async (credentials) => {
// NOTE(review): the email address is PII — confirm logging it complies with
// the project's data-handling policy.
logger.info({ email: credentials.email }, 'User login attempt');
try {
const user = await authService.login(credentials);
logger.info({ userId: user.id }, 'User logged in successfully');
set({ user, isAuthenticated: true });
} catch (error) {
// Log, then re-throw so the calling UI can surface the failure.
logger.error({ error }, 'Login failed');
throw error;
}
},
}));
```
### Mocking the Logger in Tests
```typescript
// In a *.test.tsx file
import { vi } from 'vitest';
// Mock the logger at the top of the test file
vi.mock('../services/logger.client', () => ({
logger: {
info: vi.fn(),
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
},
}));
describe('MyComponent', () => {
beforeEach(() => {
vi.clearAllMocks(); // Clear mocks between tests
});
it('should log an error when fetching fails', async () => {
// ... test setup to make fetch fail ...
// Assert that the logger was called with the expected structure
expect(logger.error).toHaveBeenCalledWith(
expect.objectContaining({ err: expect.any(Error) }),
'Failed to fetch component data',
);
});
});
```
## Integration with Error Tracking
When using Sentry/Bugsink for error tracking (see ADR-029), the client logger can be extended to send logs as breadcrumbs:
```typescript
import * as Sentry from '@sentry/react';
class ClientLogger {
// ... existing implementation
// Extended error(): writes to the console only when shouldLog permits, but
// ALWAYS records a Sentry breadcrumb (the breadcrumb call sits outside the
// guard). NOTE(review): when `data` is a string, the `message` parameter is
// not included in the breadcrumb — confirm that is intentional.
error(data: object | string, message?: string): void {
if (this.shouldLog('error')) {
console.error(`[ERROR] ${this.formatMessage(data, message)}`);
}
// Add to Sentry breadcrumbs for error context
Sentry.addBreadcrumb({
category: 'log',
level: 'error',
message: typeof data === 'string' ? data : message,
data: typeof data === 'object' ? data : undefined,
});
}
}
```
## Consequences
### Positive
- **Consistency**: All client-side logs will have a predictable structure, making them easier to read and parse
- **Debuggability**: Errors logged with a full object (`{ err }`) capture the stack trace and other properties, which is invaluable for debugging
- **Testability**: Components that log are easier to test without polluting CI/CD output. We can also assert that logging occurs when expected
- **Future-Proof**: If we later decide to send client-side logs to a remote service, we only need to modify the central `logger.client.ts` file instead of every component
- **Error Tracking Integration**: Logs can be used as breadcrumbs in Sentry/Bugsink for better error context
### Negative
- **Minor Boilerplate**: Requires importing the logger in every file that needs it and mocking it in every corresponding test file. However, this is a small and consistent effort
- **Production Noise**: Care must be taken to configure appropriate log levels in production to avoid performance impact
## Key Files
- `src/services/logger.client.ts` - Client-side logger implementation
- `src/services/logger.server.ts` - Backend logger (for reference)
## References
- [ADR-027: Application-Wide Structured Logging](ADR-027-application-wide-structured-logging.md)
- [ADR-029: Error Tracking with Bugsink](ADR-029-error-tracking-with-bugsink.md)
- [Pino Documentation](https://getpino.io/#/)

View File

@@ -0,0 +1,389 @@
# ADR-029: Error Tracking and Observability with Bugsink
**Date**: 2026-02-10
**Status**: Accepted
**Source**: Imported from flyer-crawler project (ADR-015)
**Related**: [ADR-027](ADR-027-application-wide-structured-logging.md), [ADR-028](ADR-028-client-side-structured-logging.md), [ADR-030](ADR-030-postgresql-function-observability.md), [ADR-032](ADR-032-application-performance-monitoring.md)
## Context
While ADR-027 established structured logging with Pino, the application lacks a high-level, aggregated view of its health and errors. It is difficult to spot trends, identify recurring issues, or be proactively notified of new types of errors.
Key requirements:
1. **Self-hosted**: No external SaaS dependencies for error tracking
2. **Sentry SDK compatible**: Leverage mature, well-documented SDKs
3. **Lightweight**: Minimal resource overhead in the dev container
4. **Production-ready**: Same architecture works on bare-metal production servers
5. **AI-accessible**: MCP server integration for Claude Code and other AI tools
**Note**: Application Performance Monitoring (APM) and distributed tracing are covered separately in [ADR-032](ADR-032-application-performance-monitoring.md).
## Decision
We implement a self-hosted error tracking stack using **Bugsink** as the Sentry-compatible backend, with the following components:
### 1. Error Tracking Backend: Bugsink
**Bugsink** is a lightweight, self-hosted Sentry alternative that:
- Runs as a single process (no Kafka, Redis, ClickHouse required)
- Is fully compatible with Sentry SDKs
- Supports ARM64 and AMD64 architectures
- Can use SQLite (dev) or PostgreSQL (production)
**Deployment**:
- **Dev container**: Installed as a systemd service inside the container
- **Production**: Runs as a systemd service on bare-metal, listening on localhost only
- **Database**: Uses PostgreSQL with a dedicated `bugsink` user and `bugsink` database (same PostgreSQL instance as the main application)
### 2. Backend Integration: @sentry/node
The Express backend integrates `@sentry/node` SDK to:
- Capture unhandled exceptions before PM2/process manager restarts
- Report errors with full stack traces and context
- Integrate with Pino logger for breadcrumbs
- Filter errors by severity (only 5xx errors sent by default)
### 3. Frontend Integration: @sentry/react
The React frontend integrates `@sentry/react` SDK to:
- Wrap the app in an Error Boundary for graceful error handling
- Capture unhandled JavaScript errors
- Report errors with component stack traces
- Filter out browser extension errors
- **Frontend Error Correlation**: The global API client intercepts 4xx/5xx responses and can attach the `x-request-id` header to Sentry scope for correlation with backend logs
### 4. Log Aggregation: Logstash
**Logstash** parses application and infrastructure logs, forwarding error patterns to Bugsink:
- **Installation**: Installed inside the dev container (and on bare-metal prod servers)
- **Inputs**:
- Pino JSON logs from the Node.js application (PM2 managed)
- Redis logs (connection errors, memory warnings, slow commands)
- PostgreSQL function logs (via `fn_log()` - see ADR-030)
- NGINX access/error logs
- **Filter**: Identifies error-level logs (5xx responses, unhandled exceptions, Redis errors)
- **Output**: Sends to Bugsink via Sentry-compatible HTTP API
This provides a secondary error capture path for:
- Errors that occur before Sentry SDK initialization
- Log-based errors that do not throw exceptions
- Redis connection/performance issues
- Database function errors and slow queries
- Historical error analysis from log files
### 5. MCP Server Integration: bugsink-mcp
For AI tool integration (Claude Code, Cursor, etc.), we use the open-source [bugsink-mcp](https://github.com/j-shelfwood/bugsink-mcp) server:
- **No code changes required**: Configurable via environment variables
- **Capabilities**: List projects, get issues, view events, get stacktraces, manage releases
- **Configuration**:
- `BUGSINK_URL`: Points to Bugsink instance (`http://localhost:8000` for dev, `https://bugsink.example.com` for prod)
- `BUGSINK_API_TOKEN`: API token from Bugsink (created via Django management command)
- `BUGSINK_ORG_SLUG`: Organization identifier (usually "sentry")
## Architecture
```text
+---------------------------------------------------------------------------+
| Dev Container / Production Server |
+---------------------------------------------------------------------------+
| |
| +------------------+ +------------------+ |
| | Frontend | | Backend | |
| | (React) | | (Express) | |
| | @sentry/react | | @sentry/node | |
| +--------+---------+ +--------+---------+ |
| | | |
| | Sentry SDK Protocol | |
| +-----------+---------------+ |
| | |
| v |
| +----------------------+ |
| | Bugsink | |
| | (localhost:8000) |<------------------+ |
| | | | |
| | PostgreSQL backend | | |
| +----------------------+ | |
| | |
| +----------------------+ | |
| | Logstash |-------------------+ |
| | (Log Aggregator) | Sentry Output |
| | | |
| | Inputs: | |
| | - PM2/Pino logs | |
| | - Redis logs | |
| | - PostgreSQL logs | |
| | - NGINX logs | |
| +----------------------+ |
| ^ ^ ^ ^ |
| | | | | |
| +-----------+ | | +-----------+ |
| | | | | |
| +----+-----+ +-----+----+ +-----+----+ +-----+----+ |
| | PM2 | | Redis | | PostgreSQL| | NGINX | |
| | Logs | | Logs | | Logs | | Logs | |
| +----------+ +----------+ +-----------+ +---------+ |
| |
| +----------------------+ |
| | PostgreSQL | |
| | +----------------+ | |
| | | app_database | | (main app database) |
| | +----------------+ | |
| | | bugsink | | (error tracking database) |
| | +----------------+ | |
| +----------------------+ |
| |
+---------------------------------------------------------------------------+
External (Developer Machine):
+--------------------------------------+
| Claude Code / Cursor / VS Code |
| +--------------------------------+ |
| | bugsink-mcp | |
| | (MCP Server) | |
| | | |
| | BUGSINK_URL=http://localhost:8000 | |
| | BUGSINK_API_TOKEN=... | |
| | BUGSINK_ORG_SLUG=... | |
| +--------------------------------+ |
+--------------------------------------+
```
## Implementation Details
### Environment Variables
| Variable | Description | Default (Dev) |
| -------------------- | -------------------------------- | -------------------------- |
| `SENTRY_DSN` | Sentry-compatible DSN (backend) | Set after project creation |
| `VITE_SENTRY_DSN` | Sentry-compatible DSN (frontend) | Set after project creation |
| `SENTRY_ENVIRONMENT` | Environment name | `development` |
| `SENTRY_DEBUG` | Enable debug logging | `false` |
| `SENTRY_ENABLED` | Enable/disable error reporting | `true` |
### PostgreSQL Setup
```sql
-- Create dedicated Bugsink database and user
CREATE USER bugsink WITH PASSWORD 'bugsink_dev_password';
CREATE DATABASE bugsink OWNER bugsink;
GRANT ALL PRIVILEGES ON DATABASE bugsink TO bugsink;
```
### Bugsink Configuration
```bash
# Environment variables for Bugsink service
SECRET_KEY=<random-50-char-string>
DATABASE_URL=postgresql://bugsink:bugsink_dev_password@localhost:5432/bugsink
BASE_URL=http://localhost:8000
PORT=8000
```
### Backend Sentry Integration
Located in `src/services/sentry.server.ts`:
```typescript
import * as Sentry from '@sentry/node';
import { config } from '../config/env';
/**
 * Initialize the backend Sentry SDK. A no-op when error reporting is
 * disabled or no DSN has been configured.
 */
export function initSentry() {
  if (!config.sentry.enabled || !config.sentry.dsn) {
    return;
  }
  Sentry.init({
    dsn: config.sentry.dsn,
    environment: config.sentry.environment || config.server.nodeEnv,
    debug: config.sentry.debug,
    // Performance monitoring - disabled by default (see ADR-032)
    tracesSampleRate: 0,
    // Drop client errors (4xx); only server errors are reported.
    beforeSend(event) {
      const status = event.contexts?.response?.status_code;
      const isClientError = Boolean(status && status >= 400 && status < 500);
      return isClientError ? null : event;
    },
  });
}
// Attach the authenticated user's identity to subsequent Sentry events.
export function setUserContext(user: { id: string; email: string; name?: string }) {
  const { id, email, name: username } = user;
  Sentry.setUser({ id, email, username });
}
// Clear user context on logout so later events are no longer attributed
// to the previous user.
export function clearUserContext() {
Sentry.setUser(null);
}
```
### Frontend Sentry Integration
Located in `src/services/sentry.client.ts`:
```typescript
import * as Sentry from '@sentry/react';
import { config } from '../config';
/**
 * Initialize the frontend Sentry SDK. A no-op when error reporting is
 * disabled or no DSN has been configured.
 */
export function initSentry() {
  if (!config.sentry.enabled || !config.sentry.dsn) {
    return;
  }
  Sentry.init({
    dsn: config.sentry.dsn,
    environment: config.sentry.environment,
    // Performance monitoring - disabled by default (see ADR-032)
    tracesSampleRate: 0,
    // Filter out browser extension errors
    beforeSend(event) {
      const frames = event.exception?.values?.[0]?.stacktrace?.frames ?? [];
      const fromExtension = frames.some((frame) => frame.filename?.includes('extension://'));
      return fromExtension ? null : event;
    },
  });
}
// Attach the logged-in user's identity to subsequent Sentry events.
export function setUserContext(user: { id: string; email: string; name?: string }) {
  const { id, email, name: username } = user;
  Sentry.setUser({ id, email, username });
}
// Clear user context on logout so later events are no longer attributed
// to the previous user.
export function clearUserContext() {
Sentry.setUser(null);
}
```
### Error Boundary Component
Located in `src/components/ErrorBoundary.tsx`:
```typescript
import * as Sentry from '@sentry/react';
import { Component, ErrorInfo, ReactNode } from 'react';
// Props: the subtree to guard, plus an optional custom fallback UI.
interface Props {
children: ReactNode;
fallback?: ReactNode;
}
// State: whether a render error has been caught in the subtree.
interface State {
hasError: boolean;
}
/**
 * Error boundary that reports render errors to Sentry (attaching the React
 * component stack as extra context) and shows a fallback UI instead of
 * crashing the whole app.
 */
export class ErrorBoundary extends Component<Props, State> {
constructor(props: Props) {
super(props);
this.state = { hasError: false };
}
// React lifecycle: switch to the fallback UI on the render after an error.
static getDerivedStateFromError(): State {
return { hasError: true };
}
// Send the error to Sentry with the component stack for context.
componentDidCatch(error: Error, errorInfo: ErrorInfo) {
Sentry.withScope((scope) => {
scope.setExtras({ componentStack: errorInfo.componentStack });
Sentry.captureException(error);
});
}
render() {
// Prefer the caller-supplied fallback; otherwise render a generic notice.
if (this.state.hasError) {
return this.props.fallback || (
<div className="error-boundary">
<h1>Something went wrong</h1>
<p>Please refresh the page or contact support.</p>
</div>
);
}
return this.props.children;
}
}
```
### Logstash Pipeline Configuration
Key routing for log sources:
| Source | Bugsink Project |
| --------------- | --------------- |
| Backend (Pino) | Backend API |
| Worker (Pino) | Backend API |
| PostgreSQL logs | Backend API |
| Vite logs | Infrastructure |
| Redis logs | Infrastructure |
| NGINX logs | Infrastructure |
| Frontend errors | Frontend |
## Consequences
### Positive
- **Full observability**: Aggregated view of errors and trends
- **Self-hosted**: No external SaaS dependencies or subscription costs
- **SDK compatibility**: Leverages mature Sentry SDKs with excellent documentation
- **AI integration**: MCP server enables Claude Code to query and analyze errors
- **Unified architecture**: Same setup works in dev container and production
- **Lightweight**: Bugsink runs in a single process, unlike full Sentry (16GB+ RAM)
- **Error correlation**: Request IDs allow correlation between frontend errors and backend logs
### Negative
- **Additional services**: Bugsink and Logstash add complexity to the container
- **PostgreSQL overhead**: Additional database for error tracking
- **Initial setup**: Requires configuration of multiple components
- **Logstash learning curve**: Pipeline configuration requires Logstash knowledge
## Alternatives Considered
1. **Full Sentry self-hosted**: Rejected due to complexity (Kafka, Redis, ClickHouse, 16GB+ RAM minimum)
2. **GlitchTip**: Considered, but Bugsink is lighter weight and easier to deploy
3. **Sentry SaaS**: Rejected due to self-hosted requirement
4. **Custom error aggregation**: Rejected in favor of proven Sentry SDK ecosystem
## References
- [Bugsink Documentation](https://www.bugsink.com/docs/)
- [Bugsink Docker Install](https://www.bugsink.com/docs/docker-install/)
- [@sentry/node Documentation](https://docs.sentry.io/platforms/javascript/guides/node/)
- [@sentry/react Documentation](https://docs.sentry.io/platforms/javascript/guides/react/)
- [bugsink-mcp](https://github.com/j-shelfwood/bugsink-mcp)
- [Logstash Reference](https://www.elastic.co/guide/en/logstash/current/index.html)
- [ADR-030: PostgreSQL Function Observability](ADR-030-postgresql-function-observability.md)
- [ADR-032: Application Performance Monitoring](ADR-032-application-performance-monitoring.md)

View File

@@ -0,0 +1,336 @@
# ADR-030: PostgreSQL Function Observability
**Date**: 2026-02-10
**Status**: Accepted
**Source**: Imported from flyer-crawler project (ADR-050)
**Related**: [ADR-029](ADR-029-error-tracking-with-bugsink.md), [ADR-027](ADR-027-application-wide-structured-logging.md)
## Context
Applications often use PostgreSQL functions and triggers for business logic, including:
- Data transformations and validations
- Complex query encapsulation
- Trigger-based side effects
- Audit logging
**Current Problem**: These database functions can fail silently in several ways:
1. **`ON CONFLICT DO NOTHING`** - Swallows constraint violations without notification
2. **`IF NOT FOUND THEN RETURN;`** - Silently exits when data is missing
3. **Trigger functions returning `NULL`** - No indication of partial failures
4. **No logging inside functions** - No visibility into function execution
When these silent failures occur:
- The application layer receives no error (function "succeeds" but does nothing)
- No logs are generated for debugging
- Issues are only discovered when users report missing data
- Root cause analysis is extremely difficult
**Example of Silent Failure**:
```sql
-- This function silently does nothing if record doesn't exist
-- (Fixed as an example: the original excerpt used v_item_id without a
-- DECLARE block and omitted the required LANGUAGE clause, so it would not
-- even compile; the silent-failure behavior it illustrates is unchanged.)
CREATE OR REPLACE FUNCTION public.process_item(p_user_id UUID, p_item_name TEXT)
RETURNS void AS $$
DECLARE
  v_item_id BIGINT;
BEGIN
  SELECT item_id INTO v_item_id FROM items WHERE name = p_item_name;
  IF v_item_id IS NULL THEN
    RETURN; -- Silent failure - no log, no error
  END IF;
  -- ...
END;
$$ LANGUAGE plpgsql;
```
ADR-029 established Logstash + Bugsink for error tracking, with PostgreSQL log integration. This ADR defines the implementation.
## Decision
We will implement a standardized PostgreSQL function observability strategy with three tiers of logging severity.
### 1. Function Logging Helper
Create a reusable logging function that outputs structured JSON to PostgreSQL logs:
```sql
-- Function to emit structured log messages from PL/pgSQL
-- NOTE(review): messages raised at DEBUG/INFO/NOTICE only reach the server
-- log file when log_min_messages permits them — verify against the
-- postgresql.conf settings in section 3 of this ADR.
CREATE OR REPLACE FUNCTION public.fn_log(
p_level TEXT, -- 'DEBUG', 'INFO', 'NOTICE', 'WARNING', 'ERROR'
p_function_name TEXT, -- The calling function name
p_message TEXT, -- Human-readable message
p_context JSONB DEFAULT NULL -- Additional context (user_id, params, etc.)
)
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
log_line TEXT;
BEGIN
-- Build structured JSON log line
log_line := jsonb_build_object(
'timestamp', now(),
'level', p_level,
'source', 'postgresql',
'function', p_function_name,
'message', p_message,
'context', COALESCE(p_context, '{}'::jsonb)
)::text;
-- Use appropriate RAISE level
CASE p_level
WHEN 'DEBUG' THEN RAISE DEBUG '%', log_line;
WHEN 'INFO' THEN RAISE INFO '%', log_line;
WHEN 'NOTICE' THEN RAISE NOTICE '%', log_line;
WHEN 'WARNING' THEN RAISE WARNING '%', log_line;
WHEN 'ERROR' THEN RAISE LOG '%', log_line; -- Use LOG for errors to ensure capture
ELSE RAISE NOTICE '%', log_line;
END CASE;
END;
$$;
```
### 2. Logging Tiers
#### Tier 1: Critical Functions (Always Log)
Functions where silent failure causes data corruption or user-facing issues:
| Function Type | Log Events |
| ---------------------------- | --------------------------------------- |
| User creation/management | User creation, profile creation, errors |
| Permission/role changes | Role not found, permission denied |
| Financial transactions | Transaction not found, balance issues |
| Data approval workflows | Record not found, permission denied |
| Critical business operations | Items added, operations completed |
**Pattern**:
```sql
-- Tier 1 pattern: log instead of failing silently, and log successes too.
CREATE OR REPLACE FUNCTION public.process_critical_operation(p_user_id UUID, p_operation_name TEXT)
RETURNS void AS $$
DECLARE
  v_operation_id BIGINT;
  v_context JSONB;
BEGIN
  v_context := jsonb_build_object('user_id', p_user_id, 'operation_name', p_operation_name);
  SELECT operation_id INTO v_operation_id
  FROM public.operations WHERE name = p_operation_name;
  IF v_operation_id IS NULL THEN
    -- Log the issue instead of silent return
    PERFORM fn_log('WARNING', 'process_critical_operation',
      'Operation not found: ' || p_operation_name, v_context);
    RETURN;
  END IF;
  -- Perform operation
  INSERT INTO public.user_operations (user_id, operation_id)
  VALUES (p_user_id, v_operation_id)
  ON CONFLICT (user_id, operation_id) DO NOTHING;
  -- FOUND is true only when the INSERT actually added a row (no conflict).
  IF FOUND THEN
    PERFORM fn_log('INFO', 'process_critical_operation',
      'Operation completed: ' || p_operation_name, v_context);
  END IF;
END;
$$ LANGUAGE plpgsql; -- LANGUAGE clause was missing; CREATE FUNCTION requires it
```
#### Tier 2: Business Logic Functions (Log on Anomalies)
Functions where unexpected conditions should be logged but are not critical:
| Function Type | Log Events |
| --------------------------- | -------------------------------- |
| Search/suggestion functions | No match found (below threshold) |
| Recommendation engines | No recommendations generated |
| Data lookup functions | Empty results, no matches found |
| Price/analytics queries | No data available, stale data |
**Pattern**: Log when results are unexpectedly empty or inputs are invalid.
#### Tier 3: Triggers (Log Errors Only)
Triggers should be fast, so only log when something goes wrong:
| Trigger Type | Log Events |
| --------------------- | ---------------------------- |
| Audit triggers | Failed to update audit trail |
| Aggregation triggers | Calculation failed |
| Cascade triggers | Related record lookup failed |
| Notification triggers | External service call failed |
### 3. PostgreSQL Configuration
Enable logging in `postgresql.conf`:
```ini
# Log all function notices and above
log_min_messages = notice
# Include function name in log prefix
log_line_prefix = '%t [%p] %u@%d '
# Log to file for Logstash pickup
logging_collector = on
log_directory = '/var/log/postgresql'
log_filename = 'postgresql-%Y-%m-%d.log'
log_rotation_age = 1d
log_rotation_size = 100MB
# Capture slow queries from functions
log_min_duration_statement = 1000 # Log queries over 1 second
```
### 4. Logstash Integration
Update the Logstash pipeline (extends ADR-029 configuration):
```conf
# PostgreSQL function log input
input {
file {
path => "/var/log/postgresql/*.log"
type => "postgres"
tags => ["postgres"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_postgres"
}
}
filter {
if [type] == "postgres" {
# Extract timestamp and process ID from PostgreSQL log prefix
grok {
match => { "message" => "%{TIMESTAMP_ISO8601:pg_timestamp} \[%{POSINT:pg_pid}\] %{USER:pg_user}@%{WORD:pg_database} %{GREEDYDATA:pg_message}" }
}
# Check if this is a structured JSON log from fn_log()
if [pg_message] =~ /^\{.*"source":"postgresql".*\}$/ {
json {
source => "pg_message"
target => "fn_log"
}
# Mark as error if level is WARNING or ERROR
if [fn_log][level] in ["WARNING", "ERROR"] {
mutate { add_tag => ["error", "db_function"] }
}
}
# Also catch native PostgreSQL errors
if [pg_message] =~ /^ERROR:/ or [pg_message] =~ /^FATAL:/ {
mutate { add_tag => ["error", "postgres_native"] }
}
}
}
output {
if "error" in [tags] and "postgres" in [tags] {
http {
url => "http://localhost:8000/api/store/"
http_method => "post"
format => "json"
}
}
}
```
### 5. Dual-File Update Requirement
**IMPORTANT**: All SQL function changes must be applied to BOTH files:
1. `sql/Initial_triggers_and_functions.sql` - Used for incremental updates
2. `sql/master_schema_rollup.sql` - Used for fresh database setup
Both files must remain in sync for triggers and functions.
## Implementation Steps
1. **Create `fn_log()` helper function**:
- Add to both SQL files
- Test with `SELECT fn_log('INFO', 'test', 'Test message', '{"key": "value"}'::jsonb);`
2. **Update Tier 1 critical functions** (highest priority):
- Identify functions with silent failures
- Add appropriate logging calls
- Test error paths
3. **Update Tier 2 business logic functions**:
- Add anomaly logging to suggestion/recommendation functions
- Log empty result sets with context
4. **Update Tier 3 trigger functions**:
- Add error-only logging to critical triggers
- Wrap complex trigger logic in exception handlers
5. **Configure PostgreSQL logging**:
- Update `postgresql.conf` in dev container
- Update production PostgreSQL configuration
- Verify logs appear in expected location
6. **Update Logstash pipeline**:
- Add PostgreSQL input to Logstash config
- Add filter rules for structured JSON extraction
- Test end-to-end: function log -> Logstash -> Bugsink
7. **Verify in Bugsink**:
- Confirm database function errors appear as issues
- Verify context (user_id, function name, params) is captured
## Consequences
### Positive
- **Visibility**: Silent failures become visible in error tracking
- **Debugging**: Function execution context captured for root cause analysis
- **Proactive detection**: Anomalies logged before users report issues
- **Unified monitoring**: Database errors appear alongside application errors in Bugsink
- **Structured logs**: JSON format enables filtering and aggregation
### Negative
- **Performance overhead**: Logging adds latency to function execution
- **Log volume**: Tier 1/2 functions may generate significant log volume
- **Maintenance**: Two SQL files must be kept in sync
- **PostgreSQL configuration**: Requires access to `postgresql.conf`
### Mitigations
- **Performance**: Only log meaningful events, not every function call
- **Log volume**: Use appropriate log levels; Logstash filters reduce noise
- **Sync**: Add CI check to verify SQL files match for function definitions
- **Configuration**: Document PostgreSQL settings in deployment runbook
## Examples
### Before (Silent Failure)
```sql
-- User thinks operation completed, but it silently failed
SELECT process_item('user-uuid', 'Nonexistent Item');
-- Returns: void (no error, no log)
-- Result: User never gets expected result, nobody knows why
```
### After (Observable Failure)
```sql
SELECT process_item('user-uuid', 'Nonexistent Item');
-- Returns: void
-- PostgreSQL log: {"timestamp":"2026-01-11T10:30:00Z","level":"WARNING","source":"postgresql","function":"process_item","message":"Item not found: Nonexistent Item","context":{"user_id":"user-uuid","item_name":"Nonexistent Item"}}
-- Bugsink: New issue created with full context
```
## References
- [ADR-029: Error Tracking with Bugsink](ADR-029-error-tracking-with-bugsink.md)
- [ADR-027: Application-Wide Structured Logging](ADR-027-application-wide-structured-logging.md)
- [PostgreSQL RAISE Documentation](https://www.postgresql.org/docs/current/plpgsql-errors-and-messages.html)
- [PostgreSQL Logging Configuration](https://www.postgresql.org/docs/current/runtime-config-logging.html)

View File

@@ -0,0 +1,262 @@
# ADR-031: Granular Debug Logging Strategy
**Date**: 2026-02-10
**Status**: Accepted
**Source**: Imported from flyer-crawler project (ADR-052)
**Related**: [ADR-027](ADR-027-application-wide-structured-logging.md), [ADR-017](ADR-017-structured-logging-with-pino.md)
## Context
Global log levels (INFO vs DEBUG) are too coarse. Developers need to inspect detailed debug information for specific subsystems (e.g., `ai-service`, `db-pool`, `auth-service`) without being flooded by logs from the entire application.
When debugging a specific feature:
- Setting `LOG_LEVEL=debug` globally produces too much noise
- Manually adding/removing debug statements is error-prone
- No standard way to enable targeted debugging in production
## Decision
We will adopt a namespace-based debug filter pattern, similar to the `debug` npm package, but integrated into our Pino logger.
1. **Logger Namespaces**: Every service/module logger must be initialized with a `module` property (e.g., `logger.child({ module: 'ai-service' })`).
2. **Environment Filter**: We will support a `DEBUG_MODULES` environment variable that overrides the log level for matching modules.
## Implementation
### Core Implementation
Implemented in `src/services/logger.server.ts`:
```typescript
import pino from 'pino';
// Parse DEBUG_MODULES from environment. filter(Boolean) drops the empty
// entry produced when the variable is unset (''.split(',') yields ['']).
const debugModules = (process.env.DEBUG_MODULES || '')
  .split(',')
  .map((s) => s.trim())
  .filter(Boolean);
// Base logger configuration
export const logger = pino({
level: process.env.LOG_LEVEL || (process.env.NODE_ENV === 'production' ? 'info' : 'debug'),
// ... other configuration
});
/**
 * Creates a scoped logger for a specific module.
 * If DEBUG_MODULES includes this module or '*', debug level is enabled.
 */
export const createScopedLogger = (moduleName: string) => {
  // If DEBUG_MODULES contains the module name or "*", force level to 'debug'
  const isDebugEnabled = debugModules.includes('*') || debugModules.includes(moduleName);
  // Pino takes the per-child level in the second (options) argument of
  // child(); putting `level` inside the bindings object would emit it as a
  // log field rather than changing the child logger's level (pino v7+).
  return logger.child(
    { module: moduleName },
    { level: isDebugEnabled ? 'debug' : logger.level },
  );
};
```
### Service Usage Examples
```typescript
// src/services/aiService.server.ts
import { createScopedLogger } from './logger.server';
const logger = createScopedLogger('ai-service');
// Example of a scoped-logger service: debug on entry, info on completion.
export async function processWithAI(data: unknown) {
logger.debug({ data }, 'Starting AI processing');
// ... implementation
// NOTE(review): `result` must be declared by the elided implementation above —
// as excerpted, the snippet references an undeclared identifier.
logger.info({ result }, 'AI processing completed');
}
```
```typescript
// src/services/authService.server.ts
import { createScopedLogger } from './logger.server';
const logger = createScopedLogger('auth-service');
// Example: log only the token's length — never the token value itself.
export async function validateToken(token: string) {
logger.debug({ tokenLength: token.length }, 'Validating token');
// ... implementation
}
```
### Module Naming Convention
Use kebab-case suffixed with `-service` or `-worker`:
| Module Name | Purpose | File |
| --------------- | -------------------------------- | ------------------------------------- |
| `ai-service` | AI/external API interactions | `src/services/aiService.server.ts` |
| `auth-service` | Authentication and authorization | `src/services/authService.server.ts` |
| `db-pool` | Database connection pooling | `src/services/database.server.ts` |
| `cache-service` | Redis/caching operations | `src/services/cacheService.server.ts` |
| `queue-worker` | Background job processing | `src/workers/queueWorker.ts` |
| `email-service` | Email sending | `src/services/emailService.server.ts` |
## Usage
### Enable Debug Logging for Specific Modules
To debug only AI and authentication:
```bash
DEBUG_MODULES=ai-service,auth-service npm run dev
```
### Enable All Debug Logging
Use wildcard to enable debug logging for all modules:
```bash
DEBUG_MODULES=* npm run dev
```
### Development Environment
In `.env.development`:
```bash
# Enable debug logging for specific modules during development
DEBUG_MODULES=ai-service
```
### Production Troubleshooting
Temporarily enable debug logging for a specific subsystem:
```bash
# SSH into production server
ssh root@example.com
# Set environment variable and restart
DEBUG_MODULES=ai-service pm2 restart app-api
# View logs
pm2 logs app-api --lines 100
# Disable debug logging
pm2 unset DEBUG_MODULES app-api
pm2 restart app-api
```
### With PM2 Configuration
In `ecosystem.config.js`:
```javascript
// PM2 ecosystem file: the default `env` runs without debug logging; the
// `env_debug` profile enables it for selected modules.
module.exports = {
apps: [
{
name: 'app-api',
script: 'dist/server.js',
env: {
NODE_ENV: 'production',
// DEBUG_MODULES is unset by default
},
// Activated with: pm2 start ecosystem.config.js --env debug
env_debug: {
NODE_ENV: 'production',
DEBUG_MODULES: 'ai-service,auth-service',
},
},
],
};
```
Start with debug logging:
```bash
pm2 start ecosystem.config.js --env debug
```
## Best Practices
### 1. Use Scoped Loggers for Long-Running Services
Services with complex workflows or external API calls should use `createScopedLogger` to allow targeted debugging:
```typescript
const logger = createScopedLogger('payment-service');
// Scoped-logger example: debug around the external call, error with context
// on failure, and the error is re-thrown for the caller to handle.
export async function processPayment(payment: Payment) {
  const paymentContext = { paymentId: payment.id };
  logger.debug(paymentContext, 'Starting payment processing');
  try {
    const result = await externalPaymentAPI.process(payment);
    logger.debug({ result }, 'External API response');
    return result;
  } catch (error) {
    logger.error({ error, ...paymentContext }, 'Payment processing failed');
    throw error;
  }
}
```
### 2. Use Child Loggers for Contextual Data
Even within scoped loggers, create child loggers with job/request-specific context:
```typescript
const logger = createScopedLogger('queue-worker');
// Per-job child logger: job identity appears on every line without repetition.
async function processJob(job: Job) {
const jobLogger = logger.child({ jobId: job.id, jobName: job.name });
jobLogger.debug('Starting job processing');
// ... processing
jobLogger.info('Job completed successfully');
}
```
### 3. Consistent Debug Message Patterns
Use consistent patterns for debug messages:
```typescript
// Function entry
logger.debug({ params: sanitizedParams }, 'Function entry: processOrder');
// External API calls
logger.debug({ url, method }, 'External API request');
logger.debug({ statusCode, duration }, 'External API response');
// State changes
logger.debug({ before, after }, 'State transition');
// Decision points
logger.debug({ condition, result }, 'Branch decision');
```
### 4. Production Usage Guidelines
- `DEBUG_MODULES` can be set in production for temporary debugging
- Should not be used continuously due to increased log volume
- Always unset after troubleshooting is complete
- Monitor log storage when debug logging is enabled
## Consequences
### Positive
- Developers can inspect detailed logs for specific subsystems without log flooding
- Production debugging becomes more targeted and efficient
- No performance impact when debug logging is disabled
- Compatible with existing Pino logging infrastructure
- Follows familiar pattern from `debug` npm package
### Negative
- Requires developers to know module names (mitigated by documentation)
- Not all services have adopted scoped loggers yet (gradual migration)
- Additional configuration complexity
## References
- [ADR-027: Application-Wide Structured Logging](ADR-027-application-wide-structured-logging.md)
- [ADR-017: Structured Logging with Pino](ADR-017-structured-logging-with-pino.md)
- [debug npm package](https://www.npmjs.com/package/debug) - Inspiration for namespace pattern
- [Pino Child Loggers](https://getpino.io/#/docs/child-loggers)

View File

@@ -0,0 +1,263 @@
# ADR-032: Application Performance Monitoring (APM)
**Date**: 2026-02-10
**Status**: Proposed
**Source**: Imported from flyer-crawler project (ADR-056)
**Related**: [ADR-029](ADR-029-error-tracking-with-bugsink.md) (Error Tracking with Bugsink)
## Context
Application Performance Monitoring (APM) provides visibility into application behavior through:
- **Distributed Tracing**: Track requests across services, queues, and database calls
- **Performance Metrics**: Response times, throughput, error rates
- **Resource Monitoring**: Memory usage, CPU, database connections
- **Transaction Analysis**: Identify slow endpoints and bottlenecks
While ADR-029 covers error tracking and observability, APM is a distinct concern focused on performance rather than errors. The Sentry SDK supports APM through its tracing features, but this capability is currently **intentionally disabled** in our application.
### Current State
The Sentry SDK is installed and configured for error tracking (see ADR-029), but APM features are disabled:
```typescript
// src/services/sentry.client.ts
Sentry.init({
dsn: config.sentry.dsn,
environment: config.sentry.environment,
// Performance monitoring - disabled for now to keep it simple
tracesSampleRate: 0,
// ...
});
```
```typescript
// src/services/sentry.server.ts
Sentry.init({
dsn: config.sentry.dsn,
environment: config.sentry.environment || config.server.nodeEnv,
// Performance monitoring - disabled for now to keep it simple
tracesSampleRate: 0,
// ...
});
```
### Why APM is Currently Disabled
1. **Complexity**: APM adds overhead and complexity to debugging
2. **Bugsink Limitations**: Bugsink's APM support is less mature than its error tracking
3. **Resource Overhead**: Tracing adds memory and CPU overhead
4. **Focus**: Error tracking provides more immediate value for our current scale
5. **Cost**: High sample rates can significantly increase storage requirements
## Decision
We propose a **staged approach** to APM implementation:
### Phase 1: Selective Backend Tracing (Low Priority)
Enable tracing for specific high-value operations:
```typescript
// Enable tracing for specific transactions only
Sentry.init({
dsn: config.sentry.dsn,
tracesSampleRate: 0, // Keep default at 0
// Trace only specific high-value transactions
tracesSampler: (samplingContext) => {
const transactionName = samplingContext.transactionContext?.name;
// Sample long-running jobs at a reduced rate
if (transactionName?.includes('job-processing')) {
return 0.1; // 10% sample rate
}
// Sample AI/external API calls at a higher rate
if (transactionName?.includes('external-api')) {
return 0.5; // 50% sample rate
}
// Honor the parent's sampling decision for child transactions
if (samplingContext.parentSampled) {
return 0.1; // 10% for child transactions
}
return 0; // Don't trace other transactions
},
});
```
### Phase 2: Custom Performance Metrics
Add custom metrics without full tracing overhead:
```typescript
// Custom metric for slow database queries
import { metrics } from '@sentry/node';
// In repository methods
const startTime = performance.now();
const result = await pool.query(sql, params);
const duration = performance.now() - startTime;
metrics.distribution('db.query.duration', duration, {
tags: { query_type: 'select', table: 'users' },
});
if (duration > 1000) {
logger.warn({ duration, sql }, 'Slow query detected');
}
```
### Phase 3: Full APM Integration (Future)
When/if full APM is needed:
```typescript
Sentry.init({
dsn: config.sentry.dsn,
tracesSampleRate: 0.1, // 10% of transactions
profilesSampleRate: 0.1, // 10% of traced transactions get profiled
integrations: [
// Database tracing
Sentry.postgresIntegration(),
// Redis tracing
Sentry.redisIntegration(),
// BullMQ job tracing (custom integration)
],
});
```
## Implementation Steps
### To Enable Basic APM
1. **Update Sentry Configuration**:
- Set `tracesSampleRate` > 0 in `src/services/sentry.server.ts`
- Set `tracesSampleRate` > 0 in `src/services/sentry.client.ts`
- Add environment variable `SENTRY_TRACES_SAMPLE_RATE` (default: 0)
2. **Add Instrumentation**:
- Enable automatic Express instrumentation
- Add manual spans for BullMQ job processing
- Add database query instrumentation
3. **Frontend Tracing**:
- Add Browser Tracing integration
- Configure page load and navigation tracing
4. **Environment Variables**:
```bash
SENTRY_TRACES_SAMPLE_RATE=0.1 # 10% sampling
SENTRY_PROFILES_SAMPLE_RATE=0 # Profiling disabled
```
5. **Bugsink Configuration**:
- Verify Bugsink supports performance data ingestion
- Configure retention policies for performance data
### Configuration Changes Required
```typescript
// src/config/env.ts - Add new config
sentry: {
dsn: env.SENTRY_DSN,
environment: env.SENTRY_ENVIRONMENT,
debug: env.SENTRY_DEBUG === 'true',
tracesSampleRate: parseFloat(env.SENTRY_TRACES_SAMPLE_RATE || '0'),
profilesSampleRate: parseFloat(env.SENTRY_PROFILES_SAMPLE_RATE || '0'),
},
```
```typescript
// src/services/sentry.server.ts - Updated init
Sentry.init({
dsn: config.sentry.dsn,
environment: config.sentry.environment,
tracesSampleRate: config.sentry.tracesSampleRate,
profilesSampleRate: config.sentry.profilesSampleRate,
// ... rest of config
});
```
## Trade-offs
### Enabling APM
**Benefits**:
- Identify performance bottlenecks
- Track distributed transactions across services
- Profile slow endpoints
- Monitor resource utilization trends
**Costs**:
- Increased memory usage (~5-15% overhead)
- Additional CPU for trace processing
- Increased storage in Bugsink/Sentry
- More complex debugging (noise in traces)
- Potential latency from tracing overhead
### Keeping APM Disabled
**Benefits**:
- Simpler operation and debugging
- Lower resource overhead
- Focused on error tracking (higher priority)
- No additional storage costs
**Costs**:
- No automated performance insights
- Manual profiling required for bottleneck detection
- Limited visibility into slow transactions
## Alternatives Considered
1. **OpenTelemetry**: More vendor-neutral, but adds another dependency and complexity
2. **Prometheus + Grafana**: Good for metrics, but doesn't provide distributed tracing
3. **Jaeger/Zipkin**: Purpose-built for tracing, but requires additional infrastructure
4. **New Relic/Datadog SaaS**: Full-featured but conflicts with self-hosted requirement
## Current Recommendation
**Keep APM disabled** (`tracesSampleRate: 0`) until:
1. Specific performance issues are identified that require tracing
2. Bugsink's APM support is verified and tested
3. Infrastructure can support the additional overhead
4. There is a clear business need for performance visibility
When enabling APM becomes necessary, start with Phase 1 (selective tracing) to minimize overhead while gaining targeted insights.
## Consequences
### Positive (When Implemented)
- Automated identification of slow endpoints
- Distributed trace visualization across async operations
- Correlation between errors and performance issues
- Proactive alerting on performance degradation
### Negative
- Additional infrastructure complexity
- Storage overhead for trace data
- Potential performance impact from tracing itself
- Learning curve for trace analysis
## References
- [Sentry Performance Monitoring](https://docs.sentry.io/product/performance/)
- [@sentry/node Performance](https://docs.sentry.io/platforms/javascript/guides/node/performance/)
- [@sentry/react Performance](https://docs.sentry.io/platforms/javascript/guides/react/performance/)
- [OpenTelemetry](https://opentelemetry.io/) (alternative approach)
- [ADR-029: Error Tracking with Bugsink](ADR-029-error-tracking-with-bugsink.md)

View File

@@ -0,0 +1,340 @@
# ADR-033: Bugsink to Gitea Issue Synchronization
**Date**: 2026-02-10
**Status**: Proposed
**Source**: Imported from flyer-crawler project (ADR-054)
**Related**: [ADR-029](ADR-029-error-tracking-with-bugsink.md), [ADR-012](ADR-012-bullmq-background-job-processing.md)
## Context
The application uses Bugsink (Sentry-compatible self-hosted error tracking) to capture runtime errors across multiple projects:
| Project Type | Environment | Description |
| -------------- | ------------ | ---------------------------------------- |
| Backend | Production | Main API server errors |
| Backend | Test/Staging | Pre-production API errors |
| Frontend | Production | Client-side JavaScript errors |
| Frontend | Test/Staging | Pre-production frontend errors |
| Infrastructure | Production | Infrastructure-level errors (Redis, PM2) |
| Infrastructure | Test/Staging | Pre-production infrastructure errors |
Currently, errors remain in Bugsink until manually reviewed. There is no automated workflow to:
1. Create trackable tickets for errors
2. Assign errors to developers
3. Track resolution progress
4. Prevent errors from being forgotten
## Decision
Implement an automated background worker that synchronizes unresolved Bugsink issues to Gitea as trackable tickets. The sync worker will:
1. **Run only on the test/staging server** (not production, not dev container)
2. **Poll all Bugsink projects** for unresolved issues
3. **Create Gitea issues** with full error context
4. **Mark synced issues as resolved** in Bugsink (to prevent re-polling)
5. **Track sync state in Redis** to ensure idempotency
### Why Test/Staging Only?
- The sync worker is a background service that needs API tokens for both Bugsink and Gitea
- Running on test/staging provides a single sync point without duplicating infrastructure
- All Bugsink projects (including production) are synced from this one worker
- Production server stays focused on serving users, not running sync jobs
## Architecture
### Component Overview
```
+-----------------------------------------------------------------------+
| TEST/STAGING SERVER |
| |
| +------------------+ +------------------+ +-------------------+ |
| | BullMQ Queue |--->| Sync Worker |--->| Redis DB 15 | |
| | bugsink-sync | | (15min repeat) | | Sync State | |
| +------------------+ +--------+---------+ +-------------------+ |
| | |
+-----------------------------------+------------------------------------+
|
+---------------+---------------+
v v
+------------------+ +------------------+
| Bugsink | | Gitea |
| (all projects) | | (1 repo) |
+------------------+ +------------------+
```
### Queue Configuration
| Setting | Value | Rationale |
| --------------- | ---------------------- | -------------------------------------------- |
| Queue Name | `bugsink-sync` | Follows existing naming pattern |
| Repeat Interval | 15 minutes | Balances responsiveness with API rate limits |
| Retry Attempts | 3 | Standard retry policy |
| Backoff | Exponential (30s base) | Handles temporary API failures |
| Concurrency | 1 | Serial processing prevents race conditions |
### Redis Database Allocation
| Database | Usage | Owner |
| -------- | ------------------- | --------------- |
| 0 | BullMQ (Production) | Existing queues |
| 1 | BullMQ (Test) | Existing queues |
| 2-14 | Reserved | Future use |
| 15 | Bugsink Sync State | This feature |
### Redis Key Schema
```
bugsink:synced:{bugsink_issue_id}
+-- Value: JSON {
gitea_issue_number: number,
synced_at: ISO timestamp,
project: string,
title: string
}
```
### Gitea Labels
The following labels should be created in the repository:
| Label | Color | Purpose |
| -------------------- | ------------------ | ---------------------------------- |
| `bug:frontend` | #e11d48 (Red) | Frontend JavaScript/React errors |
| `bug:backend` | #ea580c (Orange) | Backend Node.js/API errors |
| `bug:infrastructure` | #7c3aed (Purple) | Infrastructure errors (Redis, PM2) |
| `env:production` | #dc2626 (Dark Red) | Production environment |
| `env:test` | #2563eb (Blue) | Test/staging environment |
| `env:development` | #6b7280 (Gray) | Development environment |
| `source:bugsink` | #10b981 (Green) | Auto-synced from Bugsink |
### Label Mapping
| Bugsink Project Type | Bug Label | Env Label |
| --------------------- | ------------------ | -------------- |
| backend (prod) | bug:backend | env:production |
| backend (test) | bug:backend | env:test |
| frontend (prod) | bug:frontend | env:production |
| frontend (test) | bug:frontend | env:test |
| infrastructure (prod) | bug:infrastructure | env:production |
| infrastructure (test) | bug:infrastructure | env:test |
All synced issues also receive the `source:bugsink` label.
## Implementation Details
### New Files
| File | Purpose |
| -------------------------------------- | ------------------------------------------- |
| `src/services/bugsinkSync.server.ts` | Core synchronization logic |
| `src/services/bugsinkClient.server.ts` | HTTP client for Bugsink API |
| `src/services/giteaClient.server.ts` | HTTP client for Gitea API |
| `src/types/bugsink.ts` | TypeScript interfaces for Bugsink responses |
| `src/routes/admin/bugsink-sync.ts` | Admin endpoints for manual trigger |
### Modified Files
| File | Changes |
| -------------------------------- | ------------------------------------- |
| `src/services/queues.server.ts` | Add `bugsinkSyncQueue` definition |
| `src/services/workers.server.ts` | Add sync worker implementation |
| `src/config/env.ts` | Add bugsink sync configuration schema |
| `.env.example` | Document new environment variables |
### Environment Variables
```bash
# Bugsink Configuration
BUGSINK_URL=https://bugsink.example.com
BUGSINK_API_TOKEN=... # Created via Django management command
# Gitea Configuration
GITEA_URL=https://gitea.example.com
GITEA_API_TOKEN=... # Personal access token with repo scope
GITEA_OWNER=org-name
GITEA_REPO=project-repo
# Sync Control
BUGSINK_SYNC_ENABLED=false # Set true only in test environment
BUGSINK_SYNC_INTERVAL=15 # Minutes between sync runs
```
### Gitea Issue Template
```markdown
## Error Details
| Field | Value |
| ------------ | --------------- |
| **Type** | {error_type} |
| **Message** | {error_message} |
| **Platform** | {platform} |
| **Level** | {level} |
## Occurrence Statistics
- **First Seen**: {first_seen}
- **Last Seen**: {last_seen}
- **Total Occurrences**: {count}
## Request Context
- **URL**: {request_url}
- **Additional Context**: {context}
## Stacktrace
<details>
<summary>Click to expand</summary>
{stacktrace}
</details>
---
**Bugsink Issue**: {bugsink_url}
**Project**: {project_slug}
**Trace ID**: {trace_id}
```
### Sync Workflow
```
1. Worker triggered (every 15 min or manual)
2. For each Bugsink project:
a. List issues with status='unresolved'
b. For each issue:
i. Check Redis for existing sync record
ii. If already synced -> skip
iii. Fetch issue details + stacktrace
iv. Create Gitea issue with labels
v. Store sync record in Redis
vi. Mark issue as 'resolved' in Bugsink
3. Log summary (synced: N, skipped: N, failed: N)
```
### Idempotency Guarantees
1. **Redis check before creation**: Prevents duplicate Gitea issues
2. **Redis write immediately after Gitea create**: Minimizes (but does not fully eliminate) the window in which a crash between the two writes could produce a duplicate issue on the next poll
3. **Query only unresolved issues**: Resolved issues won't appear in polls
4. **No TTL on Redis keys**: Permanent sync history
## Admin Interface
### Manual Sync Endpoint
```
POST /api/admin/bugsink/sync
Authorization: Bearer {admin_jwt}
Response:
{
"success": true,
"data": {
"synced": 3,
"skipped": 12,
"failed": 0,
"duration_ms": 2340
}
}
```
### Sync Status Endpoint
```
GET /api/admin/bugsink/sync/status
Authorization: Bearer {admin_jwt}
Response:
{
"success": true,
"data": {
"enabled": true,
"last_run": "2026-01-17T10:30:00Z",
"next_run": "2026-01-17T10:45:00Z",
"total_synced": 47,
"projects": [
{ "slug": "backend-prod", "synced_count": 12 },
...
]
}
}
```
## Implementation Phases
### Phase 1: Core Infrastructure
- Add environment variables to `env.ts` schema
- Create `BugsinkClient` service (HTTP client)
- Create `GiteaClient` service (HTTP client)
- Add Redis db 15 connection for sync tracking
### Phase 2: Sync Logic
- Create `BugsinkSyncService` with sync logic
- Add `bugsink-sync` queue to `queues.server.ts`
- Add sync worker to `workers.server.ts`
- Create TypeScript types for API responses
### Phase 3: Integration
- Add admin endpoints for manual sync trigger
- Update CI/CD with new secrets
- Add secrets to repository settings
- Test end-to-end in staging environment
### Phase 4: Documentation
- Update CLAUDE.md with sync information
- Create operational runbook for sync issues
## Consequences
### Positive
1. **Visibility**: All application errors become trackable tickets
2. **Accountability**: Errors can be assigned to developers
3. **History**: Complete audit trail of when errors were discovered and resolved
4. **Integration**: Errors appear alongside feature work in Gitea
5. **Automation**: No manual error triage required
### Negative
1. **API Dependencies**: Requires both Bugsink and Gitea APIs to be available
2. **Token Management**: Additional secrets to manage in CI/CD
3. **Potential Noise**: High-frequency errors could create many tickets (mitigated by Bugsink's issue grouping)
4. **Single Point**: Sync only runs on test server (if test server is down, no sync occurs)
### Risks and Mitigations
| Risk | Mitigation |
| ----------------------- | ------------------------------------------------- |
| Bugsink API rate limits | 15-minute polling interval |
| Gitea API rate limits | Sequential processing with delays |
| Redis connection issues | Reuse existing connection patterns |
| Duplicate issues | Redis tracking + idempotent checks |
| Missing stacktrace | Graceful degradation (create issue without trace) |
## Future Enhancements
1. **Bi-directional sync**: Update Bugsink when Gitea issue is closed
2. **Smart deduplication**: Detect similar errors across projects
3. **Priority mapping**: High occurrence count -> high priority label
4. **Slack/Discord notifications**: Alert on new critical errors
5. **Metrics dashboard**: Track error trends over time
## References
- [ADR-012: BullMQ Background Job Processing](ADR-012-bullmq-background-job-processing.md)
- [ADR-029: Error Tracking with Bugsink](ADR-029-error-tracking-with-bugsink.md)
- [Bugsink API Documentation](https://bugsink.com/docs/api/)
- [Gitea API Documentation](https://docs.gitea.io/en-us/api-usage/)

View File

@@ -15,9 +15,10 @@ This document tracks the implementation status and estimated effort for all Arch
| Status | Count |
| ---------------------------- | ----- |
| Accepted (Fully Implemented) | 40 |
| Accepted (Fully Implemented) | 42 |
| Partially Implemented | 2 |
| Proposed (Not Started) | 14 |
| Proposed (Not Started) | 12 |
| Superseded | 1 |
---
@@ -34,13 +35,13 @@ This document tracks the implementation status and estimated effort for all Arch
### Category 2: Data Management
| ADR | Title | Status | Effort | Notes |
| --------------------------------------------------------------- | ------------------------ | -------- | ------ | ------------------------------ |
| [ADR-009](./0009-caching-strategy-for-read-heavy-operations.md) | Caching Strategy | Accepted | - | Fully implemented |
| [ADR-013](./0013-database-schema-migration-strategy.md) | Schema Migrations v1 | Proposed | M | Superseded by ADR-023 |
| [ADR-019](./0019-data-backup-and-recovery-strategy.md) | Backup & Recovery | Accepted | - | Fully implemented |
| [ADR-023](./0023-database-schema-migration-strategy.md) | Schema Migrations v2 | Proposed | L | Requires tooling setup |
| [ADR-031](./0031-data-retention-and-privacy-compliance.md) | Data Retention & Privacy | Proposed | XL | Legal/compliance review needed |
| ADR | Title | Status | Effort | Notes |
| --------------------------------------------------------------- | ------------------------ | ---------- | ------ | ------------------------------ |
| [ADR-009](./0009-caching-strategy-for-read-heavy-operations.md) | Caching Strategy | Accepted | - | Fully implemented |
| [ADR-013](./0013-database-schema-migration-strategy.md) | Schema Migrations v1 | Superseded | - | Superseded by ADR-023 |
| [ADR-019](./0019-data-backup-and-recovery-strategy.md) | Backup & Recovery | Accepted | - | Fully implemented |
| [ADR-023](./0023-database-schema-migration-strategy.md) | Schema Migrations v2 | Proposed | L | Requires tooling setup |
| [ADR-031](./0031-data-retention-and-privacy-compliance.md) | Data Retention & Privacy | Proposed | XL | Legal/compliance review needed |
### Category 3: API & Integration
@@ -77,16 +78,16 @@ This document tracks the implementation status and estimated effort for all Arch
### Category 6: Deployment & Operations
| ADR | Title | Status | Effort | Notes |
| -------------------------------------------------------------- | ------------------ | -------- | ------ | -------------------------- |
| [ADR-006](./0006-background-job-processing-and-task-queues.md) | Background Jobs | Accepted | - | Fully implemented |
| [ADR-014](./0014-containerization-and-deployment-strategy.md) | Containerization | Partial | M | Docker done, K8s pending |
| [ADR-017](./0017-ci-cd-and-branching-strategy.md) | CI/CD & Branching | Accepted | - | Fully implemented |
| [ADR-024](./0024-feature-flagging-strategy.md) | Feature Flags | Proposed | M | New service/library needed |
| [ADR-037](./0037-scheduled-jobs-and-cron-pattern.md) | Scheduled Jobs | Accepted | - | Fully implemented |
| [ADR-038](./0038-graceful-shutdown-pattern.md) | Graceful Shutdown | Accepted | - | Fully implemented |
| [ADR-053](./0053-worker-health-checks.md) | Worker Health | Accepted | - | Fully implemented |
| [ADR-054](./0054-bugsink-gitea-issue-sync.md) | Bugsink-Gitea Sync | Proposed | L | Automated issue creation |
| ADR | Title | Status | Effort | Notes |
| -------------------------------------------------------------- | ------------------ | -------- | ------ | ------------------------ |
| [ADR-006](./0006-background-job-processing-and-task-queues.md) | Background Jobs | Accepted | - | Fully implemented |
| [ADR-014](./0014-containerization-and-deployment-strategy.md) | Containerization | Partial | M | Docker done, K8s pending |
| [ADR-017](./0017-ci-cd-and-branching-strategy.md) | CI/CD & Branching | Accepted | - | Fully implemented |
| [ADR-024](./0024-feature-flagging-strategy.md) | Feature Flags | Accepted | - | Fully implemented |
| [ADR-037](./0037-scheduled-jobs-and-cron-pattern.md) | Scheduled Jobs | Accepted | - | Fully implemented |
| [ADR-038](./0038-graceful-shutdown-pattern.md) | Graceful Shutdown | Accepted | - | Fully implemented |
| [ADR-053](./0053-worker-health-checks.md) | Worker Health | Accepted | - | Fully implemented |
| [ADR-054](./0054-bugsink-gitea-issue-sync.md) | Bugsink-Gitea Sync | Proposed | L | Automated issue creation |
### Category 7: Frontend / User Interface
@@ -108,6 +109,7 @@ This document tracks the implementation status and estimated effort for all Arch
| [ADR-040](./0040-testing-economics-and-priorities.md) | Testing Economics | Accepted | - | Fully implemented |
| [ADR-045](./0045-test-data-factories-and-fixtures.md) | Test Data Factories | Accepted | - | Fully implemented |
| [ADR-047](./0047-project-file-and-folder-organization.md) | Project Organization | Proposed | XL | Major reorganization |
| [ADR-057](./0057-test-remediation-post-api-versioning.md) | Test Remediation | Accepted | - | Fully implemented |
### Category 9: Architecture Patterns
@@ -132,15 +134,14 @@ These ADRs are proposed or partially implemented, ordered by suggested implement
| Priority | ADR | Title | Status | Effort | Rationale |
| -------- | ------- | ------------------------ | -------- | ------ | ------------------------------------ |
| 1 | ADR-024 | Feature Flags | Proposed | M | Safer deployments, A/B testing |
| 2 | ADR-054 | Bugsink-Gitea Sync | Proposed | L | Automated issue tracking from errors |
| 3 | ADR-023 | Schema Migrations v2 | Proposed | L | Database evolution support |
| 4 | ADR-029 | Secret Rotation | Proposed | L | Security improvement |
| 5 | ADR-030 | Circuit Breaker | Proposed | L | Resilience improvement |
| 6 | ADR-056 | APM (Performance) | Proposed | M | Enable when performance issues arise |
| 7 | ADR-011 | Authorization & RBAC | Proposed | XL | Advanced permission system |
| 8 | ADR-025 | i18n & l10n | Proposed | XL | Multi-language support |
| 9 | ADR-031 | Data Retention & Privacy | Proposed | XL | Compliance requirements |
| 1 | ADR-054 | Bugsink-Gitea Sync | Proposed | L | Automated issue tracking from errors |
| 2 | ADR-023 | Schema Migrations v2 | Proposed | L | Database evolution support |
| 3 | ADR-029 | Secret Rotation | Proposed | L | Security improvement |
| 4 | ADR-030 | Circuit Breaker | Proposed | L | Resilience improvement |
| 5 | ADR-056 | APM (Performance) | Proposed | M | Enable when performance issues arise |
| 6 | ADR-011 | Authorization & RBAC | Proposed | XL | Advanced permission system |
| 7 | ADR-025 | i18n & l10n | Proposed | XL | Multi-language support |
| 8 | ADR-031 | Data Retention & Privacy | Proposed | XL | Compliance requirements |
---
@@ -148,6 +149,9 @@ These ADRs are proposed or partially implemented, ordered by suggested implement
| Date | ADR | Change |
| ---------- | ------- | ----------------------------------------------------------------------------------- |
| 2026-01-28 | ADR-024 | Fully implemented - Backend/frontend feature flags, 89 tests, admin endpoint |
| 2026-01-28 | ADR-057 | Created - Test remediation documentation for ADR-008 Phase 2 migration |
| 2026-01-28 | ADR-013 | Marked as Superseded by ADR-023 |
| 2026-01-27 | ADR-008 | Test path migration complete - 23 files, ~70 paths updated, 274->345 tests passing |
| 2026-01-27 | ADR-008 | Phase 2 Complete - Version router factory, deprecation headers, 82 versioning tests |
| 2026-01-26 | ADR-015 | Completed - Added Sentry user context in AuthProvider, now fully implemented |

View File

@@ -2,6 +2,8 @@
This directory contains a log of the architectural decisions made for the Flyer Crawler project.
**[Implementation Tracker](./adr-implementation-tracker.md)**: Track implementation status and effort estimates for all ADRs.
## 1. Foundational / Core Infrastructure
**[ADR-002](./0002-standardized-transaction-management.md)**: Standardized Transaction Management and Unit of Work Pattern (Accepted)
@@ -12,7 +14,7 @@ This directory contains a log of the architectural decisions made for the Flyer
## 2. Data Management
**[ADR-009](./0009-caching-strategy-for-read-heavy-operations.md)**: Caching Strategy for Read-Heavy Operations (Accepted)
**[ADR-013](./0013-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Proposed)
**[ADR-013](./0013-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Superseded by ADR-023)
**[ADR-019](./0019-data-backup-and-recovery-strategy.md)**: Data Backup and Recovery Strategy (Accepted)
**[ADR-023](./0023-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Proposed)
**[ADR-031](./0031-data-retention-and-privacy-compliance.md)**: Data Retention and Privacy Compliance (Proposed)
@@ -20,9 +22,9 @@ This directory contains a log of the architectural decisions made for the Flyer
## 3. API & Integration
**[ADR-003](./0003-standardized-input-validation-using-middleware.md)**: Standardized Input Validation using Middleware (Accepted)
**[ADR-008](./0008-api-versioning-strategy.md)**: API Versioning Strategy (Accepted - Phase 1 Complete)
**[ADR-018](./0018-api-documentation-strategy.md)**: API Documentation Strategy (Accepted)
**[ADR-022](./0022-real-time-notification-system.md)**: Real-time Notification System (Proposed)
**[ADR-008](./0008-api-versioning-strategy.md)**: API Versioning Strategy (Accepted - Phase 2 Complete)
**[ADR-018](./0018-api-documentation-strategy.md)**: API Documentation Strategy (Superseded - tsoa migration complete)
**[ADR-022](./0022-real-time-notification-system.md)**: Real-time Notification System (Accepted)
**[ADR-028](./0028-api-response-standardization.md)**: API Response Standardization and Envelope Pattern (Implemented)
## 4. Security & Compliance
@@ -33,12 +35,12 @@ This directory contains a log of the architectural decisions made for the Flyer
**[ADR-029](./0029-secret-rotation-and-key-management.md)**: Secret Rotation and Key Management Strategy (Proposed)
**[ADR-032](./0032-rate-limiting-strategy.md)**: Rate Limiting Strategy (Accepted)
**[ADR-033](./0033-file-upload-and-storage-strategy.md)**: File Upload and Storage Strategy (Accepted)
**[ADR-048](./0048-authentication-strategy.md)**: Authentication Strategy (Partially Implemented)
**[ADR-048](./0048-authentication-strategy.md)**: Authentication Strategy (Accepted)
## 5. Observability & Monitoring
**[ADR-004](./0004-standardized-application-wide-structured-logging.md)**: Standardized Application-Wide Structured Logging (Accepted)
**[ADR-015](./0015-error-tracking-and-observability.md)**: Error Tracking and Observability (Partial)
**[ADR-015](./0015-error-tracking-and-observability.md)**: Error Tracking and Observability (Accepted)
**[ADR-050](./0050-postgresql-function-observability.md)**: PostgreSQL Function Observability (Accepted)
**[ADR-051](./0051-asynchronous-context-propagation.md)**: Asynchronous Context Propagation (Accepted)
**[ADR-052](./0052-granular-debug-logging-strategy.md)**: Granular Debug Logging Strategy (Accepted)
@@ -52,7 +54,7 @@ This directory contains a log of the architectural decisions made for the Flyer
**[ADR-024](./0024-feature-flagging-strategy.md)**: Feature Flagging Strategy (Proposed)
**[ADR-037](./0037-scheduled-jobs-and-cron-pattern.md)**: Scheduled Jobs and Cron Pattern (Accepted)
**[ADR-038](./0038-graceful-shutdown-pattern.md)**: Graceful Shutdown Pattern (Accepted)
**[ADR-053](./0053-worker-health-checks-and-monitoring.md)**: Worker Health Checks and Monitoring (Proposed)
**[ADR-053](./0053-worker-health-checks.md)**: Worker Health Checks and Stalled Job Monitoring (Accepted)
**[ADR-054](./0054-bugsink-gitea-issue-sync.md)**: Bugsink to Gitea Issue Synchronization (Proposed)
## 7. Frontend / User Interface
@@ -71,6 +73,8 @@ This directory contains a log of the architectural decisions made for the Flyer
**[ADR-040](./0040-testing-economics-and-priorities.md)**: Testing Economics and Priorities (Accepted)
**[ADR-045](./0045-test-data-factories-and-fixtures.md)**: Test Data Factories and Fixtures (Accepted)
**[ADR-047](./0047-project-file-and-folder-organization.md)**: Project File and Folder Organization (Proposed)
**[ADR-057](./0057-test-remediation-post-api-versioning.md)**: Test Remediation Post-API Versioning (Accepted)
**[ADR-059](./0059-dependency-modernization.md)**: Dependency Modernization - tsoa Migration (Accepted)
## 9. Architecture Patterns

View File

@@ -1,10 +1,168 @@
# Database Setup
# Database Architecture
Flyer Crawler uses PostgreSQL with several extensions for full-text search, geographic data, and UUID generation.
**Version**: 0.12.20
**Last Updated**: 2026-01-28
Flyer Crawler uses PostgreSQL 16 with PostGIS for geographic data, pg_trgm for fuzzy text search, and uuid-ossp for UUID generation. The database contains 65 tables organized into logical domains.
## Table of Contents
1. [Schema Overview](#schema-overview)
2. [Database Setup](#database-setup)
3. [Schema Reference](#schema-reference)
4. [Related Documentation](#related-documentation)
---
## Required Extensions
## Schema Overview
The database is organized into the following domains:
### Core Infrastructure (6 tables)
| Table | Purpose | Primary Key |
| ----------------------- | ----------------------------------------- | ----------------- |
| `users` | Authentication credentials and login data | `user_id` (UUID) |
| `profiles` | Public user data, preferences, points | `user_id` (UUID) |
| `addresses` | Normalized address storage with geocoding | `address_id` |
| `activity_log` | User activity audit trail | `activity_log_id` |
| `password_reset_tokens` | Temporary tokens for password reset | `token_id` |
| `schema_info` | Schema deployment metadata | `environment` |
### Stores and Locations (4 tables)
| Table | Purpose | Primary Key |
| ------------------------ | --------------------------------------- | ------------------- |
| `stores` | Grocery store chains (Safeway, Kroger) | `store_id` |
| `store_locations` | Physical store locations with addresses | `store_location_id` |
| `favorite_stores` | User store favorites | `user_id, store_id` |
| `store_receipt_patterns` | Receipt text patterns for store ID | `pattern_id` |
### Flyers and Items (7 tables)
| Table | Purpose | Primary Key |
| ----------------------- | -------------------------------------- | ------------------------ |
| `flyers` | Uploaded flyer metadata and status | `flyer_id` |
| `flyer_items` | Individual deals extracted from flyers | `flyer_item_id` |
| `flyer_locations` | Flyer-to-location associations | `flyer_location_id` |
| `categories` | Item categorization (Produce, Dairy) | `category_id` |
| `master_grocery_items` | Canonical grocery item dictionary | `master_grocery_item_id` |
| `master_item_aliases` | Alternative names for master items | `alias_id` |
| `unmatched_flyer_items` | Items pending master item matching | `unmatched_item_id` |
### Products and Brands (2 tables)
| Table | Purpose | Primary Key |
| ---------- | ---------------------------------------------- | ------------ |
| `brands` | Brand names (Coca-Cola, Kraft) | `brand_id` |
| `products` | Specific products (master item + brand + size) | `product_id` |
### Price Tracking (3 tables)
| Table | Purpose | Primary Key |
| ----------------------- | ---------------------------------- | ------------------ |
| `item_price_history` | Historical prices for master items | `price_history_id` |
| `user_submitted_prices` | User-contributed price reports | `submission_id` |
| `suggested_corrections` | Suggested edits to flyer items | `correction_id` |
### User Features (8 tables)
| Table | Purpose | Primary Key |
| -------------------- | ------------------------------------ | --------------------------- |
| `user_watched_items` | Items user wants to track prices for | `user_watched_item_id` |
| `user_alerts` | Price alert thresholds | `alert_id` |
| `notifications` | User notifications | `notification_id` |
| `user_item_aliases` | User-defined item name aliases | `alias_id` |
| `user_follows` | User-to-user follow relationships | `follower_id, following_id` |
| `user_reactions` | Reactions to content (likes, etc.) | `reaction_id` |
| `budgets` | User-defined spending budgets | `budget_id` |
| `search_queries` | Search history for analytics | `query_id` |
### Shopping Lists (5 tables)
| Table | Purpose | Primary Key |
| ----------------------- | ------------------------ | ------------------------- |
| `shopping_lists` | User shopping lists | `shopping_list_id` |
| `shopping_list_items` | Items on shopping lists | `shopping_list_item_id` |
| `shared_shopping_lists` | Shopping list sharing | `shared_shopping_list_id` |
| `shopping_trips` | Completed shopping trips | `trip_id` |
| `shopping_trip_items` | Items purchased on trips | `trip_item_id` |
### Recipes (11 tables)
| Table | Purpose | Primary Key |
| --------------------------------- | -------------------------------- | ------------------------- |
| `recipes` | User recipes with metadata | `recipe_id` |
| `recipe_ingredients` | Recipe ingredient list | `recipe_ingredient_id` |
| `recipe_ingredient_substitutions` | Ingredient alternatives | `substitution_id` |
| `tags` | Recipe tags (vegan, quick, etc.) | `tag_id` |
| `recipe_tags` | Recipe-to-tag associations | `recipe_id, tag_id` |
| `appliances` | Kitchen appliances | `appliance_id` |
| `recipe_appliances` | Appliances needed for recipes | `recipe_id, appliance_id` |
| `recipe_ratings` | User ratings for recipes | `rating_id` |
| `recipe_comments` | User comments on recipes | `comment_id` |
| `favorite_recipes` | User recipe favorites | `user_id, recipe_id` |
| `recipe_collections` | User recipe collections | `collection_id` |
### Meal Planning (3 tables)
| Table | Purpose | Primary Key |
| ------------------- | -------------------------- | ----------------- |
| `menu_plans` | Weekly/monthly meal plans | `menu_plan_id` |
| `shared_menu_plans` | Menu plan sharing | `share_id` |
| `planned_meals` | Individual meals in a plan | `planned_meal_id` |
### Pantry and Inventory (5 tables)
| Table | Purpose | Primary Key |
| -------------------- | ------------------------------------ | ----------------- |
| `pantry_items` | User pantry inventory | `pantry_item_id` |
| `pantry_locations` | Storage locations (fridge, freezer) | `location_id` |
| `expiry_date_ranges` | Reference shelf life data | `expiry_range_id` |
| `expiry_alerts` | User expiry notification preferences | `expiry_alert_id` |
| `expiry_alert_log` | Sent expiry notifications | `alert_log_id` |
### Receipts (3 tables)
| Table | Purpose | Primary Key |
| ------------------------ | ----------------------------- | ----------------- |
| `receipts` | Scanned receipt metadata | `receipt_id` |
| `receipt_items` | Items parsed from receipts | `receipt_item_id` |
| `receipt_processing_log` | OCR/AI processing audit trail | `log_id` |
### UPC Scanning (2 tables)
| Table | Purpose | Primary Key |
| ---------------------- | ------------------------------- | ----------- |
| `upc_scan_history` | User barcode scan history | `scan_id` |
| `upc_external_lookups` | External UPC API response cache | `lookup_id` |
### Gamification (2 tables)
| Table | Purpose | Primary Key |
| ------------------- | ---------------------------- | ------------------------- |
| `achievements` | Defined achievements | `achievement_id` |
| `user_achievements` | Achievements earned by users | `user_id, achievement_id` |
### User Preferences (3 tables)
| Table | Purpose | Primary Key |
| --------------------------- | ---------------------------- | ------------------------- |
| `dietary_restrictions` | Defined dietary restrictions | `restriction_id` |
| `user_dietary_restrictions` | User dietary preferences | `user_id, restriction_id` |
| `user_appliances` | Appliances user owns | `user_id, appliance_id` |
### Reference Data (1 table)
| Table | Purpose | Primary Key |
| ------------------ | ----------------------- | --------------- |
| `unit_conversions` | Unit conversion factors | `conversion_id` |
---
## Database Setup
### Required Extensions
| Extension | Purpose |
| ----------- | ------------------------------------------- |
@@ -14,7 +172,7 @@ Flyer Crawler uses PostgreSQL with several extensions for full-text search, geog
---
## Database Users
### Database Users
This project uses **environment-specific database users** to isolate production and test environments:

View File

@@ -1,7 +1,7 @@
# Flyer Crawler - System Architecture Overview
**Version**: 0.12.5
**Last Updated**: 2026-01-22
**Version**: 0.12.20
**Last Updated**: 2026-01-28
**Platform**: Linux (Production and Development)
---
@@ -41,7 +41,7 @@
## System Architecture Diagram
```
```text
+-----------------------------------------------------------------------------------+
| CLIENT LAYER |
+-----------------------------------------------------------------------------------+
@@ -153,10 +153,10 @@
| Component | Technology | Version | Purpose |
| ---------------------- | ---------- | -------- | -------------------------------- |
| **Runtime** | Node.js | 22.x LTS | Server-side JavaScript runtime |
| **Language** | TypeScript | 5.9.x | Type-safe JavaScript superset |
| **Web Framework** | Express.js | 5.1.x | HTTP server and routing |
| **Frontend Framework** | React | 19.2.x | UI component library |
| **Build Tool** | Vite | 7.2.x | Frontend bundling and dev server |
| **Language** | TypeScript | 5.9.3 | Type-safe JavaScript superset |
| **Web Framework** | Express.js | 5.1.0 | HTTP server and routing |
| **Frontend Framework** | React | 19.2.0 | UI component library |
| **Build Tool** | Vite | 7.2.4 | Frontend bundling and dev server |
### Data Storage
@@ -176,23 +176,23 @@
| **OAuth** | Google, GitHub | Social authentication |
| **Email** | Nodemailer (SMTP) | Transactional emails |
### Background Processing
### Background Processing Stack
| Component | Technology | Version | Purpose |
| ------------------- | ---------- | ------- | --------------------------------- |
| **Job Queues** | BullMQ | 5.65.x | Reliable async job processing |
| **Job Queues** | BullMQ | 5.65.1 | Reliable async job processing |
| **Process Manager** | PM2 | Latest | Process management and clustering |
| **Scheduler** | node-cron | 4.2.x | Scheduled tasks |
| **Scheduler** | node-cron | 4.2.1 | Scheduled tasks |
### Frontend Stack
| Component | Technology | Version | Purpose |
| -------------------- | -------------- | ------- | ---------------------------------------- |
| **State Management** | TanStack Query | 5.90.x | Server state caching and synchronization |
| **Routing** | React Router | 7.9.x | Client-side routing |
| **Styling** | Tailwind CSS | 4.1.x | Utility-first CSS framework |
| **Icons** | Lucide React | 0.555.x | Icon components |
| **Charts** | Recharts | 3.4.x | Data visualization |
| **State Management** | TanStack Query | 5.90.12 | Server state caching and synchronization |
| **Routing** | React Router | 7.9.6 | Client-side routing |
| **Styling** | Tailwind CSS | 4.1.17 | Utility-first CSS framework |
| **Icons** | Lucide React | 0.555.0 | Icon components |
| **Charts** | Recharts | 3.4.1 | Data visualization |
### Observability and Quality
@@ -221,7 +221,7 @@ The frontend is a single-page application (SPA) built with React 19 and Vite.
**Directory Structure**:
```
```text
src/
+-- components/ # Reusable UI components
+-- contexts/ # React context providers
@@ -244,17 +244,30 @@ The backend is a RESTful API server built with Express.js 5.
- Structured logging with Pino
- Standardized error handling (ADR-001)
**API Route Modules**:
| Route | Purpose |
|-------|---------|
| `/api/auth` | Authentication (login, register, OAuth) |
| `/api/users` | User profile management |
| `/api/flyers` | Flyer CRUD and processing |
| `/api/recipes` | Recipe management |
| `/api/deals` | Best prices and deal discovery |
| `/api/stores` | Store management |
| `/api/admin` | Administrative functions |
| `/api/health` | Health checks and monitoring |
**API Route Modules** (all versioned under `/api/v1/*`):
| Route | Purpose |
| ------------------------- | ----------------------------------------------- |
| `/api/v1/auth` | Authentication (login, register, OAuth) |
| `/api/v1/health` | Health checks and monitoring |
| `/api/v1/system` | System administration (PM2 status, server info) |
| `/api/v1/users` | User profile management |
| `/api/v1/ai` | AI-powered features and flyer processing |
| `/api/v1/admin` | Administrative functions |
| `/api/v1/budgets` | Budget management and spending analysis |
| `/api/v1/achievements` | Gamification and achievement system |
| `/api/v1/flyers` | Flyer CRUD and processing |
| `/api/v1/recipes` | Recipe management and recommendations |
| `/api/v1/personalization` | Master items and user preferences |
| `/api/v1/price-history` | Price tracking and trend analysis |
| `/api/v1/stats` | Public statistics and analytics |
| `/api/v1/upc` | UPC barcode scanning and product lookup |
| `/api/v1/inventory` | Inventory and expiry tracking |
| `/api/v1/receipts` | Receipt scanning and purchase history |
| `/api/v1/deals` | Best prices and deal discovery |
| `/api/v1/reactions` | Social features (reactions, sharing) |
| `/api/v1/stores` | Store management and location services |
| `/api/v1/categories` | Category browsing and product categorization |
### Database (PostgreSQL/PostGIS)
@@ -331,7 +344,7 @@ BullMQ workers handle asynchronous processing tasks. PM2 manages both the API se
### Flyer Processing Pipeline
```
```text
+-------------+ +----------------+ +------------------+ +---------------+
| User | | Express | | BullMQ | | PostgreSQL |
| Upload +---->+ Route +---->+ Queue +---->+ Storage |
@@ -395,7 +408,7 @@ BullMQ workers handle asynchronous processing tasks. PM2 manages both the API se
The application follows a strict layered architecture as defined in ADR-035.
```
```text
+-----------------------------------------------------------------------+
| ROUTES LAYER |
| Responsibilities: |
@@ -458,7 +471,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### Entity Relationship Overview
```
```text
+------------------+ +------------------+ +------------------+
| users | | profiles | | addresses |
|------------------| |------------------| |------------------|
@@ -537,7 +550,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### JWT Token Architecture
```
```text
+-------------------+ +-------------------+ +-------------------+
| Login Request | | Server | | Database |
| (email/pass) +---->+ Validates +---->+ Verify User |
@@ -576,7 +589,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### Protected Route Flow
```
```text
+-------------------+ +-------------------+ +-------------------+
| API Request | | requireAuth | | JWT Strategy |
| + Bearer Token +---->+ Middleware +---->+ Validate |
@@ -603,7 +616,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### Worker Architecture
```
```text
+-------------------+ +-------------------+ +-------------------+
| API Server | | Redis | | Worker Process |
| (Queue Producer)| | (Job Storage) | | (Consumer) |
@@ -635,7 +648,7 @@ The application follows a strict layered architecture as defined in ADR-035.
Jobs use exponential backoff for retries:
```
```text
Attempt 1: Immediate
Attempt 2: Initial delay (e.g., 5 seconds)
Attempt 3: 2x delay (e.g., 10 seconds)
@@ -658,7 +671,7 @@ Attempt 4: 4x delay (e.g., 20 seconds)
### Environment Overview
```
```text
+-----------------------------------------------------------------------------------+
| DEVELOPMENT |
+-----------------------------------------------------------------------------------+
@@ -710,7 +723,7 @@ Attempt 4: 4x delay (e.g., 20 seconds)
### Deployment Pipeline (ADR-017)
```
```text
+------------+ +------------+ +------------+ +------------+
| Push to | | Gitea | | Build & | | Deploy |
| main +---->+ Actions +---->+ Test +---->+ to Prod |
@@ -839,22 +852,55 @@ The system architecture is governed by Architecture Decision Records (ADRs). Key
| File | Purpose |
| ----------------------------------------------- | --------------------------------------- |
| `src/services/flyerProcessingService.server.ts` | Flyer processing pipeline orchestration |
| `src/services/flyerAiProcessor.server.ts` | AI extraction for flyers |
| `src/services/aiService.server.ts` | Google Gemini AI integration |
| `src/services/cacheService.server.ts` | Redis caching abstraction |
| `src/services/emailService.server.ts` | Email sending |
| `src/services/queues.server.ts` | BullMQ queue definitions |
| `src/services/queueService.server.ts` | Queue management and scheduling |
| `src/services/workers.server.ts` | BullMQ worker definitions |
| `src/services/websocketService.server.ts` | Real-time WebSocket notifications |
| `src/services/receiptService.server.ts` | Receipt scanning and OCR |
| `src/services/upcService.server.ts` | UPC barcode lookup |
| `src/services/expiryService.server.ts` | Pantry expiry tracking |
| `src/services/geocodingService.server.ts` | Address geocoding |
| `src/services/analyticsService.server.ts` | Analytics and reporting |
| `src/services/monitoringService.server.ts` | Health monitoring |
| `src/services/barcodeService.server.ts` | Barcode detection |
| `src/services/logger.server.ts` | Structured logging (Pino) |
| `src/services/redis.server.ts` | Redis connection management |
| `src/services/sentry.server.ts` | Error tracking (Sentry/Bugsink) |
### Database Files
| File | Purpose |
| ---------------------------------- | -------------------------------------------- |
| `src/services/db/connection.db.ts` | Database pool and transaction management |
| `src/services/db/errors.db.ts` | Database error types |
| `src/services/db/user.db.ts` | User repository |
| `src/services/db/flyer.db.ts` | Flyer repository |
| `sql/master_schema_rollup.sql` | Complete database schema (for test DB setup) |
| `sql/initial_schema.sql` | Fresh installation schema |
| File | Purpose |
| --------------------------------------- | -------------------------------------------- |
| `src/services/db/connection.db.ts` | Database pool and transaction management |
| `src/services/db/errors.db.ts` | Database error types |
| `src/services/db/index.db.ts` | Repository exports |
| `src/services/db/user.db.ts` | User repository |
| `src/services/db/flyer.db.ts` | Flyer repository |
| `src/services/db/store.db.ts` | Store repository |
| `src/services/db/storeLocation.db.ts` | Store location repository |
| `src/services/db/recipe.db.ts` | Recipe repository |
| `src/services/db/category.db.ts` | Category repository |
| `src/services/db/personalization.db.ts` | Master items and personalization |
| `src/services/db/shopping.db.ts` | Shopping lists repository |
| `src/services/db/deals.db.ts` | Deals and best prices repository |
| `src/services/db/price.db.ts` | Price history repository |
| `src/services/db/receipt.db.ts` | Receipt repository |
| `src/services/db/upc.db.ts` | UPC scan history repository |
| `src/services/db/expiry.db.ts` | Expiry tracking repository |
| `src/services/db/gamification.db.ts` | Achievements repository |
| `src/services/db/budget.db.ts` | Budget repository |
| `src/services/db/reaction.db.ts` | User reactions repository |
| `src/services/db/notification.db.ts` | Notifications repository |
| `src/services/db/address.db.ts` | Address repository |
| `src/services/db/admin.db.ts` | Admin operations repository |
| `src/services/db/conversion.db.ts` | Unit conversion repository |
| `src/services/db/flyerLocation.db.ts` | Flyer locations repository |
| `sql/master_schema_rollup.sql` | Complete database schema (for test DB setup) |
| `sql/initial_schema.sql` | Fresh installation schema |
### Type Definitions

View File

@@ -2,8 +2,26 @@
Common code patterns extracted from Architecture Decision Records (ADRs). Use these as templates when writing new code.
## Quick Reference
| Pattern | Key Function/Class | Import From |
| -------------------- | ------------------------------------------------- | ------------------------------------- |
| **tsoa Controllers** | `BaseController`, `@Route`, `@Security` | `src/controllers/base.controller.ts` |
| Error Handling | `handleDbError()`, `NotFoundError` | `src/services/db/errors.db.ts` |
| Repository Methods | `get*`, `find*`, `list*` | `src/services/db/*.db.ts` |
| API Responses | `sendSuccess()`, `sendPaginated()`, `sendError()` | `src/utils/apiResponse.ts` |
| Transactions | `withTransaction()` | `src/services/db/connection.db.ts` |
| Validation | `validateRequest()` | `src/middleware/validation.ts` |
| Authentication | `authenticateJWT`, `@Security('bearerAuth')` | `src/middleware/auth.ts` |
| Caching | `cacheService` | `src/services/cache.server.ts` |
| Background Jobs | Queue classes | `src/services/queues.server.ts` |
| Feature Flags | `isFeatureEnabled()`, `useFeatureFlag()` | `src/services/featureFlags.server.ts` |
---
## Table of Contents
- [tsoa Controllers](#tsoa-controllers)
- [Error Handling](#error-handling)
- [Repository Patterns](#repository-patterns)
- [API Response Patterns](#api-response-patterns)
@@ -12,12 +30,166 @@ Common code patterns extracted from Architecture Decision Records (ADRs). Use th
- [Authentication](#authentication)
- [Caching](#caching)
- [Background Jobs](#background-jobs)
- [Feature Flags](#feature-flags)
---
## tsoa Controllers
**ADR**: [ADR-018](../adr/0018-api-documentation-strategy.md), [ADR-059](../adr/0059-dependency-modernization.md)
All API endpoints are implemented as tsoa controller classes that extend `BaseController`. This pattern provides type-safe OpenAPI documentation generation and standardized response formatting.
### Basic Controller Structure
```typescript
import {
Route,
Tags,
Get,
Post,
Body,
Path,
Query,
Security,
Request,
SuccessResponse,
Response,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import {
BaseController,
SuccessResponse as SuccessResponseType,
ErrorResponse,
} from './base.controller';
interface CreateItemRequest {
name: string;
description?: string;
}
interface ItemResponse {
id: number;
name: string;
created_at: string;
}
@Route('items')
@Tags('Items')
export class ItemController extends BaseController {
/**
* Get an item by ID.
* @summary Get item
* @param id Item ID
*/
@Get('{id}')
@SuccessResponse(200, 'Item retrieved')
@Response<ErrorResponse>(404, 'Item not found')
public async getItem(@Path() id: number): Promise<SuccessResponseType<ItemResponse>> {
const item = await itemService.getItemById(id);
return this.success(item);
}
/**
* Create a new item. Requires authentication.
* @summary Create item
*/
@Post()
@Security('bearerAuth')
@SuccessResponse(201, 'Item created')
@Response<ErrorResponse>(401, 'Not authenticated')
public async createItem(
@Body() body: CreateItemRequest,
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<ItemResponse>> {
const user = request.user as UserProfile;
const item = await itemService.createItem(body, user.user.user_id);
return this.created(item);
}
}
```
### BaseController Response Helpers
```typescript
// Success response (200)
return this.success(data);
// Created response (201)
return this.created(data);
// Paginated response
const { page, limit } = this.normalizePagination(queryPage, queryLimit);
return this.paginated(items, { page, limit, total });
// Message-only response
return this.message('Operation completed');
// No content (204)
return this.noContent();
```
### Authentication with @Security
```typescript
import { Security, Request } from 'tsoa';
import { requireAdminRole } from '../middleware/tsoaAuthentication';
// Require authentication
@Get('profile')
@Security('bearerAuth')
public async getProfile(@Request() req: ExpressRequest): Promise<...> {
const user = req.user as UserProfile;
return this.success(user);
}
// Require admin role
@Delete('users/{id}')
@Security('bearerAuth')
public async deleteUser(@Path() id: string, @Request() req: ExpressRequest): Promise<void> {
requireAdminRole(req.user as UserProfile);
await userService.deleteUser(id);
return this.noContent();
}
```
### Error Handling in Controllers
```typescript
import { NotFoundError, ValidationError, ForbiddenError } from './base.controller';
// Throw errors - they're handled by the global error handler
throw new NotFoundError('Item', id); // 404
throw new ValidationError([], 'Invalid'); // 400
throw new ForbiddenError('Admin only'); // 403
```
### Rate Limiting
```typescript
import { Middlewares } from 'tsoa';
import { loginLimiter } from '../config/rateLimiters';
@Post('login')
@Middlewares(loginLimiter)
@Response<ErrorResponse>(429, 'Too many attempts')
public async login(@Body() body: LoginRequest): Promise<...> { ... }
```
### Regenerating Routes
After modifying controllers, regenerate the tsoa routes:
```bash
npm run tsoa:spec && npm run tsoa:routes
```
**Full Guide**: See [TSOA-MIGRATION-GUIDE.md](./TSOA-MIGRATION-GUIDE.md) for comprehensive documentation.
---
## Error Handling
**ADR**: [ADR-001](../adr/0001-standardized-error-handling-for-database-operations.md)
**ADR**: [ADR-001](../adr/0001-standardized-error-handling.md)
### Repository Layer Error Handling
@@ -78,7 +250,7 @@ throw new DatabaseError('Failed to insert flyer', originalError);
## Repository Patterns
**ADR**: [ADR-034](../adr/0034-repository-layer-method-naming-conventions.md)
**ADR**: [ADR-034](../adr/0034-repository-pattern-standards.md)
### Method Naming Conventions
@@ -155,16 +327,17 @@ export async function listActiveFlyers(client?: PoolClient): Promise<Flyer[]> {
## API Response Patterns
**ADR**: [ADR-028](../adr/0028-consistent-api-response-format.md)
**ADR**: [ADR-028](../adr/0028-api-response-standardization.md)
### Success Response
```typescript
import { sendSuccess } from '../utils/apiResponse';
app.post('/api/flyers', async (req, res) => {
app.post('/api/v1/flyers', async (req, res) => {
const flyer = await flyerService.createFlyer(req.body);
return sendSuccess(res, flyer, 'Flyer created successfully', 201);
// sendSuccess(res, data, statusCode?, meta?)
return sendSuccess(res, flyer, 201);
});
```
@@ -173,30 +346,32 @@ app.post('/api/flyers', async (req, res) => {
```typescript
import { sendPaginated } from '../utils/apiResponse';
app.get('/api/flyers', async (req, res) => {
const { page = 1, pageSize = 20 } = req.query;
const { items, total } = await flyerService.listFlyers(page, pageSize);
app.get('/api/v1/flyers', async (req, res) => {
const page = parseInt(req.query.page as string) || 1;
const limit = parseInt(req.query.limit as string) || 20;
const { items, total } = await flyerService.listFlyers(page, limit);
return sendPaginated(res, {
items,
total,
page: parseInt(page),
pageSize: parseInt(pageSize),
});
// sendPaginated(res, data[], { page, limit, total }, meta?)
return sendPaginated(res, items, { page, limit, total });
});
```
### Error Response
```typescript
import { sendError } from '../utils/apiResponse';
import { sendError, sendSuccess, ErrorCode } from '../utils/apiResponse';
app.get('/api/flyers/:id', async (req, res) => {
app.get('/api/v1/flyers/:id', async (req, res) => {
try {
const flyer = await flyerDb.getFlyerById(parseInt(req.params.id));
return sendSuccess(res, flyer);
} catch (error) {
return sendError(res, error); // Automatically maps error to correct status
// sendError(res, code, message, statusCode?, details?, meta?)
if (error instanceof NotFoundError) {
return sendError(res, ErrorCode.NOT_FOUND, error.message, 404);
}
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
return sendError(res, ErrorCode.INTERNAL_ERROR, 'An error occurred', 500);
}
});
```
@@ -205,12 +380,12 @@ app.get('/api/flyers/:id', async (req, res) => {
## Transaction Management
**ADR**: [ADR-002](../adr/0002-transaction-management-pattern.md)
**ADR**: [ADR-002](../adr/0002-standardized-transaction-management.md)
### Basic Transaction
```typescript
import { withTransaction } from '../services/db/transaction.db';
import { withTransaction } from '../services/db/connection.db';
export async function createFlyerWithItems(
flyerData: FlyerInput,
@@ -262,7 +437,7 @@ export async function bulkImportFlyers(flyersData: FlyerInput[]): Promise<Import
## Input Validation
**ADR**: [ADR-003](../adr/0003-input-validation-framework.md)
**ADR**: [ADR-003](../adr/0003-standardized-input-validation-using-middleware.md)
### Zod Schema Definition
@@ -298,10 +473,10 @@ export type CreateFlyerInput = z.infer<typeof createFlyerSchema>;
import { validateRequest } from '../middleware/validation';
import { createFlyerSchema } from '../schemas/flyer.schemas';
app.post('/api/flyers', validateRequest(createFlyerSchema), async (req, res) => {
app.post('/api/v1/flyers', validateRequest(createFlyerSchema), async (req, res) => {
// req.body is now type-safe and validated
const flyer = await flyerService.createFlyer(req.body);
return sendSuccess(res, flyer, 'Flyer created successfully', 201);
return sendSuccess(res, flyer, 201);
});
```
@@ -331,7 +506,7 @@ export async function processFlyer(data: unknown): Promise<Flyer> {
import { authenticateJWT } from '../middleware/auth';
app.get(
'/api/profile',
'/api/v1/profile',
authenticateJWT, // Middleware adds req.user
async (req, res) => {
// req.user is guaranteed to exist
@@ -347,7 +522,7 @@ app.get(
import { optionalAuth } from '../middleware/auth';
app.get(
'/api/flyers',
'/api/v1/flyers',
optionalAuth, // req.user may or may not exist
async (req, res) => {
const flyers = req.user
@@ -374,7 +549,7 @@ export function generateToken(user: User): string {
## Caching
**ADR**: [ADR-029](../adr/0029-redis-caching-strategy.md)
**ADR**: [ADR-009](../adr/0009-caching-strategy-for-read-heavy-operations.md)
### Cache Pattern
@@ -414,7 +589,7 @@ export async function updateFlyer(id: number, data: UpdateFlyerInput): Promise<F
## Background Jobs
**ADR**: [ADR-036](../adr/0036-background-job-processing-architecture.md)
**ADR**: [ADR-006](../adr/0006-background-job-processing-and-task-queues.md)
### Queue Job
@@ -473,6 +648,153 @@ const flyerWorker = new Worker(
---
## Feature Flags
**ADR**: [ADR-024](../adr/0024-feature-flagging-strategy.md)
Feature flags enable controlled feature rollout, A/B testing, and quick production disablement without redeployment. All flags default to `false` (opt-in model).
### Backend Usage
```typescript
import { isFeatureEnabled, getFeatureFlags } from '../services/featureFlags.server';
// Check a specific flag in route handler
router.get('/dashboard', async (req, res) => {
if (isFeatureEnabled('newDashboard')) {
return sendSuccess(res, { version: 'v2', data: await getNewDashboardData() });
}
return sendSuccess(res, { version: 'v1', data: await getLegacyDashboardData() });
});
// Check flag in service layer
function processFlyer(flyer: Flyer): ProcessedFlyer {
if (isFeatureEnabled('experimentalAi')) {
return processWithExperimentalAi(flyer);
}
return processWithStandardAi(flyer);
}
// Get all flags (admin endpoint)
router.get('/admin/feature-flags', requireAdmin, async (req, res) => {
sendSuccess(res, { flags: getFeatureFlags() });
});
```
### Frontend Usage
```tsx
import { useFeatureFlag, useAllFeatureFlags } from '../hooks/useFeatureFlag';
import { FeatureFlag } from '../components/FeatureFlag';
// Hook approach - for logic beyond rendering
function Dashboard() {
const isNewDashboard = useFeatureFlag('newDashboard');
useEffect(() => {
if (isNewDashboard) {
analytics.track('new_dashboard_viewed');
}
}, [isNewDashboard]);
return isNewDashboard ? <NewDashboard /> : <LegacyDashboard />;
}
// Declarative component approach
function App() {
return (
<FeatureFlag feature="newDashboard" fallback={<LegacyDashboard />}>
<NewDashboard />
</FeatureFlag>
);
}
// Debug panel showing all flags
function DebugPanel() {
const flags = useAllFeatureFlags();
return (
<ul>
{Object.entries(flags).map(([name, enabled]) => (
<li key={name}>
{name}: {enabled ? 'ON' : 'OFF'}
</li>
))}
</ul>
);
}
```
### Adding a New Flag
1. **Backend** (`src/config/env.ts`):
```typescript
// In featureFlagsSchema
myNewFeature: booleanString(false), // FEATURE_MY_NEW_FEATURE
// In loadEnvVars()
myNewFeature: process.env.FEATURE_MY_NEW_FEATURE,
```
2. **Frontend** (`src/config.ts` and `src/vite-env.d.ts`):
```typescript
// In config.ts featureFlags section
myNewFeature: import.meta.env.VITE_FEATURE_MY_NEW_FEATURE === 'true',
// In vite-env.d.ts
readonly VITE_FEATURE_MY_NEW_FEATURE?: string;
```
3. **Environment** (`.env.example`):
```bash
# FEATURE_MY_NEW_FEATURE=false
# VITE_FEATURE_MY_NEW_FEATURE=false
```
### Testing Feature Flags
```typescript
// Backend - reset modules to test different states
beforeEach(() => {
vi.resetModules();
process.env.FEATURE_NEW_DASHBOARD = 'true';
});
// Frontend - mock config module
vi.mock('../config', () => ({
default: {
featureFlags: {
newDashboard: true,
betaRecipes: false,
},
},
}));
```
### Flag Lifecycle
| Phase | Actions |
| ---------- | -------------------------------------------------------------- |
| **Add** | Add to schemas (backend + frontend), default `false`, document |
| **Enable** | Set env var `='true'`, restart application |
| **Remove** | Remove conditional code, remove from schemas, remove env vars |
| **Sunset** | Max 3 months after full rollout - remove flag |
### Current Flags
| Flag | Backend Env Var | Frontend Env Var | Purpose |
| ---------------- | ------------------------- | ------------------------------ | ------------------------ |
| `bugsinkSync` | `FEATURE_BUGSINK_SYNC` | `VITE_FEATURE_BUGSINK_SYNC` | Bugsink error sync |
| `advancedRbac` | `FEATURE_ADVANCED_RBAC` | `VITE_FEATURE_ADVANCED_RBAC` | Advanced RBAC features |
| `newDashboard` | `FEATURE_NEW_DASHBOARD` | `VITE_FEATURE_NEW_DASHBOARD` | New dashboard experience |
| `betaRecipes` | `FEATURE_BETA_RECIPES` | `VITE_FEATURE_BETA_RECIPES` | Beta recipe features |
| `experimentalAi` | `FEATURE_EXPERIMENTAL_AI` | `VITE_FEATURE_EXPERIMENTAL_AI` | Experimental AI features |
| `debugMode` | `FEATURE_DEBUG_MODE` | `VITE_FEATURE_DEBUG_MODE` | Debug mode |
---
## Related Documentation
- [ADR Index](../adr/index.md) - All architecture decision records

View File

@@ -229,7 +229,7 @@ SELECT * FROM flyers WHERE store_id = 1;
- Add missing indexes
- Optimize WHERE clauses
- Use connection pooling
- See [ADR-034](../adr/0034-repository-layer-method-naming-conventions.md)
- See [ADR-034](../adr/0034-repository-pattern-standards.md)
---
@@ -237,7 +237,7 @@ SELECT * FROM flyers WHERE store_id = 1;
### Tests Pass on Windows, Fail in Container
**Cause**: Platform-specific behavior (ADR-014)
**Cause**: Platform-specific behavior ([ADR-014](../adr/0014-containerization-and-deployment-strategy.md))
**Rule**: Container results are authoritative. Windows results are unreliable.

View File

@@ -93,7 +93,7 @@ When the container starts (`scripts/dev-entrypoint.sh`):
PM2 manages three processes in the dev container:
```
```text
+--------------------+ +------------------------+ +--------------------+
| flyer-crawler- | | flyer-crawler- | | flyer-crawler- |
| api-dev | | worker-dev | | vite-dev |
@@ -404,5 +404,5 @@ podman exec -it flyer-crawler-dev pm2 restart flyer-crawler-api-dev
- [DEBUGGING.md](DEBUGGING.md) - Debugging strategies
- [LOGSTASH-QUICK-REF.md](../operations/LOGSTASH-QUICK-REF.md) - Logstash quick reference
- [DEV-CONTAINER-BUGSINK.md](../DEV-CONTAINER-BUGSINK.md) - Bugsink setup in dev container
- [ADR-014](../adr/0014-linux-only-platform.md) - Linux-only platform decision
- [ADR-050](../adr/0050-postgresql-function-observability.md) - PostgreSQL function observability
- [ADR-014](../adr/0014-containerization-and-deployment-strategy.md) - Containerization and deployment strategy
- [ADR-050](../adr/0050-postgresql-function-observability.md) - PostgreSQL function observability (includes log aggregation)

View File

@@ -147,6 +147,7 @@ When creating new route handlers:
## Related Documentation
- [ADR-008: API Versioning Strategy](../adr/0008-api-versioning-strategy.md) - Versioning implementation details
- [ADR-057: Test Remediation Post-API Versioning](../adr/0057-test-remediation-post-api-versioning.md) - Comprehensive remediation guide
- [ADR-004: Structured Logging](../adr/0004-standardized-application-wide-structured-logging.md) - Logging standards
- [CODE-PATTERNS.md](CODE-PATTERNS.md) - General code patterns
- [TESTING.md](TESTING.md) - Testing guidelines

View File

@@ -1,5 +1,19 @@
# Testing Guide
## Quick Reference
| Command | Purpose |
| ------------------------------------------------------------ | ---------------------------- |
| `podman exec -it flyer-crawler-dev npm test` | Run all tests |
| `podman exec -it flyer-crawler-dev npm run test:unit` | Unit tests (~2900) |
| `podman exec -it flyer-crawler-dev npm run test:integration` | Integration tests (28 files) |
| `podman exec -it flyer-crawler-dev npm run test:e2e` | E2E tests (11 files) |
| `podman exec -it flyer-crawler-dev npm run type-check` | TypeScript check |
**Critical**: Always run tests in the dev container. Windows results are unreliable.
---
## Overview
This project has comprehensive test coverage including unit tests, integration tests, and E2E tests. All tests must be run in the **Linux dev container environment** for reliable results.
@@ -76,7 +90,7 @@ To verify type-check is working correctly:
Example error output:
```
```text
src/pages/MyDealsPage.tsx:68:31 - error TS2339: Property 'store_name' does not exist on type 'WatchedItemDeal'.
68 <span>{deal.store_name}</span>
@@ -113,15 +127,26 @@ Located throughout `src/` directory alongside source files with `.test.ts` or `.
npm run test:unit
```
### Integration Tests (5 test files)
### Integration Tests (28 test files)
Located in `src/tests/integration/`:
Located in `src/tests/integration/`. Key test files include:
- `admin.integration.test.ts`
- `flyer.integration.test.ts`
- `price.integration.test.ts`
- `public.routes.integration.test.ts`
- `receipt.integration.test.ts`
| Test File | Domain |
| -------------------------------------- | -------------------------- |
| `admin.integration.test.ts` | Admin dashboard operations |
| `auth.integration.test.ts` | Authentication flows |
| `budget.integration.test.ts` | Budget management |
| `flyer.integration.test.ts` | Flyer CRUD operations |
| `flyer-processing.integration.test.ts` | AI flyer processing |
| `gamification.integration.test.ts` | Achievements and points |
| `inventory.integration.test.ts` | Inventory management |
| `notification.integration.test.ts` | User notifications |
| `receipt.integration.test.ts` | Receipt processing |
| `recipe.integration.test.ts` | Recipe management |
| `shopping-list.integration.test.ts` | Shopping list operations |
| `user.integration.test.ts` | User profile operations |
See `src/tests/integration/` for the complete list.
Requires PostgreSQL and Redis services running.
@@ -129,13 +154,23 @@ Requires PostgreSQL and Redis services running.
npm run test:integration
```
### E2E Tests (3 test files)
### E2E Tests (11 test files)
Located in `src/tests/e2e/`:
Located in `src/tests/e2e/`. Full user journey tests:
- `deals-journey.e2e.test.ts`
- `budget-journey.e2e.test.ts`
- `receipt-journey.e2e.test.ts`
| Test File | Journey |
| --------------------------------- | ----------------------------- |
| `admin-authorization.e2e.test.ts` | Admin access control |
| `admin-dashboard.e2e.test.ts` | Admin dashboard flows |
| `auth.e2e.test.ts` | Login/logout/registration |
| `budget-journey.e2e.test.ts` | Budget tracking workflow |
| `deals-journey.e2e.test.ts` | Finding and saving deals |
| `error-reporting.e2e.test.ts` | Error handling verification |
| `flyer-upload.e2e.test.ts` | Flyer upload and processing |
| `inventory-journey.e2e.test.ts` | Pantry management |
| `receipt-journey.e2e.test.ts` | Receipt scanning and tracking |
| `upc-journey.e2e.test.ts` | UPC barcode scanning |
| `user-journey.e2e.test.ts` | User profile management |
Requires all services (PostgreSQL, Redis, BullMQ workers) running.
@@ -157,20 +192,18 @@ Located in `src/tests/utils/storeHelpers.ts`:
```typescript
// Create a store with a location in one call
const store = await createStoreWithLocation({
storeName: 'Test Store',
address: {
address_line_1: '123 Main St',
city: 'Toronto',
province_state: 'ON',
postal_code: 'M1M 1M1',
},
pool,
log,
const store = await createStoreWithLocation(pool, {
name: 'Test Store',
address: '123 Main St',
city: 'Toronto',
province: 'ON',
postalCode: 'M1M 1M1',
});
// Returns: { storeId, addressId, storeLocationId }
// Cleanup stores and their locations
await cleanupStoreLocations([storeId1, storeId2], pool, log);
await cleanupStoreLocation(pool, store);
```
### Mock Factories
@@ -262,6 +295,8 @@ Opens a browser-based test runner with filtering and debugging capabilities.
6. **Use unique filenames** - file upload tests need timestamp-based filenames
7. **Check exit codes** - `npm run type-check` returns 0 on success, non-zero on error
8. **Use `req.originalUrl` in error logs** - never hardcode API paths in error messages
9. **Use versioned API paths** - always use `/api/v1/` prefix in test requests
10. **Use `vi.hoisted()` for module mocks** - ensure mocks are available during module initialization
## Testing Error Log Messages
@@ -314,3 +349,159 @@ expect(logSpy).toHaveBeenCalledWith(
```
See [Error Logging Path Patterns](ERROR-LOGGING-PATHS.md) for complete documentation.
## API Versioning in Tests (ADR-008, ADR-057)
All API endpoints use the `/api/v1/` prefix. Tests must use versioned paths.
### Configuration
API base URLs are configured centrally in Vitest config files:
| Config File | Environment Variable | Value |
| ------------------------------ | -------------------- | ------------------------------ |
| `vite.config.ts` | `VITE_API_BASE_URL` | `/api/v1` |
| `vitest.config.e2e.ts` | `VITE_API_BASE_URL` | `http://localhost:3098/api/v1` |
| `vitest.config.integration.ts` | `VITE_API_BASE_URL` | `http://localhost:3099/api/v1` |
### Writing API Tests
```typescript
// Good - versioned path
const response = await request.post('/api/v1/auth/login').send({...});
// Bad - unversioned path (will fail)
const response = await request.post('/api/auth/login').send({...});
```
### Migration Checklist
When API version changes (e.g., v1 to v2):
1. Update all Vitest config `VITE_API_BASE_URL` values
2. Search and replace API paths in E2E tests: `grep -r "/api/v1/" src/tests/e2e/`
3. Search and replace API paths in integration tests
4. Verify route handler error logs use `req.originalUrl`
5. Run full test suite in dev container
See [ADR-057](../adr/0057-test-remediation-post-api-versioning.md) for complete migration guidance.
## vi.hoisted() Pattern for Module Mocks
When mocking modules that are imported at module initialization time (like queues or database connections), use `vi.hoisted()` to ensure mocks are available during hoisting.
### Problem: Mock Not Available During Import
```typescript
// BAD: Mock might not be ready when module imports it
vi.mock('../services/queues.server', () => ({
flyerQueue: { getJobCounts: vi.fn() }, // May not exist yet
}));
import healthRouter from './health.routes'; // Imports queues.server
```
### Solution: Use vi.hoisted()
```typescript
// GOOD: Mocks are created during hoisting, before vi.mock runs
const { mockQueuesModule } = vi.hoisted(() => {
const createMockQueue = () => ({
getJobCounts: vi.fn().mockResolvedValue({
waiting: 0,
active: 0,
failed: 0,
delayed: 0,
}),
});
return {
mockQueuesModule: {
flyerQueue: createMockQueue(),
emailQueue: createMockQueue(),
// ... additional queues
},
};
});
// Now the mock object exists when vi.mock factory runs
vi.mock('../services/queues.server', () => mockQueuesModule);
// Safe to import after mocks are defined
import healthRouter from './health.routes';
```
See [ADR-057](../adr/0057-test-remediation-post-api-versioning.md) for additional patterns.
## Testing Role-Based Component Visibility
When testing components that render differently based on user roles:
### Pattern: Separate Test Cases by Role
```typescript
describe('for authenticated users', () => {
beforeEach(() => {
mockedUseAuth.mockReturnValue({
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ role: 'user' }),
});
});
it('renders user-accessible components', () => {
render(<MyComponent />);
expect(screen.getByTestId('user-component')).toBeInTheDocument();
// Admin-only should NOT be present
expect(screen.queryByTestId('admin-only')).not.toBeInTheDocument();
});
});
describe('for admin users', () => {
beforeEach(() => {
mockedUseAuth.mockReturnValue({
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ role: 'admin' }),
});
});
it('renders admin-only components', () => {
render(<MyComponent />);
expect(screen.getByTestId('admin-only')).toBeInTheDocument();
});
});
```
### Key Points
1. Create separate `describe` blocks for each role
2. Set up role-specific mocks in `beforeEach`
3. Test both presence AND absence of role-gated components
4. Use `screen.queryByTestId()` for elements that should NOT exist
## CSS Class Assertions After UI Refactors
After frontend style changes, update test assertions to match new CSS classes.
### Handling Tailwind Class Changes
```typescript
// Before refactor
expect(selectedItem).toHaveClass('ring-2', 'ring-brand-primary');
// After refactor - update to new classes
expect(selectedItem).toHaveClass('border-brand-primary', 'bg-teal-50/50');
```
### Flexible Matching
For complex class combinations, consider partial matching:
```typescript
// Check for key classes, ignore utility classes
expect(element).toHaveClass('border-brand-primary');
// Or use regex for patterns
expect(element.className).toMatch(/dark:bg-teal-\d+/);
```
See [ADR-057](../adr/0057-test-remediation-post-api-versioning.md) for lessons learned from the test remediation effort.

View File

@@ -0,0 +1,899 @@
# tsoa Migration Guide
This guide documents the migration from `swagger-jsdoc` to `tsoa` for API documentation and route generation in the Flyer Crawler project.
## Table of Contents
- [Overview](#overview)
- [Architecture](#architecture)
- [Creating a New Controller](#creating-a-new-controller)
- [BaseController Pattern](#basecontroller-pattern)
- [Authentication](#authentication)
- [Request Handling](#request-handling)
- [Response Formatting](#response-formatting)
- [DTOs and Type Definitions](#dtos-and-type-definitions)
- [File Uploads](#file-uploads)
- [Rate Limiting](#rate-limiting)
- [Error Handling](#error-handling)
- [Testing Controllers](#testing-controllers)
- [Build and Development](#build-and-development)
- [Troubleshooting](#troubleshooting)
- [Migration Lessons Learned](#migration-lessons-learned)
## Overview
### What Changed
| Before (swagger-jsdoc) | After (tsoa) |
| ---------------------------------------- | ------------------------------------------- |
| JSDoc `@openapi` comments in route files | TypeScript decorators on controller classes |
| Manual Express route registration | tsoa generates routes automatically |
| Separate Zod validation middleware | tsoa validates from TypeScript types |
| OpenAPI spec from comments | OpenAPI spec from decorators and types |
### Why tsoa?
1. **Type Safety**: OpenAPI spec is generated from TypeScript types, eliminating drift
2. **Active Maintenance**: tsoa is actively maintained (vs. unmaintained swagger-jsdoc)
3. **Reduced Duplication**: No more parallel JSDoc + TypeScript definitions
4. **Route Generation**: tsoa generates Express routes, reducing boilerplate
### Key Files
| File | Purpose |
| -------------------------------------- | --------------------------------------- |
| `tsoa.json` | tsoa configuration |
| `src/controllers/base.controller.ts` | Base controller with response utilities |
| `src/controllers/types.ts` | Shared controller type definitions |
| `src/controllers/*.controller.ts` | Domain controllers |
| `src/dtos/common.dto.ts` | Shared DTO definitions |
| `src/middleware/tsoaAuthentication.ts` | JWT authentication handler |
| `src/routes/tsoa-generated.ts` | Generated Express routes |
| `src/config/tsoa-spec.json` | Generated OpenAPI 3.0 spec |
## Architecture
### Request Flow
```
HTTP Request
|
v
Express Middleware (logging, CORS, body parsing)
|
v
tsoa-generated routes (src/routes/tsoa-generated.ts)
|
v
tsoaAuthentication (if @Security decorator present)
|
v
Controller Method
|
v
Service Layer
|
v
Repository Layer
|
v
Database
```
### Controller Structure
```
src/controllers/
base.controller.ts # Base class with response helpers
types.ts # Shared type definitions
health.controller.ts # Health check endpoints
auth.controller.ts # Authentication endpoints
user.controller.ts # User management endpoints
...
```
## Creating a New Controller
### Step 1: Create the Controller File
```typescript
// src/controllers/example.controller.ts
import {
Route,
Tags,
Get,
Post,
Put,
Delete,
Body,
Path,
Query,
Request,
Security,
SuccessResponse,
Response,
Middlewares,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import {
BaseController,
SuccessResponse as SuccessResponseType,
ErrorResponse,
PaginatedResponse,
} from './base.controller';
import type { UserProfile } from '../types';
// ============================================================================
// REQUEST/RESPONSE TYPES
// ============================================================================
interface CreateExampleRequest {
/**
* Name of the example item.
* @minLength 1
* @maxLength 255
* @example "My Example"
*/
name: string;
/**
* Optional description.
* @example "This is an example item"
*/
description?: string;
}
interface ExampleResponse {
id: number;
name: string;
description?: string;
created_at: string;
}
// ============================================================================
// CONTROLLER
// ============================================================================
/**
* Example controller demonstrating tsoa patterns.
*/
@Route('examples')
@Tags('Examples')
export class ExampleController extends BaseController {
/**
* List all examples with pagination.
* @summary List examples
* @param page Page number (1-indexed)
* @param limit Items per page (max 100)
* @returns Paginated list of examples
*/
@Get()
@SuccessResponse(200, 'Examples retrieved')
public async listExamples(
@Query() page?: number,
@Query() limit?: number,
): Promise<PaginatedResponse<ExampleResponse>> {
const { page: p, limit: l } = this.normalizePagination(page, limit);
// Call service layer
const { items, total } = await exampleService.listExamples(p, l);
return this.paginated(items, { page: p, limit: l, total });
}
/**
* Get a single example by ID.
* @summary Get example
* @param id Example ID
* @returns The example
*/
@Get('{id}')
@SuccessResponse(200, 'Example retrieved')
@Response<ErrorResponse>(404, 'Example not found')
public async getExample(@Path() id: number): Promise<SuccessResponseType<ExampleResponse>> {
const example = await exampleService.getExampleById(id);
return this.success(example);
}
/**
* Create a new example.
* Requires authentication.
* @summary Create example
* @param requestBody Example data
* @param request Express request
* @returns Created example
*/
@Post()
@Security('bearerAuth')
@SuccessResponse(201, 'Example created')
@Response<ErrorResponse>(400, 'Validation error')
@Response<ErrorResponse>(401, 'Not authenticated')
public async createExample(
@Body() requestBody: CreateExampleRequest,
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<ExampleResponse>> {
const user = request.user as UserProfile;
const example = await exampleService.createExample(requestBody, user.user.user_id);
return this.created(example);
}
/**
* Delete an example.
* Requires authentication.
* @summary Delete example
* @param id Example ID
* @param request Express request
*/
@Delete('{id}')
@Security('bearerAuth')
@SuccessResponse(204, 'Example deleted')
@Response<ErrorResponse>(401, 'Not authenticated')
@Response<ErrorResponse>(404, 'Example not found')
public async deleteExample(
@Path() id: number,
@Request() request: ExpressRequest,
): Promise<void> {
const user = request.user as UserProfile;
await exampleService.deleteExample(id, user.user.user_id);
return this.noContent();
}
}
```
### Step 2: Regenerate Routes
After creating or modifying a controller:
```bash
# Generate OpenAPI spec and routes
npm run tsoa:spec && npm run tsoa:routes
# Or use the combined command
npm run prebuild
```
### Step 3: Add Tests
Create a test file at `src/controllers/__tests__/example.controller.test.ts`.
## BaseController Pattern
All controllers extend `BaseController` which provides:
### Response Helpers
```typescript
// Success response (200)
return this.success(data);
// Created response (201)
return this.created(data);
// Paginated response (200 with pagination metadata)
return this.paginated(items, { page, limit, total });
// Message-only response
return this.message('Operation completed successfully');
// No content response (204)
return this.noContent();
// Error response (prefer throwing errors)
this.setStatus(400);
return this.error('BAD_REQUEST', 'Invalid input', details);
```
### Pagination Helpers
```typescript
// Normalize pagination with defaults and bounds
const { page, limit } = this.normalizePagination(queryPage, queryLimit);
// page defaults to 1, limit defaults to 20, max 100
// Calculate pagination metadata
const meta = this.calculatePagination({ page, limit, total });
// Returns: { page, limit, total, totalPages, hasNextPage, hasPrevPage }
```
### Error Codes
```typescript
// Access standard error codes
this.ErrorCode.VALIDATION_ERROR; // 'VALIDATION_ERROR'
this.ErrorCode.NOT_FOUND; // 'NOT_FOUND'
this.ErrorCode.UNAUTHORIZED; // 'UNAUTHORIZED'
this.ErrorCode.FORBIDDEN; // 'FORBIDDEN'
this.ErrorCode.CONFLICT; // 'CONFLICT'
this.ErrorCode.BAD_REQUEST; // 'BAD_REQUEST'
this.ErrorCode.INTERNAL_ERROR; // 'INTERNAL_ERROR'
```
## Authentication
### Using @Security Decorator
```typescript
import { Security, Request } from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import type { UserProfile } from '../types';
@Get('profile')
@Security('bearerAuth')
public async getProfile(
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<UserProfileDto>> {
// request.user is populated by tsoaAuthentication.ts
const user = request.user as UserProfile;
return this.success(toUserProfileDto(user));
}
```
### Requiring Admin Role
```typescript
import { requireAdminRole } from '../middleware/tsoaAuthentication';
@Delete('users/{id}')
@Security('bearerAuth')
public async deleteUser(
@Path() id: string,
@Request() request: ExpressRequest,
): Promise<void> {
const user = request.user as UserProfile;
requireAdminRole(user); // Throws 403 if not admin
await userService.deleteUser(id);
return this.noContent();
}
```
### How Authentication Works
1. tsoa sees `@Security('bearerAuth')` decorator
2. tsoa calls `expressAuthentication()` from `src/middleware/tsoaAuthentication.ts`
3. The function extracts and validates the JWT token
4. User profile is fetched from database and attached to `request.user`
5. If authentication fails, an `AuthenticationError` is thrown
## Request Handling
### Path Parameters
```typescript
@Get('{id}')
public async getItem(@Path() id: number): Promise<...> { ... }
// Multiple path params
@Get('{userId}/items/{itemId}')
public async getUserItem(
@Path() userId: string,
@Path() itemId: number,
): Promise<...> { ... }
```
### Query Parameters
```typescript
@Get()
public async listItems(
@Query() page?: number,
@Query() limit?: number,
@Query() status?: 'active' | 'inactive',
@Query() search?: string,
): Promise<...> { ... }
```
### Request Body
```typescript
interface CreateItemRequest {
name: string;
description?: string;
}
@Post()
public async createItem(
@Body() requestBody: CreateItemRequest,
): Promise<...> { ... }
```
### Headers
```typescript
@Get()
public async getWithHeader(
@Header('X-Custom-Header') customHeader?: string,
): Promise<...> { ... }
```
### Accessing Express Request/Response
```typescript
import type { Request as ExpressRequest } from 'express';
@Post()
public async handleRequest(
@Request() request: ExpressRequest,
): Promise<...> {
const reqLog = request.log; // Pino logger
const cookies = request.cookies; // Cookies
const ip = request.ip; // Client IP
const res = request.res!; // Express response
// Set cookie
res.cookie('name', 'value', { httpOnly: true });
// ...
}
```
## Response Formatting
### Standard Success Response
```typescript
// Returns: { "success": true, "data": {...} }
return this.success({ id: 1, name: 'Item' });
```
### Created Response (201)
```typescript
// Sets status 201 and returns success response
return this.created(newItem);
```
### Paginated Response
```typescript
// Returns: { "success": true, "data": [...], "meta": { "pagination": {...} } }
return this.paginated(items, { page: 1, limit: 20, total: 100 });
```
### No Content (204)
```typescript
// Sets status 204 with no body
return this.noContent();
```
### Error Response
Prefer throwing errors rather than returning error responses:
```typescript
import { NotFoundError, ValidationError, ForbiddenError } from './base.controller';
// Throw for not found
throw new NotFoundError('Item', id);
// Throw for validation errors
throw new ValidationError([], 'Invalid input');
// Throw for forbidden
throw new ForbiddenError('Admin access required');
```
If you need manual error response:
```typescript
this.setStatus(400);
return this.error(this.ErrorCode.BAD_REQUEST, 'Invalid operation', { reason: '...' });
```
## DTOs and Type Definitions
### Why DTOs?
tsoa generates OpenAPI specs from TypeScript types. Some types cannot be serialized:
- Tuples: `[number, number]` (e.g., GeoJSON coordinates)
- Complex generics
- Circular references
DTOs flatten these into tsoa-compatible structures.
### Shared DTOs
Define shared DTOs in `src/dtos/common.dto.ts`:
```typescript
// src/dtos/common.dto.ts
/**
* Address with flattened coordinates.
* GeoJSONPoint uses coordinates: [number, number] which tsoa cannot handle.
*/
export interface AddressDto {
address_id: number;
address_line_1: string;
city: string;
province_state: string;
postal_code: string;
country: string;
// Flattened from GeoJSONPoint.coordinates
latitude?: number | null;
longitude?: number | null;
created_at: string;
updated_at: string;
}
export interface UserDto {
user_id: string;
email: string;
created_at: string;
updated_at: string;
}
```
### Conversion Functions
Create conversion functions to map domain types to DTOs:
```typescript
// In controller file
function toAddressDto(address: Address): AddressDto {
return {
address_id: address.address_id,
address_line_1: address.address_line_1,
city: address.city,
province_state: address.province_state,
postal_code: address.postal_code,
country: address.country,
latitude: address.location?.coordinates[1] ?? null,
longitude: address.location?.coordinates[0] ?? null,
created_at: address.created_at,
updated_at: address.updated_at,
};
}
```
### Important: Avoid Duplicate Type Names
tsoa requires unique type names across all controllers. If two controllers define an interface with the same name, tsoa will fail.
**Solution**: Define shared types in `src/dtos/common.dto.ts` and import them.
## File Uploads
tsoa supports file uploads via `@UploadedFile` and `@FormField` decorators:
```typescript
import { Post, Route, Tags, UploadedFile, FormField, Security, Middlewares, SuccessResponse, Request } from 'tsoa';
import multer from 'multer';
// Configure multer
const upload = multer({
storage: multer.diskStorage({
destination: '/tmp/uploads',
filename: (req, file, cb) => {
cb(null, `${Date.now()}-${Math.round(Math.random() * 1e9)}-${file.originalname}`);
},
}),
limits: { fileSize: 10 * 1024 * 1024 }, // 10MB
});
@Route('flyers')
@Tags('Flyers')
export class FlyerController extends BaseController {
/**
* Upload a flyer image.
* @summary Upload flyer
* @param file The flyer image file
* @param storeId Associated store ID
* @param request Express request
*/
@Post('upload')
@Security('bearerAuth')
@Middlewares(upload.single('file'))
@SuccessResponse(201, 'Flyer uploaded')
public async uploadFlyer(
@UploadedFile() file: Express.Multer.File,
@Request() request: ExpressRequest,
@FormField() storeId?: number,
): Promise<SuccessResponseType<FlyerDto>> {
const user = request.user as UserProfile;
const flyer = await flyerService.processUpload(file, storeId, user.user.user_id);
return this.created(flyer);
}
}
```
## Rate Limiting
Apply rate limiters using the `@Middlewares` decorator:
```typescript
import { Middlewares } from 'tsoa';
import { loginLimiter, registerLimiter } from '../config/rateLimiters';
@Post('login')
@Middlewares(loginLimiter)
@SuccessResponse(200, 'Login successful')
@Response<ErrorResponse>(429, 'Too many login attempts')
public async login(@Body() body: LoginRequest): Promise<...> { ... }
@Post('register')
@Middlewares(registerLimiter)
@SuccessResponse(201, 'User registered')
@Response<ErrorResponse>(429, 'Too many registration attempts')
public async register(@Body() body: RegisterRequest): Promise<...> { ... }
```
## Error Handling
### Throwing Errors
Use the error classes from `base.controller.ts`:
```typescript
import {
NotFoundError,
ValidationError,
ForbiddenError,
UniqueConstraintError,
} from './base.controller';
// Not found (404)
throw new NotFoundError('User', userId);
// Validation error (400)
throw new ValidationError([], 'Invalid email format');
// Forbidden (403)
throw new ForbiddenError('Admin access required');
// Conflict (409) - e.g., duplicate email
throw new UniqueConstraintError('email', 'Email already registered');
```
### Global Error Handler
Errors are caught by the global error handler in `server.ts` which formats them according to ADR-028:
```json
{
"success": false,
"error": {
"code": "NOT_FOUND",
"message": "User not found"
}
}
```
### Authentication Errors
The `tsoaAuthentication.ts` module throws `AuthenticationError` with appropriate HTTP status codes:
- 401: Missing token, invalid token, expired token
- 403: User lacks required role
- 500: Server configuration error
## Testing Controllers
### Test File Location
```
src/controllers/__tests__/
example.controller.test.ts
auth.controller.test.ts
user.controller.test.ts
...
```
### Test Structure
```typescript
// src/controllers/__tests__/example.controller.test.ts
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ExampleController } from '../example.controller';
// Mock dependencies
vi.mock('../../services/exampleService', () => ({
exampleService: {
listExamples: vi.fn(),
getExampleById: vi.fn(),
createExample: vi.fn(),
deleteExample: vi.fn(),
},
}));
import { exampleService } from '../../services/exampleService';
describe('ExampleController', () => {
let controller: ExampleController;
beforeEach(() => {
controller = new ExampleController();
vi.clearAllMocks();
});
describe('listExamples', () => {
it('should return paginated examples', async () => {
const mockItems = [{ id: 1, name: 'Test' }];
vi.mocked(exampleService.listExamples).mockResolvedValue({
items: mockItems,
total: 1,
});
const result = await controller.listExamples(1, 20);
expect(result.success).toBe(true);
expect(result.data).toEqual(mockItems);
expect(result.meta?.pagination).toBeDefined();
expect(result.meta?.pagination?.total).toBe(1);
});
});
describe('createExample', () => {
it('should create example and return 201', async () => {
const mockExample = { id: 1, name: 'New', created_at: '2026-01-01' };
vi.mocked(exampleService.createExample).mockResolvedValue(mockExample);
const mockRequest = {
user: { user: { user_id: 'user-123' } },
} as any;
const result = await controller.createExample({ name: 'New' }, mockRequest);
expect(result.success).toBe(true);
expect(result.data).toEqual(mockExample);
// Note: setStatus is called internally, verify with spy if needed
});
});
});
```
### Testing Authentication
```typescript
describe('authenticated endpoints', () => {
it('should use user from request', async () => {
const mockRequest = {
user: {
user: { user_id: 'user-123', email: 'test@example.com' },
role: 'user',
},
} as any;
const result = await controller.getProfile(mockRequest);
expect(result.data.user.user_id).toBe('user-123');
});
});
```
### Known Test Limitations
Some test files have type errors with mock objects that are acceptable:
```typescript
// Type error: 'any' is not assignable to 'Express.Request'
// This is acceptable in tests - the mock has the properties we need
const mockRequest = { user: mockUser } as any;
```
These type errors do not affect test correctness. The 4603 unit tests and 345 integration tests all pass.
## Build and Development
### Development Workflow
1. Create or modify controller
2. Run `npm run tsoa:spec && npm run tsoa:routes`
3. Run `npm run type-check` to verify
4. Run tests
### NPM Scripts
```json
{
"tsoa:spec": "tsoa spec",
"tsoa:routes": "tsoa routes",
"prebuild": "npm run tsoa:spec && npm run tsoa:routes",
"build": "tsc"
}
```
### Watching for Changes
Currently, tsoa routes must be regenerated manually when controllers change. Consider adding a watch script:
```bash
# In development, regenerate on save
npm run tsoa:spec && npm run tsoa:routes
```
### Generated Files
| File | Regenerate When |
| ------------------------------ | ------------------------------- |
| `src/routes/tsoa-generated.ts` | Controller changes |
| `src/config/tsoa-spec.json` | Controller changes, DTO changes |
These files are committed to the repository for faster builds.
## Troubleshooting
### "Duplicate identifier" Error
**Problem**: tsoa fails with "Duplicate identifier" for a type.
**Solution**: Move the type to `src/dtos/common.dto.ts` and import it in all controllers.
### "Unable to resolve type" Error
**Problem**: tsoa cannot serialize a complex type (tuples, generics).
**Solution**: Create a DTO with flattened/simplified structure.
```typescript
// Before: GeoJSONPoint with coordinates: [number, number]
// After: AddressDto with latitude, longitude as separate fields
```
### Route Not Found (404)
**Problem**: New endpoint returns 404.
**Solution**:
1. Ensure controller file matches glob pattern: `src/controllers/**/*.controller.ts`
2. Regenerate routes: `npm run tsoa:routes`
3. Verify the route is in `src/routes/tsoa-generated.ts`
### Authentication Not Working
**Problem**: `request.user` is undefined.
**Solution**:
1. Ensure `@Security('bearerAuth')` decorator is on the method
2. Verify `tsoaAuthentication.ts` is correctly configured in `tsoa.json`
3. Check the Authorization header format: `Bearer <token>`
### Type Mismatch in Tests
**Problem**: TypeScript errors when mocking Express.Request.
**Solution**: Use `as any` cast for mock objects in tests. This is acceptable and does not affect test correctness.
```typescript
const mockRequest = {
user: mockUserProfile,
log: mockLogger,
} as any;
```
## Migration Lessons Learned
### What Worked Well
1. **BaseController Pattern**: Provides consistent response formatting and familiar helpers
2. **Incremental Migration**: Controllers can be migrated one at a time
3. **Type-First Design**: Defining request/response types first makes implementation clearer
4. **Shared DTOs**: Centralizing DTOs in `common.dto.ts` prevents duplicate type errors
### Challenges Encountered
1. **Tuple Types**: tsoa cannot serialize TypeScript tuples. Solution: Flatten to separate fields.
2. **Passport Integration**: OAuth callbacks use redirect-based flows that don't fit tsoa's JSON model. Solution: Keep OAuth callbacks in Express routes.
3. **Test Type Errors**: Mock objects don't perfectly match Express types. Solution: Accept `as any` casts in tests.
4. **Build Pipeline**: Must regenerate routes when controllers change. Solution: Add to prebuild script.
### Recommendations for Future Controllers
1. Start with the DTO/request/response types
2. Use `@SuccessResponse` and `@Response` decorators for all status codes
3. Add JSDoc comments for OpenAPI descriptions
4. Keep controller methods thin - delegate to service layer
5. Test controllers in isolation by mocking services
## Related Documentation
- [ADR-018: API Documentation Strategy](../adr/0018-api-documentation-strategy.md)
- [ADR-059: Dependency Modernization](../adr/0059-dependency-modernization.md)
- [ADR-028: API Response Standardization](../adr/0028-api-response-standardization.md)
- [CODE-PATTERNS.md](./CODE-PATTERNS.md)
- [TESTING.md](./TESTING.md)
- [tsoa Documentation](https://tsoa-community.github.io/docs/)

View File

@@ -2,134 +2,259 @@
Complete guide to environment variables used in Flyer Crawler.
---
## Quick Reference
### Minimum Required Variables (Development)
| Variable | Example | Purpose |
| ---------------- | ------------------------ | -------------------- |
| `DB_HOST` | `localhost` | PostgreSQL host |
| `DB_USER` | `postgres` | PostgreSQL username |
| `DB_PASSWORD` | `postgres` | PostgreSQL password |
| `DB_NAME` | `flyer_crawler_dev` | Database name |
| `REDIS_URL` | `redis://localhost:6379` | Redis connection URL |
| `JWT_SECRET` | (32+ character string) | JWT signing key |
| `GEMINI_API_KEY` | `AIzaSy...` | Google Gemini API |
### Source of Truth
The Zod schema at `src/config/env.ts` is the authoritative source for all environment variables. If a variable is not in this file, it is not used by the application.
---
## Configuration by Environment
### Production
**Location**: Gitea CI/CD secrets injected during deployment
**Path**: `/var/www/flyer-crawler.projectium.com/`
**Note**: No `.env` file exists - all variables come from CI/CD
| Aspect | Details |
| -------- | ------------------------------------------ |
| Location | Gitea CI/CD secrets injected at deployment |
| Path | `/var/www/flyer-crawler.projectium.com/` |
| File | No `.env` file - all from CI/CD secrets |
### Test
**Location**: Gitea CI/CD secrets + `.env.test` file
**Path**: `/var/www/flyer-crawler-test.projectium.com/`
**Note**: `.env.test` overrides for test-specific values
| Aspect | Details |
| -------- | --------------------------------------------- |
| Location | Gitea CI/CD secrets + `.env.test` overrides |
| Path | `/var/www/flyer-crawler-test.projectium.com/` |
| File | `.env.test` for test-specific values |
### Development Container
**Location**: `.env.local` file in project root
**Note**: Overrides default DSNs in `compose.dev.yml`
| Aspect | Details |
| -------- | --------------------------------------- |
| Location | `.env.local` file in project root |
| Priority | Overrides defaults in `compose.dev.yml` |
| File | `.env.local` (gitignored) |
## Required Variables
---
### Database
## Complete Variable Reference
| Variable | Description | Example |
| ------------------ | ---------------------------- | ------------------------------------------ |
| `DB_HOST` | PostgreSQL host | `localhost` (dev), `projectium.com` (prod) |
| `DB_PORT` | PostgreSQL port | `5432` |
| `DB_USER_PROD` | Production database user | `flyer_crawler_prod` |
| `DB_PASSWORD_PROD` | Production database password | (secret) |
| `DB_DATABASE_PROD` | Production database name | `flyer-crawler-prod` |
| `DB_USER_TEST` | Test database user | `flyer_crawler_test` |
| `DB_PASSWORD_TEST` | Test database password | (secret) |
| `DB_DATABASE_TEST` | Test database name | `flyer-crawler-test` |
| `DB_USER` | Dev database user | `postgres` |
| `DB_PASSWORD` | Dev database password | `postgres` |
| `DB_NAME` | Dev database name | `flyer_crawler_dev` |
### Database Configuration
**Note**: Production and test use separate `_PROD` and `_TEST` suffixed variables. Development uses unsuffixed variables.
| Variable | Required | Default | Description |
| ------------- | -------- | ------- | ----------------- |
| `DB_HOST` | Yes | - | PostgreSQL host |
| `DB_PORT` | No | `5432` | PostgreSQL port |
| `DB_USER` | Yes | - | Database username |
| `DB_PASSWORD` | Yes | - | Database password |
| `DB_NAME` | Yes | - | Database name |
### Redis
**Environment-Specific Variables** (Gitea Secrets):
| Variable | Description | Example |
| --------------------- | ------------------------- | ------------------------------ |
| `REDIS_URL` | Redis connection URL | `redis://localhost:6379` (dev) |
| `REDIS_PASSWORD_PROD` | Production Redis password | (secret) |
| `REDIS_PASSWORD_TEST` | Test Redis password | (secret) |
| Variable | Environment | Description |
| ------------------ | ----------- | ------------------------ |
| `DB_USER_PROD` | Production | Production database user |
| `DB_PASSWORD_PROD` | Production | Production database pass |
| `DB_DATABASE_PROD` | Production | Production database name |
| `DB_USER_TEST` | Test | Test database user |
| `DB_PASSWORD_TEST` | Test | Test database password |
| `DB_DATABASE_TEST` | Test | Test database name |
### Redis Configuration
| Variable | Required | Default | Description |
| ---------------- | -------- | ------- | ------------------------- |
| `REDIS_URL` | Yes | - | Redis connection URL |
| `REDIS_PASSWORD` | No | - | Redis password (optional) |
**URL Format**: `redis://[[user][:password]@]host[:port]` — the user portion may be empty when only a password is set (e.g. `redis://:password@host:6379`).
**Examples**:
```bash
# Development (no auth)
REDIS_URL=redis://localhost:6379
# Production (with auth)
REDIS_URL=redis://:${REDIS_PASSWORD_PROD}@localhost:6379
```
### Authentication
| Variable | Description | Example |
| ---------------------- | -------------------------- | -------------------------------- |
| `JWT_SECRET` | JWT token signing key | (minimum 32 characters) |
| `SESSION_SECRET` | Session encryption key | (minimum 32 characters) |
| `GOOGLE_CLIENT_ID` | Google OAuth client ID | `xxx.apps.googleusercontent.com` |
| `GOOGLE_CLIENT_SECRET` | Google OAuth client secret | (secret) |
| `GH_CLIENT_ID` | GitHub OAuth client ID | `xxx` |
| `GH_CLIENT_SECRET` | GitHub OAuth client secret | (secret) |
| Variable | Required | Min Length | Description |
| ---------------------- | -------- | ---------- | ----------------------- |
| `JWT_SECRET` | Yes | 32 chars | JWT token signing key |
| `JWT_SECRET_PREVIOUS` | No | - | Previous key (rotation) |
| `GOOGLE_CLIENT_ID` | No | - | Google OAuth client ID |
| `GOOGLE_CLIENT_SECRET` | No | - | Google OAuth secret |
| `GITHUB_CLIENT_ID` | No | - | GitHub OAuth client ID |
| `GITHUB_CLIENT_SECRET` | No | - | GitHub OAuth secret |
**Generate Secure Secret**:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### AI Services
| Variable | Description | Example |
| -------------------------------- | ---------------------------- | ----------- |
| `VITE_GOOGLE_GENAI_API_KEY` | Google Gemini API key (prod) | `AIzaSy...` |
| `VITE_GOOGLE_GENAI_API_KEY_TEST` | Google Gemini API key (test) | `AIzaSy...` |
| `GOOGLE_MAPS_API_KEY` | Google Maps Geocoding API | `AIzaSy...` |
| Variable | Required | Description |
| ---------------------------- | -------- | -------------------------------- |
| `GEMINI_API_KEY` | Yes\* | Google Gemini API key |
| `GEMINI_RPM` | No | Rate limit (default: 5) |
| `AI_PRICE_QUALITY_THRESHOLD` | No | Quality threshold (default: 0.5) |
### Application
\*Required for flyer processing. Application works without it but cannot extract flyer data.
| Variable | Description | Example |
| -------------- | ------------------------ | ----------------------------------- |
| `NODE_ENV` | Environment mode | `development`, `test`, `production` |
| `PORT` | Backend server port | `3001` |
| `FRONTEND_URL` | Frontend application URL | `http://localhost:5173` (dev) |
**Get API Key**: [Google AI Studio](https://aistudio.google.com/app/apikey)
### Error Tracking
### Google Services
| Variable | Description | Example |
| ---------------------- | -------------------------------- | --------------------------- |
| `SENTRY_DSN` | Sentry DSN (production) | `https://xxx@sentry.io/xxx` |
| `VITE_SENTRY_DSN` | Frontend Sentry DSN (production) | `https://xxx@sentry.io/xxx` |
| `SENTRY_DSN_TEST` | Sentry DSN (test) | `https://xxx@sentry.io/xxx` |
| `VITE_SENTRY_DSN_TEST` | Frontend Sentry DSN (test) | `https://xxx@sentry.io/xxx` |
| `SENTRY_AUTH_TOKEN` | Sentry API token for releases | (secret) |
| Variable | Required | Description |
| ---------------------- | -------- | -------------------------------- |
| `GOOGLE_MAPS_API_KEY` | No | Google Maps Geocoding API |
| `GOOGLE_CLIENT_ID` | No | OAuth (see Authentication above) |
| `GOOGLE_CLIENT_SECRET` | No | OAuth (see Authentication above) |
## Optional Variables
### UPC Lookup APIs
| Variable | Description | Default |
| ------------------- | ----------------------- | ----------------- |
| `LOG_LEVEL` | Logging verbosity | `info` |
| `REDIS_TTL` | Cache TTL in seconds | `3600` |
| `MAX_UPLOAD_SIZE` | Max file upload size | `10mb` |
| `RATE_LIMIT_WINDOW` | Rate limit window (ms) | `900000` (15 min) |
| `RATE_LIMIT_MAX` | Max requests per window | `100` |
| Variable | Required | Description |
| ------------------------ | -------- | ---------------------- |
| `UPC_ITEM_DB_API_KEY` | No | UPC Item DB API key |
| `BARCODE_LOOKUP_API_KEY` | No | Barcode Lookup API key |
### Application Settings
| Variable | Required | Default | Description |
| -------------- | -------- | ------------- | ------------------------ |
| `NODE_ENV` | No | `development` | Environment mode |
| `PORT` | No | `3001` | Backend server port |
| `FRONTEND_URL` | No | - | Frontend URL (CORS) |
| `BASE_URL` | No | - | API base URL |
| `STORAGE_PATH` | No | (see below) | Flyer image storage path |
**NODE_ENV Values**: `development`, `test`, `staging`, `production`
**Default STORAGE_PATH**: `/var/www/flyer-crawler.projectium.com/flyer-images`
### Email/SMTP Configuration
| Variable | Required | Default | Description |
| ----------------- | -------- | ------- | ----------------------- |
| `SMTP_HOST` | No | - | SMTP server hostname |
| `SMTP_PORT` | No | `587` | SMTP server port |
| `SMTP_USER` | No | - | SMTP username |
| `SMTP_PASS` | No | - | SMTP password |
| `SMTP_SECURE` | No | `false` | Use TLS |
| `SMTP_FROM_EMAIL` | No | - | From address for emails |
**Note**: Email functionality degrades gracefully if not configured.
### Worker Configuration
| Variable | Default | Description |
| ------------------------------------- | ------- | ---------------------------- |
| `WORKER_CONCURRENCY` | `1` | Main worker concurrency |
| `WORKER_LOCK_DURATION` | `30000` | Lock duration (ms) |
| `EMAIL_WORKER_CONCURRENCY` | `10` | Email worker concurrency |
| `ANALYTICS_WORKER_CONCURRENCY` | `1` | Analytics worker concurrency |
| `CLEANUP_WORKER_CONCURRENCY` | `10` | Cleanup worker concurrency |
| `WEEKLY_ANALYTICS_WORKER_CONCURRENCY` | `1` | Weekly analytics concurrency |
### Error Tracking (Bugsink/Sentry)
| Variable | Required | Default | Description |
| --------------------- | -------- | -------- | ------------------------------- |
| `SENTRY_DSN` | No | - | Backend Sentry DSN |
| `SENTRY_ENABLED` | No | `true` | Enable error tracking |
| `SENTRY_ENVIRONMENT` | No | NODE_ENV | Environment name for errors |
| `SENTRY_DEBUG` | No | `false` | Enable Sentry SDK debug logging |
| `VITE_SENTRY_DSN` | No | - | Frontend Sentry DSN |
| `VITE_SENTRY_ENABLED` | No | `true` | Enable frontend error tracking |
| `VITE_SENTRY_DEBUG` | No | `false` | Frontend SDK debug logging |
**DSN Format**: `http://[key]@[host]:[port]/[project_id]`
**Dev Container DSNs**:
```bash
# Backend (internal)
SENTRY_DSN=http://<key>@localhost:8000/1
# Frontend (via nginx proxy)
VITE_SENTRY_DSN=https://<key>@localhost/bugsink-api/2
```
---
## Configuration Files
| File | Purpose |
| ------------------------------------- | ------------------------------------------- |
| `src/config/env.ts` | Zod schema validation - **source of truth** |
| `ecosystem.config.cjs` | PM2 process manager config |
| `ecosystem.config.cjs` | PM2 process manager (production) |
| `ecosystem.dev.config.cjs` | PM2 process manager (development) |
| `.gitea/workflows/deploy-to-prod.yml` | Production deployment workflow |
| `.gitea/workflows/deploy-to-test.yml` | Test deployment workflow |
| `.env.example` | Template with all variables |
| `.env.local` | Dev container overrides (not in git) |
| `.env.test` | Test environment overrides (not in git) |
---
## Adding New Variables
### 1. Update Zod Schema
### Checklist
1. [ ] **Update Zod Schema** - Edit `src/config/env.ts`
2. [ ] **Add to Gitea Secrets** - For prod/test environments
3. [ ] **Update Deployment Workflows** - `.gitea/workflows/*.yml`
4. [ ] **Update PM2 Config** - `ecosystem.config.cjs`
5. [ ] **Update .env.example** - Template for developers
6. [ ] **Update this document** - Add to appropriate section
### Step-by-Step
#### 1. Update Zod Schema
Edit `src/config/env.ts`:
```typescript
const envSchema = z.object({
// ... existing variables ...
NEW_VARIABLE: z.string().min(1),
newSection: z.object({
newVariable: z.string().min(1, 'NEW_VARIABLE is required'),
}),
});
// In loadEnvVars():
newSection: {
newVariable: process.env.NEW_VARIABLE,
},
```
### 2. Add to Gitea Secrets
For prod/test environments:
#### 2. Add to Gitea Secrets
1. Go to Gitea repository Settings > Secrets
2. Add `NEW_VARIABLE` with value
2. Add `NEW_VARIABLE` with production value
3. Add `NEW_VARIABLE_TEST` if test needs different value
### 3. Update Deployment Workflows
#### 3. Update Deployment Workflows
Edit `.gitea/workflows/deploy-to-prod.yml`:
@@ -145,7 +270,7 @@ env:
NEW_VARIABLE: ${{ secrets.NEW_VARIABLE_TEST }}
```
### 4. Update PM2 Config
#### 4. Update PM2 Config
Edit `ecosystem.config.cjs`:
@@ -161,31 +286,36 @@ module.exports = {
};
```
### 5. Update Documentation
- Add to `.env.example`
- Update this document
- Document in relevant feature docs
---
## Security Best Practices
### Secrets Management
### Do
- **NEVER** commit secrets to git
- Use Gitea Secrets for prod/test
- Use `.env.local` for dev (gitignored)
- Generate secrets with cryptographic randomness
- Rotate secrets regularly
- Use environment-specific database users
### Do Not
- Commit secrets to git
- Use short or predictable secrets
- Share secrets across environments
- Log sensitive values
### Secret Generation
```bash
# Generate secure random secrets
# Generate secure random secrets (64 hex characters)
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
# Example output:
# a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2
```
### Database Users
Each environment has its own PostgreSQL user:
### Database Users by Environment
| Environment | User | Database |
| ----------- | -------------------- | -------------------- |
@@ -193,44 +323,61 @@ Each environment has its own PostgreSQL user:
| Test | `flyer_crawler_test` | `flyer-crawler-test` |
| Development | `postgres` | `flyer_crawler_dev` |
**Setup Commands** (as postgres superuser):
```sql
-- Production
CREATE DATABASE "flyer-crawler-prod";
CREATE USER flyer_crawler_prod WITH PASSWORD 'secure-password';
ALTER DATABASE "flyer-crawler-prod" OWNER TO flyer_crawler_prod;
\c "flyer-crawler-prod"
ALTER SCHEMA public OWNER TO flyer_crawler_prod;
GRANT CREATE, USAGE ON SCHEMA public TO flyer_crawler_prod;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
-- Test (similar commands with _test suffix)
```
---
## Validation
Environment variables are validated at startup via `src/config/env.ts`. If validation fails:
Environment variables are validated at startup via `src/config/env.ts`.
1. Check the error message for missing/invalid variables
2. Verify `.env.local` (dev) or Gitea Secrets (prod/test)
3. Ensure values match schema requirements (min length, format, etc.)
### Startup Validation
If validation fails, you will see:
```text
╔════════════════════════════════════════════════════════════════╗
║ CONFIGURATION ERROR - APPLICATION STARTUP ║
╚════════════════════════════════════════════════════════════════╝
The following environment variables are missing or invalid:
- database.host: DB_HOST is required
- auth.jwtSecret: JWT_SECRET must be at least 32 characters
Please check your .env file or environment configuration.
```
### Debugging Configuration
```bash
# Check what variables are set (dev container)
podman exec flyer-crawler-dev env | grep -E "^(DB_|REDIS_|JWT_|SENTRY_)"
# Test database connection
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "SELECT 1;"
# Test Redis connection
podman exec flyer-crawler-redis redis-cli ping
```
---
## Troubleshooting
### Variable Not Found
```
```text
Error: Missing required environment variable: JWT_SECRET
```
**Solution**: Add the variable to your environment configuration.
**Solutions**:
1. Check `.env.local` exists and has the variable
2. Verify variable name matches schema exactly
3. Restart the application after changes
### Invalid Value
```
```text
Error: JWT_SECRET must be at least 32 characters
```
@@ -240,32 +387,36 @@ Error: JWT_SECRET must be at least 32 characters
Check `NODE_ENV` is set correctly:
- `development` - Local dev container
- `test` - CI/CD test server
- `production` - Production server
| Value | Purpose |
| ------------- | ---------------------- |
| `development` | Local dev container |
| `test` | CI/CD test server |
| `staging` | Pre-production testing |
| `production` | Production server |
### Database Connection Issues
Verify database credentials:
```bash
# Development
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "SELECT 1;"
# Production (via SSH)
ssh root@projectium.com "psql -U flyer_crawler_prod -d flyer-crawler-prod -c 'SELECT 1;'"
# If connection fails, check:
# 1. Container is running: podman ps
# 2. DB_HOST matches container network
# 3. DB_PASSWORD is correct
```
## Reference
---
- **Validation Schema**: [src/config/env.ts](../../src/config/env.ts)
- **Template**: [.env.example](../../.env.example)
- **Deployment Workflows**: [.gitea/workflows/](../../.gitea/workflows/)
- **PM2 Config**: [ecosystem.config.cjs](../../ecosystem.config.cjs)
## See Also
## Related Documentation
- [QUICKSTART.md](QUICKSTART.md) - Quick setup guide
- [INSTALL.md](INSTALL.md) - Detailed installation
- [DEV-CONTAINER.md](../development/DEV-CONTAINER.md) - Dev container setup
- [DEPLOYMENT.md](../operations/DEPLOYMENT.md) - Production deployment
- [AUTHENTICATION.md](../architecture/AUTHENTICATION.md) - OAuth setup
- [ADR-007](../adr/0007-configuration-and-secrets-management.md) - Configuration decisions
---
Last updated: January 2026

View File

@@ -1,203 +1,453 @@
# Installation Guide
This guide covers setting up a local development environment for Flyer Crawler.
Complete setup instructions for the Flyer Crawler local development environment.
---
## Quick Reference
| Setup Method | Best For | Time | Document Section |
| ----------------- | --------------------------- | ------ | --------------------------------------------------- |
| Quick Start | Already have Postgres/Redis | 5 min | [Quick Start](#quick-start) |
| Dev Container | Full production-like setup | 15 min | [Dev Container](#development-container-recommended) |
| Manual Containers | Learning the components | 20 min | [Podman Setup](#podman-setup-manual) |
---
## Prerequisites
- Node.js 20.x or later
- Access to a PostgreSQL database (local or remote)
- Redis instance (for session management)
- Google Gemini API key
- Google Maps API key (for geocoding)
### Required Software
| Software | Minimum Version | Purpose | Download |
| -------------- | --------------- | -------------------- | ----------------------------------------------- |
| Node.js | 20.x | Runtime | [nodejs.org](https://nodejs.org/) |
| Podman Desktop | 4.x | Container management | [podman-desktop.io](https://podman-desktop.io/) |
| Git | 2.x | Version control | [git-scm.com](https://git-scm.com/) |
### Windows-Specific Requirements
| Requirement | Purpose | Setup Command |
| ----------- | ------------------------------ | ---------------------------------- |
| WSL 2 | Linux compatibility for Podman | `wsl --install` (admin PowerShell) |
### Verify Installation
```bash
# Check all prerequisites
node --version # Expected: v20.x or higher
podman --version # Expected: podman version 4.x or higher
git --version # Expected: git version 2.x or higher
wsl --list -v # Expected: Shows WSL 2 distro
```
---
## Quick Start
If you already have PostgreSQL and Redis configured:
If you already have PostgreSQL and Redis configured externally:
```bash
# Install dependencies
# 1. Clone the repository
git clone https://gitea.projectium.com/flyer-crawler/flyer-crawler.git
cd flyer-crawler
# 2. Install dependencies
npm install
# Run in development mode
# 3. Create .env.local (see Environment section below)
# 4. Run in development mode
npm run dev
```
**Access Points**:
- Frontend: `http://localhost:5173`
- Backend API: `http://localhost:3001`
---
## Development Environment with Podman (Recommended for Windows)
## Development Container (Recommended)
This approach uses Podman with an Ubuntu container for a consistent development environment.
The dev container provides a complete, production-like environment.
### What's Included
| Service | Purpose | Port |
| ---------- | ------------------------ | ---------- |
| Node.js | API server, worker, Vite | 3001, 5173 |
| PostgreSQL | Database with PostGIS | 5432 |
| Redis | Cache and job queues | 6379 |
| NGINX | HTTPS reverse proxy | 443 |
| Bugsink | Error tracking | 8443 |
| Logstash | Log aggregation | - |
| PM2 | Process management | - |
### Setup Steps
#### Step 1: Initialize Podman
```bash
# Windows: Start Podman Desktop, or from terminal:
podman machine init
podman machine start
```
#### Step 2: Start Dev Container
```bash
# Start all services
podman-compose -f compose.dev.yml up -d
# View logs (optional)
podman-compose -f compose.dev.yml logs -f
```
**Expected Output**:
```text
[+] Running 3/3
- Container flyer-crawler-postgres Started
- Container flyer-crawler-redis Started
- Container flyer-crawler-dev Started
```
#### Step 3: Verify Services
```bash
# Check containers are running
podman ps
# Check PM2 processes
podman exec -it flyer-crawler-dev pm2 status
```
**Expected PM2 Status**:
```text
+---------------------------+--------+-------+
| name | status | cpu |
+---------------------------+--------+-------+
| flyer-crawler-api-dev | online | 0% |
| flyer-crawler-worker-dev | online | 0% |
| flyer-crawler-vite-dev | online | 0% |
+---------------------------+--------+-------+
```
#### Step 4: Access Application
| Service | URL | Notes |
| ----------- | ------------------------ | ---------------------------- |
| Frontend | `https://localhost` | NGINX proxies to Vite |
| Backend API | `http://localhost:3001` | Express server |
| Bugsink | `https://localhost:8443` | Login: admin@localhost/admin |
### SSL Certificate Setup (Optional but Recommended)
To eliminate browser security warnings:
**Windows**:
1. Double-click `certs/mkcert-ca.crt`
2. Click "Install Certificate..."
3. Select "Local Machine" > Next
4. Select "Place all certificates in the following store"
5. Browse > Select "Trusted Root Certification Authorities" > OK
6. Click Next > Finish
7. Restart browser
**Other Platforms**: See [`certs/README.md`](../../certs/README.md)
### Managing the Dev Container
| Action | Command |
| --------- | ------------------------------------------- |
| Start | `podman-compose -f compose.dev.yml up -d` |
| Stop | `podman-compose -f compose.dev.yml down` |
| View logs | `podman-compose -f compose.dev.yml logs -f` |
| Restart | `podman-compose -f compose.dev.yml restart` |
| Rebuild | `podman-compose -f compose.dev.yml build` |
---
## Podman Setup (Manual)
For understanding the individual components or custom configurations.
### Step 1: Install Prerequisites on Windows
1. **Install WSL 2**: Podman on Windows relies on the Windows Subsystem for Linux.
```powershell
# Run in administrator PowerShell
wsl --install
```
```powershell
wsl --install
```
Restart computer after WSL installation.
Run this in an administrator PowerShell.
### Step 2: Initialize Podman
2. **Install Podman Desktop**: Download and install [Podman Desktop for Windows](https://podman-desktop.io/).
1. Launch **Podman Desktop**
2. Follow the setup wizard to initialize Podman machine
3. Start the Podman machine
### Step 2: Set Up Podman
1. **Initialize Podman**: Launch Podman Desktop. It will automatically set up its WSL 2 machine.
2. **Start Podman**: Ensure the Podman machine is running from the Podman Desktop interface.
### Step 3: Set Up the Ubuntu Container
1. **Pull Ubuntu Image**:
```bash
podman pull ubuntu:latest
```
2. **Create a Podman Volume** (persists node_modules between container restarts):
```bash
podman volume create node_modules_cache
```
3. **Run the Ubuntu Container**:
Open a terminal in your project's root directory and run:
```bash
podman run -it -p 3001:3001 -p 5173:5173 --name flyer-dev \
-v "$(pwd):/app" \
-v "node_modules_cache:/app/node_modules" \
ubuntu:latest
```
| Flag | Purpose |
| ------------------------------------------- | ------------------------------------------------ |
| `-p 3001:3001` | Forwards the backend server port |
| `-p 5173:5173` | Forwards the Vite frontend server port |
| `--name flyer-dev` | Names the container for easy reference |
| `-v "...:/app"` | Mounts your project directory into the container |
| `-v "node_modules_cache:/app/node_modules"` | Mounts the named volume for node_modules |
### Step 4: Configure the Ubuntu Environment
You are now inside the Ubuntu container's shell.
1. **Update Package Lists**:
```bash
apt-get update
```
2. **Install Dependencies**:
```bash
apt-get install -y curl git
curl -sL https://deb.nodesource.com/setup_20.x | bash -
apt-get install -y nodejs
```
3. **Navigate to Project Directory**:
```bash
cd /app
```
4. **Install Project Dependencies**:
```bash
npm install
```
### Step 5: Run the Development Server
Or from terminal:
```bash
podman machine init
podman machine start
```
### Step 3: Create Podman Network
```bash
podman network create flyer-crawler-net
```
### Step 4: Create PostgreSQL Container
```bash
podman run -d \
--name flyer-crawler-postgres \
--network flyer-crawler-net \
-e POSTGRES_USER=postgres \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_DB=flyer_crawler_dev \
-p 5432:5432 \
-v flyer-crawler-pgdata:/var/lib/postgresql/data \
docker.io/postgis/postgis:15-3.3
```
### Step 5: Create Redis Container
```bash
podman run -d \
--name flyer-crawler-redis \
--network flyer-crawler-net \
-p 6379:6379 \
-v flyer-crawler-redis:/data \
docker.io/library/redis:alpine
```
### Step 6: Initialize Database
```bash
# Wait for PostgreSQL to be ready
podman exec flyer-crawler-postgres pg_isready -U postgres
# Install required extensions
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";
"
# Apply schema
podman exec -i flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev < sql/master_schema_rollup.sql
```
### Step 7: Create Node.js Container
```bash
# Create volume for node_modules
podman volume create node_modules_cache
# Run Ubuntu container with project mounted
podman run -it \
--name flyer-dev \
--network flyer-crawler-net \
-p 3001:3001 \
-p 5173:5173 \
-v "$(pwd):/app" \
-v "node_modules_cache:/app/node_modules" \
ubuntu:latest
```
### Step 8: Configure Container Environment
Inside the container:
```bash
# Update and install dependencies
apt-get update
apt-get install -y curl git
# Install Node.js 20
curl -sL https://deb.nodesource.com/setup_20.x | bash -
apt-get install -y nodejs
# Navigate to project and install
cd /app
npm install
# Start development server
npm run dev
```
### Step 6: Access the Application
### Container Management Commands
- **Frontend**: http://localhost:5173
- **Backend API**: http://localhost:3001
### Dev Container with HTTPS (Full Stack)
When using the full dev container stack with NGINX (via `compose.dev.yml`), access the application over HTTPS:
- **Frontend**: https://localhost or https://127.0.0.1
- **Backend API**: http://localhost:3001
**SSL Certificate Notes:**
- The dev container uses self-signed certificates generated by mkcert
- Both `localhost` and `127.0.0.1` are valid hostnames (certificate includes both as SANs)
- If images fail to load with SSL errors, see [FLYER-URL-CONFIGURATION.md](../FLYER-URL-CONFIGURATION.md#ssl-certificate-configuration-dev-container)
**Eliminate SSL Warnings (Recommended):**
To avoid browser security warnings for self-signed certificates, install the mkcert CA certificate on your system. The CA certificate is located at `certs/mkcert-ca.crt` in the project root.
See [`certs/README.md`](../../certs/README.md) for platform-specific installation instructions (Windows, macOS, Linux, Firefox).
After installation:
- Your browser will trust all mkcert certificates without warnings
- Both `https://localhost/` and `https://127.0.0.1/` will work without SSL errors
- Flyer images will load without `ERR_CERT_AUTHORITY_INVALID` errors
### Managing the Container
| Action | Command |
| --------------------- | -------------------------------- |
| Stop the container | Press `Ctrl+C`, then type `exit` |
| Restart the container | `podman start -a -i flyer-dev` |
| Remove the container | `podman rm flyer-dev` |
| Action | Command |
| -------------- | ------------------------------ |
| Stop container | Press `Ctrl+C`, then `exit` |
| Restart | `podman start -a -i flyer-dev` |
| Remove | `podman rm flyer-dev` |
| List running | `podman ps` |
| List all | `podman ps -a` |
---
## Environment Variables
## Environment Configuration
This project is configured to run in a CI/CD environment and does not use `.env` files. All configuration must be provided as environment variables.
### Create .env.local
For local development, you can export these in your shell or use your IDE's environment configuration:
Create `.env.local` in the project root with your configuration:
| Variable | Description |
| --------------------------- | ------------------------------------- |
| `DB_HOST` | PostgreSQL server hostname |
| `DB_USER` | PostgreSQL username |
| `DB_PASSWORD` | PostgreSQL password |
| `DB_DATABASE_PROD` | Production database name |
| `JWT_SECRET` | Secret string for signing auth tokens |
| `VITE_GOOGLE_GENAI_API_KEY` | Google Gemini API key |
| `GOOGLE_MAPS_API_KEY` | Google Maps Geocoding API key |
| `REDIS_PASSWORD_PROD` | Production Redis password |
| `REDIS_PASSWORD_TEST` | Test Redis password |
```bash
# Database (adjust host based on your setup)
DB_HOST=localhost # Use 'postgres' if inside dev container
DB_PORT=5432
DB_USER=postgres
DB_PASSWORD=postgres
DB_NAME=flyer_crawler_dev
# Redis (adjust host based on your setup)
REDIS_URL=redis://localhost:6379 # Use 'redis://redis:6379' inside container
# Application
NODE_ENV=development
PORT=3001
FRONTEND_URL=http://localhost:5173
# Authentication (generate secure values)
JWT_SECRET=your-secret-at-least-32-characters-long
# AI Services
GEMINI_API_KEY=your-google-gemini-api-key
GOOGLE_MAPS_API_KEY=your-google-maps-api-key # Optional
```
**Generate Secure Secrets**:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### Environment Differences
| Variable | Host Development | Inside Dev Container |
| ----------- | ------------------------ | -------------------- |
| `DB_HOST` | `localhost` | `postgres` |
| `REDIS_URL` | `redis://localhost:6379` | `redis://redis:6379` |
See [ENVIRONMENT.md](ENVIRONMENT.md) for complete variable reference.
---
## Seeding Development Data
To create initial test accounts (`admin@example.com` and `user@example.com`) and sample data:
Create test accounts and sample data:
```bash
npm run seed
```
The seed script performs the following actions:
### What the Seed Script Does
1. Rebuilds the database schema from `sql/master_schema_rollup.sql`
2. Creates test user accounts (admin and regular user)
3. Copies test flyer images from `src/tests/assets/` to `public/flyer-images/`
4. Creates a sample flyer with items linked to the test images
5. Seeds watched items and a shopping list for the test user
1. Rebuilds database schema from `sql/master_schema_rollup.sql`
2. Creates test user accounts:
- `admin@example.com` (admin user)
- `user@example.com` (regular user)
3. Copies test flyer images to `public/flyer-images/`
4. Creates sample flyer with items
5. Seeds watched items and shopping list
**Test Images**: The seed script copies `test-flyer-image.jpg` and `test-flyer-icon.png` to the `public/flyer-images/` directory, which is served by NGINX at `/flyer-images/`.
### Test Images
After running, you may need to restart your IDE's TypeScript server to pick up any generated types.
The seed script copies these files from `src/tests/assets/`:
- `test-flyer-image.jpg`
- `test-flyer-icon.png`
Images are served by NGINX at `/flyer-images/`.
---
## Verification Checklist
After installation, verify everything works:
- [ ] **Containers running**: `podman ps` shows postgres and redis
- [ ] **Database accessible**: `podman exec flyer-crawler-postgres psql -U postgres -c "SELECT 1;"`
- [ ] **Frontend loads**: Open `http://localhost:5173` (or `https://localhost` for dev container)
- [ ] **API responds**: `curl http://localhost:3001/health`
- [ ] **Tests pass**: `npm run test:unit` (or in container: `podman exec -it flyer-crawler-dev npm run test:unit`)
- [ ] **Type check passes**: `npm run type-check`
---
## Troubleshooting
### Podman Machine Won't Start
```bash
# Reset Podman machine
podman machine rm
podman machine init
podman machine start
```
### Port Already in Use
```bash
# Find process using port
netstat -ano | findstr :5432
# Option: Use different port
podman run -d --name flyer-crawler-postgres -p 5433:5432 ...
# Then set DB_PORT=5433 in .env.local
```
### Database Extensions Missing
```bash
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";
"
```
### Permission Denied on Windows Paths
Use `MSYS_NO_PATHCONV=1` prefix:
```bash
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev /path/to/script.sh
```
### Tests Fail with Timezone Errors
Tests must run in the dev container, not on Windows host:
```bash
# CORRECT
podman exec -it flyer-crawler-dev npm test
# INCORRECT (may fail with TZ errors)
npm test
```
---
## Next Steps
- [Database Setup](DATABASE.md) - Set up PostgreSQL with required extensions
- [Authentication Setup](AUTHENTICATION.md) - Configure OAuth providers
- [Deployment Guide](DEPLOYMENT.md) - Deploy to production
| Goal | Document |
| --------------------- | ------------------------------------------------------ |
| Quick setup guide | [QUICKSTART.md](QUICKSTART.md) |
| Environment variables | [ENVIRONMENT.md](ENVIRONMENT.md) |
| Database schema | [DATABASE.md](../architecture/DATABASE.md) |
| Authentication setup | [AUTHENTICATION.md](../architecture/AUTHENTICATION.md) |
| Dev container details | [DEV-CONTAINER.md](../development/DEV-CONTAINER.md) |
| Deployment | [DEPLOYMENT.md](../operations/DEPLOYMENT.md) |
---
Last updated: January 2026

View File

@@ -2,13 +2,38 @@
Get Flyer Crawler running in 5 minutes.
## Prerequisites
---
- **Windows 10/11** with WSL 2
- **Podman Desktop** installed
- **Node.js 20+** installed
## Prerequisites Checklist
## 1. Start Containers (1 minute)
Before starting, verify you have:
- [ ] **Windows 10/11** with WSL 2 enabled
- [ ] **Podman Desktop** installed ([download](https://podman-desktop.io/))
- [ ] **Node.js 20+** installed
- [ ] **Git** for cloning the repository
**Verify Prerequisites**:
```bash
# Check Podman
podman --version
# Expected: podman version 4.x or higher
# Check Node.js
node --version
# Expected: v20.x or higher
# Check WSL
wsl --list --verbose
# Expected: Shows WSL 2 distro
```
---
## Quick Setup (5 Steps)
### Step 1: Start Containers (1 minute)
```bash
# Start PostgreSQL and Redis
@@ -27,11 +52,18 @@ podman run -d --name flyer-crawler-redis \
docker.io/library/redis:alpine
```
## 2. Initialize Database (2 minutes)
**Expected Output**:
```text
# Container IDs displayed, no errors
```
### Step 2: Initialize Database (2 minutes)
```bash
# Wait for PostgreSQL to be ready
podman exec flyer-crawler-postgres pg_isready -U postgres
# Expected: localhost:5432 - accepting connections
# Install extensions
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev \
@@ -41,7 +73,17 @@ podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev \
podman exec -i flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev < sql/master_schema_rollup.sql
```
## 3. Configure Environment (1 minute)
**Expected Output**:
```text
CREATE EXTENSION
CREATE EXTENSION
CREATE EXTENSION
CREATE TABLE
... (many tables created)
```
### Step 3: Configure Environment (1 minute)
Create `.env.local` in the project root:
@@ -61,16 +103,22 @@ NODE_ENV=development
PORT=3001
FRONTEND_URL=http://localhost:5173
# Secrets (generate your own)
# Secrets (generate your own - see command below)
JWT_SECRET=your-dev-jwt-secret-at-least-32-chars-long
SESSION_SECRET=your-dev-session-secret-at-least-32-chars-long
# AI Services (get your own keys)
VITE_GOOGLE_GENAI_API_KEY=your-google-genai-api-key
GEMINI_API_KEY=your-google-gemini-api-key
GOOGLE_MAPS_API_KEY=your-google-maps-api-key
```
## 4. Install & Run (1 minute)
**Generate Secure Secrets**:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### Step 4: Install and Run (1 minute)
```bash
# Install dependencies (first time only)
@@ -80,35 +128,61 @@ npm install
npm run dev
```
## 5. Access Application
**Expected Output**:
- **Frontend**: http://localhost:5173
- **Backend API**: http://localhost:3001
- **Health Check**: http://localhost:3001/health
```text
> flyer-crawler@x.x.x dev
> concurrently ...
### Dev Container (HTTPS)
[API] Server listening on port 3001
[Vite] VITE ready at http://localhost:5173
```
When using the full dev container with NGINX, access via HTTPS:
### Step 5: Verify Installation
- **Frontend**: https://localhost or https://127.0.0.1
- **Backend API**: http://localhost:3001
- **Bugsink**: `https://localhost:8443` (error tracking)
| Check | URL/Command | Expected Result |
| ----------- | ------------------------------ | ----------------------------------- |
| Frontend | `http://localhost:5173` | Flyer Crawler app loads |
| Backend API | `http://localhost:3001/health` | `{ "status": "ok", ... }` |
| Database | `podman exec ... psql -c ...` | `SELECT version()` returns Postgres |
| Containers | `podman ps` | Shows postgres and redis running |
**Note:** The dev container accepts both `localhost` and `127.0.0.1` for HTTPS connections. The self-signed certificate is valid for both hostnames.
---
**SSL Certificate Warnings:** To eliminate browser security warnings for self-signed certificates, install the mkcert CA certificate. See [`certs/README.md`](../../certs/README.md) for platform-specific installation instructions. This is optional but recommended for a better development experience.
## Full Dev Container (Recommended)
### Dev Container Architecture
For a production-like environment with NGINX, Bugsink error tracking, and PM2 process management:
The dev container uses PM2 for process management, matching production (ADR-014):
### Starting the Dev Container
| Process | Description | Port |
| -------------------------- | ------------------------ | ---- |
| `flyer-crawler-api-dev` | API server (tsx watch) | 3001 |
| `flyer-crawler-worker-dev` | Background job worker | - |
| `flyer-crawler-vite-dev` | Vite frontend dev server | 5173 |
```bash
# Start all services
podman-compose -f compose.dev.yml up -d
**PM2 Commands** (run inside container):
# View logs
podman-compose -f compose.dev.yml logs -f
```
### Access Points
| Service | URL | Notes |
| ----------- | ------------------------ | ---------------------------- |
| Frontend | `https://localhost` | NGINX proxy to Vite |
| Backend API | `http://localhost:3001` | Express server |
| Bugsink | `https://localhost:8443` | Error tracking (admin/admin) |
| PostgreSQL | `localhost:5432` | Database |
| Redis | `localhost:6379` | Cache |
**SSL Certificate Setup (Recommended)**:
To eliminate browser security warnings, install the mkcert CA certificate:
```bash
# Windows: Double-click certs/mkcert-ca.crt and install to Trusted Root CAs
# See certs/README.md for detailed instructions per platform
```
### PM2 Commands
```bash
# View process status
@@ -124,63 +198,152 @@ podman exec -it flyer-crawler-dev pm2 restart all
podman exec -it flyer-crawler-dev pm2 restart flyer-crawler-api-dev
```
## Verify Installation
### Dev Container Processes
| Process | Description | Port |
| -------------------------- | ------------------------ | ---- |
| `flyer-crawler-api-dev` | API server (tsx watch) | 3001 |
| `flyer-crawler-worker-dev` | Background job worker | - |
| `flyer-crawler-vite-dev` | Vite frontend dev server | 5173 |
---
## Verification Commands
Run these to confirm everything is working:
```bash
# Check containers are running
podman ps
# Expected: flyer-crawler-postgres and flyer-crawler-redis both running
# Test database connection
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "SELECT version();"
# Expected: PostgreSQL 15.x with PostGIS
# Run tests (in dev container)
podman exec -it flyer-crawler-dev npm run test:unit
# Expected: All tests pass
# Run type check
podman exec -it flyer-crawler-dev npm run type-check
# Expected: No type errors
```
## Common Issues
---
## Common Issues and Solutions
### "Unable to connect to Podman socket"
**Cause**: Podman machine not running
**Solution**:
```bash
podman machine start
```
### "Connection refused" to PostgreSQL
Wait a few seconds for PostgreSQL to initialize:
**Cause**: PostgreSQL still initializing
**Solution**:
```bash
# Wait for PostgreSQL to be ready
podman exec flyer-crawler-postgres pg_isready -U postgres
# Retry after "accepting connections" message
```
### Port 5432 or 6379 already in use
Stop conflicting services or change port mappings:
**Cause**: Another service using the port
**Solution**:
```bash
# Use different host port
# Option 1: Stop conflicting service
# Option 2: Use different host port
podman run -d --name flyer-crawler-postgres -p 5433:5432 ...
# Then update DB_PORT=5433 in .env.local
```
Then update `DB_PORT=5433` in `.env.local`.
### "JWT_SECRET must be at least 32 characters"
**Cause**: Secret too short in `.env.local`
**Solution**: Generate a longer secret:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### Tests fail with "TZ environment variable" errors
**Cause**: Timezone setting interfering with Node.js async hooks
**Solution**: Tests must run in dev container (not Windows host):
```bash
# CORRECT - run in container
podman exec -it flyer-crawler-dev npm test
# INCORRECT - do not run on Windows host
npm test
```
---
## Next Steps
- **Read the docs**: [docs/README.md](../README.md)
- **Understand the architecture**: [docs/architecture/DATABASE.md](../architecture/DATABASE.md)
- **Learn testing**: [docs/development/TESTING.md](../development/TESTING.md)
- **Explore ADRs**: [docs/adr/index.md](../adr/index.md)
- **Contributing**: [CONTRIBUTING.md](../../CONTRIBUTING.md)
| Goal | Document |
| ----------------------- | ----------------------------------------------------- |
| Understand the codebase | [Architecture Overview](../architecture/OVERVIEW.md) |
| Configure environment | [Environment Variables](ENVIRONMENT.md) |
| Set up MCP tools | [MCP Configuration](../tools/MCP-CONFIGURATION.md) |
| Learn testing | [Testing Guide](../development/TESTING.md) |
| Understand DB schema | [Database Documentation](../architecture/DATABASE.md) |
| Read ADRs | [ADR Index](../adr/index.md) |
| Full installation guide | [Installation Guide](INSTALL.md) |
## Development Workflow
---
## Daily Development Workflow
```bash
# Daily workflow
# 1. Start containers
podman start flyer-crawler-postgres flyer-crawler-redis
# 2. Start dev server
npm run dev
# ... make changes ...
# 3. Make changes and test
npm test
# 4. Type check before commit
npm run type-check
# 5. Commit changes
git commit
```
For detailed setup instructions, see [INSTALL.md](INSTALL.md).
**For dev container users**:
```bash
# 1. Start dev container
podman-compose -f compose.dev.yml up -d
# 2. View logs
podman exec -it flyer-crawler-dev pm2 logs
# 3. Run tests
podman exec -it flyer-crawler-dev npm test
# 4. Stop when done
podman-compose -f compose.dev.yml down
```
---
Last updated: January 2026

View File

@@ -2,8 +2,68 @@
This guide covers the manual installation of Flyer Crawler and its dependencies on a bare-metal Ubuntu server (e.g., a colocation server). This is the definitive reference for setting up a production environment without containers.
**Last verified**: 2026-01-28
**Target Environment**: Ubuntu 22.04 LTS (or newer)
**Related documentation**:
- [ADR-014: Containerization and Deployment Strategy](../adr/0014-containerization-and-deployment-strategy.md)
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [Deployment Guide](DEPLOYMENT.md)
- [Monitoring Guide](MONITORING.md)
---
## Quick Reference
### Installation Time Estimates
| Component | Estimated Time | Notes |
| ----------- | --------------- | ----------------------------- |
| PostgreSQL | 10-15 minutes | Including PostGIS extensions |
| Redis | 5 minutes | Quick install |
| Node.js | 5 minutes | Via NodeSource repository |
| Application | 15-20 minutes | Clone, install, build |
| PM2 | 5 minutes | Global install + config |
| NGINX | 10-15 minutes | Including SSL via Certbot |
| Bugsink | 20-30 minutes | Python venv, systemd services |
| Logstash | 15-20 minutes | Including pipeline config |
| **Total** | **~90 minutes** | For complete fresh install |
### Post-Installation Verification
After completing setup, verify all services:
```bash
# Check all services are running
systemctl status postgresql nginx redis-server gunicorn-bugsink snappea logstash
# Verify application health
curl -s https://flyer-crawler.projectium.com/api/health/ready | jq .
# Check PM2 processes
pm2 list
# Verify Bugsink is accessible
curl -s https://bugsink.projectium.com/accounts/login/ | head -5
```
---
## Server Access Model
All commands in this guide are intended for the **system administrator** to execute directly on the server. Claude Code and AI tools have **READ-ONLY** access to production servers and cannot execute these commands directly.
When Claude assists with server setup or troubleshooting:
1. Claude provides commands for the administrator to execute
2. Administrator runs commands and reports output
3. Claude analyzes results and provides next steps (1-3 commands at a time)
4. Administrator executes and reports results
5. Claude provides verification commands to confirm success
---
## Table of Contents

View File

@@ -2,14 +2,81 @@
This guide covers deploying Flyer Crawler to a production server.
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-014: Containerization and Deployment Strategy](../adr/0014-containerization-and-deployment-strategy.md)
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [Bare-Metal Setup Guide](BARE-METAL-SETUP.md)
- [Monitoring Guide](MONITORING.md)
---
## Quick Reference
### Command Reference Table
| Task | Command |
| -------------------- | ----------------------------------------------------------------------- |
| Deploy to production | Gitea Actions workflow (manual trigger) |
| Deploy to test | Automatic on push to `main` |
| Check PM2 status | `pm2 list` |
| View logs | `pm2 logs flyer-crawler-api --lines 100` |
| Restart all | `pm2 restart all` |
| Check NGINX | `sudo nginx -t && sudo systemctl status nginx` |
| Check health | `curl -s https://flyer-crawler.projectium.com/api/health/ready \| jq .` |
### Deployment URLs
| Environment | URL | API Port |
| ------------- | ------------------------------------------- | -------- |
| Production | `https://flyer-crawler.projectium.com` | 3001 |
| Test | `https://flyer-crawler-test.projectium.com` | 3002 |
| Dev Container | `https://localhost` | 3001 |
---
## Server Access Model
**Important**: Claude Code (and AI tools) have **READ-ONLY** access to production/test servers. The deployment workflow is:
| Actor | Capability |
| ------------ | --------------------------------------------------------------- |
| Gitea CI/CD | Automated deployments via workflows (has write access) |
| User (human) | Manual server access for troubleshooting and emergency fixes |
| Claude Code | Provides commands for user to execute; cannot run them directly |
When troubleshooting deployment issues:
1. Claude provides **diagnostic commands** for the user to run
2. User executes commands and reports output
3. Claude analyzes results and provides **fix commands** (1-3 at a time)
4. User executes fixes and reports results
5. Claude provides **verification commands** to confirm success
---
## Prerequisites
- Ubuntu server (22.04 LTS recommended)
- PostgreSQL 14+ with PostGIS extension
- Redis
- Node.js 20.x
- NGINX (reverse proxy)
- PM2 (process manager)
| Component | Version | Purpose |
| ---------- | --------- | ------------------------------- |
| Ubuntu | 22.04 LTS | Operating system |
| PostgreSQL | 14+ | Database with PostGIS extension |
| Redis | 6+ | Caching and job queues |
| Node.js | 20.x LTS | Application runtime |
| NGINX | 1.18+ | Reverse proxy and static files |
| PM2 | Latest | Process manager |
**Verify prerequisites**:
```bash
node --version # Should be v20.x.x
psql --version # Should be 14+
redis-cli ping # Should return PONG
nginx -v # Should be 1.18+
pm2 --version # Any recent version
```
## Dev Container Parity (ADR-014)
@@ -190,7 +257,7 @@ types {
**Option 2**: Edit `/etc/nginx/mime.types` globally:
```
```text
# Change this line:
application/javascript js;
@@ -321,9 +388,78 @@ The Sentry SDK v10+ enforces HTTPS-only DSNs by default. Since Bugsink runs loca
---
## Deployment Troubleshooting
### Decision Tree: Deployment Issues
```text
Deployment failed?
|
+-- Build step failed?
| |
| +-- TypeScript errors --> Fix type issues, run `npm run type-check`
| +-- Missing dependencies --> Run `npm ci`
| +-- Out of memory --> Increase Node heap size
|
+-- Tests failed?
| |
| +-- Database connection --> Check DB_HOST, credentials
| +-- Redis connection --> Check REDIS_URL
| +-- Test isolation --> Check for race conditions
|
+-- SSH/Deploy failed?
|
+-- Permission denied --> Check SSH keys in Gitea secrets
+-- Host unreachable --> Check firewall, VPN
+-- PM2 error --> Check PM2 logs on server
```
### Common Deployment Issues
| Symptom | Diagnosis | Solution |
| ------------------------------------ | ----------------------- | ------------------------------------------------ |
| "Connection refused" on health check | API not started | Check `pm2 logs flyer-crawler-api` |
| 502 Bad Gateway | NGINX cannot reach API | Verify API port (3001), restart PM2 |
| CSS/JS not loading | Build artifacts missing | Re-run `npm run build`, check NGINX static paths |
| Database migrations failed | Schema mismatch | Run migrations manually, check DB connectivity |
| "ENOSPC" error | Disk full | Clear old logs: `pm2 flush`, clean npm cache |
| SSL certificate error | Cert expired/missing | Run `certbot renew`, check NGINX config |
### Post-Deployment Verification Checklist
After every deployment, verify:
- [ ] Health check passes: `curl -s https://flyer-crawler.projectium.com/api/health/ready`
- [ ] PM2 processes running: `pm2 list` shows `online` status
- [ ] No recent errors: Check Bugsink for new issues
- [ ] Frontend loads: Browser shows login page
- [ ] API responds: `curl https://flyer-crawler.projectium.com/api/health/ping`
### Rollback Procedure
If deployment causes issues:
```bash
# 1. Check current release
cd /var/www/flyer-crawler.projectium.com
git log --oneline -5
# 2. Revert to previous commit
git checkout HEAD~1
# 3. Rebuild and restart
npm ci && npm run build
pm2 restart all
# 4. Verify health
curl -s http://localhost:3001/api/health/ready | jq .
```
---
## Related Documentation
- [Database Setup](DATABASE.md) - PostgreSQL and PostGIS configuration
- [Authentication Setup](AUTHENTICATION.md) - OAuth provider configuration
- [Installation Guide](INSTALL.md) - Local development setup
- [Bare-Metal Server Setup](docs/BARE-METAL-SETUP.md) - Manual server installation guide
- [Database Setup](../architecture/DATABASE.md) - PostgreSQL and PostGIS configuration
- [Monitoring Guide](MONITORING.md) - Health checks and error tracking
- [Logstash Quick Reference](LOGSTASH-QUICK-REF.md) - Log aggregation
- [Bare-Metal Server Setup](BARE-METAL-SETUP.md) - Manual server installation guide

View File

@@ -2,10 +2,47 @@
Aggregates logs from PostgreSQL, PM2, Redis, NGINX; forwards errors to Bugsink.
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [Monitoring Guide](MONITORING.md)
- [Logstash Troubleshooting Runbook](LOGSTASH-TROUBLESHOOTING.md)
---
## Quick Reference
### Bugsink Project Routing
| Source Type | Environment | Bugsink Project | Project ID |
| -------------- | ----------- | -------------------- | ---------- |
| PM2 API/Worker | Dev | Backend API (Dev) | 1 |
| PostgreSQL | Dev | Backend API (Dev) | 1 |
| Frontend JS | Dev | Frontend (Dev) | 2 |
| Redis/NGINX | Dev | Infrastructure (Dev) | 4 |
| PM2 API/Worker | Production | Backend API (Prod) | 1 |
| PostgreSQL | Production | Backend API (Prod) | 1 |
| PM2 API/Worker | Test | Backend API (Test) | 3 |
### Key DSN Keys (Dev Container)
| Project | DSN Key |
| -------------------- | ---------------------------------- |
| Backend API (Dev) | `cea01396c56246adb5878fa5ee6b1d22` |
| Frontend (Dev) | `d92663cb73cf4145b677b84029e4b762` |
| Infrastructure (Dev) | `14e8791da3d347fa98073261b596cab9` |
---
## Configuration
**Primary config**: `/etc/logstash/conf.d/bugsink.conf`
**Dev container config**: `docker/logstash/bugsink.conf`
### Related Files
| Path | Purpose |
@@ -89,6 +126,34 @@ MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev ls -la /var/log/redis/
## Troubleshooting
### Decision Tree: Logs Not Appearing in Bugsink
```text
Errors not showing in Bugsink?
|
+-- Logstash running?
| |
| +-- No --> systemctl start logstash
| +-- Yes --> Check pipeline stats
| |
| +-- Events in = 0?
| | |
| | +-- Log files exist? --> ls /var/log/pm2/*.log
| | +-- Permissions OK? --> groups logstash
| |
| +-- Events filtered = high?
| | |
| | +-- Grok failures --> Check log format matches pattern
| |
| +-- Events out but no Bugsink?
| |
| +-- 403 error --> Wrong DSN key
| +-- 500 error --> Invalid event format (check sentry_level)
| +-- Connection refused --> Bugsink not running
```
### Common Issues Table
| Issue | Check | Solution |
| --------------------- | ---------------- | ---------------------------------------------------------------------------------------------- |
| No Bugsink errors | Logstash running | `systemctl status logstash` |
@@ -103,6 +168,25 @@ MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev ls -la /var/log/redis/
| High disk usage | Log rotation | Verify `/etc/logrotate.d/logstash` configured |
| varchar(7) error | Level validation | Add Ruby filter to validate/normalize `sentry_level` before output |
### Expected Output Examples
**Successful Logstash pipeline stats**:
```json
{
"in": 1523,
"out": 1520,
"filtered": 1520,
"queue_push_duration_in_millis": 45
}
```
**Healthy Bugsink HTTP response**:
```json
{ "id": "a1b2c3d4e5f6..." }
```
## Related Documentation
- **Dev Container Guide**: [DEV-CONTAINER.md](../development/DEV-CONTAINER.md) - PM2 and log aggregation in dev

View File

@@ -2,6 +2,16 @@
This runbook provides step-by-step diagnostics and solutions for common Logstash issues in the PostgreSQL observability pipeline (ADR-050).
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [Logstash Quick Reference](LOGSTASH-QUICK-REF.md)
- [Monitoring Guide](MONITORING.md)
---
## Quick Reference
| Symptom | Most Likely Cause | Quick Check |

View File

@@ -2,6 +2,72 @@
This guide covers all aspects of monitoring the Flyer Crawler application across development, test, and production environments.
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [ADR-020: Health Checks](../adr/0020-health-checks-and-liveness-readiness-probes.md)
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [Logstash Quick Reference](LOGSTASH-QUICK-REF.md)
- [Deployment Guide](DEPLOYMENT.md)
---
## Quick Reference
### Monitoring URLs
| Service | Production URL | Dev Container URL |
| ------------ | ------------------------------------------------------- | ---------------------------------------- |
| Health Check | `https://flyer-crawler.projectium.com/api/health/ready` | `http://localhost:3001/api/health/ready` |
| Bugsink | `https://bugsink.projectium.com` | `https://localhost:8443` |
| Bull Board | `https://flyer-crawler.projectium.com/api/admin/jobs` | `http://localhost:3001/api/admin/jobs` |
### Quick Diagnostic Commands
```bash
# Check all services at once (production)
curl -s https://flyer-crawler.projectium.com/api/health/ready | jq '.data.services'
# Dev container health check
podman exec flyer-crawler-dev curl -s http://localhost:3001/api/health/ready | jq .
# PM2 process overview
pm2 list
# Recent errors in Bugsink (via MCP)
# mcp__bugsink__list_issues --project_id 1 --status unresolved
```
### Monitoring Decision Tree
```text
Application seems slow or unresponsive?
|
+-- Check health endpoint first
| |
| +-- Returns unhealthy?
| | |
| | +-- Database unhealthy --> Check DB pool, connections
| | +-- Redis unhealthy --> Check Redis memory, connection
| | +-- Storage unhealthy --> Check disk space, permissions
| |
| +-- Returns healthy but slow?
| |
| +-- Check PM2 memory/CPU usage
| +-- Check database slow query log
| +-- Check Redis queue depth
|
+-- Health endpoint not responding?
|
+-- Check PM2 status --> Process crashed?
+-- Check NGINX --> 502 errors?
+-- Check network --> Firewall/DNS issues?
```
---
## Table of Contents
1. [Health Checks](#health-checks)
@@ -276,10 +342,10 @@ Dev Container (in `.mcp.json`):
Bugsink 2.0.11 does not have a UI for API tokens. Create via Django management command.
**Production**:
**Production** (user executes on server):
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
cd /opt/bugsink && bugsink-manage create_auth_token
```
**Dev Container**:
@@ -294,7 +360,7 @@ The command outputs a 40-character hex token.
**Error Anatomy**:
```
```text
TypeError: Cannot read properties of undefined (reading 'map')
├── Exception Type: TypeError
├── Message: Cannot read properties of undefined (reading 'map')
@@ -357,7 +423,7 @@ Logstash aggregates logs from multiple sources and forwards errors to Bugsink (A
### Architecture
```
```text
Log Sources Logstash Outputs
┌──────────────┐ ┌─────────────┐ ┌─────────────┐
│ PostgreSQL │──────────────│ │───────────│ Bugsink │
@@ -388,11 +454,9 @@ Log Sources Logstash Outputs
### Pipeline Status
**Check Logstash Service**:
**Check Logstash Service** (user executes on server):
```bash
ssh root@projectium.com
# Service status
systemctl status logstash
@@ -485,9 +549,11 @@ PM2 manages the Node.js application processes in production.
### Basic Commands
> **Note**: These commands are for the user to execute on the server. Claude Code provides commands but cannot run them directly.
```bash
ssh root@projectium.com
su - gitea-runner # PM2 runs under this user
# Switch to gitea-runner user (PM2 runs under this user)
su - gitea-runner
# List all processes
pm2 list
@@ -520,7 +586,7 @@ pm2 stop flyer-crawler-api
**Healthy Process**:
```
```text
┌─────────────────────┬────┬─────────┬─────────┬───────┬────────┬─────────┬──────────┐
│ Name │ id │ mode │ status │ cpu │ mem │ uptime │ restarts │
├─────────────────────┼────┼─────────┼─────────┼───────┼────────┼─────────┼──────────┤
@@ -833,29 +899,28 @@ Configure alerts in your monitoring tool (UptimeRobot, Datadog, etc.):
2. Review during business hours
3. Create Gitea issue for tracking
### Quick Diagnostic Commands
### On-Call Diagnostic Commands
> **Note**: User executes these commands on the server. Claude Code provides commands but cannot run them directly.
```bash
# Full system health check
ssh root@projectium.com << 'EOF'
echo "=== Service Status ==="
# Service status checks
systemctl status pm2-gitea-runner --no-pager
systemctl status logstash --no-pager
systemctl status redis --no-pager
systemctl status postgresql --no-pager
echo "=== PM2 Processes ==="
# PM2 processes (run as gitea-runner)
su - gitea-runner -c "pm2 list"
echo "=== Disk Space ==="
# Disk space
df -h / /var
echo "=== Memory ==="
# Memory
free -h
echo "=== Recent Errors ==="
# Recent errors
journalctl -p err -n 20 --no-pager
EOF
```
### Runbook Quick Reference

View File

@@ -0,0 +1,849 @@
# ADR-024 Implementation Plan: Feature Flagging Strategy
**Date**: 2026-01-28
**Type**: Technical Implementation Plan
**Related**: [ADR-024: Feature Flagging Strategy](../adr/0024-feature-flagging-strategy.md), [ADR-007: Configuration and Secrets Management](../adr/0007-configuration-and-secrets-management.md)
**Status**: Ready for Implementation
---
## Project Overview
Implement a simple, configuration-based feature flag system that integrates with the existing Zod-validated configuration in `src/config/env.ts`. The system will support both backend and frontend feature flags through environment variables, with type-safe access patterns and helper utilities.
### Key Success Criteria
1. Feature flags accessible via type-safe API on both backend and frontend
2. Zero runtime overhead when flag is disabled (compile-time elimination where possible)
3. Consistent naming convention (environment variables and code access)
4. Graceful degradation (missing flag defaults to disabled)
5. Easy migration path to external service (Flagsmith/LaunchDarkly) in the future
6. Full test coverage with mocking utilities
### Estimated Total Effort
| Phase | Estimate |
| --------------------------------- | -------------- |
| Phase 1: Backend Infrastructure | 3-5 hours |
| Phase 2: Frontend Infrastructure | 2-3 hours |
| Phase 3: Documentation & Examples | 1-2 hours |
| **Total** | **6-10 hours** |
---
## Current State Analysis
### Backend Configuration (`src/config/env.ts`)
- Zod-based schema validation at startup
- Organized into logical groups (database, redis, auth, smtp, ai, etc.)
- Helper exports for service availability (`isSmtpConfigured`, `isAiConfigured`, etc.)
- Environment helpers (`isProduction`, `isTest`, `isDevelopment`)
- Fail-fast on invalid configuration
### Frontend Configuration (`src/config.ts`)
- Uses `import.meta.env` (Vite environment variables)
- Organized into sections (app, google, sentry)
- Boolean parsing for string env vars
- Type declarations in `src/vite-env.d.ts`
### Existing Patterns to Follow
```typescript
// Backend - service availability check pattern
export const isSmtpConfigured =
!!config.smtp.host && !!config.smtp.user && !!config.smtp.pass;
// Frontend - boolean parsing pattern
enabled: import.meta.env.VITE_SENTRY_ENABLED !== 'false',
```
---
## Task Breakdown
### Phase 1: Backend Feature Flag Infrastructure
#### [1.1] Define Feature Flag Schema in env.ts
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: None
**Parallelizable**: Yes
**Description**: Add a new `featureFlags` section to the Zod schema in `src/config/env.ts`.
**Acceptance Criteria**:
- [ ] New `featureFlagsSchema` Zod object defined
- [ ] Schema supports boolean flags with defaults to `false` (opt-in model)
- [ ] Schema added to main `envSchema` object
- [ ] Type exported as part of `EnvConfig`
**Implementation Details**:
```typescript
// src/config/env.ts
/**
* Feature flags configuration schema (ADR-024).
* All flags default to false (disabled) for safety.
* Set to 'true' in environment to enable.
*/
const featureFlagsSchema = z.object({
// Example flags - replace with actual feature flags as needed
newDashboard: booleanString(false), // FEATURE_NEW_DASHBOARD
betaRecipes: booleanString(false), // FEATURE_BETA_RECIPES
experimentalAi: booleanString(false), // FEATURE_EXPERIMENTAL_AI
debugMode: booleanString(false), // FEATURE_DEBUG_MODE
});
// In loadEnvVars():
featureFlags: {
newDashboard: process.env.FEATURE_NEW_DASHBOARD,
betaRecipes: process.env.FEATURE_BETA_RECIPES,
experimentalAi: process.env.FEATURE_EXPERIMENTAL_AI,
debugMode: process.env.FEATURE_DEBUG_MODE,
},
```
**Risks/Notes**:
- Naming convention: `FEATURE_*` prefix for all feature flag env vars
- Default to `false` ensures features are opt-in, preventing accidental exposure
---
#### [1.2] Create Feature Flag Service Module
**Complexity**: Medium
**Estimate**: 1-2 hours
**Dependencies**: [1.1]
**Parallelizable**: No (depends on 1.1)
**Description**: Create a dedicated service module for feature flag access with helper functions.
**File**: `src/services/featureFlags.server.ts`
**Acceptance Criteria**:
- [ ] `isFeatureEnabled(flagName)` function for checking flags
- [ ] `getAllFeatureFlags()` function for debugging/admin endpoints
- [ ] Type-safe flag name parameter (union type or enum)
- [ ] Exported helper booleans for common flags (similar to `isSmtpConfigured`)
- [ ] Logging when feature flag is checked in development mode
**Implementation Details**:
```typescript
// src/services/featureFlags.server.ts
import { config, isDevelopment } from '../config/env';
import { logger } from './logger.server';
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Check if a feature flag is enabled.
* @param flagName - The name of the feature flag to check
* @returns boolean indicating if the feature is enabled
*/
export function isFeatureEnabled(flagName: FeatureFlagName): boolean {
const enabled = config.featureFlags[flagName];
if (isDevelopment) {
logger.debug({ flag: flagName, enabled }, 'Feature flag checked');
}
return enabled;
}
/**
* Get all feature flags and their current states.
* Useful for debugging and admin endpoints.
*/
export function getAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return { ...config.featureFlags };
}
// Convenience exports for common flag checks
export const isNewDashboardEnabled = config.featureFlags.newDashboard;
export const isBetaRecipesEnabled = config.featureFlags.betaRecipes;
export const isExperimentalAiEnabled = config.featureFlags.experimentalAi;
export const isDebugModeEnabled = config.featureFlags.debugMode;
```
**Risks/Notes**:
- Keep logging minimal to avoid performance impact
- Convenience exports are evaluated once at startup (not dynamic)
---
#### [1.3] Add Admin Endpoint for Feature Flag Status
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: [1.2]
**Parallelizable**: No (depends on 1.2)
**Description**: Add an admin/health endpoint to view current feature flag states.
**File**: `src/routes/admin.routes.ts` (or `stats.routes.ts` if admin routes don't exist)
**Acceptance Criteria**:
- [ ] `GET /api/v1/admin/feature-flags` endpoint (admin-only)
- [ ] Returns JSON object with all flags and their states
- [ ] Requires admin authentication
- [ ] Endpoint documented in Swagger
**Implementation Details**:
```typescript
// In appropriate routes file
router.get('/feature-flags', requireAdmin, async (req, res) => {
const flags = getAllFeatureFlags();
sendSuccess(res, { flags });
});
```
**Risks/Notes**:
- Ensure endpoint is protected (admin-only)
- Consider caching response if called frequently
---
#### [1.4] Backend Unit Tests
**Complexity**: Medium
**Estimate**: 1-2 hours
**Dependencies**: [1.1], [1.2]
**Parallelizable**: Yes (can start after 1.1, in parallel with 1.3)
**Description**: Write unit tests for feature flag configuration and service.
**Files**:
- `src/config/env.test.ts` (add feature flag tests)
- `src/services/featureFlags.server.test.ts` (new file)
**Acceptance Criteria**:
- [ ] Test default values (all false)
- [ ] Test parsing 'true'/'false' strings
- [ ] Test `isFeatureEnabled()` function
- [ ] Test `getAllFeatureFlags()` function
- [ ] Test type safety (TypeScript compile-time checks)
**Implementation Details**:
```typescript
// src/config/env.test.ts - add to existing file
describe('featureFlags configuration', () => {
it('should default all feature flags to false', async () => {
setValidEnv();
const { config } = await import('./env');
expect(config.featureFlags.newDashboard).toBe(false);
expect(config.featureFlags.betaRecipes).toBe(false);
});
it('should parse FEATURE_NEW_DASHBOARD as true when set', async () => {
setValidEnv({ FEATURE_NEW_DASHBOARD: 'true' });
const { config } = await import('./env');
expect(config.featureFlags.newDashboard).toBe(true);
});
});
// src/services/featureFlags.server.test.ts - new file
describe('featureFlags service', () => {
describe('isFeatureEnabled', () => {
it('should return false for disabled flags', () => {
expect(isFeatureEnabled('newDashboard')).toBe(false);
});
// ... more tests
});
});
```
---
### Phase 2: Frontend Feature Flag Infrastructure
#### [2.1] Add Frontend Feature Flag Config
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: None (can run in parallel with Phase 1)
**Parallelizable**: Yes
**Description**: Add feature flags to the frontend config module.
**Files**:
- `src/config.ts` - Add featureFlags section
- `src/vite-env.d.ts` - Add type declarations
**Acceptance Criteria**:
- [ ] Feature flags section added to `src/config.ts`
- [ ] TypeScript declarations updated in `vite-env.d.ts`
- [ ] Boolean parsing consistent with existing pattern
- [ ] Default to false when env var not set
**Implementation Details**:
```typescript
// src/config.ts
const config = {
// ... existing sections ...
/**
* Feature flags for conditional feature rendering (ADR-024).
* All flags default to false (disabled) when not explicitly set.
*/
featureFlags: {
newDashboard: import.meta.env.VITE_FEATURE_NEW_DASHBOARD === 'true',
betaRecipes: import.meta.env.VITE_FEATURE_BETA_RECIPES === 'true',
experimentalAi: import.meta.env.VITE_FEATURE_EXPERIMENTAL_AI === 'true',
debugMode: import.meta.env.VITE_FEATURE_DEBUG_MODE === 'true',
},
};
// src/vite-env.d.ts
interface ImportMetaEnv {
// ... existing declarations ...
readonly VITE_FEATURE_NEW_DASHBOARD?: string;
readonly VITE_FEATURE_BETA_RECIPES?: string;
readonly VITE_FEATURE_EXPERIMENTAL_AI?: string;
readonly VITE_FEATURE_DEBUG_MODE?: string;
}
```
---
#### [2.2] Create useFeatureFlag React Hook
**Complexity**: Medium
**Estimate**: 1-1.5 hours
**Dependencies**: [2.1]
**Parallelizable**: No (depends on 2.1)
**Description**: Create a React hook for checking feature flags in components.
**File**: `src/hooks/useFeatureFlag.ts`
**Acceptance Criteria**:
- [ ] `useFeatureFlag(flagName)` hook returns boolean
- [ ] Type-safe flag name parameter
- [ ] Memoized to prevent unnecessary re-renders
- [ ] Optional `FeatureFlag` component for conditional rendering
**Implementation Details**:
```typescript
// src/hooks/useFeatureFlag.ts
import { useMemo } from 'react';
import config from '../config';
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Hook to check if a feature flag is enabled.
*
* @param flagName - The name of the feature flag to check
* @returns boolean indicating if the feature is enabled
*
* @example
* const isNewDashboard = useFeatureFlag('newDashboard');
* if (isNewDashboard) {
* return <NewDashboard />;
* }
*/
export function useFeatureFlag(flagName: FeatureFlagName): boolean {
return useMemo(() => config.featureFlags[flagName], [flagName]);
}
/**
* Get all feature flags (useful for debugging).
*/
export function useAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return useMemo(() => ({ ...config.featureFlags }), []);
}
```
---
#### [2.3] Create FeatureFlag Component
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: [2.2]
**Parallelizable**: No (depends on 2.2)
**Description**: Create a declarative component for feature flag conditional rendering.
**File**: `src/components/FeatureFlag.tsx`
**Acceptance Criteria**:
- [ ] `<FeatureFlag name="flagName">` component
- [ ] Children rendered only when flag is enabled
- [ ] Optional `fallback` prop for disabled state
- [ ] TypeScript-enforced flag names
**Implementation Details**:
```typescript
// src/components/FeatureFlag.tsx
import { ReactNode } from 'react';
import { useFeatureFlag, FeatureFlagName } from '../hooks/useFeatureFlag';
interface FeatureFlagProps {
/** The name of the feature flag to check */
name: FeatureFlagName;
/** Content to render when feature is enabled */
children: ReactNode;
/** Optional content to render when feature is disabled */
fallback?: ReactNode;
}
/**
* Conditionally renders children based on feature flag state.
*
* @example
* <FeatureFlag name="newDashboard" fallback={<OldDashboard />}>
* <NewDashboard />
* </FeatureFlag>
*/
export function FeatureFlag({ name, children, fallback = null }: FeatureFlagProps) {
const isEnabled = useFeatureFlag(name);
return <>{isEnabled ? children : fallback}</>;
}
```
---
#### [2.4] Frontend Unit Tests
**Complexity**: Medium
**Estimate**: 1-1.5 hours
**Dependencies**: [2.1], [2.2], [2.3]
**Parallelizable**: No (depends on previous frontend tasks)
**Description**: Write unit tests for frontend feature flag utilities.
**Files**:
- `src/config.test.ts` (add feature flag tests)
- `src/hooks/useFeatureFlag.test.ts` (new file)
- `src/components/FeatureFlag.test.tsx` (new file)
**Acceptance Criteria**:
- [ ] Test config structure includes featureFlags
- [ ] Test default values (all false)
- [ ] Test hook returns correct values
- [ ] Test component renders/hides children correctly
- [ ] Test fallback rendering
**Implementation Details**:
```typescript
// src/hooks/useFeatureFlag.test.ts
import { renderHook } from '@testing-library/react';
import { useFeatureFlag, useAllFeatureFlags } from './useFeatureFlag';
describe('useFeatureFlag', () => {
it('should return false for disabled flags', () => {
const { result } = renderHook(() => useFeatureFlag('newDashboard'));
expect(result.current).toBe(false);
});
});
// src/components/FeatureFlag.test.tsx
import { render, screen } from '@testing-library/react';
import { FeatureFlag } from './FeatureFlag';
describe('FeatureFlag', () => {
it('should not render children when flag is disabled', () => {
render(
<FeatureFlag name="newDashboard">
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
});
it('should render fallback when flag is disabled', () => {
render(
<FeatureFlag name="newDashboard" fallback={<div>Old Feature</div>}>
<div>New Feature</div>
</FeatureFlag>
);
expect(screen.getByText('Old Feature')).toBeInTheDocument();
});
});
```
---
### Phase 3: Documentation & Integration
#### [3.1] Update ADR-024 with Implementation Status
**Complexity**: Low
**Estimate**: 30 minutes
**Dependencies**: [1.1], [1.2], [2.1], [2.2]
**Parallelizable**: Yes (can be done after core implementation)
**Description**: Update ADR-024 to mark it as implemented and add implementation details.
**File**: `docs/adr/0024-feature-flagging-strategy.md`
**Acceptance Criteria**:
- [ ] Status changed from "Proposed" to "Accepted"
- [ ] Implementation status section added
- [ ] Key files documented
- [ ] Usage examples included
---
#### [3.2] Update Environment Documentation
**Complexity**: Low
**Estimate**: 30 minutes
**Dependencies**: [1.1], [2.1]
**Parallelizable**: Yes
**Description**: Add feature flag environment variables to documentation.
**Files**:
- `docs/getting-started/ENVIRONMENT.md`
- `.env.example`
**Acceptance Criteria**:
- [ ] Feature flag variables documented in ENVIRONMENT.md
- [ ] New section "Feature Flags" added
- [ ] `.env.example` updated with commented feature flag examples
**Implementation Details**:
```bash
# .env.example addition
# ===================
# Feature Flags (ADR-024)
# ===================
# All feature flags default to disabled (false) when not set.
# Set to 'true' to enable a feature.
#
# FEATURE_NEW_DASHBOARD=false
# FEATURE_BETA_RECIPES=false
# FEATURE_EXPERIMENTAL_AI=false
# FEATURE_DEBUG_MODE=false
#
# Frontend equivalents (prefix with VITE_):
# VITE_FEATURE_NEW_DASHBOARD=false
# VITE_FEATURE_BETA_RECIPES=false
```
---
#### [3.3] Create CODE-PATTERNS Entry
**Complexity**: Low
**Estimate**: 30 minutes
**Dependencies**: All implementation tasks
**Parallelizable**: Yes
**Description**: Add feature flag usage patterns to CODE-PATTERNS.md.
**File**: `docs/development/CODE-PATTERNS.md`
**Acceptance Criteria**:
- [ ] Feature flag section added with examples
- [ ] Backend usage pattern documented
- [ ] Frontend usage pattern documented
- [ ] Testing pattern documented
---
#### [3.4] Update CLAUDE.md Quick Reference
**Complexity**: Low
**Estimate**: 15 minutes
**Dependencies**: All implementation tasks
**Parallelizable**: Yes
**Description**: Add feature flags to the CLAUDE.md quick reference tables.
**File**: `CLAUDE.md`
**Acceptance Criteria**:
- [ ] Feature flags added to "Key Patterns" table
- [ ] Reference to featureFlags service added
---
## Implementation Sequence
### Phase 1 (Backend) - Can Start Immediately
```text
[1.1] Schema ──────────┬──> [1.2] Service ──> [1.3] Admin Endpoint
└──> [1.4] Backend Tests (can start after 1.1)
```
### Phase 2 (Frontend) - Can Start Immediately (Parallel with Phase 1)
```text
[2.1] Config ──> [2.2] Hook ──> [2.3] Component ──> [2.4] Frontend Tests
```
### Phase 3 (Documentation) - After Implementation
```text
All Phase 1 & 2 Tasks ──> [3.1] ADR Update
├──> [3.2] Env Docs
├──> [3.3] Code Patterns
└──> [3.4] CLAUDE.md
```
---
## Critical Path
The minimum path to a working feature flag system:
1. **[1.1] Schema** (30 min) - Required for backend
2. **[1.2] Service** (1.5 hr) - Required for backend access
3. **[2.1] Frontend Config** (30 min) - Required for frontend
4. **[2.2] Hook** (1 hr) - Required for React integration
**Critical path duration**: ~3.5 hours
Non-critical but recommended:
- Admin endpoint (debugging)
- FeatureFlag component (developer convenience)
- Tests (quality assurance)
- Documentation (maintainability)
---
## Scope Recommendations
### MVP (Minimum Viable Implementation)
Include in initial implementation:
- [1.1] Backend schema with 2-3 example flags
- [1.2] Feature flag service
- [2.1] Frontend config
- [2.2] useFeatureFlag hook
- [1.4] Core backend tests
- [2.4] Core frontend tests
### Enhancements (Future Iterations)
Defer to follow-up work:
- Admin endpoint for flag visibility
- FeatureFlag component (nice-to-have)
- Dynamic flag updates without restart (requires external service)
- User-specific flags (A/B testing)
- Flag analytics/usage tracking
- Gradual rollout percentages
### Explicitly Out of Scope
- Integration with Flagsmith/LaunchDarkly (future ADR)
- Database-stored flags (requires schema changes)
- Real-time flag updates (WebSocket/SSE)
- Flag inheritance/hierarchy
- Flag audit logging
---
## Testing Strategy
### Backend Tests
| Test Type | Coverage Target | Location |
| ----------------- | ---------------------------------------- | ------------------------------------------ |
| Schema validation | Parse true/false, defaults | `src/config/env.test.ts` |
| Service functions | `isFeatureEnabled`, `getAllFeatureFlags` | `src/services/featureFlags.server.test.ts` |
| Integration | Admin endpoint (if added) | `src/routes/admin.routes.test.ts` |
### Frontend Tests
| Test Type | Coverage Target | Location |
| ------------------- | --------------------------- | ------------------------------------- |
| Config structure | featureFlags section exists | `src/config.test.ts` |
| Hook behavior | Returns correct values | `src/hooks/useFeatureFlag.test.ts` |
| Component rendering | Conditional children | `src/components/FeatureFlag.test.tsx` |
### Mocking Pattern for Tests
```typescript
// Backend - reset modules to test different flag states
beforeEach(() => {
vi.resetModules();
process.env.FEATURE_NEW_DASHBOARD = 'true';
});
// Frontend - mock config module
vi.mock('../config', () => ({
default: {
featureFlags: {
newDashboard: true,
betaRecipes: false,
},
},
}));
```
---
## Risk Assessment
| Risk | Impact | Likelihood | Mitigation |
| ------------------------------------------- | ------ | ---------- | ------------------------------------------------------------- |
| Flag state inconsistency (backend/frontend) | Medium | Low | Use same env var naming, document sync requirements |
| Performance impact from flag checks | Low | Low | Flags cached at startup, no runtime DB calls |
| Stale flags after deployment | Medium | Medium | Document restart requirement, consider future dynamic loading |
| Feature creep (too many flags) | Medium | Medium | Require ADR for new flags, sunset policy |
| Missing flag causes crash | High | Low | Default to false, graceful degradation |
---
## Files to Create
| File | Purpose |
| ------------------------------------------ | ---------------------------- |
| `src/services/featureFlags.server.ts` | Backend feature flag service |
| `src/services/featureFlags.server.test.ts` | Backend tests |
| `src/hooks/useFeatureFlag.ts` | React hook for flag access |
| `src/hooks/useFeatureFlag.test.ts` | Hook tests |
| `src/components/FeatureFlag.tsx` | Declarative flag component |
| `src/components/FeatureFlag.test.tsx` | Component tests |
## Files to Modify
| File | Changes |
| -------------------------------------------- | ---------------------------------- |
| `src/config/env.ts` | Add featureFlagsSchema and loading |
| `src/config/env.test.ts` | Add feature flag tests |
| `src/config.ts` | Add featureFlags section |
| `src/config.test.ts` | Add feature flag tests |
| `src/vite-env.d.ts`                          | Add `VITE_FEATURE_*` declarations  |
| `.env.example` | Add feature flag examples |
| `docs/adr/0024-feature-flagging-strategy.md` | Update status and details |
| `docs/getting-started/ENVIRONMENT.md` | Document feature flag vars |
| `docs/development/CODE-PATTERNS.md` | Add usage patterns |
| `CLAUDE.md` | Add to quick reference |
---
## Verification Commands
After implementation, run these commands in the dev container:
```bash
# Type checking
podman exec -it flyer-crawler-dev npm run type-check
# Backend unit tests
podman exec -it flyer-crawler-dev npm run test:unit -- --grep "featureFlag"
# Frontend tests (includes hook and component tests)
podman exec -it flyer-crawler-dev npm run test:unit -- --grep "FeatureFlag"
# Full test suite
podman exec -it flyer-crawler-dev npm test
```
---
## Example Usage (Post-Implementation)
### Backend Route Handler
```typescript
// src/routes/flyers.routes.ts
import { isFeatureEnabled } from '../services/featureFlags.server';
router.get('/dashboard', async (req, res) => {
if (isFeatureEnabled('newDashboard')) {
// New dashboard logic
return sendSuccess(res, { version: 'v2', data: await getNewDashboardData() });
}
// Legacy dashboard
return sendSuccess(res, { version: 'v1', data: await getLegacyDashboardData() });
});
```
### React Component
```tsx
// src/pages/Dashboard.tsx
import { FeatureFlag } from '../components/FeatureFlag';
import { useFeatureFlag } from '../hooks/useFeatureFlag';
// Option 1: Declarative component
function Dashboard() {
return (
<FeatureFlag name="newDashboard" fallback={<LegacyDashboard />}>
<NewDashboard />
</FeatureFlag>
);
}
// Option 2: Hook for logic
function DashboardWithLogic() {
const isNewDashboard = useFeatureFlag('newDashboard');
useEffect(() => {
if (isNewDashboard) {
analytics.track('new_dashboard_viewed');
}
}, [isNewDashboard]);
return isNewDashboard ? <NewDashboard /> : <LegacyDashboard />;
}
```
---
## Implementation Notes
### Naming Convention
| Context | Pattern | Example |
| ---------------- | ------------------------- | ---------------------------------- |
| Backend env var | `FEATURE_SNAKE_CASE` | `FEATURE_NEW_DASHBOARD` |
| Frontend env var | `VITE_FEATURE_SNAKE_CASE` | `VITE_FEATURE_NEW_DASHBOARD` |
| Config property | `camelCase` | `config.featureFlags.newDashboard` |
| Type/Hook param | `camelCase` | `isFeatureEnabled('newDashboard')` |
### Flag Lifecycle
1. **Adding a flag**: Add to both schemas, set default to `false`, document
2. **Enabling a flag**: Set env var to `'true'`, restart application
3. **Removing a flag**: Remove conditional code first, then remove flag from schemas
4. **Sunset policy**: Flags should be removed within 3 months of full rollout
---
Last updated: 2026-01-28

View File

@@ -2,6 +2,17 @@
The **ai-usage** subagent specializes in LLM APIs (Gemini, Claude), prompt engineering, and AI-powered features in the Flyer Crawler project.
## Quick Reference
| Aspect | Details |
| ------------------ | ----------------------------------------------------------------------------------- |
| **Primary Use** | Gemini API integration, prompt engineering, AI extraction |
| **Key Files** | `src/services/aiService.server.ts`, `src/services/flyerProcessingService.server.ts` |
| **Key ADRs** | ADR-041 (AI Integration), ADR-046 (Image Processing) |
| **API Key Env** | `VITE_GOOGLE_GENAI_API_KEY` (prod), `VITE_GOOGLE_GENAI_API_KEY_TEST` (test) |
| **Error Handling** | Rate limits (429), JSON parse errors, timeout handling |
| **Delegate To** | `coder` (implementation), `testwriter` (tests), `integrations-specialist` |
## When to Use
Use the **ai-usage** subagent when you need to:
@@ -295,6 +306,9 @@ const fixtureResponse = await fs.readFile('fixtures/gemini-response.json');
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing AI features
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Testing AI features
- [INTEGRATIONS-GUIDE.md](./INTEGRATIONS-GUIDE.md) - External API patterns
- [../adr/0041-ai-gemini-integration-architecture.md](../adr/0041-ai-gemini-integration-architecture.md) - AI integration ADR
- [../adr/0046-image-processing-pipeline.md](../adr/0046-image-processing-pipeline.md) - Image processing
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing AI features
- [../getting-started/ENVIRONMENT.md](../getting-started/ENVIRONMENT.md) - Environment configuration

View File

@@ -2,6 +2,17 @@
The **coder** subagent is your primary tool for writing and modifying production Node.js/TypeScript code in the Flyer Crawler project. This guide explains how to work effectively with the coder subagent.
## Quick Reference
| Aspect | Details |
| ---------------- | ------------------------------------------------------------------------ |
| **Primary Use** | Write/modify production TypeScript code |
| **Key Files** | `src/routes/*.routes.ts`, `src/services/**/*.ts`, `src/components/*.tsx` |
| **Key ADRs** | ADR-034 (Repository), ADR-035 (Services), ADR-028 (API Response) |
| **Test Command** | `podman exec -it flyer-crawler-dev npm run test:unit` |
| **Type Check** | `podman exec -it flyer-crawler-dev npm run type-check` |
| **Delegate To** | `db-dev` (database), `frontend-specialist` (UI), `testwriter` (tests) |
## When to Use the Coder Subagent
Use the coder subagent when you need to:
@@ -307,6 +318,8 @@ error classes for all database operations"
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Testing strategies
- [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) - Database development workflows
- [../adr/0034-repository-pattern-standards.md](../adr/0034-repository-pattern-standards.md) - Repository patterns
- [../adr/0035-service-layer-architecture.md](../adr/0035-service-layer-architecture.md) - Service layer architecture
- [../adr/0028-api-response-standardization.md](../adr/0028-api-response-standardization.md) - API response patterns
- [../development/CODE-PATTERNS.md](../development/CODE-PATTERNS.md) - Code patterns reference

View File

@@ -5,6 +5,17 @@ This guide covers two database-focused subagents:
- **db-dev**: Database development - schemas, queries, migrations, optimization
- **db-admin**: Database administration - PostgreSQL/Redis admin, security, backups
## Quick Reference
| Aspect | db-dev | db-admin |
| ---------------- | -------------------------------------------- | ------------------------------------------ |
| **Primary Use** | Schemas, queries, migrations | Performance tuning, backups, security |
| **Key Files** | `src/services/db/*.db.ts`, `sql/migrations/` | `postgresql.conf`, `pg_hba.conf` |
| **Key ADRs** | ADR-034 (Repository), ADR-002 (Transactions) | ADR-019 (Backups), ADR-050 (Observability) |
| **Test Command** | `podman exec -it flyer-crawler-dev npm test` | N/A |
| **MCP Tool** | `mcp__devdb__query` | SSH to production |
| **Delegate To** | `coder` (service layer), `db-admin` (perf) | `devops` (infrastructure) |
## Understanding the Difference
| Aspect | db-dev | db-admin |
@@ -412,8 +423,9 @@ This is useful for:
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - Working with the coder subagent
- [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) - DevOps and deployment workflows
- [../adr/0034-repository-pattern-standards.md](../adr/0034-repository-pattern-standards.md) - Repository patterns
- [../adr/0002-standardized-transaction-management.md](../adr/0002-standardized-transaction-management.md) - Transaction management
- [../adr/0019-data-backup-and-recovery-strategy.md](../adr/0019-data-backup-and-recovery-strategy.md) - Backup strategy
- [../adr/0050-postgresql-function-observability.md](../adr/0050-postgresql-function-observability.md) - Database observability
- [../BARE-METAL-SETUP.md](../BARE-METAL-SETUP.md) - Production database setup
- [../operations/BARE-METAL-SETUP.md](../operations/BARE-METAL-SETUP.md) - Production database setup

View File

@@ -6,6 +6,90 @@ This guide covers DevOps-related subagents for deployment, infrastructure, and o
- **infra-architect**: Resource optimization, capacity planning
- **bg-worker**: Background jobs, PM2 workers, BullMQ queues
## Quick Reference
| Aspect | devops | infra-architect | bg-worker |
| ---------------- | ------------------------------------------ | --------------------------- | ------------------------------- |
| **Primary Use** | Containers, CI/CD, deployments | Resource optimization | BullMQ queues, PM2 workers |
| **Key Files** | `compose.dev.yml`, `.gitea/workflows/` | `ecosystem.config.cjs` | `src/services/queues.server.ts` |
| **Key ADRs** | ADR-014 (Containers), ADR-017 (CI/CD) | N/A | ADR-006 (Background Jobs) |
| **Commands** | `podman-compose`, `pm2` | `pm2 monit`, system metrics | Redis CLI, `pm2 logs` |
| **MCP Tools** | `mcp__podman__*` | N/A | N/A |
| **Access Model** | Read-only on production (provide commands) | Same | Same |
---
## CRITICAL: Server Access Model
**Claude Code has READ-ONLY access to production/test servers.**
The `claude-win10` user cannot execute write operations (PM2 restart, systemctl, file modifications) directly on servers. The devops subagent must **provide commands for the user to execute**, not attempt to run them via SSH.
### Command Delegation Workflow
When troubleshooting or making changes to production/test servers:
| Phase | Actor | Action |
| -------- | ------ | ----------------------------------------------------------- |
| Diagnose | Claude | Provide read-only diagnostic commands |
| Report | User | Execute commands, share output with Claude |
| Analyze | Claude | Interpret results, identify root cause |
| Fix | Claude | Provide 1-3 fix commands (never more, errors may cascade) |
| Execute | User | Run fix commands, report results |
| Verify | Claude | Provide verification commands to confirm success |
| Document | Claude | Update relevant documentation with findings and resolutions |
### Example: PM2 Process Issue
Step 1 - Diagnostic Commands (Claude provides, user runs):
```bash
# Check PM2 process status
pm2 list
# View recent error logs
pm2 logs flyer-crawler-api --err --lines 50
# Check system resources
free -h
df -h /var/www
```
Step 2 - User reports output to Claude
Step 3 - Fix Commands (Claude provides 1-3 at a time):
```bash
# Restart the failing process
pm2 restart flyer-crawler-api
```
Step 4 - User executes and reports result
Step 5 - Verification Commands:
```bash
# Confirm process is running
pm2 list
# Test API health
curl -s https://flyer-crawler.projectium.com/api/health/ready | jq .
```
### What NOT to Do
```bash
# WRONG - Claude cannot execute this directly
ssh root@projectium.com "pm2 restart all"
# WRONG - Providing too many commands at once
pm2 stop all && rm -rf node_modules && npm install && pm2 start all
# WRONG - Assuming commands succeeded without user confirmation
```
---
## The devops Subagent
### When to Use
@@ -372,6 +456,8 @@ redis-cli -a $REDIS_PASSWORD
## Service Management Commands
> **Note**: These commands are for the **user to execute on the server**. Claude Code provides these commands but cannot run them directly due to read-only server access. See [Server Access Model](#critical-server-access-model) above.
### PM2 Commands
```bash
@@ -468,8 +554,13 @@ podman exec -it flyer-crawler-dev npm test
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [../BARE-METAL-SETUP.md](../BARE-METAL-SETUP.md) - Production setup guide
- [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) - Database administration
- [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) - Production debugging
- [../operations/BARE-METAL-SETUP.md](../operations/BARE-METAL-SETUP.md) - Production setup guide
- [../operations/DEPLOYMENT.md](../operations/DEPLOYMENT.md) - Deployment guide
- [../operations/MONITORING.md](../operations/MONITORING.md) - Monitoring guide
- [../development/DEV-CONTAINER.md](../development/DEV-CONTAINER.md) - Dev container guide
- [../adr/0014-containerization-and-deployment-strategy.md](../adr/0014-containerization-and-deployment-strategy.md) - Containerization ADR
- [../adr/0006-background-job-processing-and-task-queues.md](../adr/0006-background-job-processing-and-task-queues.md) - Background jobs ADR
- [../adr/0017-ci-cd-and-branching-strategy.md](../adr/0017-ci-cd-and-branching-strategy.md) - CI/CD strategy
- [../adr/0053-worker-health-checks.md](../adr/0053-worker-health-checks.md) - Worker health checks
- [../adr/0053-worker-health-checks-and-monitoring.md](../adr/0053-worker-health-checks-and-monitoring.md) - Worker health checks

View File

@@ -7,6 +7,15 @@ This guide covers documentation-focused subagents:
- **planner**: Feature breakdown, roadmaps, scope management
- **product-owner**: Requirements, user stories, backlog prioritization
## Quick Reference
| Aspect | documenter | describer-for-ai | planner | product-owner |
| --------------- | -------------------- | ------------------------ | --------------------- | ---------------------- |
| **Primary Use** | User docs, API specs | ADRs, technical specs | Feature breakdown | User stories, backlog |
| **Key Files** | `docs/`, API docs | `docs/adr/`, `CLAUDE.md` | `docs/plans/` | Issue tracker |
| **Output** | Markdown guides | ADRs, context docs | Task lists, roadmaps | User stories, criteria |
| **Delegate To** | `coder` (implement) | `documenter` (user docs) | `coder` (build tasks) | `planner` (breakdown) |
## The documenter Subagent
### When to Use
@@ -437,6 +446,8 @@ Include dates on documentation that may become stale:
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing documented features
- [../adr/index.md](../adr/index.md) - ADR index
- [../TESTING.md](../TESTING.md) - Testing guide
- [../development/TESTING.md](../development/TESTING.md) - Testing guide
- [../development/CODE-PATTERNS.md](../development/CODE-PATTERNS.md) - Code patterns reference
- [../../CLAUDE.md](../../CLAUDE.md) - AI instructions

View File

@@ -5,6 +5,17 @@ This guide covers frontend-focused subagents:
- **frontend-specialist**: UI components, Neo-Brutalism, Core Web Vitals, accessibility
- **uiux-designer**: UI/UX decisions, component design, user experience
## Quick Reference
| Aspect | frontend-specialist | uiux-designer |
| ----------------- | ---------------------------------------------- | -------------------------------------- |
| **Primary Use** | React components, performance, accessibility | Design decisions, user flows |
| **Key Files** | `src/components/`, `src/features/` | Design specs, mockups |
| **Key ADRs** | ADR-012 (Design System), ADR-044 (Feature Org) | ADR-012 (Design System) |
| **Design System** | Neo-Brutalism (bold borders, high contrast) | Same |
| **State Mgmt** | TanStack Query (server), Zustand (client) | N/A |
| **Delegate To** | `coder` (backend), `tester` (test coverage) | `frontend-specialist` (implementation) |
## The frontend-specialist Subagent
### When to Use
@@ -406,7 +417,8 @@ const handleSelect = useCallback((id: string) => {
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing features
- [../DESIGN_TOKENS.md](../DESIGN_TOKENS.md) - Design token reference
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Component testing patterns
- [../development/DESIGN_TOKENS.md](../development/DESIGN_TOKENS.md) - Design token reference
- [../adr/0012-frontend-component-library-and-design-system.md](../adr/0012-frontend-component-library-and-design-system.md) - Design system ADR
- [../adr/0005-frontend-state-management-and-server-cache-strategy.md](../adr/0005-frontend-state-management-and-server-cache-strategy.md) - State management ADR
- [../adr/0044-frontend-feature-organization.md](../adr/0044-frontend-feature-organization.md) - Feature organization

View File

@@ -0,0 +1,396 @@
# Integrations Subagent Guide
The **integrations-specialist** subagent handles third-party services, webhooks, and external API integrations in the Flyer Crawler project.
## Quick Reference
| Aspect | Details |
| --------------- | --------------------------------------------------------------------------- |
| **Primary Use** | External APIs, webhooks, OAuth, third-party services |
| **Key Files** | `src/services/external/`, `src/routes/webhooks.routes.ts` |
| **Key ADRs** | ADR-041 (AI Integration), ADR-016 (API Security), ADR-048 (Auth) |
| **MCP Tools** | `mcp__gitea-projectium__*`, `mcp__bugsink__*` |
| **Security** | API key storage, webhook signatures, OAuth state param |
| **Delegate To** | `coder` (implementation), `security-engineer` (review), `ai-usage` (Gemini) |
## When to Use
Use the **integrations-specialist** subagent when you need to:
- Integrate with external APIs (OAuth, REST, GraphQL)
- Implement webhook handlers
- Configure third-party services
- Debug external service connectivity
- Handle API authentication flows
- Manage external service rate limits
## What integrations-specialist Knows
The integrations-specialist subagent understands:
- OAuth 2.0 flows (authorization code, client credentials)
- REST API integration patterns
- Webhook security (signature verification)
- External service error handling
- Rate limiting and retry strategies
- API key management
## Current Integrations
| Service | Purpose | Integration Type | Key Files |
| ------------- | ---------------------- | ---------------- | ---------------------------------- |
| Google Gemini | AI flyer extraction | REST API | `src/services/aiService.server.ts` |
| Bugsink | Error tracking | REST API | MCP: `mcp__bugsink__*` |
| Gitea | Repository and CI/CD | REST API | MCP: `mcp__gitea-projectium__*` |
| Redis | Caching and job queues | Native client | `src/services/redis.server.ts` |
| PostgreSQL | Primary database | Native client | `src/services/db/pool.db.ts` |
## Example Requests
### Adding External API Integration
```
"Use integrations-specialist to integrate with the Store API
to automatically fetch store location data. Include proper
error handling, rate limiting, and caching."
```
### OAuth Implementation
```
"Use integrations-specialist to implement Google OAuth for
user authentication. Include token refresh handling and
session management."
```
### Webhook Handler
```
"Use integrations-specialist to create a webhook handler for
receiving store inventory updates. Include signature verification
and idempotency handling."
```
### Debugging External Service Issues
```
"Use integrations-specialist to debug why the Gemini API calls
are intermittently failing with timeout errors. Check connection
pooling, retry logic, and error handling."
```
## Integration Patterns
### REST API Client Pattern
```typescript
// src/services/external/storeApi.server.ts
import { env } from '@/config/env';
import { log } from '@/services/logger.server';
/** Connection settings for the external Store API client. */
interface StoreApiConfig {
  baseUrl: string; // e.g. https://api.example.com (no trailing slash expected by callers)
  apiKey: string; // bearer token sent on every request
  timeout: number; // per-request timeout in milliseconds (passed to AbortSignal.timeout)
}
/**
 * Thin HTTP client for the external Store API.
 *
 * NOTE(review): `StoreLocation`, `ExternalApiError` and `log` are assumed to
 * be declared elsewhere in the project — confirm against the real modules.
 */
class StoreApiClient {
  private config: StoreApiConfig;

  constructor(config: StoreApiConfig) {
    this.config = config;
  }

  /**
   * Fetch all locations for `storeId`.
   * Throws ExternalApiError on a non-2xx response; logs and rethrows any failure.
   */
  async getStoreLocations(storeId: string): Promise<StoreLocation[]> {
    const { baseUrl, apiKey, timeout } = this.config;
    const endpoint = `${baseUrl}/stores/${storeId}/locations`;
    try {
      const response = await fetch(endpoint, {
        headers: {
          Authorization: `Bearer ${apiKey}`,
          'Content-Type': 'application/json',
        },
        // Abort the request when the configured timeout elapses.
        signal: AbortSignal.timeout(timeout),
      });
      if (!response.ok) {
        throw new ExternalApiError(`Store API error: ${response.status}`, response.status);
      }
      return response.json();
    } catch (error) {
      log.error({ error, storeId }, 'Failed to fetch store locations');
      throw error;
    }
  }
}
// Module-level singleton; STORE_API_BASE_URL / STORE_API_KEY are presumably
// validated by src/config/env.ts — confirm against that module.
export const storeApiClient = new StoreApiClient({
  baseUrl: env.STORE_API_BASE_URL,
  apiKey: env.STORE_API_KEY,
  timeout: 10000, // 10 s request timeout
});
```
### Webhook Handler Pattern
```typescript
// src/routes/webhooks.routes.ts
import { Router } from 'express';
import crypto from 'crypto';
import { env } from '@/config/env';
const router = Router();
/**
 * Verifies an HMAC-SHA256 webhook signature in constant time.
 *
 * @param payload - Raw request body that was signed by the sender
 * @param signature - Header value, expected in the form `sha256=<hex digest>`
 * @param secret - Shared webhook secret
 * @returns true only when the signature matches exactly
 */
function verifyWebhookSignature(payload: string, signature: string, secret: string): boolean {
  const expected = `sha256=${crypto.createHmac('sha256', secret).update(payload).digest('hex')}`;
  const signatureBuf = Buffer.from(signature);
  const expectedBuf = Buffer.from(expected);
  // timingSafeEqual THROWS when buffer lengths differ, so a malformed or
  // truncated header would raise instead of returning false (surfacing as a
  // 500 to the sender and leaking a length oracle). Check length first; the
  // length of the expected digest is not secret, so this stays constant-time
  // with respect to the signature's content.
  if (signatureBuf.length !== expectedBuf.length) {
    return false;
  }
  return crypto.timingSafeEqual(signatureBuf, expectedBuf);
}
router.post('/store-updates', async (req, res, next) => {
  try {
    // Reject any delivery whose HMAC signature does not match the raw body.
    const providedSignature = req.headers['x-webhook-signature'] as string;
    const rawBody = JSON.stringify(req.body);
    if (!verifyWebhookSignature(rawBody, providedSignature, env.WEBHOOK_SECRET)) {
      return res.status(401).json({ error: 'Invalid signature' });
    }
    // Senders may retry deliveries; skip events that were already handled.
    const eventId = req.headers['x-event-id'] as string;
    if (await checkIdempotencyKey(eventId)) {
      return res.status(200).json({ status: 'already_processed' });
    }
    await processStoreUpdate(req.body);
    await markEventProcessed(eventId);
    res.status(200).json({ status: 'processed' });
  } catch (error) {
    next(error);
  }
});
```
### OAuth Flow Pattern
```typescript
// src/services/oauth/googleOAuth.server.ts
import { OAuth2Client } from 'google-auth-library';
import { env } from '@/config/env';
// Shared OAuth2 client configured from environment; used for every Google flow below.
const oauth2Client = new OAuth2Client(
  env.GOOGLE_CLIENT_ID,
  env.GOOGLE_CLIENT_SECRET,
  env.GOOGLE_REDIRECT_URI,
);

/**
 * Builds the Google consent-screen URL the user is redirected to.
 * `access_type: 'offline'` plus `prompt: 'consent'` request that a refresh
 * token be issued on every authorization, not only the first.
 */
export function getAuthorizationUrl(): string {
  return oauth2Client.generateAuthUrl({
    access_type: 'offline',
    scope: ['email', 'profile'],
    prompt: 'consent',
  });
}

/** Exchanges the authorization code from the redirect for access/refresh tokens. */
export async function exchangeCodeForTokens(code: string) {
  const { tokens } = await oauth2Client.getToken(code);
  return tokens;
}

/**
 * Obtains a fresh access token using a stored refresh token.
 * NOTE(review): mutates the shared client's credentials; concurrent refreshes
 * for different users would race — confirm call sites serialize per user.
 */
export async function refreshAccessToken(refreshToken: string) {
  oauth2Client.setCredentials({ refresh_token: refreshToken });
  const { credentials } = await oauth2Client.refreshAccessToken();
  return credentials;
}
```
## Error Handling for External Services
### Custom Error Classes
```typescript
// src/services/external/errors.ts
/**
 * Error raised when an external HTTP API responds with a failure status.
 * `retryable` tells retry helpers whether another attempt may succeed.
 */
export class ExternalApiError extends Error {
  public statusCode: number;
  public retryable: boolean;

  constructor(message: string, statusCode: number, retryable: boolean = false) {
    super(message);
    this.name = 'ExternalApiError';
    this.statusCode = statusCode;
    this.retryable = retryable;
  }
}

/**
 * Raised when the remote service reports HTTP 429; always retryable.
 * `retryAfter` carries the server-suggested wait before the next attempt.
 */
export class RateLimitError extends ExternalApiError {
  public retryAfter: number;

  constructor(message: string, retryAfter: number) {
    super(message, 429, true);
    this.name = 'RateLimitError';
    this.retryAfter = retryAfter;
  }
}
```
### Retry with Exponential Backoff
```typescript
/**
 * Runs `fn`, retrying failures with exponential backoff.
 *
 * Non-retryable ExternalApiError instances are rethrown immediately; once
 * `maxRetries` extra attempts are exhausted, the last error is rethrown.
 * Delay before attempt k (0-based) is `baseDelay * 2^k` milliseconds.
 */
async function fetchWithRetry<T>(
  fn: () => Promise<T>,
  options: { maxRetries: number; baseDelay: number },
): Promise<T> {
  const { maxRetries, baseDelay } = options;
  let attempt = 0;
  let lastError: Error | undefined;
  while (attempt <= maxRetries) {
    try {
      return await fn();
    } catch (error) {
      lastError = error as Error;
      // Bail out right away when the error is explicitly non-retryable.
      if (error instanceof ExternalApiError && !error.retryable) {
        throw error;
      }
      // Back off before the next attempt; no sleep after the final failure.
      if (attempt < maxRetries) {
        const delay = baseDelay * 2 ** attempt;
        await new Promise((resolve) => setTimeout(resolve, delay));
      }
      attempt += 1;
    }
  }
  throw lastError as Error;
}
```
## Rate Limiting Strategies
### Token Bucket Pattern
```typescript
/**
 * Token-bucket rate limiter. `acquire()` resolves once a token is available,
 * sleeping when the bucket is empty. Refill is computed lazily from the wall
 * clock rather than by a background timer.
 */
class RateLimiter {
  private readonly maxTokens: number;
  private readonly refillRate: number; // tokens added per second
  private available: number;
  private lastRefillAt: number;

  constructor(maxTokens: number, refillRate: number) {
    this.maxTokens = maxTokens;
    this.refillRate = refillRate;
    this.available = maxTokens;
    this.lastRefillAt = Date.now();
  }

  /** Consume one token, waiting until the bucket refills if necessary. */
  async acquire(): Promise<void> {
    this.refill();
    if (this.available < 1) {
      // Milliseconds until the bucket holds at least one whole token again.
      const waitMs = ((1 - this.available) / this.refillRate) * 1000;
      await new Promise((resolve) => setTimeout(resolve, waitMs));
      this.refill();
    }
    this.available -= 1;
  }

  /** Credit tokens earned since the previous refill, capped at maxTokens. */
  private refill(): void {
    const now = Date.now();
    const elapsedSeconds = (now - this.lastRefillAt) / 1000;
    this.available = Math.min(this.maxTokens, this.available + elapsedSeconds * this.refillRate);
    this.lastRefillAt = now;
  }
}
```
## Testing Integrations
### Mocking External Services
```typescript
// src/tests/mocks/storeApi.mock.ts
import { vi } from 'vitest';
export const mockStoreApiClient = {
getStoreLocations: vi.fn(),
};
vi.mock('@/services/external/storeApi.server', () => ({
storeApiClient: mockStoreApiClient,
}));
```
### Integration Test with Real Service
```typescript
// src/tests/integration/storeApi.integration.test.ts
describe('Store API Integration', () => {
it.skipIf(!env.STORE_API_KEY)('fetches real store locations', async () => {
const locations = await storeApiClient.getStoreLocations('test-store');
expect(locations).toBeInstanceOf(Array);
});
});
```
## MCP Tools for Integrations
### Gitea Integration
```
// List repositories
mcp__gitea-projectium__list_my_repos()
// Create issue
mcp__gitea-projectium__create_issue({
owner: "projectium",
repo: "flyer-crawler",
title: "Issue title",
body: "Issue description"
})
```
### Bugsink Integration
```
// List projects
mcp__bugsink__list_projects()
// Get issue details
mcp__bugsink__get_issue({ issue_id: "..." })
// Get stacktrace
mcp__bugsink__get_stacktrace({ event_id: "..." })
```
## Security Considerations
### API Key Storage
- Never commit API keys to version control
- Use environment variables via `src/config/env.ts`
- Rotate keys periodically
- Use separate keys for dev/test/prod
### Webhook Security
- Always verify webhook signatures
- Use HTTPS for webhook endpoints
- Implement idempotency
- Log webhook events for audit
### OAuth Security
- Use state parameter to prevent CSRF
- Store tokens securely (encrypted at rest)
- Implement token refresh before expiration
- Validate token scopes
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) - Security patterns
- [AI-USAGE-GUIDE.md](./AI-USAGE-GUIDE.md) - Gemini API integration
- [../adr/0041-ai-gemini-integration-architecture.md](../adr/0041-ai-gemini-integration-architecture.md) - AI integration ADR
- [../adr/0016-api-security-hardening.md](../adr/0016-api-security-hardening.md) - API security
- [../adr/0048-authentication-strategy.md](../adr/0048-authentication-strategy.md) - Authentication

View File

@@ -89,6 +89,47 @@ Or:
Claude will automatically invoke the appropriate subagent with the relevant context.
## Quick Reference Decision Tree
Use this flowchart to quickly identify the right subagent:
```
What do you need to do?
|
+-- Write/modify code? ----------------> Is it database-related?
| |
| +-- Yes -> db-dev
| +-- No --> Is it frontend?
| |
| +-- Yes -> frontend-specialist
| +-- No --> Is it AI/Gemini?
| |
| +-- Yes -> ai-usage
| +-- No --> coder
|
+-- Test something? -------------------> Write new tests? -> testwriter
| Find bugs/vulnerabilities? -> tester
| Review existing code? -> code-reviewer
|
+-- Debug an issue? -------------------> Production error? -> log-debug
| Database slow? -> db-admin
| External API failing? -> integrations-specialist
| AI extraction failing? -> ai-usage
|
+-- Infrastructure/Deployment? --------> Container/CI/CD? -> devops
| Resource optimization? -> infra-architect
| Background jobs? -> bg-worker
|
+-- Documentation? --------------------> User-facing docs? -> documenter
| ADRs/Technical specs? -> describer-for-ai
| Feature planning? -> planner
| User stories? -> product-owner
|
+-- Security? -------------------------> security-engineer
|
+-- Design/UX? ------------------------> uiux-designer
```
## Subagent Selection Guide
### Which Subagent Should I Use?
@@ -183,12 +224,26 @@ Subagents can pass information back to the main conversation and to each other t
## Related Documentation
- [CODER-GUIDE.md](./CODER-GUIDE.md) - Working with the coder subagent
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Testing strategies and patterns
- [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) - Database development workflows
- [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) - DevOps and deployment workflows
### Subagent Guides
| Guide | Subagents Covered |
| ---------------------------------------------------- | ----------------------------------------------------- |
| [CODER-GUIDE.md](./CODER-GUIDE.md) | coder |
| [TESTER-GUIDE.md](./TESTER-GUIDE.md) | tester, testwriter |
| [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) | db-dev, db-admin |
| [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) | devops, infra-architect, bg-worker |
| [FRONTEND-GUIDE.md](./FRONTEND-GUIDE.md) | frontend-specialist, uiux-designer |
| [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) | security-engineer, log-debug, code-reviewer |
| [AI-USAGE-GUIDE.md](./AI-USAGE-GUIDE.md) | ai-usage |
| [INTEGRATIONS-GUIDE.md](./INTEGRATIONS-GUIDE.md) | integrations-specialist, tools-integration-specialist |
| [DOCUMENTATION-GUIDE.md](./DOCUMENTATION-GUIDE.md) | documenter, describer-for-ai, planner, product-owner |
### Project Documentation
- [../adr/index.md](../adr/index.md) - Architecture Decision Records
- [../TESTING.md](../TESTING.md) - Testing guide
- [../development/TESTING.md](../development/TESTING.md) - Testing guide
- [../development/CODE-PATTERNS.md](../development/CODE-PATTERNS.md) - Code patterns reference
- [../architecture/OVERVIEW.md](../architecture/OVERVIEW.md) - System architecture
## Troubleshooting

View File

@@ -6,6 +6,16 @@ This guide covers security and debugging-focused subagents:
- **log-debug**: Production errors, observability, Bugsink/Sentry analysis
- **code-reviewer**: Code quality, security review, best practices
## Quick Reference
| Aspect | security-engineer | log-debug | code-reviewer |
| --------------- | ---------------------------------- | ---------------------------------------- | --------------------------- |
| **Primary Use** | Security audits, OWASP | Production debugging | Code quality review |
| **Key ADRs** | ADR-016 (Security), ADR-032 (Rate) | ADR-050 (Observability) | ADR-034, ADR-035 (Patterns) |
| **MCP Tools** | N/A | `mcp__bugsink__*`, `mcp__localerrors__*` | N/A |
| **Key Checks** | Auth, input validation, CORS | Logs, stacktraces, error patterns | Patterns, tests, security |
| **Delegate To** | `coder` (fix issues) | `devops` (infra), `coder` (fixes) | `coder`, `testwriter` |
## The security-engineer Subagent
### When to Use
@@ -432,8 +442,10 @@ tail -f /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log | grep "duration:"
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) - Infrastructure debugging
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Security testing
- [../adr/0016-api-security-hardening.md](../adr/0016-api-security-hardening.md) - Security ADR
- [../adr/0032-rate-limiting-strategy.md](../adr/0032-rate-limiting-strategy.md) - Rate limiting
- [../adr/0015-application-performance-monitoring-and-error-tracking.md](../adr/0015-application-performance-monitoring-and-error-tracking.md) - Monitoring ADR
- [../adr/0015-error-tracking-and-observability.md](../adr/0015-error-tracking-and-observability.md) - Monitoring ADR
- [../adr/0050-postgresql-function-observability.md](../adr/0050-postgresql-function-observability.md) - Database observability
- [../BARE-METAL-SETUP.md](../BARE-METAL-SETUP.md) - Production setup
- [../operations/BARE-METAL-SETUP.md](../operations/BARE-METAL-SETUP.md) - Production setup
- [../tools/BUGSINK-SETUP.md](../tools/BUGSINK-SETUP.md) - Bugsink configuration

View File

@@ -5,6 +5,17 @@ This guide covers two related but distinct subagents for testing in the Flyer Cr
- **tester**: Adversarial testing to find edge cases, race conditions, and vulnerabilities
- **testwriter**: Creating comprehensive test suites for features and fixes
## Quick Reference
| Aspect | tester | testwriter |
| ---------------- | -------------------------------------------- | ------------------------------------------ |
| **Primary Use** | Find bugs, security issues, edge cases | Create test suites, improve coverage |
| **Key Files** | N/A (analysis-focused) | `*.test.ts`, `src/tests/utils/` |
| **Key ADRs** | ADR-010 (Testing), ADR-040 (Test Economics) | ADR-010 (Testing), ADR-045 (Test Fixtures) |
| **Test Command** | `podman exec -it flyer-crawler-dev npm test` | Same |
| **Test Stack** | Vitest, Supertest, Testing Library | Same |
| **Delegate To** | `testwriter` (write tests for findings) | `coder` (fix failing tests) |
## Understanding the Difference
| Aspect | tester | testwriter |
@@ -399,6 +410,7 @@ A typical workflow for thorough testing:
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - Working with the coder subagent
- [../TESTING.md](../TESTING.md) - Testing guide
- [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) - Security testing and code review
- [../development/TESTING.md](../development/TESTING.md) - Testing guide
- [../adr/0010-testing-strategy-and-standards.md](../adr/0010-testing-strategy-and-standards.md) - Testing ADR
- [../adr/0040-testing-economics-and-priorities.md](../adr/0040-testing-economics-and-priorities.md) - Testing priorities

View File

@@ -109,10 +109,10 @@ MSYS_NO_PATHCONV=1 podman exec -e DATABASE_URL=postgresql://bugsink:bugsink_dev_
### Production Token
SSH into the production server:
User executes this command on the production server:
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
cd /opt/bugsink && bugsink-manage create_auth_token
```
**Output:** Same format - 40-character hex token.
@@ -795,10 +795,10 @@ podman exec flyer-crawler-dev pg_isready -U bugsink -d bugsink -h postgres
podman exec flyer-crawler-dev psql -U postgres -h postgres -c "\l" | grep bugsink
```
**Production:**
**Production** (user executes on server):
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage check"
cd /opt/bugsink && bugsink-manage check
```
### PostgreSQL Sequence Out of Sync (Duplicate Key Errors)
@@ -834,10 +834,9 @@ SELECT
END as status;
"
# Production
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage dbshell" <<< "
SELECT MAX(id) as max_id, (SELECT last_value FROM projects_project_id_seq) as seq_value FROM projects_project;
"
# Production (user executes on server)
cd /opt/bugsink && bugsink-manage dbshell
# Then run: SELECT MAX(id) as max_id, (SELECT last_value FROM projects_project_id_seq) as seq_value FROM projects_project;
```
**Solution:**
@@ -850,10 +849,9 @@ podman exec flyer-crawler-dev psql -U bugsink -h postgres -d bugsink -c "
SELECT setval('projects_project_id_seq', COALESCE((SELECT MAX(id) FROM projects_project), 1), true);
"
# Production
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage dbshell" <<< "
SELECT setval('projects_project_id_seq', COALESCE((SELECT MAX(id) FROM projects_project), 1), true);
"
# Production (user executes on server)
cd /opt/bugsink && bugsink-manage dbshell
# Then run: SELECT setval('projects_project_id_seq', COALESCE((SELECT MAX(id) FROM projects_project), 1), true);
```
**Verification:**

6090
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,8 +1,11 @@
{
"name": "flyer-crawler",
"private": true,
"version": "0.12.18",
"version": "0.14.2",
"type": "module",
"engines": {
"node": ">=18.0.0"
},
"scripts": {
"dev": "concurrently \"npm:start:dev\" \"vite\"",
"dev:container": "concurrently \"npm:start:dev\" \"vite --host\"",
@@ -24,14 +27,17 @@
"lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
"type-check": "tsc --noEmit",
"validate": "(prettier --check . || true) && npm run type-check && (npm run lint || true)",
"clean": "rimraf coverage .coverage",
"clean": "node scripts/clean.mjs",
"start:dev": "NODE_ENV=development tsx watch server.ts",
"start:prod": "NODE_ENV=production tsx server.ts",
"start:test": "NODE_ENV=test NODE_V8_COVERAGE=.coverage/tmp/integration-server tsx server.ts",
"db:reset:dev": "NODE_ENV=development tsx src/db/seed.ts",
"db:reset:test": "NODE_ENV=test tsx src/db/seed.ts",
"worker:prod": "NODE_ENV=production tsx src/services/queueService.server.ts",
"prepare": "node -e \"try { require.resolve('husky') } catch (e) { process.exit(0) }\" && husky || true"
"prepare": "node -e \"try { require.resolve('husky') } catch (e) { process.exit(0) }\" && husky || true",
"tsoa:spec": "tsoa spec",
"tsoa:routes": "tsoa routes",
"tsoa:build": "tsoa spec-and-routes"
},
"dependencies": {
"@bull-board/api": "^6.14.2",
@@ -74,8 +80,8 @@
"react-router-dom": "^7.9.6",
"recharts": "^3.4.1",
"sharp": "^0.34.5",
"swagger-jsdoc": "^6.2.8",
"swagger-ui-express": "^5.0.1",
"tsoa": "^6.6.0",
"tsx": "^4.20.6",
"zod": "^4.2.1",
"zxcvbn": "^4.4.2",
@@ -110,7 +116,6 @@
"@types/react-dom": "^19.2.3",
"@types/sharp": "^0.31.1",
"@types/supertest": "^6.0.3",
"@types/swagger-jsdoc": "^6.0.4",
"@types/swagger-ui-express": "^4.1.8",
"@types/ws": "^8.18.1",
"@types/zxcvbn": "^4.4.5",
@@ -139,7 +144,6 @@
"pino-pretty": "^13.1.3",
"postcss": "^8.5.6",
"prettier": "^3.3.2",
"rimraf": "^6.1.2",
"supertest": "^7.1.4",
"tailwindcss": "^4.1.17",
"testcontainers": "^11.8.1",

70
scripts/clean.mjs Normal file
View File

@@ -0,0 +1,70 @@
#!/usr/bin/env node
/**
* Clean script to remove coverage directories.
* Replaces rimraf dependency with native Node.js fs.rm API.
*
* Usage: node scripts/clean.mjs
*
* Behavior matches rimraf: errors are logged but script exits successfully.
* This allows build pipelines to continue even if directories don't exist.
*/
import { rm } from 'node:fs/promises';
import { resolve } from 'node:path';
/**
* Directories to clean, relative to project root.
* Add additional directories here as needed.
*/
const DIRECTORIES_TO_CLEAN = ['coverage', '.coverage'];
/**
 * Recursively delete a single directory, swallowing failures.
 *
 * Mirrors rimraf's behavior: problems are reported on stderr but never
 * propagate, so callers (and build pipelines) keep going.
 *
 * @param {string} dirPath - Absolute path of the directory to delete
 * @returns {Promise<boolean>} - True on success, false when removal failed
 */
async function removeDirectory(dirPath) {
  try {
    await rm(dirPath, { recursive: true, force: true });
  } catch (error) {
    // force: true already ignores missing paths; anything else is logged
    // as a warning so the clean step never breaks the build.
    console.error(`Warning: Could not remove ${dirPath}: ${error.message}`);
    return false;
  }
  console.log(`Removed: ${dirPath}`);
  return true;
}
/**
 * Entry point: remove every configured directory, report a summary, and
 * always exit 0 so build pipelines are never broken by cleanup.
 */
async function main() {
  // scripts/ lives one level below the project root.
  const projectRoot = resolve(import.meta.dirname, '..');
  console.log('Cleaning coverage directories...');
  const outcomes = await Promise.all(
    DIRECTORIES_TO_CLEAN.map((dir) => removeDirectory(resolve(projectRoot, dir)))
  );
  const succeeded = outcomes.filter(Boolean).length;
  console.log(
    `Clean complete: ${succeeded}/${DIRECTORIES_TO_CLEAN.length} directories processed.`
  );
  // Always exit successfully (matches rimraf behavior)
  process.exit(0);
}
main().catch((error) => {
  // Even an unexpected failure in main must not break the pipeline.
  console.error('Unexpected error during clean:', error.message);
  process.exit(0);
});

View File

@@ -25,9 +25,12 @@ import { backgroundJobService, startBackgroundJobs } from './src/services/backgr
import { websocketService } from './src/services/websocketService.server';
import type { UserProfile } from './src/types';
// API Documentation (ADR-018)
// API Documentation (ADR-018) - tsoa-generated OpenAPI spec
import swaggerUi from 'swagger-ui-express';
import { swaggerSpec } from './src/config/swagger';
import tsoaSpec from './src/config/tsoa-spec.json' with { type: 'json' };
// tsoa-generated routes
import { RegisterRoutes } from './src/routes/tsoa-generated';
import {
analyticsQueue,
weeklyAnalyticsQueue,
@@ -197,11 +200,13 @@ if (!process.env.JWT_SECRET) {
// --- API Documentation (ADR-018) ---
// Only serve Swagger UI in non-production environments to prevent information disclosure.
// Uses tsoa-generated OpenAPI specification.
if (process.env.NODE_ENV !== 'production') {
// Serve tsoa-generated OpenAPI documentation
app.use(
'/docs/api-docs',
swaggerUi.serve,
swaggerUi.setup(swaggerSpec, {
swaggerUi.setup(tsoaSpec, {
customCss: '.swagger-ui .topbar { display: none }',
customSiteTitle: 'Flyer Crawler API Documentation',
}),
@@ -210,7 +215,7 @@ if (process.env.NODE_ENV !== 'production') {
// Expose raw OpenAPI JSON spec for tooling (SDK generation, testing, etc.)
app.get('/docs/api-docs.json', (_req, res) => {
res.setHeader('Content-Type', 'application/json');
res.send(swaggerSpec);
res.send(tsoaSpec);
});
logger.info('API Documentation available at /docs/api-docs');
@@ -230,12 +235,27 @@ app.get('/api/v1/health/queues', async (req, res) => {
}
});
// --- tsoa-generated Routes ---
// Register routes generated by tsoa from controllers.
// These routes run in parallel with existing routes during migration.
// tsoa routes are mounted directly on the app (basePath in tsoa.json is '/api').
// The RegisterRoutes function adds routes at /api/health/*, /api/_tsoa/*, etc.
//
// IMPORTANT: tsoa routes are registered BEFORE the backwards compatibility redirect
// middleware so that tsoa routes (like /api/health/ping, /api/_tsoa/verify) are
// matched directly without being redirected to /api/v1/*.
// During migration, both tsoa routes and versioned routes coexist:
// - /api/health/ping -> handled by tsoa HealthController
// - /api/v1/health/ping -> handled by versioned health.routes.ts
// As controllers are migrated, the versioned routes will be removed.
RegisterRoutes(app);
// --- Backwards Compatibility Redirect (ADR-008: API Versioning Strategy) ---
// Redirect old /api/* paths to /api/v1/* for backwards compatibility.
// This allows clients to gradually migrate to the versioned API.
// IMPORTANT: This middleware MUST be mounted BEFORE createApiRouter() so that
// unversioned paths like /api/users are redirected to /api/v1/users BEFORE
// the versioned router's detectApiVersion middleware rejects them as invalid versions.
// IMPORTANT: This middleware MUST be mounted:
// - AFTER tsoa routes (so tsoa routes are matched directly)
// - BEFORE createApiRouter() (so unversioned paths are redirected to /api/v1/*)
app.use('/api', (req, res, next) => {
// Check if the path starts with a version-like prefix (/v followed by digits).
// This includes both supported versions (v1, v2) and unsupported ones (v99).

View File

@@ -0,0 +1,378 @@
// src/components/FeatureFlag.test.tsx
/**
* Unit tests for the FeatureFlag component (ADR-024).
*
* These tests verify:
* - Component renders children when feature is enabled
* - Component hides children when feature is disabled
* - Component renders fallback when feature is disabled
* - Component returns null when disabled and no fallback provided
* - All feature flag names are properly handled
*/
import React from 'react';
import { render, screen } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
// Mock the useFeatureFlag hook
const mockUseFeatureFlag = vi.fn();
vi.mock('../hooks/useFeatureFlag', () => ({
useFeatureFlag: (flagName: string) => mockUseFeatureFlag(flagName),
}));
// Import after mocking
import { FeatureFlag } from './FeatureFlag';
describe('FeatureFlag component', () => {
beforeEach(() => {
mockUseFeatureFlag.mockReset();
// Default to disabled
mockUseFeatureFlag.mockReturnValue(false);
});
describe('when feature is enabled', () => {
beforeEach(() => {
mockUseFeatureFlag.mockReturnValue(true);
});
it('should render children', () => {
render(
<FeatureFlag feature="newDashboard">
<div data-testid="new-feature">New Feature Content</div>
</FeatureFlag>,
);
expect(screen.getByTestId('new-feature')).toBeInTheDocument();
expect(screen.getByText('New Feature Content')).toBeInTheDocument();
});
it('should not render fallback', () => {
render(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="fallback">Fallback</div>}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.getByTestId('new-feature')).toBeInTheDocument();
expect(screen.queryByTestId('fallback')).not.toBeInTheDocument();
});
it('should render multiple children', () => {
render(
<FeatureFlag feature="newDashboard">
<div data-testid="child-1">Child 1</div>
<div data-testid="child-2">Child 2</div>
</FeatureFlag>,
);
expect(screen.getByTestId('child-1')).toBeInTheDocument();
expect(screen.getByTestId('child-2')).toBeInTheDocument();
});
it('should render text content', () => {
render(<FeatureFlag feature="newDashboard">Just some text</FeatureFlag>);
expect(screen.getByText('Just some text')).toBeInTheDocument();
});
it('should call useFeatureFlag with correct flag name', () => {
render(
<FeatureFlag feature="betaRecipes">
<div>Content</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('betaRecipes');
});
});
describe('when feature is disabled', () => {
beforeEach(() => {
mockUseFeatureFlag.mockReturnValue(false);
});
it('should not render children', () => {
render(
<FeatureFlag feature="newDashboard">
<div data-testid="new-feature">New Feature Content</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.queryByText('New Feature Content')).not.toBeInTheDocument();
});
it('should render fallback when provided', () => {
render(
<FeatureFlag
feature="newDashboard"
fallback={<div data-testid="fallback">Legacy Feature</div>}
>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.getByTestId('fallback')).toBeInTheDocument();
expect(screen.getByText('Legacy Feature')).toBeInTheDocument();
});
it('should render null when no fallback is provided', () => {
const { container } = render(
<FeatureFlag feature="newDashboard">
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
// Container should be empty (just the wrapper)
expect(container.innerHTML).toBe('');
});
it('should render complex fallback components', () => {
const FallbackComponent = () => (
<div data-testid="complex-fallback">
<h1>Legacy Dashboard</h1>
<p>This is the old version</p>
</div>
);
render(
<FeatureFlag feature="newDashboard" fallback={<FallbackComponent />}>
<div data-testid="new-feature">New Dashboard</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.getByTestId('complex-fallback')).toBeInTheDocument();
expect(screen.getByText('Legacy Dashboard')).toBeInTheDocument();
expect(screen.getByText('This is the old version')).toBeInTheDocument();
});
it('should render text fallback', () => {
render(
<FeatureFlag feature="newDashboard" fallback="Feature not available">
<div>New Feature</div>
</FeatureFlag>,
);
expect(screen.getByText('Feature not available')).toBeInTheDocument();
});
});
describe('with different feature flags', () => {
it('should work with newDashboard flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="newDashboard">
<div data-testid="dashboard">Dashboard</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('newDashboard');
expect(screen.getByTestId('dashboard')).toBeInTheDocument();
});
it('should work with betaRecipes flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="betaRecipes">
<div data-testid="recipes">Recipes</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('betaRecipes');
expect(screen.getByTestId('recipes')).toBeInTheDocument();
});
it('should work with experimentalAi flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="experimentalAi">
<div data-testid="ai">AI Feature</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('experimentalAi');
expect(screen.getByTestId('ai')).toBeInTheDocument();
});
it('should work with debugMode flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="debugMode">
<div data-testid="debug">Debug Panel</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('debugMode');
expect(screen.getByTestId('debug')).toBeInTheDocument();
});
});
describe('real-world usage patterns', () => {
it('should work for A/B testing pattern', () => {
mockUseFeatureFlag.mockReturnValue(false);
render(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="old-ui">Old UI</div>}>
<div data-testid="new-ui">New UI</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-ui')).not.toBeInTheDocument();
expect(screen.getByTestId('old-ui')).toBeInTheDocument();
});
it('should work for gradual rollout pattern', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<div>
<nav data-testid="nav">Navigation</nav>
<FeatureFlag feature="betaRecipes">
<aside data-testid="recipe-suggestions">Recipe Suggestions</aside>
</FeatureFlag>
<main data-testid="main">Main Content</main>
</div>,
);
expect(screen.getByTestId('nav')).toBeInTheDocument();
expect(screen.getByTestId('recipe-suggestions')).toBeInTheDocument();
expect(screen.getByTestId('main')).toBeInTheDocument();
});
it('should work nested within conditional logic', () => {
mockUseFeatureFlag.mockReturnValue(true);
const isLoggedIn = true;
render(
<div>
{isLoggedIn && (
<FeatureFlag
feature="experimentalAi"
fallback={<div data-testid="standard">Standard</div>}
>
<div data-testid="ai-search">AI Search</div>
</FeatureFlag>
)}
</div>,
);
expect(screen.getByTestId('ai-search')).toBeInTheDocument();
});
it('should work with multiple FeatureFlag components', () => {
// First call for newDashboard returns true
// Second call for debugMode returns false
mockUseFeatureFlag.mockImplementation((flag: string) => {
if (flag === 'newDashboard') return true;
if (flag === 'debugMode') return false;
return false;
});
render(
<div>
<FeatureFlag feature="newDashboard">
<div data-testid="new-dashboard">New Dashboard</div>
</FeatureFlag>
<FeatureFlag feature="debugMode" fallback={<div data-testid="no-debug">No Debug</div>}>
<div data-testid="debug-panel">Debug Panel</div>
</FeatureFlag>
</div>,
);
expect(screen.getByTestId('new-dashboard')).toBeInTheDocument();
expect(screen.queryByTestId('debug-panel')).not.toBeInTheDocument();
expect(screen.getByTestId('no-debug')).toBeInTheDocument();
});
});
describe('edge cases', () => {
it('should handle undefined fallback gracefully', () => {
mockUseFeatureFlag.mockReturnValue(false);
const { container } = render(
<FeatureFlag feature="newDashboard" fallback={undefined}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(container.innerHTML).toBe('');
});
it('should handle null children gracefully when enabled', () => {
mockUseFeatureFlag.mockReturnValue(true);
const { container } = render(<FeatureFlag feature="newDashboard">{null}</FeatureFlag>);
// Should render nothing (null)
expect(container.innerHTML).toBe('');
});
it('should handle empty children when enabled', () => {
mockUseFeatureFlag.mockReturnValue(true);
const { container } = render(
<FeatureFlag feature="newDashboard">
<></>
</FeatureFlag>,
);
// Should render the empty fragment
expect(container.innerHTML).toBe('');
});
it('should handle boolean children', () => {
mockUseFeatureFlag.mockReturnValue(true);
// React ignores boolean children, so nothing should render
const { container } = render(
<FeatureFlag feature="newDashboard">{true as unknown as React.ReactNode}</FeatureFlag>,
);
expect(container.innerHTML).toBe('');
});
it('should handle number children', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(<FeatureFlag feature="newDashboard">{42}</FeatureFlag>);
expect(screen.getByText('42')).toBeInTheDocument();
});
});
describe('re-rendering behavior', () => {
it('should update when feature flag value changes', () => {
const { rerender } = render(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="fallback">Fallback</div>}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
// Initially disabled
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.getByTestId('fallback')).toBeInTheDocument();
// Enable the flag
mockUseFeatureFlag.mockReturnValue(true);
rerender(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="fallback">Fallback</div>}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
// Now enabled
expect(screen.getByTestId('new-feature')).toBeInTheDocument();
expect(screen.queryByTestId('fallback')).not.toBeInTheDocument();
});
});
});

View File

@@ -0,0 +1,75 @@
// src/components/FeatureFlag.tsx
import type { ReactNode } from 'react';
import { useFeatureFlag, type FeatureFlagName } from '../hooks/useFeatureFlag';

/**
 * Props for the FeatureFlag component.
 */
export interface FeatureFlagProps {
  /**
   * Name of the feature flag to evaluate.
   * Must be one of the FeatureFlagName keys declared in config.featureFlags.
   */
  feature: FeatureFlagName;
  /**
   * Content shown while the feature flag is enabled.
   */
  children: ReactNode;
  /**
   * Content shown while the feature flag is disabled.
   * When omitted, a disabled flag renders nothing.
   * @default null
   */
  fallback?: ReactNode;
}

/**
 * Declarative gate that renders its children only while a feature flag is on.
 *
 * Wraps the useFeatureFlag hook behind a small JSX-friendly API: an enabled
 * flag renders `children`, a disabled flag renders `fallback` (or nothing when
 * no fallback is supplied).
 *
 * @param props - Component props
 * @param props.feature - The feature flag name to check
 * @param props.children - Content rendered when the feature is enabled
 * @param props.fallback - Content rendered when the feature is disabled (default: null)
 *
 * @example
 * // Basic usage - show new feature when enabled
 * <FeatureFlag feature="newDashboard">
 *   <NewDashboard />
 * </FeatureFlag>
 *
 * @example
 * // With fallback - show alternative when feature is disabled
 * <FeatureFlag feature="newDashboard" fallback={<LegacyDashboard />}>
 *   <NewDashboard />
 * </FeatureFlag>
 *
 * @example
 * // Wrap a section of UI that should only appear when flag is enabled
 * <div className="sidebar">
 *   <Navigation />
 *   <FeatureFlag feature="betaRecipes">
 *     <RecipeSuggestions />
 *   </FeatureFlag>
 *   <Footer />
 * </div>
 *
 * @example
 * // Combine with other conditional logic
 * {isLoggedIn && (
 *   <FeatureFlag feature="experimentalAi" fallback={<StandardSearch />}>
 *     <AiPoweredSearch />
 *   </FeatureFlag>
 * )}
 *
 * @see docs/adr/0024-feature-flagging-strategy.md
 */
export function FeatureFlag({ feature, children, fallback = null }: FeatureFlagProps): ReactNode {
  // The hook is called unconditionally so the hook order stays stable
  // across renders regardless of the flag's value.
  const enabled = useFeatureFlag(feature);
  if (enabled) {
    return children;
  }
  return fallback;
}

View File

@@ -0,0 +1,424 @@
// src/components/NotificationBell.test.tsx
import React from 'react';
import { screen, fireEvent, act } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest';
import { NotificationBell, ConnectionStatus } from './NotificationBell';
import { renderWithProviders } from '../tests/utils/renderWithProviders';
// Mock the useWebSocket hook so each test can script the connection state
// (isConnected / error) that NotificationBell observes.
vi.mock('../hooks/useWebSocket', () => ({
  useWebSocket: vi.fn(),
}));
// Mock the useEventBus hook so tests can capture the subscription callback
// and push synthetic notification events through it.
vi.mock('../hooks/useEventBus', () => ({
  useEventBus: vi.fn(),
}));
// Import after mocking so these bindings resolve to the mocked modules.
import { useWebSocket } from '../hooks/useWebSocket';
import { useEventBus } from '../hooks/useEventBus';
// Type the mocked functions as vitest Mocks for mockReturnValue/mockImplementation.
const mockUseWebSocket = useWebSocket as Mock;
const mockUseEventBus = useEventBus as Mock;
describe('NotificationBell', () => {
  // Captures the subscriber NotificationBell registers on the event bus so
  // tests can synthesize incoming deal notifications synchronously.
  let eventBusCallback: ((data?: unknown) => void) | null = null;
  beforeEach(() => {
    vi.clearAllMocks();
    eventBusCallback = null;
    // Default mock: connected state, no error
    mockUseWebSocket.mockReturnValue({
      isConnected: true,
      error: null,
    });
    // Capture the callback passed to useEventBus
    mockUseEventBus.mockImplementation((_event: string, callback: (data?: unknown) => void) => {
      eventBusCallback = callback;
    });
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  describe('rendering', () => {
    it('should render the notification bell button', () => {
      renderWithProviders(<NotificationBell />);
      const button = screen.getByRole('button', { name: /notifications/i });
      expect(button).toBeInTheDocument();
    });
    it('should render with custom className', () => {
      renderWithProviders(<NotificationBell className="custom-class" />);
      // The custom class lands on the wrapper element, not the button itself.
      const container = screen.getByRole('button').parentElement;
      expect(container).toHaveClass('custom-class');
    });
    it('should show connection status indicator by default', () => {
      const { container } = renderWithProviders(<NotificationBell />);
      // The status indicator is a span with inline style containing backgroundColor
      const statusIndicator = container.querySelector('span[title="Connected"]');
      expect(statusIndicator).toBeInTheDocument();
    });
    it('should hide connection status indicator when showConnectionStatus is false', () => {
      const { container } = renderWithProviders(<NotificationBell showConnectionStatus={false} />);
      // No status indicator should be present (no span with title Connected/Connecting/Disconnected)
      const connectedIndicator = container.querySelector('span[title="Connected"]');
      const connectingIndicator = container.querySelector('span[title="Connecting"]');
      const disconnectedIndicator = container.querySelector('span[title="Disconnected"]');
      expect(connectedIndicator).not.toBeInTheDocument();
      expect(connectingIndicator).not.toBeInTheDocument();
      expect(disconnectedIndicator).not.toBeInTheDocument();
    });
  });
  describe('unread count badge', () => {
    it('should not show badge when unread count is zero', () => {
      renderWithProviders(<NotificationBell />);
      // The badge displays numbers, check that no number badge exists
      const badge = screen.queryByText(/^\d+$/);
      expect(badge).not.toBeInTheDocument();
    });
    it('should show badge with count when notifications arrive', () => {
      renderWithProviders(<NotificationBell />);
      // Simulate a notification arriving via event bus
      expect(eventBusCallback).not.toBeNull();
      // act() wraps the state update triggered by the event-bus callback.
      act(() => {
        eventBusCallback!({ deals: [{ item_name: 'Test' }] });
      });
      const badge = screen.getByText('1');
      expect(badge).toBeInTheDocument();
    });
    it('should increment count when multiple notifications arrive', () => {
      renderWithProviders(<NotificationBell />);
      // Simulate multiple notifications
      act(() => {
        eventBusCallback!({ deals: [{ item_name: 'Test 1' }] });
        eventBusCallback!({ deals: [{ item_name: 'Test 2' }] });
        eventBusCallback!({ deals: [{ item_name: 'Test 3' }] });
      });
      const badge = screen.getByText('3');
      expect(badge).toBeInTheDocument();
    });
    it('should display 99+ when count exceeds 99', () => {
      renderWithProviders(<NotificationBell />);
      // Simulate 100 notifications
      act(() => {
        for (let i = 0; i < 100; i++) {
          eventBusCallback!({ deals: [{ item_name: `Test ${i}` }] });
        }
      });
      const badge = screen.getByText('99+');
      expect(badge).toBeInTheDocument();
    });
    it('should not increment count when notification data is undefined', () => {
      renderWithProviders(<NotificationBell />);
      // Simulate a notification with undefined data
      act(() => {
        eventBusCallback!(undefined);
      });
      const badge = screen.queryByText(/^\d+$/);
      expect(badge).not.toBeInTheDocument();
    });
  });
  describe('click behavior', () => {
    it('should reset unread count when clicked', () => {
      renderWithProviders(<NotificationBell />);
      // First, add some notifications
      act(() => {
        eventBusCallback!({ deals: [{ item_name: 'Test' }] });
      });
      expect(screen.getByText('1')).toBeInTheDocument();
      // Click the bell
      const button = screen.getByRole('button');
      fireEvent.click(button);
      // Badge should no longer show
      expect(screen.queryByText('1')).not.toBeInTheDocument();
    });
    it('should call onClick callback when provided', () => {
      const mockOnClick = vi.fn();
      renderWithProviders(<NotificationBell onClick={mockOnClick} />);
      const button = screen.getByRole('button');
      fireEvent.click(button);
      expect(mockOnClick).toHaveBeenCalledTimes(1);
    });
    it('should handle click without onClick callback', () => {
      renderWithProviders(<NotificationBell />);
      const button = screen.getByRole('button');
      // Should not throw
      expect(() => fireEvent.click(button)).not.toThrow();
    });
  });
  describe('connection status', () => {
    // Indicator colors are asserted as the computed rgb() values of the
    // inline backgroundColor style on the status span.
    it('should show green indicator when connected', () => {
      mockUseWebSocket.mockReturnValue({
        isConnected: true,
        error: null,
      });
      const { container } = renderWithProviders(<NotificationBell />);
      const statusIndicator = container.querySelector('span[title="Connected"]');
      expect(statusIndicator).toBeInTheDocument();
      expect(statusIndicator).toHaveStyle({ backgroundColor: 'rgb(16, 185, 129)' });
    });
    it('should show red indicator when error occurs', () => {
      mockUseWebSocket.mockReturnValue({
        isConnected: false,
        error: 'Connection failed',
      });
      const { container } = renderWithProviders(<NotificationBell />);
      const statusIndicator = container.querySelector('span[title="Disconnected"]');
      expect(statusIndicator).toBeInTheDocument();
      expect(statusIndicator).toHaveStyle({ backgroundColor: 'rgb(239, 68, 68)' });
    });
    it('should show amber indicator when connecting', () => {
      // isConnected false with no error is treated as the "connecting" state.
      mockUseWebSocket.mockReturnValue({
        isConnected: false,
        error: null,
      });
      const { container } = renderWithProviders(<NotificationBell />);
      const statusIndicator = container.querySelector('span[title="Connecting"]');
      expect(statusIndicator).toBeInTheDocument();
      expect(statusIndicator).toHaveStyle({ backgroundColor: 'rgb(245, 158, 11)' });
    });
    it('should show error tooltip when disconnected with error', () => {
      mockUseWebSocket.mockReturnValue({
        isConnected: false,
        error: 'Connection failed',
      });
      renderWithProviders(<NotificationBell />);
      expect(screen.getByText('Live notifications unavailable')).toBeInTheDocument();
    });
    it('should not show error tooltip when connected', () => {
      mockUseWebSocket.mockReturnValue({
        isConnected: true,
        error: null,
      });
      renderWithProviders(<NotificationBell />);
      expect(screen.queryByText('Live notifications unavailable')).not.toBeInTheDocument();
    });
  });
  describe('aria attributes', () => {
    it('should have correct aria-label without unread notifications', () => {
      renderWithProviders(<NotificationBell />);
      const button = screen.getByRole('button');
      expect(button).toHaveAttribute('aria-label', 'Notifications');
    });
    it('should have correct aria-label with unread notifications', () => {
      renderWithProviders(<NotificationBell />);
      act(() => {
        eventBusCallback!({ deals: [{ item_name: 'Test' }] });
        eventBusCallback!({ deals: [{ item_name: 'Test2' }] });
      });
      const button = screen.getByRole('button');
      expect(button).toHaveAttribute('aria-label', 'Notifications (2 unread)');
    });
    it('should have correct title when connected', () => {
      mockUseWebSocket.mockReturnValue({
        isConnected: true,
        error: null,
      });
      renderWithProviders(<NotificationBell />);
      const button = screen.getByRole('button');
      expect(button).toHaveAttribute('title', 'Connected to live notifications');
    });
    it('should have correct title when connecting', () => {
      mockUseWebSocket.mockReturnValue({
        isConnected: false,
        error: null,
      });
      renderWithProviders(<NotificationBell />);
      const button = screen.getByRole('button');
      expect(button).toHaveAttribute('title', 'Connecting...');
    });
    it('should have correct title when error occurs', () => {
      mockUseWebSocket.mockReturnValue({
        isConnected: false,
        error: 'Network error',
      });
      renderWithProviders(<NotificationBell />);
      const button = screen.getByRole('button');
      expect(button).toHaveAttribute('title', 'WebSocket error: Network error');
    });
  });
  describe('bell icon styling', () => {
    it('should have default color when no unread notifications', () => {
      renderWithProviders(<NotificationBell />);
      const button = screen.getByRole('button');
      const svg = button.querySelector('svg');
      expect(svg).toHaveClass('text-gray-600');
    });
    it('should have highlighted color when there are unread notifications', () => {
      renderWithProviders(<NotificationBell />);
      act(() => {
        eventBusCallback!({ deals: [{ item_name: 'Test' }] });
      });
      const button = screen.getByRole('button');
      const svg = button.querySelector('svg');
      expect(svg).toHaveClass('text-blue-600');
    });
  });
  describe('event bus subscription', () => {
    it('should subscribe to notification:deal event', () => {
      renderWithProviders(<NotificationBell />);
      expect(mockUseEventBus).toHaveBeenCalledWith('notification:deal', expect.any(Function));
    });
  });
  describe('useWebSocket configuration', () => {
    it('should call useWebSocket with autoConnect: true', () => {
      renderWithProviders(<NotificationBell />);
      expect(mockUseWebSocket).toHaveBeenCalledWith({ autoConnect: true });
    });
  });
});
describe('ConnectionStatus', () => {
  beforeEach(() => {
    vi.clearAllMocks();
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });

  /** Script the mocked useWebSocket hook to report a fixed connection state. */
  const setSocketState = (isConnected: boolean, error: string | null): void => {
    mockUseWebSocket.mockReturnValue({ isConnected, error });
  };

  it('should show "Live" text when connected', () => {
    setSocketState(true, null);
    renderWithProviders(<ConnectionStatus />);
    expect(screen.getByText('Live')).toBeInTheDocument();
  });

  it('should show "Offline" text when disconnected with error', () => {
    setSocketState(false, 'Connection failed');
    renderWithProviders(<ConnectionStatus />);
    expect(screen.getByText('Offline')).toBeInTheDocument();
  });

  it('should show "Connecting..." text when connecting', () => {
    // Disconnected without an error is treated as the "connecting" state.
    setSocketState(false, null);
    renderWithProviders(<ConnectionStatus />);
    expect(screen.getByText('Connecting...')).toBeInTheDocument();
  });

  it('should call useWebSocket with autoConnect: true', () => {
    setSocketState(true, null);
    renderWithProviders(<ConnectionStatus />);
    expect(mockUseWebSocket).toHaveBeenCalledWith({ autoConnect: true });
  });

  it('should render Wifi icon when connected', () => {
    setSocketState(true, null);
    renderWithProviders(<ConnectionStatus />);
    // The icon is a sibling of the status label inside the same wrapper.
    const svg = screen.getByText('Live').parentElement?.querySelector('svg');
    expect(svg).toBeInTheDocument();
    expect(svg).toHaveClass('text-green-600');
  });

  it('should render WifiOff icon when disconnected', () => {
    setSocketState(false, 'Connection failed');
    renderWithProviders(<ConnectionStatus />);
    const svg = screen.getByText('Offline').parentElement?.querySelector('svg');
    expect(svg).toBeInTheDocument();
    expect(svg).toHaveClass('text-red-600');
  });
});

View File

@@ -0,0 +1,776 @@
// src/components/NotificationToastHandler.test.tsx
import React from 'react';
import { render, act } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest';
import { NotificationToastHandler } from './NotificationToastHandler';
import type { DealNotificationData, SystemMessageData } from '../types/websocket';
// Use vi.hoisted to properly hoist mock functions: vi.mock factories are
// hoisted above imports, so plain module-level consts would not exist yet
// when the react-hot-toast factory below runs.
const { mockToastSuccess, mockToastError, mockToastDefault } = vi.hoisted(() => ({
  mockToastSuccess: vi.fn(),
  mockToastError: vi.fn(),
  mockToastDefault: vi.fn(),
}));
// Mock react-hot-toast. The real default export is callable (toast(...)) and
// also carries .success/.error methods, so the mock mirrors that shape.
vi.mock('react-hot-toast', () => {
  const toastFn = (message: string, options?: unknown) => mockToastDefault(message, options);
  toastFn.success = mockToastSuccess;
  toastFn.error = mockToastError;
  return {
    default: toastFn,
  };
});
// Mock useWebSocket hook so tests can drive onConnect/onDisconnect callbacks.
vi.mock('../hooks/useWebSocket', () => ({
  useWebSocket: vi.fn(),
}));
// Mock useEventBus hook so tests can capture per-event subscription callbacks.
vi.mock('../hooks/useEventBus', () => ({
  useEventBus: vi.fn(),
}));
// Mock formatCurrency with a deterministic cents -> "$X.YY" stand-in.
vi.mock('../utils/formatUtils', () => ({
  formatCurrency: vi.fn((cents: number) => `$${(cents / 100).toFixed(2)}`),
}));
// Import after mocking so these bindings resolve to the mocked modules.
import { useWebSocket } from '../hooks/useWebSocket';
import { useEventBus } from '../hooks/useEventBus';
// Type the mocked functions as vitest Mocks for mockImplementation use below.
const mockUseWebSocket = useWebSocket as Mock;
const mockUseEventBus = useEventBus as Mock;
describe('NotificationToastHandler', () => {
let eventBusCallbacks: Map<string, (data?: unknown) => void>;
let onConnectCallback: (() => void) | undefined;
let onDisconnectCallback: (() => void) | undefined;
beforeEach(() => {
vi.clearAllMocks();
vi.useFakeTimers();
// Clear toast mocks
mockToastSuccess.mockClear();
mockToastError.mockClear();
mockToastDefault.mockClear();
eventBusCallbacks = new Map();
onConnectCallback = undefined;
onDisconnectCallback = undefined;
// Default mock implementation for useWebSocket
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: true,
error: null,
};
},
);
// Capture callbacks for different event types
mockUseEventBus.mockImplementation((event: string, callback: (data?: unknown) => void) => {
eventBusCallbacks.set(event, callback);
});
});
afterEach(() => {
vi.useRealTimers();
vi.restoreAllMocks();
});
describe('rendering', () => {
it('should render null (no visible output)', () => {
const { container } = render(<NotificationToastHandler />);
expect(container.firstChild).toBeNull();
});
it('should subscribe to event bus on mount', () => {
render(<NotificationToastHandler />);
expect(mockUseEventBus).toHaveBeenCalledWith('notification:deal', expect.any(Function));
expect(mockUseEventBus).toHaveBeenCalledWith('notification:system', expect.any(Function));
expect(mockUseEventBus).toHaveBeenCalledWith('notification:error', expect.any(Function));
});
});
describe('connection events', () => {
it('should show success toast on connect when enabled', () => {
render(<NotificationToastHandler enabled={true} />);
// Trigger onConnect callback
onConnectCallback?.();
expect(mockToastSuccess).toHaveBeenCalledWith(
'Connected to live notifications',
expect.objectContaining({
duration: 2000,
icon: expect.any(String),
}),
);
});
it('should not show success toast on connect when disabled', () => {
render(<NotificationToastHandler enabled={false} />);
onConnectCallback?.();
expect(mockToastSuccess).not.toHaveBeenCalled();
});
it('should show error toast on disconnect when error exists', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection lost',
};
},
);
render(<NotificationToastHandler enabled={true} />);
onDisconnectCallback?.();
expect(mockToastError).toHaveBeenCalledWith(
'Disconnected from live notifications',
expect.objectContaining({
duration: 3000,
icon: expect.any(String),
}),
);
});
it('should not show disconnect toast when disabled', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection lost',
};
},
);
render(<NotificationToastHandler enabled={false} />);
onDisconnectCallback?.();
expect(mockToastError).not.toHaveBeenCalled();
});
it('should not show disconnect toast when no error', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: null,
};
},
);
render(<NotificationToastHandler enabled={true} />);
onDisconnectCallback?.();
expect(mockToastError).not.toHaveBeenCalled();
});
});
describe('deal notifications', () => {
it('should show toast for single deal notification', () => {
render(<NotificationToastHandler />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal found',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(mockToastSuccess).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
duration: 5000,
icon: expect.any(String),
position: 'top-right',
}),
);
});
it('should show toast for multiple deals notification', () => {
render(<NotificationToastHandler />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Store A',
store_id: 1,
},
{
item_name: 'Bread',
best_price_in_cents: 299,
store_name: 'Store B',
store_id: 2,
},
{
item_name: 'Eggs',
best_price_in_cents: 499,
store_name: 'Store C',
store_id: 3,
},
],
user_id: 'user-123',
message: 'Multiple deals found',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(mockToastSuccess).toHaveBeenCalled();
});
it('should not show toast when disabled', () => {
render(<NotificationToastHandler enabled={false} />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal found',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(mockToastSuccess).not.toHaveBeenCalled();
});
it('should not show toast when data is undefined', () => {
render(<NotificationToastHandler />);
const callback = eventBusCallbacks.get('notification:deal');
callback?.(undefined);
expect(mockToastSuccess).not.toHaveBeenCalled();
});
});
describe('system messages', () => {
  /** Dispatch a payload through the registered 'notification:system' event-bus callback. */
  const emitSystemMessage = (payload: SystemMessageData | undefined) => {
    eventBusCallbacks.get('notification:system')?.(payload);
  };

  it('should show error toast for error severity', () => {
    render(<NotificationToastHandler />);
    emitSystemMessage({
      message: 'System error occurred',
      severity: 'error',
    });
    // Error severity routes to toast.error with a longer display duration.
    expect(mockToastError).toHaveBeenCalledWith(
      'System error occurred',
      expect.objectContaining({
        duration: 6000,
        position: 'top-center',
        icon: expect.any(String),
      }),
    );
  });

  it('should show warning toast for warning severity', () => {
    render(<NotificationToastHandler />);
    // Warning severity falls through to the default toast() function (mockToastDefault).
    emitSystemMessage({
      message: 'System warning',
      severity: 'warning',
    });
    expect(mockToastDefault).toHaveBeenCalledWith(
      'System warning',
      expect.objectContaining({
        duration: 4000,
        position: 'top-center',
        icon: expect.any(String),
      }),
    );
  });

  it('should show info toast for info severity', () => {
    render(<NotificationToastHandler />);
    // Info severity also uses the default toast() function (mockToastDefault).
    emitSystemMessage({
      message: 'System info',
      severity: 'info',
    });
    expect(mockToastDefault).toHaveBeenCalledWith(
      'System info',
      expect.objectContaining({
        duration: 4000,
        position: 'top-center',
        icon: expect.any(String),
      }),
    );
  });

  it('should not show toast when disabled', () => {
    render(<NotificationToastHandler enabled={false} />);
    emitSystemMessage({
      message: 'System error',
      severity: 'error',
    });
    expect(mockToastError).not.toHaveBeenCalled();
  });

  it('should not show toast when data is undefined', () => {
    render(<NotificationToastHandler />);
    emitSystemMessage(undefined);
    expect(mockToastError).not.toHaveBeenCalled();
  });
});
describe('error notifications', () => {
  /** Dispatch a payload through the registered 'notification:error' event-bus callback. */
  const emitErrorNotification = (payload: { message: string; code?: string } | undefined) => {
    eventBusCallbacks.get('notification:error')?.(payload);
  };

  it('should show error toast with message and code', () => {
    render(<NotificationToastHandler />);
    emitErrorNotification({
      message: 'Something went wrong',
      code: 'ERR_001',
    });
    // The handler prefixes the message with 'Error: ' in the toast text.
    expect(mockToastError).toHaveBeenCalledWith(
      'Error: Something went wrong',
      expect.objectContaining({
        duration: 5000,
        icon: expect.any(String),
      }),
    );
  });

  it('should show error toast without code', () => {
    render(<NotificationToastHandler />);
    // The code field is optional; the toast text is the same either way.
    emitErrorNotification({
      message: 'Something went wrong',
    });
    expect(mockToastError).toHaveBeenCalledWith(
      'Error: Something went wrong',
      expect.objectContaining({
        duration: 5000,
      }),
    );
  });

  it('should not show toast when disabled', () => {
    render(<NotificationToastHandler enabled={false} />);
    emitErrorNotification({
      message: 'Something went wrong',
    });
    expect(mockToastError).not.toHaveBeenCalled();
  });

  it('should not show toast when data is undefined', () => {
    render(<NotificationToastHandler />);
    emitErrorNotification(undefined);
    expect(mockToastError).not.toHaveBeenCalled();
  });
});
describe('sound playback', () => {
  /** Canonical single-deal notification payload shared by every sound test. */
  const buildDealData = (): DealNotificationData => ({
    deals: [
      {
        item_name: 'Milk',
        best_price_in_cents: 399,
        store_name: 'Test Store',
        store_id: 1,
      },
    ],
    user_id: 'user-123',
    message: 'New deal',
  });

  /**
   * Replace the global Audio constructor with a mock that returns an object
   * exposing the given play() stub. Returns the mock constructor for assertions.
   */
  const stubAudio = (play = vi.fn().mockResolvedValue(undefined)) => {
    const AudioMock = vi.fn().mockImplementation(() => ({
      play,
      volume: 0,
    }));
    vi.stubGlobal('Audio', AudioMock);
    return AudioMock;
  };

  /** Fire the registered 'notification:deal' event-bus callback with the payload. */
  const emitDeal = (payload: DealNotificationData) => {
    eventBusCallbacks.get('notification:deal')?.(payload);
  };

  it('should not play sound by default', () => {
    const AudioMock = stubAudio();
    render(<NotificationToastHandler playSound={false} />);
    emitDeal(buildDealData());
    expect(AudioMock).not.toHaveBeenCalled();
  });

  it('should create Audio instance when playSound is true', () => {
    const AudioMock = stubAudio();
    render(<NotificationToastHandler playSound={true} />);
    emitDeal(buildDealData());
    // Verify Audio constructor was called with the default sound URL.
    expect(AudioMock).toHaveBeenCalledWith('/notification-sound.mp3');
  });

  it('should use custom sound URL', () => {
    const AudioMock = stubAudio();
    render(<NotificationToastHandler playSound={true} soundUrl="/custom-sound.mp3" />);
    emitDeal(buildDealData());
    expect(AudioMock).toHaveBeenCalledWith('/custom-sound.mp3');
  });

  it('should handle audio play failure gracefully', () => {
    vi.spyOn(console, 'warn').mockImplementation(() => {});
    const AudioMock = stubAudio(vi.fn().mockRejectedValue(new Error('Autoplay blocked')));
    render(<NotificationToastHandler playSound={true} />);
    // Should not throw even if play() fails.
    expect(() => emitDeal(buildDealData())).not.toThrow();
    // The Audio constructor should still have been invoked.
    expect(AudioMock).toHaveBeenCalled();
  });

  it('should handle Audio constructor failure gracefully', () => {
    vi.spyOn(console, 'warn').mockImplementation(() => {});
    const AudioMock = vi.fn().mockImplementation(() => {
      throw new Error('Audio not supported');
    });
    vi.stubGlobal('Audio', AudioMock);
    render(<NotificationToastHandler playSound={true} />);
    // Should not throw even when the constructor itself fails.
    expect(() => emitDeal(buildDealData())).not.toThrow();
  });
});
describe('persistent connection error', () => {
  /**
   * Configure the useWebSocket mock to report a disconnected socket carrying
   * the given error, while still capturing the connect/disconnect callbacks
   * into the shared outer variables.
   */
  const stubDisconnectedSocket = (error: string | null) => {
    mockUseWebSocket.mockImplementation(
      (options: { onConnect?: () => void; onDisconnect?: () => void }) => {
        onConnectCallback = options?.onConnect;
        onDisconnectCallback = options?.onDisconnect;
        return {
          isConnected: false,
          error,
        };
      },
    );
  };

  it('should show error toast after delay when connection error persists', () => {
    stubDisconnectedSocket('Connection failed');
    render(<NotificationToastHandler enabled={true} />);
    // Fast-forward past the 5-second grace period.
    act(() => {
      vi.advanceTimersByTime(5000);
    });
    expect(mockToastError).toHaveBeenCalledWith(
      'Unable to connect to live notifications. Some features may be limited.',
      expect.objectContaining({
        duration: 5000,
        icon: expect.any(String),
      }),
    );
  });

  it('should not show error toast before delay', () => {
    stubDisconnectedSocket('Connection failed');
    render(<NotificationToastHandler enabled={true} />);
    // Advance only 4 seconds, short of the grace period.
    act(() => {
      vi.advanceTimersByTime(4000);
    });
    expect(mockToastError).not.toHaveBeenCalledWith(
      expect.stringContaining('Unable to connect'),
      expect.anything(),
    );
  });

  it('should not show persistent error toast when disabled', () => {
    stubDisconnectedSocket('Connection failed');
    render(<NotificationToastHandler enabled={false} />);
    act(() => {
      vi.advanceTimersByTime(5000);
    });
    expect(mockToastError).not.toHaveBeenCalled();
  });

  it('should clear timeout on unmount', () => {
    stubDisconnectedSocket('Connection failed');
    const { unmount } = render(<NotificationToastHandler enabled={true} />);
    // Unmount before the timer fires.
    unmount();
    act(() => {
      vi.advanceTimersByTime(5000);
    });
    // The toast must not appear because the component cleared its timeout on unmount.
    expect(mockToastError).not.toHaveBeenCalledWith(
      expect.stringContaining('Unable to connect'),
      expect.anything(),
    );
  });

  it('should not show persistent error toast when there is no error', () => {
    stubDisconnectedSocket(null);
    render(<NotificationToastHandler enabled={true} />);
    act(() => {
      vi.advanceTimersByTime(5000);
    });
    expect(mockToastError).not.toHaveBeenCalled();
  });
});
describe('default props', () => {
  it('should default enabled to true', () => {
    render(<NotificationToastHandler />);
    // With no explicit `enabled` prop, the connect handler should still fire a toast.
    onConnectCallback?.();
    expect(mockToastSuccess).toHaveBeenCalled();
  });

  it('should default playSound to false', () => {
    const audioCtor = vi.fn();
    vi.stubGlobal('Audio', audioCtor);
    render(<NotificationToastHandler />);
    const payload: DealNotificationData = {
      deals: [
        {
          item_name: 'Milk',
          best_price_in_cents: 399,
          store_name: 'Test Store',
          store_id: 1,
        },
      ],
      user_id: 'user-123',
      message: 'New deal',
    };
    eventBusCallbacks.get('notification:deal')?.(payload);
    // No Audio instance should be created when playSound is left unset.
    expect(audioCtor).not.toHaveBeenCalled();
  });

  it('should default soundUrl to /notification-sound.mp3', () => {
    const playStub = vi.fn().mockResolvedValue(undefined);
    const audioCtor = vi.fn().mockImplementation(() => ({
      play: playStub,
      volume: 0,
    }));
    vi.stubGlobal('Audio', audioCtor);
    render(<NotificationToastHandler playSound={true} />);
    const payload: DealNotificationData = {
      deals: [
        {
          item_name: 'Milk',
          best_price_in_cents: 399,
          store_name: 'Test Store',
          store_id: 1,
        },
      ],
      user_id: 'user-123',
      message: 'New deal',
    };
    eventBusCallbacks.get('notification:deal')?.(payload);
    expect(audioCtor).toHaveBeenCalledWith('/notification-sound.mp3');
  });
});
});

View File

@@ -24,6 +24,28 @@ const config = {
debug: import.meta.env.VITE_SENTRY_DEBUG === 'true',
enabled: import.meta.env.VITE_SENTRY_ENABLED !== 'false',
},
/**
* Feature flags for conditional feature rendering (ADR-024).
*
* All flags default to false (disabled) when the environment variable is not set
* or is set to any value other than 'true'. This opt-in model ensures features
* are explicitly enabled, preventing accidental exposure of incomplete features.
*
* Environment variables follow the naming convention: VITE_FEATURE_SNAKE_CASE
* Config properties use camelCase for consistency with JavaScript conventions.
*
* @see docs/adr/0024-feature-flagging-strategy.md
*/
featureFlags: {
/** Enable the redesigned dashboard UI (VITE_FEATURE_NEW_DASHBOARD) */
newDashboard: import.meta.env.VITE_FEATURE_NEW_DASHBOARD === 'true',
/** Enable beta recipe features (VITE_FEATURE_BETA_RECIPES) */
betaRecipes: import.meta.env.VITE_FEATURE_BETA_RECIPES === 'true',
/** Enable experimental AI features (VITE_FEATURE_EXPERIMENTAL_AI) */
experimentalAi: import.meta.env.VITE_FEATURE_EXPERIMENTAL_AI === 'true',
/** Enable debug mode UI elements (VITE_FEATURE_DEBUG_MODE) */
debugMode: import.meta.env.VITE_FEATURE_DEBUG_MODE === 'true',
},
};
export default config;

View File

@@ -155,6 +155,38 @@ const sentrySchema = z.object({
debug: booleanString(false),
});
/**
* Feature flags configuration schema (ADR-024).
*
* All flags default to `false` (disabled) for safety, following an opt-in model.
* Set the corresponding environment variable to 'true' to enable a feature.
*
* Environment variable naming convention: `FEATURE_SNAKE_CASE`
* Config property naming convention: `camelCase`
*
* @example
* // Enable via environment:
* FEATURE_BUGSINK_SYNC=true
*
* // Check in code:
* import { config } from './config/env';
* if (config.featureFlags.bugsinkSync) { ... }
*/
const featureFlagsSchema = z.object({
/** Enable Bugsink error sync integration (FEATURE_BUGSINK_SYNC) */
bugsinkSync: booleanString(false),
/** Enable advanced RBAC features (FEATURE_ADVANCED_RBAC) */
advancedRbac: booleanString(false),
/** Enable new dashboard experience (FEATURE_NEW_DASHBOARD) */
newDashboard: booleanString(false),
/** Enable beta recipe features (FEATURE_BETA_RECIPES) */
betaRecipes: booleanString(false),
/** Enable experimental AI features (FEATURE_EXPERIMENTAL_AI) */
experimentalAi: booleanString(false),
/** Enable debug mode for development (FEATURE_DEBUG_MODE) */
debugMode: booleanString(false),
});
/**
* Complete environment configuration schema.
*/
@@ -170,6 +202,7 @@ const envSchema = z.object({
worker: workerSchema,
server: serverSchema,
sentry: sentrySchema,
featureFlags: featureFlagsSchema,
});
export type EnvConfig = z.infer<typeof envSchema>;
@@ -244,6 +277,14 @@ function loadEnvVars(): unknown {
environment: process.env.SENTRY_ENVIRONMENT || process.env.NODE_ENV,
debug: process.env.SENTRY_DEBUG,
},
featureFlags: {
bugsinkSync: process.env.FEATURE_BUGSINK_SYNC,
advancedRbac: process.env.FEATURE_ADVANCED_RBAC,
newDashboard: process.env.FEATURE_NEW_DASHBOARD,
betaRecipes: process.env.FEATURE_BETA_RECIPES,
experimentalAi: process.env.FEATURE_EXPERIMENTAL_AI,
debugMode: process.env.FEATURE_DEBUG_MODE,
},
};
}
@@ -391,3 +432,33 @@ export const isGoogleOAuthConfigured = !!config.google.clientId && !!config.goog
* Returns true if GitHub OAuth is configured (both client ID and secret present).
*/
export const isGithubOAuthConfigured = !!config.github.clientId && !!config.github.clientSecret;
// --- Feature Flag Helpers (ADR-024) ---
/**
* Type representing valid feature flag names.
* Derived from the featureFlagsSchema for type safety.
*/
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Check if a feature flag is enabled.
*
* This is a convenience function for checking feature flag state.
* For more advanced usage (logging, all flags), use the featureFlags service.
*
* @param flagName - The name of the feature flag to check
* @returns boolean indicating if the feature is enabled
*
* @example
* ```typescript
* import { isFeatureFlagEnabled } from './config/env';
*
* if (isFeatureFlagEnabled('newDashboard')) {
* // Use new dashboard
* }
* ```
*/
export function isFeatureFlagEnabled(flagName: FeatureFlagName): boolean {
return config.featureFlags[flagName];
}

View File

@@ -1,6 +1,12 @@
// src/config/swagger.test.ts
/**
* Tests for tsoa-generated OpenAPI specification.
*
* These tests verify the tsoa specification structure and content
* as generated from controllers decorated with tsoa decorators.
*/
import { describe, it, expect } from 'vitest';
import { swaggerSpec } from './swagger';
import tsoaSpec from './tsoa-spec.json';
// Type definition for OpenAPI 3.0 spec structure used in tests
interface OpenAPISpec {
@@ -10,18 +16,11 @@ interface OpenAPISpec {
version: string;
description?: string;
contact?: { name: string };
license?: { name: string };
license?: { name: string | { name: string } };
};
servers: Array<{ url: string; description?: string }>;
components: {
securitySchemes?: {
bearerAuth?: {
type: string;
scheme: string;
bearerFormat?: string;
description?: string;
};
};
securitySchemes?: Record<string, unknown>;
schemas?: Record<string, unknown>;
};
tags: Array<{ name: string; description?: string }>;
@@ -29,19 +28,13 @@ interface OpenAPISpec {
}
// Cast to typed spec for property access
const spec = swaggerSpec as OpenAPISpec;
const spec = tsoaSpec as unknown as OpenAPISpec;
/**
* Tests for src/config/swagger.ts - OpenAPI/Swagger configuration.
*
* These tests verify the swagger specification structure and content
* without testing the swagger-jsdoc library itself.
*/
describe('swagger configuration', () => {
describe('swaggerSpec export', () => {
describe('tsoa OpenAPI specification', () => {
describe('spec export', () => {
it('should export a swagger specification object', () => {
expect(swaggerSpec).toBeDefined();
expect(typeof swaggerSpec).toBe('object');
expect(tsoaSpec).toBeDefined();
expect(typeof tsoaSpec).toBe('object');
});
it('should have openapi version 3.0.0', () => {
@@ -63,12 +56,11 @@ describe('swagger configuration', () => {
it('should have contact information', () => {
expect(spec.info.contact).toBeDefined();
expect(spec.info.contact?.name).toBe('API Support');
expect(spec.info.contact?.name).toBeDefined();
});
it('should have license information', () => {
expect(spec.info.license).toBeDefined();
expect(spec.info.license?.name).toBe('Private');
});
});
@@ -79,10 +71,9 @@ describe('swagger configuration', () => {
expect(spec.servers.length).toBeGreaterThan(0);
});
it('should have /api/v1 as the server URL (ADR-008)', () => {
const apiServer = spec.servers.find((s) => s.url === '/api/v1');
it('should have /api as the server URL (tsoa basePath)', () => {
const apiServer = spec.servers.find((s) => s.url === '/api');
expect(apiServer).toBeDefined();
expect(apiServer?.description).toBe('API server (v1)');
});
});
@@ -91,96 +82,42 @@ describe('swagger configuration', () => {
expect(spec.components).toBeDefined();
});
describe('securitySchemes', () => {
it('should have bearerAuth security scheme', () => {
expect(spec.components.securitySchemes).toBeDefined();
expect(spec.components.securitySchemes?.bearerAuth).toBeDefined();
});
it('should configure bearerAuth as HTTP bearer with JWT format', () => {
const bearerAuth = spec.components.securitySchemes?.bearerAuth;
expect(bearerAuth?.type).toBe('http');
expect(bearerAuth?.scheme).toBe('bearer');
expect(bearerAuth?.bearerFormat).toBe('JWT');
});
it('should have description for bearerAuth', () => {
const bearerAuth = spec.components.securitySchemes?.bearerAuth;
expect(bearerAuth?.description).toContain('JWT token');
});
});
describe('schemas', () => {
const schemas = () => spec.components.schemas as Record<string, any>;
const schemas = () => spec.components.schemas as Record<string, unknown>;
it('should have schemas object', () => {
expect(spec.components.schemas).toBeDefined();
});
it('should have SuccessResponse schema (ADR-028)', () => {
const schema = schemas().SuccessResponse;
it('should have PaginationMeta schema (ADR-028)', () => {
const schema = schemas().PaginationMeta as Record<string, unknown>;
expect(schema).toBeDefined();
expect(schema.type).toBe('object');
expect(schema.properties.success).toBeDefined();
expect(schema.properties.data).toBeDefined();
expect(schema.required).toContain('success');
expect(schema.required).toContain('data');
const properties = schema.properties as Record<string, unknown>;
expect(properties.page).toBeDefined();
expect(properties.limit).toBeDefined();
expect(properties.total).toBeDefined();
expect(properties.totalPages).toBeDefined();
expect(properties.hasNextPage).toBeDefined();
expect(properties.hasPrevPage).toBeDefined();
});
it('should have ErrorResponse schema (ADR-028)', () => {
const schema = schemas().ErrorResponse;
it('should have ResponseMeta schema', () => {
const schema = schemas().ResponseMeta as Record<string, unknown>;
expect(schema).toBeDefined();
expect(schema.type).toBe('object');
expect(schema.properties.success).toBeDefined();
expect(schema.properties.error).toBeDefined();
expect(schema.required).toContain('success');
expect(schema.required).toContain('error');
const properties = schema.properties as Record<string, unknown>;
expect(properties.requestId).toBeDefined();
expect(properties.timestamp).toBeDefined();
});
it('should have ErrorResponse error object with code and message', () => {
const errorSchema = schemas().ErrorResponse.properties.error;
expect(errorSchema.properties.code).toBeDefined();
expect(errorSchema.properties.message).toBeDefined();
expect(errorSchema.required).toContain('code');
expect(errorSchema.required).toContain('message');
});
it('should have ServiceHealth schema', () => {
const schema = schemas().ServiceHealth;
it('should have ErrorDetails schema for error responses (ADR-028)', () => {
const schema = schemas().ErrorDetails as Record<string, unknown>;
expect(schema).toBeDefined();
expect(schema.type).toBe('object');
expect(schema.properties.status).toBeDefined();
expect(schema.properties.status.enum).toContain('healthy');
expect(schema.properties.status.enum).toContain('degraded');
expect(schema.properties.status.enum).toContain('unhealthy');
});
it('should have Achievement schema', () => {
const schema = schemas().Achievement;
expect(schema).toBeDefined();
expect(schema.type).toBe('object');
expect(schema.properties.achievement_id).toBeDefined();
expect(schema.properties.name).toBeDefined();
expect(schema.properties.description).toBeDefined();
expect(schema.properties.icon).toBeDefined();
expect(schema.properties.points_value).toBeDefined();
});
it('should have UserAchievement schema extending Achievement', () => {
const schema = schemas().UserAchievement;
expect(schema).toBeDefined();
expect(schema.allOf).toBeDefined();
expect(schema.allOf[0].$ref).toBe('#/components/schemas/Achievement');
});
it('should have LeaderboardUser schema', () => {
const schema = schemas().LeaderboardUser;
expect(schema).toBeDefined();
expect(schema.type).toBe('object');
expect(schema.properties.user_id).toBeDefined();
expect(schema.properties.full_name).toBeDefined();
expect(schema.properties.points).toBeDefined();
expect(schema.properties.rank).toBeDefined();
const properties = schema.properties as Record<string, unknown>;
expect(properties.code).toBeDefined();
expect(properties.message).toBeDefined();
});
});
});
@@ -194,7 +131,7 @@ describe('swagger configuration', () => {
it('should have Health tag', () => {
const tag = spec.tags.find((t) => t.name === 'Health');
expect(tag).toBeDefined();
expect(tag?.description).toContain('health');
expect(tag?.description).toContain('Health');
});
it('should have Auth tag', () => {
@@ -206,13 +143,6 @@ describe('swagger configuration', () => {
it('should have Users tag', () => {
const tag = spec.tags.find((t) => t.name === 'Users');
expect(tag).toBeDefined();
expect(tag?.description).toContain('User');
});
it('should have Achievements tag', () => {
const tag = spec.tags.find((t) => t.name === 'Achievements');
expect(tag).toBeDefined();
expect(tag?.description).toContain('Gamification');
});
it('should have Flyers tag', () => {
@@ -220,45 +150,37 @@ describe('swagger configuration', () => {
expect(tag).toBeDefined();
});
it('should have Recipes tag', () => {
const tag = spec.tags.find((t) => t.name === 'Recipes');
it('should have Deals tag', () => {
const tag = spec.tags.find((t) => t.name === 'Deals');
expect(tag).toBeDefined();
});
it('should have Budgets tag', () => {
const tag = spec.tags.find((t) => t.name === 'Budgets');
it('should have Stores tag', () => {
const tag = spec.tags.find((t) => t.name === 'Stores');
expect(tag).toBeDefined();
});
});
it('should have Admin tag', () => {
const tag = spec.tags.find((t) => t.name === 'Admin');
expect(tag).toBeDefined();
expect(tag?.description).toContain('admin');
describe('paths section', () => {
it('should have paths object with endpoints', () => {
expect(spec.paths).toBeDefined();
expect(typeof spec.paths).toBe('object');
expect(Object.keys(spec.paths as object).length).toBeGreaterThan(0);
});
it('should have System tag', () => {
const tag = spec.tags.find((t) => t.name === 'System');
expect(tag).toBeDefined();
});
it('should have 9 tags total', () => {
expect(spec.tags.length).toBe(9);
it('should have health ping endpoint', () => {
const paths = spec.paths as Record<string, unknown>;
expect(paths['/health/ping']).toBeDefined();
});
});
describe('specification validity', () => {
it('should have paths object (may be empty if no JSDoc annotations parsed)', () => {
// swagger-jsdoc creates paths from JSDoc annotations in route files
// In test environment, this may be empty if routes aren't scanned
expect(swaggerSpec).toHaveProperty('paths');
});
it('should be a valid JSON-serializable object', () => {
expect(() => JSON.stringify(swaggerSpec)).not.toThrow();
expect(() => JSON.stringify(tsoaSpec)).not.toThrow();
});
it('should produce valid JSON output', () => {
const json = JSON.stringify(swaggerSpec);
const json = JSON.stringify(tsoaSpec);
expect(() => JSON.parse(json)).not.toThrow();
});
});

View File

@@ -1,228 +0,0 @@
// src/config/swagger.ts
/**
* @file OpenAPI/Swagger configuration for API documentation.
* Implements ADR-018: API Documentation Strategy.
*
* This file configures swagger-jsdoc to generate an OpenAPI 3.0 specification
* from JSDoc annotations in route files. The specification is used by
* swagger-ui-express to serve interactive API documentation.
*/
import swaggerJsdoc from 'swagger-jsdoc';
const options: swaggerJsdoc.Options = {
definition: {
openapi: '3.0.0',
info: {
title: 'Flyer Crawler API',
version: '1.0.0',
description:
'API for the Flyer Crawler application - a platform for discovering grocery deals, managing recipes, and tracking budgets.',
contact: {
name: 'API Support',
},
license: {
name: 'Private',
},
},
servers: [
{
url: '/api/v1',
description: 'API server (v1)',
},
],
components: {
securitySchemes: {
bearerAuth: {
type: 'http',
scheme: 'bearer',
bearerFormat: 'JWT',
description: 'JWT token obtained from /auth/login or /auth/register',
},
},
schemas: {
// Standard success response wrapper (ADR-028)
SuccessResponse: {
type: 'object',
properties: {
success: {
type: 'boolean',
example: true,
},
data: {
type: 'object',
description: 'Response payload - structure varies by endpoint',
},
},
required: ['success', 'data'],
},
// Standard error response wrapper (ADR-028)
ErrorResponse: {
type: 'object',
properties: {
success: {
type: 'boolean',
example: false,
},
error: {
type: 'object',
properties: {
code: {
type: 'string',
description: 'Machine-readable error code',
example: 'VALIDATION_ERROR',
},
message: {
type: 'string',
description: 'Human-readable error message',
example: 'Invalid request parameters',
},
},
required: ['code', 'message'],
},
},
required: ['success', 'error'],
},
// Common service health status
ServiceHealth: {
type: 'object',
properties: {
status: {
type: 'string',
enum: ['healthy', 'degraded', 'unhealthy'],
},
latency: {
type: 'number',
description: 'Response time in milliseconds',
},
message: {
type: 'string',
description: 'Additional status information',
},
details: {
type: 'object',
description: 'Service-specific details',
},
},
required: ['status'],
},
// Achievement schema
Achievement: {
type: 'object',
properties: {
achievement_id: {
type: 'integer',
example: 1,
},
name: {
type: 'string',
example: 'First-Upload',
},
description: {
type: 'string',
example: 'Upload your first flyer',
},
icon: {
type: 'string',
example: 'upload-cloud',
},
points_value: {
type: 'integer',
example: 25,
},
created_at: {
type: 'string',
format: 'date-time',
},
},
},
// User achievement (with achieved_at)
UserAchievement: {
allOf: [
{ $ref: '#/components/schemas/Achievement' },
{
type: 'object',
properties: {
user_id: {
type: 'string',
format: 'uuid',
},
achieved_at: {
type: 'string',
format: 'date-time',
},
},
},
],
},
// Leaderboard entry
LeaderboardUser: {
type: 'object',
properties: {
user_id: {
type: 'string',
format: 'uuid',
},
full_name: {
type: 'string',
example: 'John Doe',
},
avatar_url: {
type: 'string',
nullable: true,
},
points: {
type: 'integer',
example: 150,
},
rank: {
type: 'integer',
example: 1,
},
},
},
},
},
tags: [
{
name: 'Health',
description: 'Server health and readiness checks',
},
{
name: 'Auth',
description: 'Authentication and authorization',
},
{
name: 'Users',
description: 'User profile management',
},
{
name: 'Achievements',
description: 'Gamification and leaderboards',
},
{
name: 'Flyers',
description: 'Flyer uploads and retrieval',
},
{
name: 'Recipes',
description: 'Recipe management',
},
{
name: 'Budgets',
description: 'Budget tracking and analysis',
},
{
name: 'Admin',
description: 'Administrative operations (requires admin role)',
},
{
name: 'System',
description: 'System status and monitoring',
},
],
},
// Path to the API routes files with JSDoc annotations
apis: ['./src/routes/*.ts'],
};
export const swaggerSpec = swaggerJsdoc(options);

420
src/controllers/README.md Normal file
View File

@@ -0,0 +1,420 @@
# tsoa Controller Standards
This document defines the coding standards and patterns for implementing tsoa controllers in the Flyer Crawler API.
## Overview
Controllers are the API layer that handles HTTP requests and responses. They use [tsoa](https://tsoa-community.github.io/docs/) decorators to define routes and generate OpenAPI specifications automatically.
**Key Principles:**
- Controllers handle HTTP concerns (parsing requests, formatting responses)
- Business logic belongs in the service layer
- All controllers extend `BaseController` for consistent response formatting
- Response formats follow ADR-028 (API Response Standards)
## Quick Start
```typescript
import { Route, Get, Post, Tags, Body, Path, Query, Security, Request } from 'tsoa';
import {
BaseController,
SuccessResponse,
PaginatedResponse,
RequestContext,
} from './base.controller';
import { userService } from '../services/userService';
import type { User, CreateUserRequest } from '../types';
@Route('users')
@Tags('Users')
export class UsersController extends BaseController {
/**
* Get a user by ID.
* @param id The user's unique identifier
*/
@Get('{id}')
@Security('jwt')
public async getUser(
@Path() id: string,
@Request() ctx: RequestContext,
): Promise<SuccessResponse<User>> {
ctx.logger.info({ userId: id }, 'Fetching user');
const user = await userService.getUserById(id, ctx.logger);
return this.success(user);
}
/**
* List all users with pagination.
*/
@Get()
@Security('jwt', ['admin'])
public async listUsers(
@Query() page?: number,
@Query() limit?: number,
@Request() ctx?: RequestContext,
): Promise<PaginatedResponse<User>> {
const { page: p, limit: l } = this.normalizePagination(page, limit);
const { users, total } = await userService.listUsers({ page: p, limit: l }, ctx?.logger);
return this.paginated(users, { page: p, limit: l, total });
}
/**
* Create a new user.
*/
@Post()
@Security('jwt', ['admin'])
public async createUser(
@Body() body: CreateUserRequest,
@Request() ctx: RequestContext,
): Promise<SuccessResponse<User>> {
const user = await userService.createUser(body, ctx.logger);
return this.created(user);
}
}
```
## File Structure
```
src/controllers/
├── base.controller.ts # Base class with response helpers
├── types.ts # Shared types for controllers
├── README.md # This file
├── health.controller.ts # Health check endpoints
├── auth.controller.ts # Authentication endpoints
├── users.controller.ts # User management endpoints
└── ...
```
## Response Format
All responses follow ADR-028:
### Success Response
```typescript
interface SuccessResponse<T> {
success: true;
data: T;
meta?: {
requestId?: string;
timestamp?: string;
pagination?: PaginationMeta;
};
}
```
### Error Response
```typescript
interface ErrorResponse {
success: false;
error: {
code: string; // e.g., 'NOT_FOUND', 'VALIDATION_ERROR'
message: string; // Human-readable message
details?: unknown; // Additional error details
};
meta?: {
requestId?: string;
timestamp?: string;
};
}
```
### Paginated Response
```typescript
interface PaginatedResponse<T> {
success: true;
data: T[];
meta: {
pagination: {
page: number;
limit: number;
total: number;
totalPages: number;
hasNextPage: boolean;
hasPrevPage: boolean;
};
};
}
```
## BaseController Methods
### Response Helpers
| Method | Description | HTTP Status |
| ------------------------------------ | --------------------------- | ----------- |
| `success(data, meta?)` | Standard success response | 200 |
| `created(data, meta?)` | Resource created | 201 |
| `noContent()` | Success with no body | 204 |
| `paginated(data, pagination, meta?)` | Paginated list response | 200 |
| `message(msg)` | Success with just a message | 200 |
### Pagination Helpers
| Method | Description |
| ------------------------------------ | ----------------------------------------------------- |
| `normalizePagination(page?, limit?)` | Apply defaults and bounds (page=1, limit=20, max=100) |
| `calculatePagination(input)` | Calculate totalPages, hasNextPage, etc. |
### Error Handling
Controllers should throw typed errors rather than constructing error responses manually:
```typescript
import { NotFoundError, ForbiddenError, ValidationError } from './base.controller';
// Throw when resource not found
throw new NotFoundError('User not found');
// Throw when access denied
throw new ForbiddenError('Cannot access this resource');
// Throw for validation errors (from Zod)
throw new ValidationError(zodError.issues);
```
The global error handler converts these to proper HTTP responses.
## tsoa Decorators
### Route Decorators
| Decorator | Description |
| ------------------------------------------------------ | ----------------------------------------- |
| `@Route('path')` | Base path for all endpoints in controller |
| `@Tags('TagName')` | OpenAPI tag for grouping |
| `@Get()`, `@Post()`, `@Put()`, `@Patch()`, `@Delete()` | HTTP methods |
| `@Security('jwt')` | Require authentication |
| `@Security('jwt', ['admin'])` | Require specific roles |
### Parameter Decorators
| Decorator | Description | Example |
| ------------ | ---------------------- | ------------------------------------- |
| `@Path()` | URL path parameter | `@Path() id: string` |
| `@Query()` | Query string parameter | `@Query() search?: string` |
| `@Body()` | Request body | `@Body() data: CreateUserRequest` |
| `@Header()` | Request header | `@Header('X-Custom') custom?: string` |
| `@Request()` | Full request context | `@Request() ctx: RequestContext` |
### Response Decorators
| Decorator | Description |
| -------------------------------------------- | -------------------------------- |
| `@Response<ErrorResponse>(404, 'Not Found')` | Document possible error response |
| `@SuccessResponse(201, 'Created')` | Document success response |
## RequestContext
The `RequestContext` interface provides access to request-scoped resources:
```typescript
interface RequestContext {
logger: Logger; // Request-scoped Pino logger (ADR-004)
requestId: string; // Unique request ID for correlation
user?: AuthenticatedUser; // Authenticated user (from JWT)
dbClient?: PoolClient; // Database client for transactions
}
interface AuthenticatedUser {
userId: string;
email: string;
roles?: string[];
}
```
## Naming Conventions
### Controller Class Names
- PascalCase with `Controller` suffix
- Example: `UsersController`, `HealthController`, `AuthController`
### Method Names
- camelCase, describing the action
- Use verbs: `getUser`, `listUsers`, `createUser`, `updateUser`, `deleteUser`
### File Names
- kebab-case with `.controller.ts` suffix
- Example: `users.controller.ts`, `health.controller.ts`
## Service Layer Integration
Controllers delegate to the service layer for business logic:
```typescript
@Route('flyers')
@Tags('Flyers')
export class FlyersController extends BaseController {
@Get('{id}')
public async getFlyer(
@Path() id: number,
@Request() ctx: RequestContext,
): Promise<SuccessResponse<Flyer>> {
// Service handles business logic and database access
const flyer = await flyerService.getFlyerById(id, ctx.logger);
// Controller only formats the response
return this.success(flyer);
}
}
```
**Rules:**
1. Never access repositories directly from controllers
2. Always pass the logger to service methods
3. Let services throw domain errors (NotFoundError, etc.)
4. Controllers catch and transform errors only when needed
## Validation
tsoa performs automatic validation based on TypeScript types. For complex validation, define request types:
```typescript
// In types.ts or a dedicated types file
interface CreateUserRequest {
/**
* User's email address
* @format email
*/
email: string;
/**
* User's password
* @minLength 8
*/
password: string;
/**
* Full name (optional)
*/
fullName?: string;
}
// In controller
@Post()
public async createUser(@Body() body: CreateUserRequest): Promise<SuccessResponse<User>> {
// body is already validated by tsoa
return this.created(await userService.createUser(body));
}
```
For additional runtime validation (e.g., database constraints), use Zod in the service layer.
## Error Handling Examples
### Not Found
```typescript
@Get('{id}')
public async getUser(@Path() id: string): Promise<SuccessResponse<User>> {
const user = await userService.findUserById(id);
if (!user) {
throw new NotFoundError(`User with ID ${id} not found`);
}
return this.success(user);
}
```
### Authorization
```typescript
@Delete('{id}')
@Security('jwt')
public async deleteUser(
@Path() id: string,
@Request() ctx: RequestContext,
): Promise<void> {
// Only allow users to delete their own account (or admins)
if (ctx.user?.userId !== id && !ctx.user?.roles?.includes('admin')) {
throw new ForbiddenError('Cannot delete another user account');
}
await userService.deleteUser(id, ctx.logger);
return this.noContent();
}
```
### Conflict (Duplicate)
```typescript
@Post()
public async createUser(@Body() body: CreateUserRequest): Promise<SuccessResponse<User>> {
try {
const user = await userService.createUser(body);
return this.created(user);
} catch (error) {
if (error instanceof UniqueConstraintError) {
this.setStatus(409);
throw error; // Let error handler format the response
}
throw error;
}
}
```
## Testing Controllers
Controllers should be tested via integration tests that verify the full HTTP request/response cycle:
```typescript
import { describe, it, expect } from 'vitest';
import request from 'supertest';
import { app } from '../app';
describe('UsersController', () => {
describe('GET /api/v1/users/:id', () => {
it('should return user when found', async () => {
const response = await request(app)
.get('/api/v1/users/123')
.set('Authorization', `Bearer ${validToken}`)
.expect(200);
expect(response.body).toEqual({
success: true,
data: expect.objectContaining({
user_id: '123',
}),
});
});
it('should return 404 when user not found', async () => {
const response = await request(app)
.get('/api/v1/users/nonexistent')
.set('Authorization', `Bearer ${validToken}`)
.expect(404);
expect(response.body).toEqual({
success: false,
error: {
code: 'NOT_FOUND',
message: expect.any(String),
},
});
});
});
});
```
## Migration from Express Routes
When migrating existing Express routes to tsoa controllers:
1. Create the controller class extending `BaseController`
2. Add route decorators matching existing paths
3. Move request handling logic (keep business logic in services)
4. Replace `sendSuccess`/`sendError` with `this.success()`/throwing errors
5. Update tests to use the new paths
6. Remove the old Express route file
## Related Documentation
- [ADR-028: API Response Standards](../../docs/adr/ADR-028-api-response-standards.md)
- [ADR-004: Request-Scoped Logging](../../docs/adr/0004-request-scoped-logging.md)
- [CODE-PATTERNS.md](../../docs/development/CODE-PATTERNS.md)
- [tsoa Documentation](https://tsoa-community.github.io/docs/)

View File

@@ -0,0 +1,862 @@
// src/controllers/admin.controller.test.ts
// ============================================================================
// ADMIN CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the AdminController class. These tests verify controller
// logic in isolation by mocking database repositories, services, and queues.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class
// Replace the real tsoa module with inert stand-ins so controller classes can
// be loaded without the tsoa runtime. Every decorator factory becomes the same
// no-op, and Controller only records the HTTP status a subclass sets.
vi.mock('tsoa', () => {
  const noopDecorator = () => () => {};
  return {
    Controller: class Controller {
      private _status = 200;
      protected setStatus(status: number): void {
        this._status = status;
      }
    },
    Get: noopDecorator,
    Post: noopDecorator,
    Put: noopDecorator,
    Delete: noopDecorator,
    Route: noopDecorator,
    Tags: noopDecorator,
    Security: noopDecorator,
    Path: noopDecorator,
    Query: noopDecorator,
    Body: noopDecorator,
    Request: noopDecorator,
    SuccessResponse: noopDecorator,
    Response: noopDecorator,
    Middlewares: noopDecorator,
  };
});
// Mock the database repository layer. Every repo method AdminController
// touches is replaced with a bare vi.fn() so each test can script return
// values and inspect call arguments without a real database.
vi.mock('../services/db/index.db', () => ({
  // Admin-specific queries: corrections workflow, user admin, moderation,
  // flyer review, and the stats/activity endpoints.
  adminRepo: {
    getSuggestedCorrections: vi.fn(),
    approveCorrection: vi.fn(),
    rejectCorrection: vi.fn(),
    updateSuggestedCorrection: vi.fn(),
    getAllUsers: vi.fn(),
    updateUserRole: vi.fn(),
    updateRecipeStatus: vi.fn(),
    updateRecipeCommentStatus: vi.fn(),
    getFlyersForReview: vi.fn(),
    getUnmatchedFlyerItems: vi.fn(),
    getApplicationStats: vi.fn(),
    getDailyStatsForLast30Days: vi.fn(),
    getActivityLog: vi.fn(),
  },
  // Profile lookup used by getUserById().
  userRepo: {
    findUserProfileById: vi.fn(),
  },
  // Flyer deletion and brand listing.
  flyerRepo: {
    deleteFlyer: vi.fn(),
    getAllBrands: vi.fn(),
  },
  // Recipe deletion (admin moderation path).
  recipeRepo: {
    deleteRecipe: vi.fn(),
  },
}));
// Mock the service layer. Each singleton the controller delegates to is
// replaced wholesale; only the methods this test file exercises are stubbed.
vi.mock('../services/backgroundJobService', () => ({
  backgroundJobService: {
    runDailyDealCheck: vi.fn(),
    triggerAnalyticsReport: vi.fn(),
    triggerWeeklyAnalyticsReport: vi.fn(),
    triggerTokenCleanup: vi.fn(),
  },
}));
// Worker/queue monitoring and failed-job retry.
vi.mock('../services/monitoringService.server', () => ({
  monitoringService: {
    getWorkerStatuses: vi.fn(),
    getQueueStatuses: vi.fn(),
    retryFailedJob: vi.fn(),
  },
}));
// Geocode cache flush (returns a deleted-key count in the real service).
vi.mock('../services/geocodingService.server', () => ({
  geocodingService: {
    clearGeocodeCache: vi.fn(),
  },
}));
// Application cache invalidation, one method per cache namespace.
vi.mock('../services/cacheService.server', () => ({
  cacheService: {
    invalidateFlyers: vi.fn(),
    invalidateBrands: vi.fn(),
    invalidateStats: vi.fn(),
  },
}));
vi.mock('../services/brandService', () => ({
  brandService: {
    updateBrandLogo: vi.fn(),
  },
}));
vi.mock('../services/userService', () => ({
  userService: {
    deleteUserAsAdmin: vi.fn(),
  },
}));
// Feature-flag provider; FeatureFlagName enum is stubbed empty since tests
// only read flags through getFeatureFlags().
vi.mock('../services/featureFlags.server', () => ({
  getFeatureFlags: vi.fn(),
  FeatureFlagName: {},
}));
// Mock BullMQ queues — only .add() is used by the trigger endpoints.
vi.mock('../services/queueService.server', () => ({
  cleanupQueue: {
    add: vi.fn(),
  },
  analyticsQueue: {
    add: vi.fn(),
  },
}));
// Mock rate limiters as pass-through middleware so requests are never throttled.
vi.mock('../config/rateLimiters', () => ({
  adminTriggerLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
  adminUploadLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
}));
// Mock file utils (temp-file cleanup after uploads).
vi.mock('../utils/fileUtils', () => ({
  cleanupUploadedFile: vi.fn(),
}));
// Mock websocket service (loaded via dynamic import in the controller).
vi.mock('../services/websocketService.server', () => ({
  websocketService: {
    getConnectionStats: vi.fn(),
  },
}));
// Import mocked modules after mock definitions
import * as db from '../services/db/index.db';
import { backgroundJobService } from '../services/backgroundJobService';
import { monitoringService } from '../services/monitoringService.server';
import { geocodingService } from '../services/geocodingService.server';
import { cacheService } from '../services/cacheService.server';
import { brandService } from '../services/brandService';
import { userService } from '../services/userService';
import { getFeatureFlags } from '../services/featureFlags.server';
import { cleanupQueue, analyticsQueue } from '../services/queueService.server';
import { AdminController } from './admin.controller';
// Wrap the mocked singletons with vi.mocked() so tests get type-safe access
// to mock helpers (mockResolvedValue, toHaveBeenCalledWith, ...).
const mockedAdminRepo = vi.mocked(db.adminRepo);
const mockedUserRepo = vi.mocked(db.userRepo);
const mockedFlyerRepo = vi.mocked(db.flyerRepo);
const mockedRecipeRepo = vi.mocked(db.recipeRepo);
const mockedBackgroundJobService = vi.mocked(backgroundJobService);
const mockedMonitoringService = vi.mocked(monitoringService);
const mockedGeoCodingService = vi.mocked(geocodingService);
const mockedCacheService = vi.mocked(cacheService);
// NOTE(review): currently unused (underscore opts out of lint); remove once
// brand-logo endpoints gain coverage here, or when confirmed unnecessary.
const _mockedBrandService = vi.mocked(brandService);
const mockedUserService = vi.mocked(userService);
const mockedGetFeatureFlags = vi.mocked(getFeatureFlags);
const mockedCleanupQueue = vi.mocked(cleanupQueue);
const mockedAnalyticsQueue = vi.mocked(analyticsQueue);
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a minimal Express request stub pre-populated with an admin user
 * profile and a no-op request-scoped logger. Pass `overrides` to replace any
 * top-level property (body, params, file, user, ...).
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const noopLogger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const base = {
    body: {},
    params: {},
    query: {},
    file: undefined,
    user: createMockAdminProfile(),
    log: noopLogger,
  };
  return { ...base, ...overrides } as unknown as ExpressRequest;
}
/**
 * Returns a user-profile object shaped like what the auth middleware attaches
 * for an administrator: display name, the 'admin' role, and the nested
 * account record (id + email).
 */
function createMockAdminProfile() {
  const account = {
    user_id: 'admin-user-id',
    email: 'admin@example.com',
  };
  return {
    full_name: 'Admin User',
    role: 'admin' as const,
    user: account,
  };
}
/**
 * Fabricates a suggested-correction row in the shape adminRepo returns.
 * Every field can be replaced per-test via `overrides`.
 */
function createMockCorrection(overrides: Record<string, unknown> = {}) {
  const defaults = {
    suggested_correction_id: 1,
    flyer_item_id: 100,
    user_id: 'user-123',
    correction_type: 'master_item',
    suggested_value: 'Organic Milk',
    status: 'pending' as const,
    flyer_item_name: 'Milk',
    flyer_item_price_display: '$3.99',
    user_email: 'user@example.com',
    created_at: '2024-01-01T00:00:00.000Z',
    updated_at: '2024-01-01T00:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('AdminController', () => {
  // Fresh controller instance per test, shared by all nested suites below.
  let controller: AdminController;
  beforeEach(() => {
    // Reset call history and queued return values on every mock so tests
    // cannot leak state into one another.
    vi.clearAllMocks();
    controller = new AdminController();
  });
  afterEach(() => {
    // NOTE(review): no test in this file appears to install fake timers;
    // this looks like defensive cleanup — confirm before removing.
    vi.useRealTimers();
  });
  // ==========================================================================
  // CORRECTIONS MANAGEMENT
  // ==========================================================================
  describe('getCorrections()', () => {
    // The repo result should be passed through inside a success envelope.
    it('should return pending corrections', async () => {
      // Arrange
      const mockCorrections = [
        createMockCorrection(),
        createMockCorrection({ suggested_correction_id: 2 }),
      ];
      const request = createMockRequest();
      mockedAdminRepo.getSuggestedCorrections.mockResolvedValue(mockCorrections);
      // Act
      const result = await controller.getCorrections(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
    });
  });
  describe('approveCorrection()', () => {
    // Approval delegates to the repo with the correction id and returns a
    // fixed confirmation message.
    it('should approve a correction', async () => {
      // Arrange
      const request = createMockRequest();
      mockedAdminRepo.approveCorrection.mockResolvedValue(undefined);
      // Act
      const result = await controller.approveCorrection(1, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Correction approved successfully.');
      }
      expect(mockedAdminRepo.approveCorrection).toHaveBeenCalledWith(1, expect.anything());
    });
  });
  describe('rejectCorrection()', () => {
    it('should reject a correction', async () => {
      // Arrange
      const request = createMockRequest();
      mockedAdminRepo.rejectCorrection.mockResolvedValue(undefined);
      // Act
      const result = await controller.rejectCorrection(1, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Correction rejected successfully.');
      }
    });
  });
  describe('updateCorrection()', () => {
    // The updated row from the repo is echoed back in the response body.
    it('should update a correction value', async () => {
      // Arrange
      const mockUpdated = createMockCorrection({ suggested_value: 'Updated Value' });
      const request = createMockRequest();
      mockedAdminRepo.updateSuggestedCorrection.mockResolvedValue(mockUpdated);
      // Act
      const result = await controller.updateCorrection(
        1,
        { suggested_value: 'Updated Value' },
        request,
      );
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.suggested_value).toBe('Updated Value');
      }
    });
  });
  // ==========================================================================
  // USER MANAGEMENT
  // ==========================================================================
  describe('getUsers()', () => {
    it('should return paginated user list', async () => {
      // Arrange
      const mockResult = {
        users: [
          { user_id: 'user-1', email: 'user1@example.com', role: 'user' as const },
          { user_id: 'user-2', email: 'user2@example.com', role: 'admin' as const },
        ],
        total: 2,
      };
      const request = createMockRequest();
      mockedAdminRepo.getAllUsers.mockResolvedValue(mockResult);
      // Act
      const result = await controller.getUsers(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.users).toHaveLength(2);
        expect(result.data.total).toBe(2);
      }
    });
    // limit/offset query params are forwarded to the repo as-is.
    it('should respect pagination parameters', async () => {
      // Arrange
      const mockResult = { users: [], total: 0 };
      const request = createMockRequest();
      mockedAdminRepo.getAllUsers.mockResolvedValue(mockResult);
      // Act
      await controller.getUsers(request, 50, 20);
      // Assert
      expect(mockedAdminRepo.getAllUsers).toHaveBeenCalledWith(expect.anything(), 50, 20);
    });
    // Requested limit of 200 must be clamped to the 100 maximum (offset
    // defaults to 0 when omitted).
    it('should cap limit at 100', async () => {
      // Arrange
      const mockResult = { users: [], total: 0 };
      const request = createMockRequest();
      mockedAdminRepo.getAllUsers.mockResolvedValue(mockResult);
      // Act
      await controller.getUsers(request, 200);
      // Assert
      expect(mockedAdminRepo.getAllUsers).toHaveBeenCalledWith(expect.anything(), 100, 0);
    });
  });
  describe('getUserById()', () => {
    it('should return user profile', async () => {
      // Arrange
      const mockProfile = {
        full_name: 'Test User',
        role: 'user',
        user: { user_id: 'user-123', email: 'test@example.com' },
      };
      const request = createMockRequest();
      mockedUserRepo.findUserProfileById.mockResolvedValue(mockProfile);
      // Act
      const result = await controller.getUserById('user-123', request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.user.user_id).toBe('user-123');
      }
    });
  });
  describe('updateUserRole()', () => {
    it('should update user role', async () => {
      // Arrange
      const mockUpdated = { role: 'admin', points: 100 };
      const request = createMockRequest();
      mockedAdminRepo.updateUserRole.mockResolvedValue(mockUpdated);
      // Act
      const result = await controller.updateUserRole('user-123', { role: 'admin' }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.role).toBe('admin');
      }
    });
  });
  describe('deleteUser()', () => {
    // Deletion goes through userService with the acting admin's id first,
    // then the target user's id.
    it('should delete a user', async () => {
      // Arrange
      const request = createMockRequest();
      mockedUserService.deleteUserAsAdmin.mockResolvedValue(undefined);
      // Act
      await controller.deleteUser('user-to-delete', request);
      // Assert
      expect(mockedUserService.deleteUserAsAdmin).toHaveBeenCalledWith(
        'admin-user-id',
        'user-to-delete',
        expect.anything(),
      );
    });
  });
  // ==========================================================================
  // CONTENT MANAGEMENT
  // ==========================================================================
  describe('updateRecipeStatus()', () => {
    it('should update recipe status', async () => {
      // Arrange
      const mockRecipe = { recipe_id: 1, status: 'public' };
      const request = createMockRequest();
      mockedAdminRepo.updateRecipeStatus.mockResolvedValue(mockRecipe);
      // Act
      const result = await controller.updateRecipeStatus(1, { status: 'public' }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.status).toBe('public');
      }
    });
  });
  describe('deleteRecipe()', () => {
    // The controller forwards the admin's id and an isAdmin=true flag so the
    // repo can bypass ownership checks.
    it('should delete a recipe', async () => {
      // Arrange
      const request = createMockRequest();
      mockedRecipeRepo.deleteRecipe.mockResolvedValue(undefined);
      // Act
      await controller.deleteRecipe(1, request);
      // Assert
      expect(mockedRecipeRepo.deleteRecipe).toHaveBeenCalledWith(
        1,
        'admin-user-id',
        true, // isAdmin
        expect.anything(),
      );
    });
  });
  describe('getFlyersForReview()', () => {
    it('should return flyers needing review', async () => {
      // Arrange
      const mockFlyers = [
        { flyer_id: 1, status: 'needs_review' },
        { flyer_id: 2, status: 'needs_review' },
      ];
      const request = createMockRequest();
      mockedAdminRepo.getFlyersForReview.mockResolvedValue(mockFlyers);
      // Act
      const result = await controller.getFlyersForReview(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
    });
  });
  describe('deleteFlyer()', () => {
    it('should delete a flyer', async () => {
      // Arrange
      const request = createMockRequest();
      mockedFlyerRepo.deleteFlyer.mockResolvedValue(undefined);
      // Act
      await controller.deleteFlyer(1, request);
      // Assert
      expect(mockedFlyerRepo.deleteFlyer).toHaveBeenCalledWith(1, expect.anything());
    });
  });
  describe('triggerFlyerCleanup()', () => {
    // Cleanup is asynchronous: the endpoint only enqueues a BullMQ job.
    it('should enqueue cleanup job', async () => {
      // Arrange
      const request = createMockRequest();
      mockedCleanupQueue.add.mockResolvedValue({} as never);
      // Act
      const result = await controller.triggerFlyerCleanup(1, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toContain('File cleanup job');
      }
      expect(mockedCleanupQueue.add).toHaveBeenCalledWith('cleanup-flyer-files', { flyerId: 1 });
    });
  });
  // ==========================================================================
  // STATISTICS
  // ==========================================================================
  describe('getStats()', () => {
    it('should return application statistics', async () => {
      // Arrange
      const mockStats = {
        flyerCount: 100,
        userCount: 50,
        flyerItemCount: 500,
        storeCount: 20,
        pendingCorrectionCount: 5,
        recipeCount: 30,
      };
      const request = createMockRequest();
      mockedAdminRepo.getApplicationStats.mockResolvedValue(mockStats);
      // Act
      const result = await controller.getStats(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.flyerCount).toBe(100);
        expect(result.data.userCount).toBe(50);
      }
    });
  });
  describe('getDailyStats()', () => {
    it('should return daily statistics', async () => {
      // Arrange
      const mockDailyStats = [
        { date: '2024-01-01', new_users: 5, new_flyers: 10 },
        { date: '2024-01-02', new_users: 3, new_flyers: 8 },
      ];
      const request = createMockRequest();
      mockedAdminRepo.getDailyStatsForLast30Days.mockResolvedValue(mockDailyStats);
      // Act
      const result = await controller.getDailyStats(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
    });
  });
  // ==========================================================================
  // QUEUE/WORKER MONITORING
  // ==========================================================================
  describe('getWorkerStatuses()', () => {
    it('should return worker statuses', async () => {
      // Arrange
      const mockStatuses = [
        { name: 'flyer-processor', isRunning: true },
        { name: 'email-sender', isRunning: true },
      ];
      const request = createMockRequest();
      mockedMonitoringService.getWorkerStatuses.mockResolvedValue(mockStatuses);
      // Act
      const result = await controller.getWorkerStatuses(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
        expect(result.data[0].isRunning).toBe(true);
      }
    });
  });
  describe('getQueueStatuses()', () => {
    it('should return queue statuses', async () => {
      // Arrange
      const mockStatuses = [
        {
          name: 'flyer-processing',
          counts: { waiting: 0, active: 1, completed: 100, failed: 2, delayed: 0, paused: 0 },
        },
      ];
      const request = createMockRequest();
      mockedMonitoringService.getQueueStatuses.mockResolvedValue(mockStatuses);
      // Act
      const result = await controller.getQueueStatuses(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(1);
        expect(result.data[0].counts.completed).toBe(100);
      }
    });
  });
  describe('retryJob()', () => {
    // The confirmation message should echo the retried job id.
    it('should retry a failed job', async () => {
      // Arrange
      const request = createMockRequest();
      mockedMonitoringService.retryFailedJob.mockResolvedValue(undefined);
      // Act
      const result = await controller.retryJob('flyer-processing', 'job-123', request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toContain('job-123');
      }
    });
  });
  // ==========================================================================
  // BACKGROUND JOB TRIGGERS
  // ==========================================================================
  describe('triggerDailyDealCheck()', () => {
    it('should trigger daily deal check', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.triggerDailyDealCheck(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toContain('Daily deal check');
      }
      expect(mockedBackgroundJobService.runDailyDealCheck).toHaveBeenCalled();
    });
  });
  describe('triggerAnalyticsReport()', () => {
    // The queued job's id is surfaced so the admin UI can poll its status.
    it('should trigger analytics report', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBackgroundJobService.triggerAnalyticsReport.mockResolvedValue('job-456');
      // Act
      const result = await controller.triggerAnalyticsReport(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.jobId).toBe('job-456');
      }
    });
  });
  describe('triggerWeeklyAnalytics()', () => {
    it('should trigger weekly analytics', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBackgroundJobService.triggerWeeklyAnalyticsReport.mockResolvedValue('weekly-job-1');
      // Act
      const result = await controller.triggerWeeklyAnalytics(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.jobId).toBe('weekly-job-1');
      }
    });
  });
  describe('triggerTokenCleanup()', () => {
    it('should trigger token cleanup', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBackgroundJobService.triggerTokenCleanup.mockResolvedValue('cleanup-job-1');
      // Act
      const result = await controller.triggerTokenCleanup(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.jobId).toBe('cleanup-job-1');
      }
    });
  });
  describe('triggerFailingJob()', () => {
    // Dev/test endpoint: enqueues an analytics job with the sentinel
    // reportDate 'FAIL' so the worker's failure path can be exercised.
    it('should trigger a failing test job', async () => {
      // Arrange
      const request = createMockRequest();
      mockedAnalyticsQueue.add.mockResolvedValue({ id: 'fail-job-1' } as never);
      // Act
      const result = await controller.triggerFailingJob(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.jobId).toBe('fail-job-1');
      }
      expect(mockedAnalyticsQueue.add).toHaveBeenCalledWith('generate-daily-report', {
        reportDate: 'FAIL',
      });
    });
  });
  // ==========================================================================
  // SYSTEM OPERATIONS
  // ==========================================================================
  describe('clearGeocodeCache()', () => {
    // The deleted-key count from the service is reflected in the message.
    it('should clear geocode cache', async () => {
      // Arrange
      const request = createMockRequest();
      mockedGeoCodingService.clearGeocodeCache.mockResolvedValue(50);
      // Act
      const result = await controller.clearGeocodeCache(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toContain('50 keys');
      }
    });
  });
  describe('clearApplicationCache()', () => {
    // Per-namespace counts (10 + 5 + 3) are summed into the message and also
    // reported individually under details.
    it('should clear all application caches', async () => {
      // Arrange
      const request = createMockRequest();
      mockedCacheService.invalidateFlyers.mockResolvedValue(10);
      mockedCacheService.invalidateBrands.mockResolvedValue(5);
      mockedCacheService.invalidateStats.mockResolvedValue(3);
      // Act
      const result = await controller.clearApplicationCache(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toContain('18 keys');
        expect(result.data.details?.flyers).toBe(10);
        expect(result.data.details?.brands).toBe(5);
        expect(result.data.details?.stats).toBe(3);
      }
    });
  });
// ==========================================================================
// FEATURE FLAGS
// ==========================================================================
describe('getFeatureFlags()', () => {
it('should return feature flags', async () => {
// Arrange
const mockFlags = {
enableNewUI: true,
enableBetaFeatures: false,
};
const request = createMockRequest();
mockedGetFeatureFlags.mockReturnValue(mockFlags);
// Act
const result = await controller.getFeatureFlags(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.flags.enableNewUI).toBe(true);
expect(result.data.flags.enableBetaFeatures).toBe(false);
}
});
});
// ==========================================================================
// BASE CONTROLLER INTEGRATION
// ==========================================================================
describe('BaseController integration', () => {
it('should use success helper for consistent response format', async () => {
// Arrange
const mockStats = { flyerCount: 0 };
const request = createMockRequest();
mockedAdminRepo.getApplicationStats.mockResolvedValue(mockStats);
// Act
const result = await controller.getStats(request);
// Assert
expect(result).toHaveProperty('success', true);
expect(result).toHaveProperty('data');
});
});
});

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,632 @@
// src/controllers/ai.controller.test.ts
// ============================================================================
// AI CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the AIController class. These tests verify controller
// logic in isolation by mocking AI service and monitoring service.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class
// Replace the real tsoa module with inert stand-ins so the controller can be
// loaded without the tsoa runtime. Every decorator factory becomes the same
// no-op, and Controller only records the HTTP status a subclass sets.
vi.mock('tsoa', () => {
  const noopDecorator = () => () => {};
  return {
    Controller: class Controller {
      private _status = 200;
      protected setStatus(status: number): void {
        this._status = status;
      }
    },
    Get: noopDecorator,
    Post: noopDecorator,
    Route: noopDecorator,
    Tags: noopDecorator,
    Security: noopDecorator,
    Path: noopDecorator,
    Query: noopDecorator,
    Body: noopDecorator,
    Request: noopDecorator,
    FormField: noopDecorator,
    SuccessResponse: noopDecorator,
    Response: noopDecorator,
    Middlewares: noopDecorator,
  };
});
// Mock the AI service. The four methods the controller delegates to become
// vi.fn() stubs; DuplicateFlyerError is re-declared with the same shape so
// tests can throw it and the controller's instanceof check still matches.
vi.mock('../services/aiService.server', () => ({
  aiService: {
    enqueueFlyerProcessing: vi.fn(),
    processLegacyFlyerUpload: vi.fn(),
    extractTextFromImageArea: vi.fn(),
    planTripWithMaps: vi.fn(),
  },
  DuplicateFlyerError: class DuplicateFlyerError extends Error {
    // Id of the already-existing flyer, so callers can link to it.
    flyerId: number;
    constructor(message: string, flyerId: number) {
      super(message);
      this.flyerId = flyerId;
    }
  },
}));
// Mock monitoring service (flyer-job status polling).
vi.mock('../services/monitoringService.server', () => ({
  monitoringService: {
    getFlyerJobStatus: vi.fn(),
  },
}));
// Mock file utils (temp-file cleanup after uploads).
vi.mock('../utils/fileUtils', () => ({
  cleanupUploadedFile: vi.fn(),
  cleanupUploadedFiles: vi.fn(),
}));
// Mock rate limiters as pass-through middleware so requests are never throttled.
vi.mock('../config/rateLimiters', () => ({
  aiUploadLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
  aiGenerationLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
}));
// Import mocked modules after mock definitions
import { aiService, DuplicateFlyerError } from '../services/aiService.server';
import { monitoringService } from '../services/monitoringService.server';
import { AIController } from './ai.controller';
// Cast mocked modules for type-safe access
const mockedAiService = aiService as Mocked<typeof aiService>;
const mockedMonitoringService = monitoringService as Mocked<typeof monitoringService>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a minimal Express request stub with an authenticated (non-admin)
 * user and a no-op request-scoped logger. Pass `overrides` to replace any
 * top-level property (body, file, headers, ip, ...).
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const noopLogger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const base = {
    body: {},
    params: {},
    query: {},
    headers: {},
    ip: '127.0.0.1',
    file: undefined,
    files: undefined,
    user: createMockUserProfile(),
    log: noopLogger,
  };
  return { ...base, ...overrides } as unknown as ExpressRequest;
}
/**
 * Returns a user-profile object shaped like what the auth middleware attaches
 * for a regular (non-admin) user: display name, the 'user' role, and the
 * nested account record (id + email).
 */
function createMockUserProfile() {
  const account = {
    user_id: 'test-user-id',
    email: 'test@example.com',
  };
  return {
    full_name: 'Test User',
    role: 'user' as const,
    user: account,
  };
}
/**
 * Builds a multer-style uploaded-file record with sensible JPEG defaults.
 * Any field can be replaced per-test via `overrides`.
 */
function createMockFile(overrides: Partial<Express.Multer.File> = {}): Express.Multer.File {
  const defaults: Express.Multer.File = {
    fieldname: 'flyerFile',
    originalname: 'test-flyer.jpg',
    encoding: '7bit',
    mimetype: 'image/jpeg',
    size: 1024,
    destination: '/tmp/uploads',
    filename: 'abc123.jpg',
    path: '/tmp/uploads/abc123.jpg',
    buffer: Buffer.from('mock file content'),
    // The readable stream is never consumed by the controller under test.
    stream: {} as never,
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
// Unit tests for AIController. The aiService/monitoringService modules, file
// utils, and rate limiters are all mocked above, so every test exercises only
// the controller's validation, status codes, and response shaping.
describe('AIController', () => {
  let controller: AIController;
  beforeEach(() => {
    // Fresh mocks and a fresh controller per test so no call history or
    // status-code state leaks between cases.
    vi.clearAllMocks();
    controller = new AIController();
  });
  afterEach(() => {
    // Restore real timers in case an individual test enabled fake timers.
    vi.useRealTimers();
  });
  // ==========================================================================
  // FLYER UPLOAD ENDPOINTS
  // ==========================================================================
  describe('uploadAndProcess()', () => {
    it('should accept flyer for processing', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      mockedAiService.enqueueFlyerProcessing.mockResolvedValue({ id: 'job-123' } as never);
      // Act
      const result = await controller.uploadAndProcess(
        request,
        'a'.repeat(64), // valid checksum
      );
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.jobId).toBe('job-123');
        expect(result.data.message).toContain('Flyer accepted');
      }
    });
    it('should reject invalid checksum format', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      // Act & Assert
      await expect(controller.uploadAndProcess(request, 'invalid')).rejects.toThrow(
        'Checksum must be a 64-character hexadecimal string.',
      );
    });
    it('should reject when no file uploaded', async () => {
      // Arrange
      const request = createMockRequest({ file: undefined });
      // Act & Assert
      await expect(controller.uploadAndProcess(request, 'a'.repeat(64))).rejects.toThrow(
        'A flyer file (PDF or image) is required.',
      );
    });
    it('should handle duplicate flyer error', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      mockedAiService.enqueueFlyerProcessing.mockRejectedValue(
        new DuplicateFlyerError('Duplicate flyer', 42),
      );
      // Act
      const result = await controller.uploadAndProcess(request, 'a'.repeat(64));
      // Assert: duplicates are reported as a CONFLICT error payload, not a throw.
      expect(result.success).toBe(false);
      if (!result.success) {
        expect(result.error.code).toBe('CONFLICT');
        expect(result.error.details).toEqual({ flyerId: 42 });
      }
    });
  });
  describe('uploadLegacy()', () => {
    it('should process legacy upload', async () => {
      // Arrange
      const mockFile = createMockFile();
      const mockFlyer = { flyer_id: 1, file_name: 'test.jpg' };
      const request = createMockRequest({ file: mockFile });
      mockedAiService.processLegacyFlyerUpload.mockResolvedValue(mockFlyer as never);
      // Act
      const result = await controller.uploadLegacy(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.flyer_id).toBe(1);
      }
    });
    it('should reject when no file uploaded', async () => {
      // Arrange
      const request = createMockRequest({ file: undefined });
      // Act & Assert
      await expect(controller.uploadLegacy(request)).rejects.toThrow('No flyer file uploaded.');
    });
  });
  describe('processFlyer()', () => {
    it('should process flyer data', async () => {
      // Arrange
      const mockFile = createMockFile();
      const mockFlyer = { flyer_id: 1, file_name: 'test.jpg' };
      const request = createMockRequest({ file: mockFile });
      mockedAiService.processLegacyFlyerUpload.mockResolvedValue(mockFlyer as never);
      // Act
      const result = await controller.processFlyer(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toContain('processed');
        expect(result.data.flyer.flyer_id).toBe(1);
      }
    });
    it('should reject when no file uploaded', async () => {
      // Arrange
      const request = createMockRequest({ file: undefined });
      // Act & Assert
      await expect(controller.processFlyer(request)).rejects.toThrow(
        'Flyer image file is required.',
      );
    });
  });
  // ==========================================================================
  // JOB STATUS ENDPOINT
  // ==========================================================================
  describe('getJobStatus()', () => {
    it('should return job status', async () => {
      // Arrange
      const mockStatus = {
        id: 'job-123',
        state: 'completed',
        progress: 100,
        returnValue: { flyer_id: 1 },
        failedReason: null,
      };
      const request = createMockRequest();
      mockedMonitoringService.getFlyerJobStatus.mockResolvedValue(mockStatus);
      // Act
      const result = await controller.getJobStatus('job-123', request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.id).toBe('job-123');
        expect(result.data.state).toBe('completed');
      }
    });
  });
  // ==========================================================================
  // IMAGE ANALYSIS ENDPOINTS
  // ==========================================================================
  // NOTE: checkFlyer/extractAddress/extractLogo are stubbed in the controller,
  // so these tests pin the stubbed responses, not real AI behavior.
  describe('checkFlyer()', () => {
    it('should check if image is a flyer', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      // Act
      const result = await controller.checkFlyer(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.is_flyer).toBe(true);
      }
    });
    it('should reject when no file uploaded', async () => {
      // Arrange
      const request = createMockRequest({ file: undefined });
      // Act & Assert
      await expect(controller.checkFlyer(request)).rejects.toThrow('Image file is required.');
    });
  });
  describe('extractAddress()', () => {
    it('should extract address from image', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      // Act
      const result = await controller.extractAddress(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.address).toBe('not identified');
      }
    });
  });
  describe('extractLogo()', () => {
    it('should extract logo from images', async () => {
      // Arrange
      const mockFiles = [createMockFile()];
      const request = createMockRequest({ files: mockFiles });
      // Act
      const result = await controller.extractLogo(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.store_logo_base_64).toBeNull();
      }
    });
    it('should reject when no files uploaded', async () => {
      // Arrange
      const request = createMockRequest({ files: [] });
      // Act & Assert
      await expect(controller.extractLogo(request)).rejects.toThrow('Image files are required.');
    });
  });
  describe('rescanArea()', () => {
    it('should rescan a specific area', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      mockedAiService.extractTextFromImageArea.mockResolvedValue({ text: 'Extracted text' });
      // Act
      const result = await controller.rescanArea(
        request,
        JSON.stringify({ x: 10, y: 10, width: 100, height: 100 }),
        'item_details',
      );
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.text).toBe('Extracted text');
      }
    });
    it('should reject invalid cropArea JSON', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      // Act & Assert
      await expect(controller.rescanArea(request, 'invalid json', 'item_details')).rejects.toThrow(
        'cropArea must be a valid JSON string.',
      );
    });
    it('should reject cropArea with missing properties', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      // Act & Assert
      await expect(
        controller.rescanArea(request, JSON.stringify({ x: 10 }), 'item_details'),
      ).rejects.toThrow('cropArea must contain numeric x, y, width, and height properties.');
    });
    it('should reject cropArea with zero width', async () => {
      // Arrange
      const mockFile = createMockFile();
      const request = createMockRequest({ file: mockFile });
      // Act & Assert
      await expect(
        controller.rescanArea(
          request,
          JSON.stringify({ x: 10, y: 10, width: 0, height: 100 }),
          'item_details',
        ),
      ).rejects.toThrow('Crop area width must be positive.');
    });
  });
  // ==========================================================================
  // AI INSIGHTS ENDPOINTS
  // ==========================================================================
  describe('getQuickInsights()', () => {
    it('should return quick insights', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getQuickInsights(request, {
        items: [{ item: 'Milk' }],
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.text).toContain('quick insight');
      }
    });
  });
  describe('getDeepDive()', () => {
    it('should return deep dive analysis', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getDeepDive(request, {
        items: [{ item: 'Chicken' }],
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.text).toContain('deep dive');
      }
    });
  });
  describe('searchWeb()', () => {
    it('should search the web', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.searchWeb(request, {
        query: 'best grocery deals',
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.text).toBeDefined();
        expect(result.data.sources).toBeDefined();
      }
    });
  });
  describe('comparePrices()', () => {
    it('should compare prices', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.comparePrices(request, {
        items: [{ item: 'Milk' }],
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.text).toContain('price comparison');
      }
    });
  });
  describe('planTrip()', () => {
    it('should plan a shopping trip', async () => {
      // Arrange
      const mockResult = {
        text: 'Here is your trip plan',
        sources: [{ uri: 'https://maps.google.com', title: 'Google Maps' }],
      };
      const request = createMockRequest();
      mockedAiService.planTripWithMaps.mockResolvedValue(mockResult);
      // Act
      const result = await controller.planTrip(request, {
        items: [{ item: 'Milk' }],
        store: { name: 'SuperMart' },
        userLocation: { latitude: 43.65, longitude: -79.38 },
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.text).toContain('trip plan');
      }
    });
  });
  // ==========================================================================
  // STUBBED FUTURE ENDPOINTS
  // ==========================================================================
  describe('generateImage()', () => {
    it('should return 501 not implemented', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.generateImage(request, {
        prompt: 'A grocery store',
      });
      // Assert
      expect(result.success).toBe(false);
      if (!result.success) {
        expect(result.error.code).toBe('NOT_IMPLEMENTED');
      }
    });
  });
  describe('generateSpeech()', () => {
    it('should return 501 not implemented', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.generateSpeech(request, {
        text: 'Hello world',
      });
      // Assert
      expect(result.success).toBe(false);
      if (!result.success) {
        expect(result.error.code).toBe('NOT_IMPLEMENTED');
      }
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const mockStatus = {
        id: 'job-1',
        state: 'active',
        progress: 50,
        returnValue: null,
        failedReason: null,
      };
      const request = createMockRequest();
      mockedMonitoringService.getFlyerJobStatus.mockResolvedValue(mockStatus);
      // Act
      const result = await controller.getJobStatus('job-1', request);
      // Assert: ADR-028 envelope shape { success, data }.
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
    it('should use error helper for error responses', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.generateImage(request, { prompt: 'test' });
      // Assert: ADR-028 envelope shape { success: false, error: { code, message } }.
      expect(result.success).toBe(false);
      if (!result.success) {
        expect(result.error).toHaveProperty('code');
        expect(result.error).toHaveProperty('message');
      }
    });
  });
});

View File

@@ -0,0 +1,937 @@
// src/controllers/ai.controller.ts
// ============================================================================
// AI CONTROLLER
// ============================================================================
// Provides endpoints for AI-powered flyer processing, item extraction, and
// various AI-assisted features including OCR, insights generation, and
// trip planning.
//
// Key Features:
// - Flyer upload and asynchronous processing via BullMQ
// - Image-based text extraction (OCR) and rescan
// - AI-generated insights, price comparisons, and trip planning
// - Legacy upload endpoints for backward compatibility
//
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import {
Get,
Post,
Route,
Tags,
Security,
Body,
Path,
Request,
SuccessResponse,
Response,
FormField,
Middlewares,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController, ControllerErrorCode } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import { aiService, DuplicateFlyerError } from '../services/aiService.server';
import { monitoringService } from '../services/monitoringService.server';
import { cleanupUploadedFile, cleanupUploadedFiles } from '../utils/fileUtils';
import { aiUploadLimiter, aiGenerationLimiter } from '../config/rateLimiters';
import type { UserProfile, FlyerItem } from '../types';
import type { FlyerDto } from '../dtos/common.dto';
// ============================================================================
// DTO TYPES FOR OPENAPI
// ============================================================================
// These Data Transfer Object types define the API contract for AI endpoints.
// They are used by tsoa to generate OpenAPI specifications.
// ============================================================================
/**
 * Crop area coordinates for targeted image rescanning.
 * Coordinates are in image pixel space; width/height must be positive
 * (enforced at runtime by parseCropArea).
 */
interface CropArea {
  /** X coordinate of the top-left corner */
  x: number;
  /** Y coordinate of the top-left corner */
  y: number;
  /** Width of the crop area in pixels */
  width: number;
  /** Height of the crop area in pixels */
  height: number;
}
/**
 * Type of data to extract from a rescan operation.
 */
type ExtractionType = 'store_name' | 'dates' | 'item_details';
/**
 * Flyer item for AI analysis.
 * At least one of 'item' or 'name' must be provided.
 */
interface FlyerItemForAnalysis {
  /** Item name/description (primary identifier) */
  item?: string;
  /** Alternative item name field */
  name?: string;
  /** Additional properties are allowed */
  [key: string]: unknown;
}
/**
 * Store information for trip planning.
 */
interface StoreInfo {
  /** Store name */
  name: string;
}
/**
 * User's geographic location for trip planning.
 * The @minimum/@maximum tags below become OpenAPI validation constraints
 * via tsoa.
 */
interface UserLocation {
  /**
   * Latitude coordinate.
   * @minimum -90
   * @maximum 90
   */
  latitude: number;
  /**
   * Longitude coordinate.
   * @minimum -180
   * @maximum 180
   */
  longitude: number;
}
// ============================================================================
// REQUEST TYPES
// ============================================================================
/**
 * Request body for quick insights or deep dive analysis.
 */
interface InsightsRequest {
  /**
   * Array of items to analyze.
   * @minItems 1
   */
  items: FlyerItemForAnalysis[];
}
/**
 * Request body for price comparison.
 */
interface ComparePricesRequest {
  /**
   * Array of items to compare prices for.
   * @minItems 1
   */
  items: FlyerItemForAnalysis[];
}
/**
 * Request body for trip planning.
 */
interface PlanTripRequest {
  /** Items to buy on the trip */
  items: FlyerItemForAnalysis[];
  /** Target store information */
  store: StoreInfo;
  /** User's current location */
  userLocation: UserLocation;
}
/**
 * Request body for image generation (not implemented).
 */
interface GenerateImageRequest {
  /**
   * Prompt for image generation.
   * @minLength 1
   */
  prompt: string;
}
/**
 * Request body for speech generation (not implemented).
 */
interface GenerateSpeechRequest {
  /**
   * Text to convert to speech.
   * @minLength 1
   */
  text: string;
}
/**
 * Request body for web search.
 */
interface SearchWebRequest {
  /**
   * Search query.
   * @minLength 1
   */
  query: string;
}
// RescanAreaRequest is handled via route-level form parsing (multipart
// @FormField parameters on rescanArea), so no body DTO is declared here.
// ============================================================================
// RESPONSE TYPES
// ============================================================================
/**
 * Response for successful flyer upload.
 */
interface UploadProcessResponse {
  /** Success message */
  message: string;
  /** Background job ID for tracking processing status */
  jobId: string;
}
/**
 * Response for duplicate flyer detection (409 error details payload).
 */
interface DuplicateFlyerResponse {
  /** Existing flyer ID */
  flyerId: number;
}
/**
 * Response for job status check.
 */
interface JobStatusResponse {
  /** Job ID */
  id: string;
  /** Current job state */
  state: string;
  /** Processing progress (0-100 or object with details) */
  progress: number | object | string | boolean;
  /** Return value when job is completed */
  returnValue: unknown;
  /** Error reason if job failed */
  failedReason: string | null;
}
/**
 * Response for flyer check.
 */
interface FlyerCheckResponse {
  /** Whether the image is identified as a flyer */
  is_flyer: boolean;
}
/**
 * Response for address extraction.
 */
interface ExtractAddressResponse {
  /** Extracted address or 'not identified' if not found */
  address: string;
}
/**
 * Response for logo extraction.
 */
interface ExtractLogoResponse {
  /** Base64-encoded logo image or null if not found */
  store_logo_base_64: string | null;
}
/**
 * Response for text-based AI features (insights, deep dive, etc.).
 */
interface TextResponse {
  /** AI-generated text response */
  text: string;
}
/**
 * Response for web search.
 */
interface SearchWebResponse {
  /** AI-generated response */
  text: string;
  /** Source references */
  sources: { uri: string; title: string }[];
}
/**
 * Response for trip planning.
 */
interface PlanTripResponse {
  /** AI-generated trip plan */
  text: string;
  /** Map and store sources */
  sources: { uri: string; title: string }[];
}
/**
 * Response for rescan area.
 */
interface RescanAreaResponse {
  /** Extracted text from the cropped area */
  text: string | undefined;
}
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Parses a cropArea JSON string into a validated CropArea object.
 *
 * @param cropAreaStr JSON string containing crop area coordinates
 * @returns Parsed and validated CropArea object
 * @throws Error if the string is not valid JSON, does not decode to a plain
 *         object, lacks finite numeric x/y/width/height properties, or has a
 *         non-positive width or height
 */
function parseCropArea(cropAreaStr: string): CropArea {
  let parsed: unknown;
  try {
    parsed = JSON.parse(cropAreaStr);
  } catch {
    throw new Error('cropArea must be a valid JSON string.');
  }
  // Validate structure. JSON.parse can also yield primitives, null, or
  // arrays; reject all of those with an accurate error message.
  if (typeof parsed !== 'object' || parsed === null || Array.isArray(parsed)) {
    throw new Error('cropArea must be a valid JSON object.');
  }
  const obj = parsed as Record<string, unknown>;
  // Number.isFinite additionally rejects Infinity, which JSON.parse can
  // produce for out-of-range literals like 1e999 and a bare typeof check
  // would let through.
  if (
    typeof obj.x !== 'number' || !Number.isFinite(obj.x) ||
    typeof obj.y !== 'number' || !Number.isFinite(obj.y) ||
    typeof obj.width !== 'number' || !Number.isFinite(obj.width) ||
    typeof obj.height !== 'number' || !Number.isFinite(obj.height)
  ) {
    throw new Error('cropArea must contain numeric x, y, width, and height properties.');
  }
  if (obj.width <= 0) {
    throw new Error('Crop area width must be positive.');
  }
  if (obj.height <= 0) {
    throw new Error('Crop area height must be positive.');
  }
  return {
    x: obj.x,
    y: obj.y,
    width: obj.width,
    height: obj.height,
  };
}
// ============================================================================
// AI CONTROLLER
// ============================================================================
/**
* Controller for AI-powered flyer processing and analysis.
*
* Provides endpoints for:
* - Uploading and processing flyers with AI extraction
* - Checking processing job status
* - Targeted image rescanning for specific data extraction
* - AI-generated insights and recommendations
* - Price comparisons and trip planning
*
* File upload endpoints expect multipart/form-data and are configured
* with Express middleware for multer file handling.
*/
@Route('ai')
@Tags('AI')
export class AIController extends BaseController {
// ==========================================================================
// FLYER UPLOAD ENDPOINTS
// ==========================================================================
/**
 * Upload and process a flyer.
 *
 * Accepts a single flyer file (PDF or image), validates the checksum to
 * prevent duplicates, and enqueues it for background AI processing.
 * Returns immediately (202) with a job ID for tracking via
 * GET /ai/jobs/{jobId}/status.
 *
 * The file upload is handled by Express middleware (multer).
 * The endpoint expects multipart/form-data with:
 * - flyerFile: The flyer image/PDF file
 * - checksum: SHA-256 checksum of the file (64 hex characters)
 * - baseUrl: Optional base URL override
 *
 * @summary Upload and process flyer
 * @param request Express request with uploaded file
 * @param checksum SHA-256 checksum of the file (64 hex characters)
 * @param baseUrl Optional base URL for generated image URLs
 * @returns Job ID for tracking processing status
 */
@Post('upload-and-process')
@Middlewares(aiUploadLimiter)
@SuccessResponse(202, 'Flyer accepted for processing')
@Response<ErrorResponse>(400, 'Missing file or invalid checksum')
@Response<ErrorResponse & { error: { details: DuplicateFlyerResponse } }>(
  409,
  'Duplicate flyer detected',
)
public async uploadAndProcess(
  @Request() request: ExpressRequest,
  @FormField() checksum: string,
  @FormField() baseUrl?: string,
): Promise<SuccessResponseType<UploadProcessResponse>> {
  const file = request.file as Express.Multer.File | undefined;
  // Validate checksum format.
  // NOTE(review): the regex accepts lowercase hex only — an uppercase
  // checksum is rejected. Confirm clients always send lowercase digests.
  if (!checksum || checksum.length !== 64 || !/^[a-f0-9]+$/.test(checksum)) {
    this.setStatus(400);
    throw new Error('Checksum must be a 64-character hexadecimal string.');
  }
  // Validate file was uploaded
  if (!file) {
    this.setStatus(400);
    throw new Error('A flyer file (PDF or image) is required.');
  }
  request.log.debug(
    { filename: file.originalname, size: file.size, checksum },
    'Handling upload-and-process',
  );
  try {
    // Handle optional authentication - clear user if no auth header in test/staging
    // so the enqueued job is attributed to an anonymous upload in those envs.
    let userProfile = request.user as UserProfile | undefined;
    if (
      (process.env.NODE_ENV === 'test' || process.env.NODE_ENV === 'staging') &&
      !request.headers['authorization']
    ) {
      userProfile = undefined;
    }
    const job = await aiService.enqueueFlyerProcessing(
      file,
      checksum,
      userProfile,
      request.ip ?? 'unknown',
      request.log,
      baseUrl,
    );
    this.setStatus(202);
    // NOTE(review): job.id! assumes the queue always assigns an id on
    // enqueue — TODO confirm against the queue implementation.
    return this.success({
      message: 'Flyer accepted for processing.',
      jobId: job.id!,
    });
  } catch (error) {
    // The temp upload is cleaned up only on failure; the success path hands
    // the file to the background job.
    await cleanupUploadedFile(file);
    if (error instanceof DuplicateFlyerError) {
      request.log.warn(`Duplicate flyer upload attempt blocked for checksum: ${checksum}`);
      this.setStatus(409);
      // Duplicates return a structured CONFLICT error (with the existing
      // flyerId) rather than throwing, so clients can link to the original.
      return this.error(ControllerErrorCode.CONFLICT, error.message, {
        flyerId: error.flyerId,
      }) as unknown as SuccessResponseType<UploadProcessResponse>;
    }
    throw error;
  }
}
/**
 * Legacy flyer upload (deprecated).
 *
 * Synchronously processes an uploaded flyer and returns the stored record.
 * Retained only for backward compatibility; prefer /upload-and-process,
 * which enqueues the work and returns a job ID instead of blocking.
 *
 * @summary Legacy flyer upload (deprecated)
 * @param request Express request with uploaded file
 * @returns The processed flyer data
 * @deprecated Use /upload-and-process instead
 */
@Post('upload-legacy')
@Security('bearerAuth')
@Middlewares(aiUploadLimiter)
@SuccessResponse(200, 'Flyer processed successfully')
@Response<ErrorResponse>(400, 'No flyer file uploaded')
@Response<ErrorResponse>(401, 'Unauthorized')
@Response<ErrorResponse>(409, 'Duplicate flyer detected')
public async uploadLegacy(
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<FlyerDto>> {
  const uploadedFile = request.file as Express.Multer.File | undefined;
  if (!uploadedFile) {
    this.setStatus(400);
    throw new Error('No flyer file uploaded.');
  }
  const profile = request.user as UserProfile;
  try {
    const flyer = await aiService.processLegacyFlyerUpload(
      uploadedFile,
      request.body,
      profile,
      request.log,
    );
    return this.success(flyer);
  } catch (err) {
    // Remove the temp upload before translating or rethrowing the error.
    await cleanupUploadedFile(uploadedFile);
    if (!(err instanceof DuplicateFlyerError)) {
      throw err;
    }
    request.log.warn('Duplicate legacy flyer upload attempt blocked.');
    this.setStatus(409);
    return this.error(ControllerErrorCode.CONFLICT, err.message, {
      flyerId: err.flyerId,
    }) as unknown as SuccessResponseType<FlyerDto>;
  }
}
/**
 * Process flyer data (deprecated).
 *
 * Saves processed flyer data to the database. This endpoint is deprecated
 * and will be removed in a future version. Use /upload-and-process instead.
 *
 * Near-duplicate of uploadLegacy, with two differences visible here: it has
 * no @Security decorator (userProfile may be undefined) and it returns 201
 * with a message wrapper instead of 200 with the bare flyer.
 *
 * @summary Process flyer data (deprecated)
 * @param request Express request with uploaded file
 * @returns Success message with flyer data
 * @deprecated Use /upload-and-process instead
 */
@Post('flyers/process')
@Middlewares(aiUploadLimiter)
@SuccessResponse(201, 'Flyer processed and saved successfully')
@Response<ErrorResponse>(400, 'Flyer image file is required')
@Response<ErrorResponse>(409, 'Duplicate flyer detected')
public async processFlyer(
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<{ message: string; flyer: FlyerDto }>> {
  const file = request.file as Express.Multer.File | undefined;
  if (!file) {
    this.setStatus(400);
    throw new Error('Flyer image file is required.');
  }
  // No auth requirement on this route, so the profile may be undefined.
  const userProfile = request.user as UserProfile | undefined;
  try {
    const newFlyer = await aiService.processLegacyFlyerUpload(
      file,
      request.body,
      userProfile,
      request.log,
    );
    return this.created({ message: 'Flyer processed and saved successfully.', flyer: newFlyer });
  } catch (error) {
    // Remove the temp upload before translating or rethrowing the error.
    await cleanupUploadedFile(file);
    if (error instanceof DuplicateFlyerError) {
      request.log.warn('Duplicate flyer upload attempt blocked.');
      this.setStatus(409);
      return this.error(ControllerErrorCode.CONFLICT, error.message, {
        flyerId: error.flyerId,
      }) as unknown as SuccessResponseType<{ message: string; flyer: FlyerDto }>;
    }
    throw error;
  }
}
// ==========================================================================
// JOB STATUS ENDPOINT
// ==========================================================================
/**
 * Check job status.
 *
 * Returns the current status of a background flyer-processing job.
 * Clients poll this endpoint after POST /ai/upload-and-process.
 *
 * @summary Check job status
 * @param jobId Job ID returned from upload-and-process
 * @param request Express request for logging
 * @returns Job status information
 */
@Get('jobs/{jobId}/status')
@SuccessResponse(200, 'Job status retrieved')
@Response<ErrorResponse>(404, 'Job not found')
public async getJobStatus(
  @Path() jobId: string,
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<JobStatusResponse>> {
  const status = await monitoringService.getFlyerJobStatus(jobId);
  request.log.debug(`Status check for job ${jobId}: ${status.state}`);
  return this.success(status);
}
// ==========================================================================
// IMAGE ANALYSIS ENDPOINTS
// ==========================================================================
/**
 * Check if image is a flyer.
 *
 * Analyzes an uploaded image to determine whether it is a grocery store
 * flyer. Currently returns a stubbed positive result.
 *
 * @summary Check if image is a flyer
 * @param request Express request with uploaded image
 * @returns Whether the image is identified as a flyer
 */
@Post('check-flyer')
@Middlewares(aiUploadLimiter)
@SuccessResponse(200, 'Flyer check completed')
@Response<ErrorResponse>(400, 'Image file is required')
public async checkFlyer(
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<FlyerCheckResponse>> {
  const uploadedImage = request.file as Express.Multer.File | undefined;
  if (!uploadedImage) {
    this.setStatus(400);
    throw new Error('Image file is required.');
  }
  try {
    request.log.info(`Server-side flyer check for file: ${uploadedImage.originalname}`);
    // Stubbed response - actual AI implementation would go here
    return this.success({ is_flyer: true });
  } finally {
    // Always delete the temp upload, regardless of outcome.
    await cleanupUploadedFile(uploadedImage);
  }
}
/**
 * Extract address from image.
 *
 * Extracts store address information from a flyer image using AI.
 * Currently returns a stubbed 'not identified' result.
 *
 * @summary Extract address from image
 * @param request Express request with uploaded image
 * @returns Extracted address information
 */
@Post('extract-address')
@Middlewares(aiUploadLimiter)
@SuccessResponse(200, 'Address extraction completed')
@Response<ErrorResponse>(400, 'Image file is required')
public async extractAddress(
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<ExtractAddressResponse>> {
  const uploadedImage = request.file as Express.Multer.File | undefined;
  if (!uploadedImage) {
    this.setStatus(400);
    throw new Error('Image file is required.');
  }
  try {
    request.log.info(`Server-side address extraction for file: ${uploadedImage.originalname}`);
    // Stubbed response - actual AI implementation would go here
    return this.success({ address: 'not identified' });
  } finally {
    // Always delete the temp upload, regardless of outcome.
    await cleanupUploadedFile(uploadedImage);
  }
}
/**
* Extract store logo.
*
* Extracts the store logo from flyer images using AI.
*
* @summary Extract store logo
* @param request Express request with uploaded images
* @returns Extracted logo as base64 string
*/
@Post('extract-logo')
@Middlewares(aiUploadLimiter)
@SuccessResponse(200, 'Logo extraction completed')
@Response<ErrorResponse>(400, 'Image files are required')
public async extractLogo(
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<ExtractLogoResponse>> {
const files = request.files as Express.Multer.File[] | undefined;
if (!files || !Array.isArray(files) || files.length === 0) {
this.setStatus(400);
throw new Error('Image files are required.');
}
try {
request.log.info(`Server-side logo extraction for ${files.length} image(s).`);
// Stubbed response - actual AI implementation would go here
return this.success({ store_logo_base_64: null });
} finally {
await cleanupUploadedFiles(files);
}
}
/**
 * Rescan area of image.
 *
 * Performs a targeted AI scan on a specific area of an image.
 * Useful for re-extracting data from poorly recognized regions.
 *
 * @summary Rescan area of image
 * @param request Express request with uploaded image
 * @param cropArea JSON string with x, y, width, height coordinates
 * @param extractionType Type of data to extract (store_name, dates, item_details)
 * @returns Extracted text from the cropped area
 */
@Post('rescan-area')
@Security('bearerAuth')
@Middlewares(aiUploadLimiter)
@SuccessResponse(200, 'Rescan completed')
@Response<ErrorResponse>(400, 'Image file is required or invalid cropArea')
@Response<ErrorResponse>(401, 'Unauthorized')
public async rescanArea(
  @Request() request: ExpressRequest,
  @FormField() cropArea: string,
  @FormField() extractionType: ExtractionType,
): Promise<SuccessResponseType<RescanAreaResponse>> {
  const file = request.file as Express.Multer.File | undefined;
  if (!file) {
    this.setStatus(400);
    throw new Error('Image file is required.');
  }
  try {
    // Parse and validate cropArea inside the try so a validation failure
    // still triggers the finally-block cleanup of the uploaded file.
    const parsedCropArea = parseCropArea(cropArea);
    request.log.debug(
      { extractionType, cropArea: parsedCropArea, filename: file.originalname },
      'Rescan area requested',
    );
    const result = await aiService.extractTextFromImageArea(
      file.path,
      file.mimetype,
      parsedCropArea,
      extractionType,
      request.log,
    );
    return this.success(result);
  } finally {
    // Always delete the temp upload, on success and on any thrown error.
    await cleanupUploadedFile(file);
  }
}
// ==========================================================================
// AI INSIGHTS ENDPOINTS
// ==========================================================================
/**
 * Get quick insights.
 *
 * Get AI-generated quick insights about flyer items: brief recommendations
 * and highlights. Currently returns a stubbed response.
 *
 * @summary Get quick insights
 * @param request Express request for logging
 * @param body Items to analyze
 * @returns AI-generated quick insights
 */
@Post('quick-insights')
@Security('bearerAuth')
@Middlewares(aiGenerationLimiter)
@SuccessResponse(200, 'Quick insights generated')
@Response<ErrorResponse>(401, 'Unauthorized')
public async getQuickInsights(
  @Request() request: ExpressRequest,
  @Body() body: InsightsRequest,
): Promise<SuccessResponseType<TextResponse>> {
  const { items } = body;
  request.log.info(`Server-side quick insights requested for ${items.length} items.`);
  // Stubbed response - actual AI implementation would go here
  return this.success({ text: 'This is a server-generated quick insight: buy the cheap stuff!' });
}
/**
 * Get deep dive analysis.
 *
 * Returns a detailed AI-generated analysis for a set of flyer items
 * (nutrition, price history, recommendations). Currently returns a
 * canned stub response; no model is called.
 *
 * @summary Get deep dive analysis
 * @param request Express request for logging
 * @param body Items to analyze
 * @returns Detailed AI analysis
 */
@Post('deep-dive')
@Security('bearerAuth')
@Middlewares(aiGenerationLimiter)
@SuccessResponse(200, 'Deep dive analysis generated')
@Response<ErrorResponse>(401, 'Unauthorized')
public async getDeepDive(
  @Request() request: ExpressRequest,
  @Body() body: InsightsRequest,
): Promise<SuccessResponseType<TextResponse>> {
  const itemCount = body.items.length;
  request.log.info(`Server-side deep dive requested for ${itemCount} items.`);
  // Stubbed response - actual AI implementation would go here
  const text = 'This is a server-generated deep dive analysis. It is very detailed.';
  return this.success({ text });
}
/**
 * Search web for information.
 *
 * Searches the web for product or deal information using AI.
 * Currently returns a canned stub response with an empty source list.
 *
 * @summary Search web for information
 * @param request Express request for logging
 * @param body Search query
 * @returns Search results with sources
 */
@Post('search-web')
@Security('bearerAuth')
@Middlewares(aiGenerationLimiter)
@SuccessResponse(200, 'Web search completed')
@Response<ErrorResponse>(401, 'Unauthorized')
public async searchWeb(
  @Request() request: ExpressRequest,
  @Body() body: SearchWebRequest,
): Promise<SuccessResponseType<SearchWebResponse>> {
  request.log.info(`Server-side web search requested for query: ${body.query}`);
  // Stubbed response - actual AI implementation would go here
  const stub: SearchWebResponse = { text: 'The web says this is good.', sources: [] };
  return this.success(stub);
}
/**
 * Compare prices across stores.
 *
 * Compares prices for the given items across different stores using AI.
 * Currently returns a canned stub response with an empty source list.
 *
 * @summary Compare prices across stores
 * @param request Express request for logging
 * @param body Items to compare
 * @returns Price comparison results
 */
@Post('compare-prices')
@Security('bearerAuth')
@Middlewares(aiGenerationLimiter)
@SuccessResponse(200, 'Price comparison completed')
@Response<ErrorResponse>(401, 'Unauthorized')
public async comparePrices(
  @Request() request: ExpressRequest,
  @Body() body: ComparePricesRequest,
): Promise<SuccessResponseType<SearchWebResponse>> {
  const itemCount = body.items.length;
  request.log.info(`Server-side price comparison requested for ${itemCount} items.`);
  // Stubbed response - actual AI implementation would go here
  const stub: SearchWebResponse = {
    text: 'This is a server-generated price comparison. Milk is cheaper at SuperMart.',
    sources: [],
  };
  return this.success(stub);
}
/**
 * Plan shopping trip.
 *
 * Plans an optimized shopping trip to a store from the supplied items and
 * user location, delegating to the Google-Maps-backed AI service.
 *
 * @summary Plan shopping trip
 * @param request Express request for logging
 * @param body Trip planning parameters
 * @returns Trip plan with directions
 */
@Post('plan-trip')
@Security('bearerAuth')
@Middlewares(aiGenerationLimiter)
@SuccessResponse(200, 'Trip plan generated')
@Response<ErrorResponse>(401, 'Unauthorized')
@Response<ErrorResponse>(501, 'Feature disabled')
public async planTrip(
  @Request() request: ExpressRequest,
  @Body() body: PlanTripRequest,
): Promise<SuccessResponseType<PlanTripResponse>> {
  const { items, store, userLocation } = body;
  request.log.debug(
    { itemCount: items.length, storeName: store.name },
    'Trip planning requested.',
  );
  try {
    // Note: planTripWithMaps is currently disabled and throws immediately.
    // The cast is safe since FlyerItemForAnalysis has the same shape as FlyerItem.
    const plan = await aiService.planTripWithMaps(
      items as unknown as FlyerItem[],
      store,
      userLocation as GeolocationCoordinates,
      request.log,
    );
    return this.success(plan);
  } catch (error) {
    request.log.error({ error }, 'Error in plan-trip endpoint');
    throw error;
  }
}
// ==========================================================================
// STUBBED FUTURE ENDPOINTS
// ==========================================================================
/**
 * Generate image (not implemented).
 *
 * Placeholder endpoint for prompt-based image generation.
 * Always responds with 501 Not Implemented.
 *
 * @summary Generate image (not implemented)
 * @param request Express request for logging
 * @param _body Image generation prompt (ignored)
 * @returns 501 Not Implemented
 */
@Post('generate-image')
@Security('bearerAuth')
@Middlewares(aiGenerationLimiter)
@SuccessResponse(501, 'Not implemented')
@Response<ErrorResponse>(401, 'Unauthorized')
@Response<ErrorResponse>(501, 'Not implemented')
public async generateImage(
  @Request() request: ExpressRequest,
  @Body() _body: GenerateImageRequest,
): Promise<ErrorResponse> {
  request.log.info('Request received for unimplemented endpoint: generate-image');
  this.setStatus(501);
  const message = 'Image generation is not yet implemented.';
  return this.error(ControllerErrorCode.NOT_IMPLEMENTED, message);
}
/**
 * Generate speech (not implemented).
 *
 * Placeholder endpoint for text-to-speech conversion.
 * Always responds with 501 Not Implemented.
 *
 * @summary Generate speech (not implemented)
 * @param request Express request for logging
 * @param _body Text to convert to speech (ignored)
 * @returns 501 Not Implemented
 */
@Post('generate-speech')
@Security('bearerAuth')
@Middlewares(aiGenerationLimiter)
@SuccessResponse(501, 'Not implemented')
@Response<ErrorResponse>(401, 'Unauthorized')
@Response<ErrorResponse>(501, 'Not implemented')
public async generateSpeech(
  @Request() request: ExpressRequest,
  @Body() _body: GenerateSpeechRequest,
): Promise<ErrorResponse> {
  request.log.info('Request received for unimplemented endpoint: generate-speech')
  this.setStatus(501);
  const message = 'Speech generation is not yet implemented.';
  return this.error(ControllerErrorCode.NOT_IMPLEMENTED, message);
}
}

View File

@@ -0,0 +1,486 @@
// src/controllers/auth.controller.test.ts
// ============================================================================
// AUTH CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the AuthController class. These tests verify controller
// logic in isolation by mocking external dependencies like auth service,
// passport, and response handling.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest, Response as ExpressResponse } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock all external dependencies before importing the controller module.
// ============================================================================
// Mock tsoa decorators and Controller class (required before controller import)
// Mock tsoa decorators and Controller class (required before controller import).
// Decorators become no-ops and Controller.setStatus is inert, so controller
// methods can be invoked directly as plain async functions in these tests.
vi.mock('tsoa', () => ({
Controller: class Controller {
protected setStatus(_status: number): void {
// Mock setStatus
}
},
Get: () => () => {},
Post: () => () => {},
Route: () => () => {},
Tags: () => () => {},
Body: () => () => {},
Request: () => () => {},
SuccessResponse: () => () => {},
Response: () => () => {},
Middlewares: () => () => {},
}));
// Mock auth service — every method is a vi.fn() so each test controls its result.
vi.mock('../services/authService', () => ({
authService: {
registerAndLoginUser: vi.fn(),
handleSuccessfulLogin: vi.fn(),
resetPassword: vi.fn(),
updatePassword: vi.fn(),
refreshAccessToken: vi.fn(),
logout: vi.fn(),
},
}));
// Mock passport (its authenticate() is only exercised by login tests)
vi.mock('../config/passport', () => ({
default: {
authenticate: vi.fn(),
},
}));
// Mock password strength validation
vi.mock('../utils/authUtils', () => ({
validatePasswordStrength: vi.fn(),
}));
// Mock rate limiters as pass-through middleware (call next() unconditionally)
vi.mock('../config/rateLimiters', () => ({
loginLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
registerLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
forgotPasswordLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
resetPasswordLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
refreshTokenLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
logoutLimiter: (_req: unknown, _res: unknown, next: () => void) => next(),
}));
// Import mocked modules after mock definitions
import { authService } from '../services/authService';
import { validatePasswordStrength } from '../utils/authUtils';
import { AuthController } from './auth.controller';
// Cast mocked modules for type-safe access
const mockedAuthService = authService as Mocked<typeof authService>;
const mockedValidatePasswordStrength = validatePasswordStrength as ReturnType<typeof vi.fn>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a mock Express request with a stubbed pino-style logger and a
 * stubbed response object. Fields supplied via `overrides` (body, cookies,
 * res, ...) replace the defaults.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLogger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const stubResponse = {
    cookie: vi.fn(),
  } as unknown as ExpressResponse;
  const base = {
    body: {},
    cookies: {},
    log: stubLogger,
    res: stubResponse,
  };
  return { ...base, ...overrides } as unknown as ExpressRequest;
}
/**
 * Returns a fixed, fully-populated user-profile fixture for assertions.
 * All timestamps share one constant so equality checks are predictable.
 */
function createMockUserProfile() {
  const timestamp = '2024-01-01T00:00:00.000Z';
  return {
    full_name: 'Test User',
    avatar_url: null,
    address_id: null,
    points: 0,
    role: 'user' as const,
    preferences: null,
    created_by: null,
    updated_by: null,
    created_at: timestamp,
    updated_at: timestamp,
    user: {
      user_id: 'test-user-id',
      email: 'test@example.com',
      created_at: timestamp,
      updated_at: timestamp,
    },
    address: null,
  };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('AuthController', () => {
let controller: AuthController;
beforeEach(() => {
vi.clearAllMocks();
controller = new AuthController();
});
afterEach(() => {
vi.useRealTimers();
});
// ==========================================================================
// REGISTRATION TESTS
// ==========================================================================
describe('register()', () => {
it('should successfully register a new user', async () => {
// Arrange
const mockUserProfile = createMockUserProfile();
const request = createMockRequest({
body: {
email: 'test@example.com',
password: 'SecurePassword123!',
full_name: 'Test User',
},
});
mockedValidatePasswordStrength.mockReturnValue({ isValid: true, feedback: '' });
mockedAuthService.registerAndLoginUser.mockResolvedValue({
newUserProfile: mockUserProfile,
accessToken: 'mock-access-token',
refreshToken: 'mock-refresh-token',
});
// Act
const result = await controller.register(
{
email: 'test@example.com',
password: 'SecurePassword123!',
full_name: 'Test User',
},
request,
);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.message).toBe('User registered successfully!');
expect(result.data.userprofile.user.email).toBe('test@example.com');
expect(result.data.token).toBe('mock-access-token');
}
expect(mockedAuthService.registerAndLoginUser).toHaveBeenCalledWith(
'test@example.com',
'SecurePassword123!',
'Test User',
undefined,
expect.anything(),
);
});
it('should reject registration with weak password', async () => {
// Arrange
const request = createMockRequest({
body: {
email: 'test@example.com',
password: 'weak',
},
});
mockedValidatePasswordStrength.mockReturnValue({
isValid: false,
feedback: 'Password must be at least 8 characters long.',
});
// Act & Assert
await expect(
controller.register(
{
email: 'test@example.com',
password: 'weak',
},
request,
),
).rejects.toThrow('Password must be at least 8 characters long.');
expect(mockedAuthService.registerAndLoginUser).not.toHaveBeenCalled();
});
it('should sanitize email input (trim and lowercase)', async () => {
// Arrange
const mockUserProfile = createMockUserProfile();
const request = createMockRequest({
body: {
email: ' TEST@EXAMPLE.COM ',
password: 'SecurePassword123!',
},
});
mockedValidatePasswordStrength.mockReturnValue({ isValid: true, feedback: '' });
mockedAuthService.registerAndLoginUser.mockResolvedValue({
newUserProfile: mockUserProfile,
accessToken: 'mock-access-token',
refreshToken: 'mock-refresh-token',
});
// Act
await controller.register(
{
email: ' TEST@EXAMPLE.COM ',
password: 'SecurePassword123!',
},
request,
);
// Assert
expect(mockedAuthService.registerAndLoginUser).toHaveBeenCalledWith(
'test@example.com',
'SecurePassword123!',
undefined,
undefined,
expect.anything(),
);
});
});
// ==========================================================================
// PASSWORD RESET TESTS
// ==========================================================================
describe('forgotPassword()', () => {
it('should return generic message regardless of email existence', async () => {
// Arrange
const request = createMockRequest();
mockedAuthService.resetPassword.mockResolvedValue('mock-reset-token');
// Act
const result = await controller.forgotPassword({ email: 'test@example.com' }, request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.message).toBe(
'If an account with that email exists, a password reset link has been sent.',
);
}
expect(mockedAuthService.resetPassword).toHaveBeenCalledWith(
'test@example.com',
expect.anything(),
);
});
it('should return same message when email does not exist', async () => {
// Arrange
const request = createMockRequest();
mockedAuthService.resetPassword.mockResolvedValue(null);
// Act
const result = await controller.forgotPassword({ email: 'nonexistent@example.com' }, request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.message).toBe(
'If an account with that email exists, a password reset link has been sent.',
);
}
});
it('should include token in test environment', async () => {
// Arrange
const originalEnv = process.env.NODE_ENV;
process.env.NODE_ENV = 'test';
const request = createMockRequest();
mockedAuthService.resetPassword.mockResolvedValue('mock-reset-token');
// Act
const result = await controller.forgotPassword({ email: 'test@example.com' }, request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.token).toBe('mock-reset-token');
}
// Cleanup
process.env.NODE_ENV = originalEnv;
});
});
describe('resetPassword()', () => {
it('should successfully reset password with valid token', async () => {
// Arrange
const request = createMockRequest();
mockedAuthService.updatePassword.mockResolvedValue(true);
// Act
const result = await controller.resetPassword(
{ token: 'valid-reset-token', newPassword: 'NewSecurePassword123!' },
request,
);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.message).toBe('Password has been reset successfully.');
}
expect(mockedAuthService.updatePassword).toHaveBeenCalledWith(
'valid-reset-token',
'NewSecurePassword123!',
expect.anything(),
);
});
it('should reject reset with invalid token', async () => {
// Arrange
const request = createMockRequest();
mockedAuthService.updatePassword.mockResolvedValue(false);
// Act & Assert
await expect(
controller.resetPassword(
{ token: 'invalid-token', newPassword: 'NewSecurePassword123!' },
request,
),
).rejects.toThrow('Invalid or expired password reset token.');
});
});
// ==========================================================================
// TOKEN MANAGEMENT TESTS
// ==========================================================================
describe('refreshToken()', () => {
it('should successfully refresh access token', async () => {
// Arrange
const request = createMockRequest({
cookies: { refreshToken: 'valid-refresh-token' },
});
mockedAuthService.refreshAccessToken.mockResolvedValue({
accessToken: 'new-access-token',
});
// Act
const result = await controller.refreshToken(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.token).toBe('new-access-token');
}
expect(mockedAuthService.refreshAccessToken).toHaveBeenCalledWith(
'valid-refresh-token',
expect.anything(),
);
});
it('should reject when refresh token cookie is missing', async () => {
// Arrange
const request = createMockRequest({ cookies: {} });
// Act & Assert
await expect(controller.refreshToken(request)).rejects.toThrow('Refresh token not found.');
});
it('should reject when refresh token is invalid', async () => {
// Arrange
const request = createMockRequest({
cookies: { refreshToken: 'invalid-token' },
});
mockedAuthService.refreshAccessToken.mockResolvedValue(null);
// Act & Assert
await expect(controller.refreshToken(request)).rejects.toThrow(
'Invalid or expired refresh token.',
);
});
});
describe('logout()', () => {
it('should successfully logout and clear refresh token cookie', async () => {
// Arrange
const mockCookie = vi.fn();
const request = createMockRequest({
cookies: { refreshToken: 'valid-refresh-token' },
res: { cookie: mockCookie } as unknown as ExpressResponse,
});
mockedAuthService.logout.mockResolvedValue(undefined);
// Act
const result = await controller.logout(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.message).toBe('Logged out successfully.');
}
expect(mockCookie).toHaveBeenCalledWith(
'refreshToken',
'',
expect.objectContaining({ maxAge: 0, httpOnly: true }),
);
});
it('should succeed even without refresh token cookie', async () => {
// Arrange
const mockCookie = vi.fn();
const request = createMockRequest({
cookies: {},
res: { cookie: mockCookie } as unknown as ExpressResponse,
});
// Act
const result = await controller.logout(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data.message).toBe('Logged out successfully.');
}
expect(mockedAuthService.logout).not.toHaveBeenCalled();
});
});
// ==========================================================================
// BASE CONTROLLER INTEGRATION
// ==========================================================================
describe('BaseController integration', () => {
it('should use success helper for consistent response format', async () => {
// Arrange
const request = createMockRequest({ cookies: {} });
const mockCookie = vi.fn();
(request.res as ExpressResponse).cookie = mockCookie;
// Act
const result = await controller.logout(request);
// Assert
expect(result).toHaveProperty('success', true);
expect(result).toHaveProperty('data');
});
});
});

View File

@@ -0,0 +1,827 @@
// src/controllers/auth.controller.ts
// ============================================================================
// AUTH CONTROLLER
// ============================================================================
// Handles user authentication and authorization endpoints including:
// - User registration and login
// - Password reset flow (forgot-password, reset-password)
// - JWT token refresh and logout
// - OAuth initiation (Google, GitHub) - Note: callbacks handled via Express middleware
//
// This controller implements the authentication API following ADR-028 (API Response Standards)
// and integrates with the existing passport-based authentication system.
// ============================================================================
import {
Route,
Tags,
Post,
Get,
Body,
Request,
SuccessResponse,
Response,
Middlewares,
} from 'tsoa';
import type { Request as ExpressRequest, Response as ExpressResponse } from 'express';
import passport from '../config/passport';
import {
BaseController,
SuccessResponse as SuccessResponseType,
ErrorResponse,
} from './base.controller';
import { authService } from '../services/authService';
import { UniqueConstraintError, ValidationError } from '../services/db/errors.db';
import type { UserProfile } from '../types';
import { validatePasswordStrength } from '../utils/authUtils';
import {
loginLimiter,
registerLimiter,
forgotPasswordLimiter,
resetPasswordLimiter,
refreshTokenLimiter,
logoutLimiter,
} from '../config/rateLimiters';
import type { AddressDto, UserProfileDto } from '../dtos/common.dto';
/**
 * User registration request body.
 */
interface RegisterRequest {
/**
* User's email address.
* @format email
* @example "user@example.com"
*/
email: string;
/**
* User's password. Must be at least 8 characters with good entropy.
* @minLength 8
* @example "SecurePassword123!"
*/
password: string;
/**
* User's full name (optional).
* @example "John Doe"
*/
full_name?: string;
/**
* URL to user's avatar image (optional).
* @format uri
*/
avatar_url?: string;
}
/**
 * Successful registration response data.
 */
interface RegisterResponseData {
/** Success message */
message: string;
/** The created user's profile */
userprofile: UserProfileDto;
/** JWT access token */
token: string;
}
/**
 * User login request body.
 */
interface LoginRequest {
/**
* User's email address.
* @format email
* @example "user@example.com"
*/
email: string;
/**
* User's password.
* @example "SecurePassword123!"
*/
password: string;
/**
* If true, refresh token lasts 30 days instead of session-only.
*/
rememberMe?: boolean;
}
/**
 * Successful login response data.
 */
interface LoginResponseData {
/** The authenticated user's profile */
userprofile: UserProfileDto;
/** JWT access token */
token: string;
}
/**
 * Forgot password request body.
 */
interface ForgotPasswordRequest {
/**
* Email address of the account to reset.
* @format email
* @example "user@example.com"
*/
email: string;
}
/**
 * Forgot password response data.
 */
interface ForgotPasswordResponseData {
/** Generic success message (same for existing and non-existing emails for security) */
message: string;
/** Reset token (only included in test environment for testability) */
token?: string;
}
/**
 * Reset password request body.
 */
interface ResetPasswordRequest {
/**
* Password reset token from email.
*/
token: string;
/**
* New password. Must be at least 8 characters with good entropy.
* @minLength 8
*/
newPassword: string;
}
/**
 * Logout response data.
 *
 * NOTE(review): structurally identical to MessageResponseData below; presumably
 * kept as a distinct named type for a stable OpenAPI schema name — confirm
 * before consolidating.
 */
interface LogoutResponseData {
/** Success message */
message: string;
}
/**
 * Token refresh response data.
 */
interface RefreshTokenResponseData {
/** New JWT access token */
token: string;
}
/**
 * Generic message response data.
 *
 * NOTE(review): duplicates the shape of LogoutResponseData above.
 */
interface MessageResponseData {
/** Success message */
message: string;
}
// ============================================================================
// CUSTOM ERROR CLASSES
// ============================================================================
/**
 * Error type for authentication failures (login, token refresh, etc.).
 * Carries an HTTP status code so error-handling middleware can map the
 * failure to the right response; defaults to 401 Unauthorized.
 */
class AuthenticationError extends Error {
  /** HTTP status code associated with this failure. */
  public status: number;

  constructor(message: string, status = 401) {
    super(message);
    this.name = 'AuthenticationError';
    this.status = status;
  }
}
// ============================================================================
// DTO CONVERSION HELPERS
// ============================================================================
/**
 * Converts a UserProfile to a UserProfileDto.
 *
 * UserProfile embeds an Address whose GeoJSONPoint uses tuple coordinates
 * ([lng, lat]) that tsoa cannot serialize, so the DTO flattens them into
 * separate latitude/longitude fields. Explicit latitude/longitude on the
 * address win over the GeoJSON fallback; both fall back to null.
 *
 * @param userProfile The UserProfile from the service layer
 * @returns A UserProfileDto safe for tsoa serialization
 */
function toUserProfileDto(userProfile: UserProfile): UserProfileDto {
  const { address, user } = userProfile;
  let addressDto: AddressDto | null = null;
  if (address) {
    // GeoJSON order is [longitude, latitude].
    addressDto = {
      address_id: address.address_id,
      address_line_1: address.address_line_1,
      address_line_2: address.address_line_2,
      city: address.city,
      province_state: address.province_state,
      postal_code: address.postal_code,
      country: address.country,
      latitude: address.latitude ?? address.location?.coordinates[1] ?? null,
      longitude: address.longitude ?? address.location?.coordinates[0] ?? null,
      created_at: address.created_at,
      updated_at: address.updated_at,
    };
  }
  return {
    full_name: userProfile.full_name,
    avatar_url: userProfile.avatar_url,
    address_id: userProfile.address_id,
    points: userProfile.points,
    role: userProfile.role,
    preferences: userProfile.preferences,
    created_by: userProfile.created_by,
    updated_by: userProfile.updated_by,
    created_at: userProfile.created_at,
    updated_at: userProfile.updated_at,
    user: {
      user_id: user.user_id,
      email: user.email,
      created_at: user.created_at,
      updated_at: user.updated_at,
    },
    address: addressDto,
  };
}
// ============================================================================
// AUTH CONTROLLER
// ============================================================================
/**
* Authentication controller handling user registration, login, password reset,
* and token management.
*
* OAuth endpoints (Google, GitHub) use passport middleware for redirect-based
* authentication flows and are handled differently than standard JSON endpoints.
*/
@Route('auth')
@Tags('Auth')
export class AuthController extends BaseController {
// ==========================================================================
// REGISTRATION
// ==========================================================================
/**
 * Register a new user account.
 *
 * Creates a new user from the supplied credentials, logs them in, and returns
 * authentication tokens. The password must be at least 8 characters with good
 * entropy (mix of characters).
 *
 * @summary Register a new user
 * @param requestBody User registration data
 * @param request Express request object (for logging and cookies)
 * @returns User profile and JWT token on successful registration
 */
@Post('register')
@Middlewares(registerLimiter)
@SuccessResponse(201, 'User registered successfully')
@Response<ErrorResponse>(400, 'Validation error (weak password)')
@Response<ErrorResponse>(409, 'Email already registered')
@Response<ErrorResponse>(429, 'Too many registration attempts')
public async register(
  @Body() requestBody: RegisterRequest,
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<RegisterResponseData>> {
  const { email, password, full_name, avatar_url } = this.sanitizeRegisterInput(requestBody);
  const log = request.log;
  // Reject weak passwords before touching the service layer.
  const passwordCheck = validatePasswordStrength(password);
  if (!passwordCheck.isValid) {
    throw new ValidationError([], passwordCheck.feedback);
  }
  try {
    const registration = await authService.registerAndLoginUser(
      email,
      password,
      full_name,
      avatar_url,
      log,
    );
    // Refresh token travels as an httpOnly cookie; access token in the body.
    this.setRefreshTokenCookie(request.res!, registration.refreshToken, false);
    this.setStatus(201);
    return this.success({
      message: 'User registered successfully!',
      userprofile: toUserProfileDto(registration.newUserProfile),
      token: registration.accessToken,
    });
  } catch (error: unknown) {
    if (error instanceof UniqueConstraintError) {
      // Duplicate email -> 409 Conflict.
      this.setStatus(409);
      throw error;
    }
    log.error({ error }, `User registration route failed for email: ${email}.`);
    throw error;
  }
}
// ==========================================================================
// LOGIN
// ==========================================================================
/**
 * Login with email and password.
 *
 * Authenticates user credentials via the local passport strategy and returns JWT tokens.
 * Failed login attempts are tracked for account lockout protection.
 *
 * Implementation note: passport's authenticate() is callback-based, so the
 * whole flow is wrapped in a Promise that resolves with the success envelope
 * or rejects with the passport/service error.
 *
 * @summary Login with email and password
 * @param requestBody Login credentials
 * @param request Express request object
 * @returns User profile and JWT token on successful authentication
 */
@Post('login')
@Middlewares(loginLimiter)
@SuccessResponse(200, 'Login successful')
@Response<ErrorResponse>(401, 'Invalid credentials or account locked')
@Response<ErrorResponse>(429, 'Too many login attempts')
public async login(
@Body() requestBody: LoginRequest,
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<LoginResponseData>> {
const { email, password, rememberMe } = this.sanitizeLoginInput(requestBody);
const reqLog = request.log;
return new Promise((resolve, reject) => {
// Attach sanitized email to request body for passport
// (the local strategy reads credentials from request.body, so the
// sanitized values are written back in place before invoking it).
request.body.email = email;
request.body.password = password;
passport.authenticate(
'local',
{ session: false },
async (err: Error | null, user: Express.User | false, info: { message: string }) => {
reqLog.debug(`[API /login] Received login request for email: ${email}`);
if (err) {
// Strategy-level failure (e.g. infrastructure error) -> propagate as-is.
reqLog.error({ err }, '[API /login] Passport reported an error.');
return reject(err);
}
if (!user) {
// Credentials rejected; surface passport's message as a 401-style error.
reqLog.warn({ info }, '[API /login] Passport reported NO USER found.');
const authError = new AuthenticationError(info?.message || 'Login failed');
return reject(authError);
}
reqLog.info(
{ userId: (user as UserProfile).user?.user_id },
'[API /login] User authenticated.',
);
try {
const userProfile = user as UserProfile;
const { accessToken, refreshToken } = await authService.handleSuccessfulLogin(
userProfile,
reqLog,
);
reqLog.info(`JWT and refresh token issued for user: ${userProfile.user.email}`);
// Set refresh token cookie; rememberMe extends its lifetime.
this.setRefreshTokenCookie(request.res!, refreshToken, rememberMe || false);
resolve(
this.success({ userprofile: toUserProfileDto(userProfile), token: accessToken }),
);
} catch (tokenErr) {
// Shadowed 'email' falls back to request.body.email if the profile is malformed.
const email = (user as UserProfile)?.user?.email || request.body.email;
reqLog.error({ error: tokenErr }, `Failed to process login for user: ${email}`);
reject(tokenErr);
}
},
)(request, request.res!, (err: unknown) => {
// Third argument is passport's "next"; any middleware error rejects the login.
if (err) reject(err);
});
});
}
// ==========================================================================
// PASSWORD RESET FLOW
// ==========================================================================
/**
 * Request a password reset.
 *
 * Triggers a password-reset email when the account exists. For security the
 * response is identical whether or not the email exists, preventing account
 * enumeration.
 *
 * @summary Request password reset email
 * @param requestBody Email address for password reset
 * @param request Express request object
 * @returns Generic success message (same for existing and non-existing emails)
 */
@Post('forgot-password')
@Middlewares(forgotPasswordLimiter)
@SuccessResponse(200, 'Request processed')
@Response<ErrorResponse>(429, 'Too many password reset requests')
public async forgotPassword(
  @Body() requestBody: ForgotPasswordRequest,
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<ForgotPasswordResponseData>> {
  const email = this.sanitizeEmail(requestBody.email);
  const log = request.log;
  try {
    const resetToken = await authService.resetPassword(email, log);
    const payload: ForgotPasswordResponseData = {
      message: 'If an account with that email exists, a password reset link has been sent.',
    };
    // Expose the raw token only under test so end-to-end flows can complete the reset.
    if (process.env.NODE_ENV === 'test' && resetToken) {
      payload.token = resetToken;
    }
    return this.success(payload);
  } catch (error) {
    log.error({ error }, `An error occurred during /forgot-password for email: ${email}`);
    throw error;
  }
}
/**
 * Reset password with token.
 *
 * Sets a new password using a valid reset token from the forgot-password
 * email. The token is single-use and expires after 1 hour.
 *
 * @summary Reset password with token
 * @param requestBody Reset token and new password
 * @param request Express request object
 * @returns Success message on password reset
 */
@Post('reset-password')
@Middlewares(resetPasswordLimiter)
@SuccessResponse(200, 'Password reset successful')
@Response<ErrorResponse>(400, 'Invalid or expired token, or weak password')
@Response<ErrorResponse>(429, 'Too many reset attempts')
public async resetPassword(
  @Body() requestBody: ResetPasswordRequest,
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<MessageResponseData>> {
  const { token, newPassword } = requestBody;
  const log = request.log;
  try {
    const updated = await authService.updatePassword(token, newPassword, log);
    if (updated) {
      return this.success({ message: 'Password has been reset successfully.' });
    }
    // Unknown/expired token -> 400; the throw below is logged by the catch
    // before being rethrown, matching the established error flow.
    this.setStatus(400);
    throw new ValidationError([], 'Invalid or expired password reset token.');
  } catch (error) {
    log.error({ error }, 'An error occurred during password reset.');
    throw error;
  }
}
// ==========================================================================
// TOKEN MANAGEMENT
// ==========================================================================
/**
 * Refresh access token.
 *
 * Issues a new access token from the refresh token cookie. The refresh token
 * itself is not rotated, which allows multiple active sessions.
 *
 * @summary Refresh access token
 * @param request Express request object (contains refresh token cookie)
 * @returns New JWT access token
 */
@Post('refresh-token')
@Middlewares(refreshTokenLimiter)
@SuccessResponse(200, 'New access token issued')
@Response<ErrorResponse>(401, 'Refresh token not found')
@Response<ErrorResponse>(403, 'Invalid or expired refresh token')
@Response<ErrorResponse>(429, 'Too many refresh attempts')
public async refreshToken(
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<RefreshTokenResponseData>> {
  const log = request.log;
  const cookieToken = request.cookies?.refreshToken;
  // No cookie at all: the client simply is not logged in.
  if (!cookieToken) {
    this.setStatus(401);
    throw new AuthenticationError('Refresh token not found.');
  }
  try {
    const refreshed = await authService.refreshAccessToken(cookieToken, log);
    if (refreshed) {
      return this.success({ token: refreshed.accessToken });
    }
    // Token present but rejected by the service: expired or revoked.
    this.setStatus(403);
    throw new AuthenticationError('Invalid or expired refresh token.', 403);
  } catch (error) {
    // Auth failures are expected outcomes; only log unexpected errors.
    if (!(error instanceof AuthenticationError)) {
      log.error({ error }, 'An error occurred during /refresh-token.');
    }
    throw error;
  }
}
/**
 * Logout user.
 *
 * Invalidates the refresh token and clears its cookie. The access token
 * stays valid until it expires on its own (15 minutes).
 *
 * @summary Logout user
 * @param request Express request object
 * @returns Success message
 */
@Post('logout')
@Middlewares(logoutLimiter)
@SuccessResponse(200, 'Logged out successfully')
@Response<ErrorResponse>(429, 'Too many logout attempts')
public async logout(
  @Request() request: ExpressRequest,
): Promise<SuccessResponseType<LogoutResponseData>> {
  const log = request.log;
  const cookieToken = request.cookies?.refreshToken;
  if (cookieToken) {
    // Best-effort revocation: don't block the response on the DB write.
    authService.logout(cookieToken, log).catch((err: Error) => {
      log.error({ error: err }, 'Logout token invalidation failed in background.');
    });
  }
  // Always clear the cookie, even when no token was present.
  this.clearRefreshTokenCookie(request.res!);
  return this.success({ message: 'Logged out successfully.' });
}
// ==========================================================================
// OAUTH INITIATION ENDPOINTS
// ==========================================================================
// Note: OAuth callback endpoints are handled by Express middleware due to
// their redirect-based nature. These initiation endpoints redirect to the
// OAuth provider and are documented here for completeness.
//
// The actual callbacks (/auth/google/callback, /auth/github/callback) remain
// in the Express routes file (auth.routes.ts) because tsoa controllers are
// designed for JSON APIs, not redirect-based OAuth flows.
// ==========================================================================
/**
 * Initiate Google OAuth login.
 *
 * Redirects to Google for authentication. After a successful sign-in Google
 * redirects back to /auth/google/callback with an authorization code.
 *
 * Note: This endpoint performs a redirect, not a JSON response.
 *
 * @summary Initiate Google OAuth
 * @param request Express request object
 */
@Get('google')
@Response(302, 'Redirects to Google OAuth consent screen')
public async googleAuth(@Request() request: ExpressRequest): Promise<void> {
  // Wrap passport's middleware-style API in a promise so tsoa can await it.
  await new Promise<void>((resolve, reject) => {
    const middleware = passport.authenticate('google', { session: false });
    middleware(request, request.res!, (err: unknown) => (err ? reject(err) : resolve()));
  });
}
/**
 * Google OAuth callback.
 *
 * Handles the callback from Google after user authentication.
 * On success, redirects to the frontend with an access token in the query string.
 * On failure, redirects to the frontend with an error parameter.
 *
 * Note: This endpoint performs a redirect, not a JSON response.
 *
 * @summary Google OAuth callback
 * @param request Express request object
 */
@Get('google/callback')
@Response(302, 'Redirects to frontend with token or error')
public async googleAuthCallback(@Request() request: ExpressRequest): Promise<void> {
  return new Promise((resolve, reject) => {
    // Invoke passport with a custom verify callback so this controller keeps
    // control of the redirect, instead of letting passport end the response.
    passport.authenticate(
      'google',
      { session: false, failureRedirect: '/?error=google_auth_failed' },
      async (err: Error | null, user: Express.User | false) => {
        if (err) {
          request.log.error({ error: err }, 'Google OAuth authentication error');
          return reject(err);
        }
        // handleOAuthCallback performs the success/failure redirect itself,
        // including the `user === false` (auth denied) case.
        await this.handleOAuthCallback(
          'google',
          user as UserProfile | false,
          request,
          request.res!,
        );
        resolve();
      },
    )(request, request.res!, (err: unknown) => {
      // Surface middleware-level failures (e.g. strategy configuration errors).
      if (err) reject(err);
    });
  });
}
/**
 * Initiate GitHub OAuth login.
 *
 * Redirects to GitHub for authentication. After a successful sign-in GitHub
 * redirects back to /auth/github/callback with an authorization code.
 *
 * Note: This endpoint performs a redirect, not a JSON response.
 *
 * @summary Initiate GitHub OAuth
 * @param request Express request object
 */
@Get('github')
@Response(302, 'Redirects to GitHub OAuth consent screen')
public async githubAuth(@Request() request: ExpressRequest): Promise<void> {
  // Wrap passport's middleware-style API in a promise so tsoa can await it.
  await new Promise<void>((resolve, reject) => {
    const middleware = passport.authenticate('github', { session: false });
    middleware(request, request.res!, (err: unknown) => (err ? reject(err) : resolve()));
  });
}
/**
 * GitHub OAuth callback.
 *
 * Handles the callback from GitHub after user authentication.
 * On success, redirects to the frontend with an access token in the query string.
 * On failure, redirects to the frontend with an error parameter.
 *
 * Note: This endpoint performs a redirect, not a JSON response.
 *
 * @summary GitHub OAuth callback
 * @param request Express request object
 */
@Get('github/callback')
@Response(302, 'Redirects to frontend with token or error')
public async githubAuthCallback(@Request() request: ExpressRequest): Promise<void> {
  return new Promise((resolve, reject) => {
    // Invoke passport with a custom verify callback so this controller keeps
    // control of the redirect, instead of letting passport end the response.
    passport.authenticate(
      'github',
      { session: false, failureRedirect: '/?error=github_auth_failed' },
      async (err: Error | null, user: Express.User | false) => {
        if (err) {
          request.log.error({ error: err }, 'GitHub OAuth authentication error');
          return reject(err);
        }
        // handleOAuthCallback performs the success/failure redirect itself,
        // including the `user === false` (auth denied) case.
        await this.handleOAuthCallback(
          'github',
          user as UserProfile | false,
          request,
          request.res!,
        );
        resolve();
      },
    )(request, request.res!, (err: unknown) => {
      // Surface middleware-level failures (e.g. strategy configuration errors).
      if (err) reject(err);
    });
  });
}
// ==========================================================================
// PRIVATE HELPER METHODS
// ==========================================================================
/**
 * Normalizes an email address for storage and lookup: strips surrounding
 * whitespace, then lowercases the whole string.
 */
private sanitizeEmail(email: string): string {
  const trimmed = email.trim();
  return trimmed.toLowerCase();
}
/**
 * Normalizes registration input: lowercases the email, trims the password
 * and free-text fields, and maps empty optional strings to undefined.
 */
private sanitizeRegisterInput(input: RegisterRequest): RegisterRequest {
  const fullName = input.full_name?.trim();
  const avatarUrl = input.avatar_url?.trim();
  return {
    email: this.sanitizeEmail(input.email),
    password: input.password.trim(),
    // Empty strings collapse to undefined so downstream code sees "absent".
    full_name: fullName ? fullName : undefined,
    avatar_url: avatarUrl ? avatarUrl : undefined,
  };
}
/**
 * Normalizes login input. Only the email is transformed; the password is
 * passed through untouched and rememberMe is preserved as supplied.
 */
private sanitizeLoginInput(input: LoginRequest): LoginRequest {
  const { password, rememberMe } = input;
  return {
    email: this.sanitizeEmail(input.email),
    password,
    rememberMe,
  };
}
/**
 * Sets the refresh token as an httpOnly cookie.
 *
 * @param res Express response object
 * @param refreshToken The refresh token to set
 * @param rememberMe If true, cookie persists for 30 days; otherwise 7 days
 */
private setRefreshTokenCookie(
  res: ExpressResponse,
  refreshToken: string,
  rememberMe: boolean,
): void {
  const maxAge = rememberMe
    ? 30 * 24 * 60 * 60 * 1000 // 30 days
    : 7 * 24 * 60 * 60 * 1000; // 7 days
  // NOTE(review): no `sameSite` attribute is set, so the browser default
  // applies. Consider setting it explicitly — but confirm against the OAuth
  // redirect flow (which also sets this cookie) before changing.
  res.cookie('refreshToken', refreshToken, {
    httpOnly: true, // not readable from client-side JavaScript
    secure: process.env.NODE_ENV === 'production', // HTTPS-only in production
    maxAge,
  });
}
/**
* Clears the refresh token cookie by setting it to expire immediately.
*
* @param res Express response object
*/
private clearRefreshTokenCookie(res: ExpressResponse): void {
res.cookie('refreshToken', '', {
httpOnly: true,
maxAge: 0,
secure: process.env.NODE_ENV === 'production',
});
}
/**
 * Handles OAuth callback by generating tokens and redirecting to frontend.
 *
 * On success: sets the refresh-token cookie (30-day persistence, same as a
 * "remember me" login) and redirects to the frontend with the access token
 * in a provider-specific query parameter. On any failure: redirects to the
 * frontend with `?error=auth_failed`.
 *
 * @param provider OAuth provider name ('google' or 'github')
 * @param user Authenticated user profile or false if authentication failed
 * @param request Express request object
 * @param res Express response object
 */
private async handleOAuthCallback(
  provider: 'google' | 'github',
  user: UserProfile | false,
  request: ExpressRequest,
  res: ExpressResponse,
): Promise<void> {
  const reqLog = request.log;
  if (!user || !user.user) {
    reqLog.error('OAuth callback received but no user profile found');
    res.redirect(`${process.env.FRONTEND_URL}/?error=auth_failed`);
    return;
  }
  try {
    const { accessToken, refreshToken } = await authService.handleSuccessfulLogin(user, reqLog);
    // Reuse the shared cookie helper (rememberMe=true -> 30-day cookie) instead
    // of duplicating the cookie options inline; this keeps cookie attributes
    // consistent with the password-login flow.
    this.setRefreshTokenCookie(res, refreshToken, true);
    // Redirect to frontend with provider-specific token parameter.
    // SECURITY NOTE: the access token travels in the URL query string, where
    // it can leak via server logs, browser history, or Referer headers;
    // consider exchanging a short-lived one-time code instead.
    const tokenParam = provider === 'google' ? 'googleAuthToken' : 'githubAuthToken';
    res.redirect(`${process.env.FRONTEND_URL}/?${tokenParam}=${accessToken}`);
  } catch (err) {
    reqLog.error({ error: err }, `Failed to complete ${provider} OAuth login`);
    res.redirect(`${process.env.FRONTEND_URL}/?error=auth_failed`);
  }
}
}

View File

@@ -0,0 +1,344 @@
// src/controllers/base.controller.ts
// ============================================================================
// BASE CONTROLLER FOR TSOA
// ============================================================================
// Provides a standardized base class for all tsoa controllers, ensuring
// consistent response formatting, error handling, and access to common
// utilities across the API.
//
// All controller methods should use the helper methods provided here to
// construct responses, ensuring compliance with ADR-028 (API Response Format).
// ============================================================================
import { Controller } from 'tsoa';
import type {
SuccessResponse,
ErrorResponse,
PaginatedResponse,
PaginationInput,
PaginationMeta,
ResponseMeta,
ControllerErrorCodeType,
} from './types';
import { ControllerErrorCode } from './types';
/**
* Base controller class providing standardized response helpers and error handling.
*
* All tsoa controllers should extend this class to ensure consistent API
* response formatting per ADR-028.
*
* @example
* ```typescript
* import { Route, Get, Tags } from 'tsoa';
* import { BaseController } from './base.controller';
* import type { SuccessResponse } from './types';
*
* @Route('users')
* @Tags('Users')
* export class UsersController extends BaseController {
* @Get('{id}')
* public async getUser(id: string): Promise<SuccessResponse<User>> {
* const user = await userService.getUserById(id);
* return this.success(user);
* }
* }
* ```
*/
export abstract class BaseController extends Controller {
  // ==========================================================================
  // SUCCESS RESPONSE HELPERS
  // ==========================================================================
  /**
   * Creates a standard success response envelope.
   *
   * Use `this.setStatus(...)` before returning when a status other than 200
   * is needed.
   *
   * @param data - The response payload
   * @param meta - Optional metadata (requestId, timestamp)
   * @returns A SuccessResponse object matching ADR-028 format
   *
   * @example
   * ```typescript
   * // Simple success response
   * return this.success({ id: 1, name: 'Item' });
   *
   * // Success with 201 Created
   * this.setStatus(201);
   * return this.success(newUser);
   *
   * // Success with metadata
   * return this.success(data, { requestId: 'abc-123' });
   * ```
   */
  protected success<T>(data: T, meta?: Omit<ResponseMeta, 'pagination'>): SuccessResponse<T> {
    const response: SuccessResponse<T> = {
      success: true,
      data,
    };
    // Omit `meta` entirely when not provided so the envelope stays minimal.
    if (meta) {
      response.meta = meta;
    }
    return response;
  }
  /**
   * Creates a paginated success response with pagination metadata.
   *
   * @param data - Array of items for the current page
   * @param pagination - Pagination input (page, limit, total)
   * @param meta - Optional additional metadata
   * @returns A PaginatedResponse object with calculated pagination info
   *
   * @example
   * ```typescript
   * const { users, total } = await userService.listUsers({ page, limit });
   * return this.paginated(users, { page, limit, total });
   * ```
   */
  protected paginated<T>(
    data: T[],
    pagination: PaginationInput,
    meta?: Omit<ResponseMeta, 'pagination'>,
  ): PaginatedResponse<T> {
    const paginationMeta = this.calculatePagination(pagination);
    return {
      success: true,
      data,
      meta: {
        ...meta,
        pagination: paginationMeta,
      },
    };
  }
  /**
   * Creates a success response with just a message.
   * Useful for operations that complete successfully but don't return data.
   *
   * @param message - Success message
   * @returns A SuccessResponse with a message object
   *
   * @example
   * ```typescript
   * return this.message('User deleted successfully');
   * ```
   */
  protected message(message: string): SuccessResponse<{ message: string }> {
    return this.success({ message });
  }
  // ==========================================================================
  // ERROR RESPONSE HELPERS
  // ==========================================================================
  /**
   * Creates a standard error response envelope.
   *
   * Note: For most error cases, you should throw an appropriate error class
   * (NotFoundError, ValidationError, etc.) and let the global error handler
   * format the response. Use this method only when you need fine-grained
   * control over the error response format.
   *
   * @param code - Machine-readable error code
   * @param message - Human-readable error message
   * @param details - Optional error details (validation errors, etc.)
   * @param meta - Optional metadata (requestId for error tracking)
   * @returns An ErrorResponse object matching ADR-028 format
   *
   * @example
   * ```typescript
   * // Manual error response (prefer throwing errors instead)
   * this.setStatus(400);
   * return this.error(
   *   ControllerErrorCode.BAD_REQUEST,
   *   'Invalid operation',
   *   { reason: 'Cannot delete last admin user' }
   * );
   * ```
   */
  protected error(
    code: ControllerErrorCodeType | string,
    message: string,
    details?: unknown,
    meta?: Pick<ResponseMeta, 'requestId' | 'timestamp'>,
  ): ErrorResponse {
    const response: ErrorResponse = {
      success: false,
      error: {
        code,
        message,
      },
    };
    // `details` may legitimately be null/0/'' — only skip when truly absent.
    if (details !== undefined) {
      response.error.details = details;
    }
    if (meta) {
      response.meta = meta;
    }
    return response;
  }
  // ==========================================================================
  // PAGINATION HELPERS
  // ==========================================================================
  /**
   * Calculates pagination metadata from input parameters.
   *
   * @param input - Pagination input (page, limit, total)
   * @returns Calculated pagination metadata
   */
  protected calculatePagination(input: PaginationInput): PaginationMeta {
    const { page, limit, total } = input;
    // Guard against limit <= 0, which would make Math.ceil produce Infinity
    // (when total > 0) or NaN (when total === 0) for totalPages.
    const totalPages = limit > 0 ? Math.ceil(total / limit) : 0;
    return {
      page,
      limit,
      total,
      totalPages,
      hasNextPage: page < totalPages,
      hasPrevPage: page > 1,
    };
  }
  /**
   * Normalizes pagination parameters with defaults and bounds.
   *
   * Non-finite inputs (NaN/Infinity, e.g. from unparseable query strings)
   * are treated as "not provided" and fall back to the defaults.
   *
   * @param page - Requested page number (defaults to 1)
   * @param limit - Requested page size (defaults to 20, max 100)
   * @returns Normalized page and limit values
   *
   * @example
   * ```typescript
   * const { page: p, limit: l } = this.normalizePagination(page, limit);
   * // p >= 1 and 1 <= l <= 100 are now guaranteed
   * ```
   */
  protected normalizePagination(page?: number, limit?: number): { page: number; limit: number } {
    const DEFAULT_PAGE = 1;
    const DEFAULT_LIMIT = 20;
    const MAX_LIMIT = 100;
    // Number.isFinite rejects undefined, NaN, and +/-Infinity in one check.
    const rawPage = Number.isFinite(page) ? (page as number) : DEFAULT_PAGE;
    const rawLimit = Number.isFinite(limit) ? (limit as number) : DEFAULT_LIMIT;
    return {
      page: Math.max(DEFAULT_PAGE, Math.floor(rawPage)),
      limit: Math.min(MAX_LIMIT, Math.max(1, Math.floor(rawLimit))),
    };
  }
  // ==========================================================================
  // HTTP STATUS CODE HELPERS
  // ==========================================================================
  /**
   * Sets HTTP 201 Created status and returns success response.
   * Use for POST endpoints that create new resources.
   *
   * @param data - The created resource
   * @param meta - Optional metadata
   * @returns SuccessResponse with 201 status
   *
   * @example
   * ```typescript
   * const user = await userService.createUser(body);
   * return this.created(user);
   * ```
   */
  protected created<T>(data: T, meta?: Omit<ResponseMeta, 'pagination'>): SuccessResponse<T> {
    this.setStatus(201);
    return this.success(data, meta);
  }
  /**
   * Sets HTTP 204 No Content status.
   * Use for DELETE endpoints or operations that succeed without returning data.
   *
   * Note: tsoa requires a return type, so this returns undefined.
   * The actual HTTP response will have no body.
   *
   * @example
   * ```typescript
   * await userService.deleteUser(id);
   * return this.noContent();
   * ```
   */
  protected noContent(): void {
    this.setStatus(204);
  }
  // ==========================================================================
  // ERROR CODE CONSTANTS
  // ==========================================================================
  /**
   * Standard error codes for use in error responses.
   * Exposed as a protected property for use in derived controllers.
   */
  protected readonly ErrorCode = ControllerErrorCode;
}
// ============================================================================
// CONTROLLER ERROR CLASSES
// ============================================================================
// Error classes that can be thrown from controllers and will be handled
// by the global error handler to produce appropriate HTTP responses.
// These re-export the repository errors for convenience.
// ============================================================================
export {
NotFoundError,
ValidationError,
ForbiddenError,
UniqueConstraintError,
RepositoryError,
} from '../services/db/errors.db';
// ============================================================================
// RE-EXPORTS
// ============================================================================
// Re-export types for convenient imports in controller files.
// ============================================================================
export { ControllerErrorCode } from './types';
export type {
SuccessResponse,
ErrorResponse,
PaginatedResponse,
PaginationInput,
PaginationMeta,
PaginationParams,
ResponseMeta,
RequestContext,
AuthenticatedUser,
MessageResponse,
HealthResponse,
DetailedHealthResponse,
ServiceHealth,
ValidationIssue,
ValidationErrorResponse,
ControllerErrorCodeType,
ApiResponse,
} from './types';

View File

@@ -0,0 +1,467 @@
// src/controllers/budget.controller.test.ts
// ============================================================================
// BUDGET CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the BudgetController class. These tests verify controller
// logic in isolation by mocking the budget repository.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class
// Mock tsoa so the controller module can load without the tsoa runtime:
// the Controller base class is replaced with a minimal stand-in and every
// decorator factory becomes a no-op.
vi.mock('tsoa', () => ({
  // Records the status set by setStatus() but performs no HTTP work.
  Controller: class Controller {
    protected setStatus(status: number): void {
      this._status = status;
    }
    private _status = 200;
  },
  Get: () => () => {},
  Post: () => () => {},
  Put: () => () => {},
  Delete: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Security: () => () => {},
  Path: () => () => {},
  Query: () => () => {},
  Body: () => () => {},
  Request: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock budget repository so no database access occurs during tests.
vi.mock('../services/db/index.db', () => ({
  budgetRepo: {
    getBudgetsForUser: vi.fn(),
    createBudget: vi.fn(),
    updateBudget: vi.fn(),
    deleteBudget: vi.fn(),
    getSpendingByCategory: vi.fn(),
  },
}));
// Import mocked modules after mock definitions (vi.mock calls are hoisted,
// so these imports receive the mocked implementations).
import { budgetRepo } from '../services/db/index.db';
import { BudgetController } from './budget.controller';
// Cast mocked modules for type-safe access to mock helpers (mockResolvedValue etc.)
const mockedBudgetRepo = budgetRepo as Mocked<typeof budgetRepo>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a mock Express request carrying an authenticated user profile and
 * a stubbed pino-style logger. Pass `overrides` to replace any field.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLogger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const base = {
    body: {},
    params: {},
    query: {},
    user: createMockUserProfile(),
    log: stubLogger,
  };
  return { ...base, ...overrides } as unknown as ExpressRequest;
}
/**
 * Builds the user-profile fixture that controllers read the authenticated
 * user id from (`user.user.user_id`).
 */
function createMockUserProfile() {
  const account = {
    user_id: 'test-user-id',
    email: 'test@example.com',
  };
  return {
    full_name: 'Test User',
    role: 'user' as const,
    user: account,
  };
}
/**
 * Builds a budget record fixture. Fields in `overrides` replace the
 * corresponding defaults.
 */
function createMockBudget(overrides: Record<string, unknown> = {}) {
  const defaults = {
    budget_id: 1,
    user_id: 'test-user-id',
    name: 'Monthly Groceries',
    amount_cents: 50000,
    period: 'monthly' as const,
    start_date: '2024-01-01',
    created_at: '2024-01-01T00:00:00.000Z',
    updated_at: '2024-01-01T00:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
/**
 * Builds a per-category spending fixture. Fields in `overrides` replace the
 * corresponding defaults.
 */
function createMockSpendingByCategory(overrides: Record<string, unknown> = {}) {
  const defaults = {
    category_id: 1,
    category_name: 'Dairy & Eggs',
    total_cents: 2500,
    item_count: 5,
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('BudgetController', () => {
  let controller: BudgetController;
  beforeEach(() => {
    // Reset mock call history/return values and build a fresh controller
    // so tests cannot leak state into one another.
    vi.clearAllMocks();
    controller = new BudgetController();
  });
  afterEach(() => {
    // Restore real timers in case a test switched to fake timers.
    vi.useRealTimers();
  });
  // ==========================================================================
  // LIST BUDGETS
  // ==========================================================================
  describe('getBudgets()', () => {
    it('should return all budgets for the user', async () => {
      // Arrange
      const mockBudgets = [
        createMockBudget(),
        createMockBudget({ budget_id: 2, name: 'Weekly Snacks', period: 'weekly' }),
      ];
      const request = createMockRequest();
      mockedBudgetRepo.getBudgetsForUser.mockResolvedValue(mockBudgets);
      // Act
      const result = await controller.getBudgets(request);
      // Assert
      expect(result.success).toBe(true);
      // The `if` narrows the response union so `.data` is typed below.
      if (result.success) {
        expect(result.data).toHaveLength(2);
        expect(result.data[0].name).toBe('Monthly Groceries');
      }
      expect(mockedBudgetRepo.getBudgetsForUser).toHaveBeenCalledWith(
        'test-user-id',
        expect.anything(),
      );
    });
    it('should return empty array when user has no budgets', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBudgetRepo.getBudgetsForUser.mockResolvedValue([]);
      // Act
      const result = await controller.getBudgets(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
  });
  // ==========================================================================
  // CREATE BUDGET
  // ==========================================================================
  describe('createBudget()', () => {
    it('should create a new budget', async () => {
      // Arrange
      const mockBudget = createMockBudget();
      const request = createMockRequest();
      mockedBudgetRepo.createBudget.mockResolvedValue(mockBudget);
      // Act
      const result = await controller.createBudget(request, {
        name: 'Monthly Groceries',
        amount_cents: 50000,
        period: 'monthly',
        start_date: '2024-01-01',
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.name).toBe('Monthly Groceries');
        expect(result.data.amount_cents).toBe(50000);
      }
      // The repo must receive the authenticated user's id, not one from the body.
      expect(mockedBudgetRepo.createBudget).toHaveBeenCalledWith(
        'test-user-id',
        expect.objectContaining({
          name: 'Monthly Groceries',
          amount_cents: 50000,
          period: 'monthly',
        }),
        expect.anything(),
      );
    });
    it('should create a weekly budget', async () => {
      // Arrange
      const mockBudget = createMockBudget({ period: 'weekly', amount_cents: 10000 });
      const request = createMockRequest();
      mockedBudgetRepo.createBudget.mockResolvedValue(mockBudget);
      // Act
      const result = await controller.createBudget(request, {
        name: 'Weekly Snacks',
        amount_cents: 10000,
        period: 'weekly',
        start_date: '2024-01-01',
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.period).toBe('weekly');
      }
    });
  });
  // ==========================================================================
  // UPDATE BUDGET
  // ==========================================================================
  describe('updateBudget()', () => {
    it('should update an existing budget', async () => {
      // Arrange
      const mockUpdatedBudget = createMockBudget({ amount_cents: 60000 });
      const request = createMockRequest();
      mockedBudgetRepo.updateBudget.mockResolvedValue(mockUpdatedBudget);
      // Act
      const result = await controller.updateBudget(1, request, {
        amount_cents: 60000,
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.amount_cents).toBe(60000);
      }
      expect(mockedBudgetRepo.updateBudget).toHaveBeenCalledWith(
        1,
        'test-user-id',
        expect.objectContaining({ amount_cents: 60000 }),
        expect.anything(),
      );
    });
    it('should update budget name', async () => {
      // Arrange
      const mockUpdatedBudget = createMockBudget({ name: 'Updated Budget Name' });
      const request = createMockRequest();
      mockedBudgetRepo.updateBudget.mockResolvedValue(mockUpdatedBudget);
      // Act
      const result = await controller.updateBudget(1, request, {
        name: 'Updated Budget Name',
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.name).toBe('Updated Budget Name');
      }
    });
    it('should reject update with no fields provided', async () => {
      // Arrange
      const request = createMockRequest();
      // Act & Assert — an empty patch body is a validation error.
      await expect(controller.updateBudget(1, request, {})).rejects.toThrow(
        'At least one field to update must be provided.',
      );
    });
    it('should update multiple fields at once', async () => {
      // Arrange
      const mockUpdatedBudget = createMockBudget({
        name: 'New Name',
        amount_cents: 75000,
        period: 'weekly',
      });
      const request = createMockRequest();
      mockedBudgetRepo.updateBudget.mockResolvedValue(mockUpdatedBudget);
      // Act
      const result = await controller.updateBudget(1, request, {
        name: 'New Name',
        amount_cents: 75000,
        period: 'weekly',
      });
      // Assert
      expect(result.success).toBe(true);
      expect(mockedBudgetRepo.updateBudget).toHaveBeenCalledWith(
        1,
        'test-user-id',
        expect.objectContaining({
          name: 'New Name',
          amount_cents: 75000,
          period: 'weekly',
        }),
        expect.anything(),
      );
    });
  });
  // ==========================================================================
  // DELETE BUDGET
  // ==========================================================================
  describe('deleteBudget()', () => {
    it('should delete a budget', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBudgetRepo.deleteBudget.mockResolvedValue(undefined);
      // Act
      const result = await controller.deleteBudget(1, request);
      // Assert — 204 No Content has no body, so the method returns undefined.
      expect(result).toBeUndefined();
      expect(mockedBudgetRepo.deleteBudget).toHaveBeenCalledWith(
        1,
        'test-user-id',
        expect.anything(),
      );
    });
  });
  // ==========================================================================
  // SPENDING ANALYSIS
  // ==========================================================================
  describe('getSpendingAnalysis()', () => {
    it('should return spending breakdown by category', async () => {
      // Arrange
      const mockSpendingData = [
        createMockSpendingByCategory(),
        createMockSpendingByCategory({
          category_id: 2,
          category_name: 'Produce',
          total_cents: 3500,
        }),
      ];
      const request = createMockRequest();
      mockedBudgetRepo.getSpendingByCategory.mockResolvedValue(mockSpendingData);
      // Act
      const result = await controller.getSpendingAnalysis('2024-01-01', '2024-01-31', request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
        expect(result.data[0].category_name).toBe('Dairy & Eggs');
      }
      // Date range is forwarded verbatim to the repository.
      expect(mockedBudgetRepo.getSpendingByCategory).toHaveBeenCalledWith(
        'test-user-id',
        '2024-01-01',
        '2024-01-31',
        expect.anything(),
      );
    });
    it('should return empty array when no spending data exists', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBudgetRepo.getSpendingByCategory.mockResolvedValue([]);
      // Act
      const result = await controller.getSpendingAnalysis('2024-01-01', '2024-01-31', request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBudgetRepo.getBudgetsForUser.mockResolvedValue([]);
      // Act
      const result = await controller.getBudgets(request);
      // Assert — ADR-028 envelope shape: { success, data }.
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
    it('should use created helper for 201 responses', async () => {
      // Arrange
      const mockBudget = createMockBudget();
      const request = createMockRequest();
      mockedBudgetRepo.createBudget.mockResolvedValue(mockBudget);
      // Act
      const result = await controller.createBudget(request, {
        name: 'Test',
        amount_cents: 1000,
        period: 'weekly',
        start_date: '2024-01-01',
      });
      // Assert
      expect(result.success).toBe(true);
    });
    it('should use noContent helper for 204 responses', async () => {
      // Arrange
      const request = createMockRequest();
      mockedBudgetRepo.deleteBudget.mockResolvedValue(undefined);
      // Act
      const result = await controller.deleteBudget(1, request);
      // Assert
      expect(result).toBeUndefined();
    });
  });
});

View File

@@ -0,0 +1,233 @@
// src/controllers/budget.controller.ts
// ============================================================================
// BUDGET CONTROLLER
// ============================================================================
// Provides endpoints for managing user budgets, including CRUD operations
// and spending analysis. All endpoints require authentication.
//
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import {
Get,
Post,
Put,
Delete,
Route,
Tags,
Security,
Body,
Path,
Query,
Request,
SuccessResponse,
Response,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import { budgetRepo } from '../services/db/index.db';
import type { Budget, SpendingByCategory, UserProfile } from '../types';
// ============================================================================
// REQUEST/RESPONSE TYPES
// ============================================================================
/**
 * Request body for creating a new budget.
 *
 * NOTE(review): "must be positive" on amount_cents is documented here but
 * not visibly enforced in this controller — presumably validated by tsoa
 * or the repository layer; confirm.
 */
interface CreateBudgetRequest {
  /** Budget name */
  name: string;
  /** Budget amount in cents (must be positive) */
  amount_cents: number;
  /** Budget period - weekly or monthly */
  period: 'weekly' | 'monthly';
  /** Budget start date in YYYY-MM-DD format */
  start_date: string;
}
/**
 * Request body for updating a budget.
 * All fields are optional, but at least one must be provided —
 * BudgetController.updateBudget rejects an empty body with a 400.
 */
interface UpdateBudgetRequest {
  /** Budget name */
  name?: string;
  /** Budget amount in cents (must be positive) */
  amount_cents?: number;
  /** Budget period - weekly or monthly */
  period?: 'weekly' | 'monthly';
  /** Budget start date in YYYY-MM-DD format */
  start_date?: string;
}
// ============================================================================
// BUDGET CONTROLLER
// ============================================================================
/**
 * Controller for managing user budgets.
 *
 * All endpoints require JWT authentication. The owning user is always
 * taken from the verified token payload (`request.user`), never from the
 * request body, so a caller can only read or modify their own budgets.
 */
@Route('budgets')
@Tags('Budgets')
@Security('bearerAuth')
export class BudgetController extends BaseController {
  // ==========================================================================
  // LIST BUDGETS
  // ==========================================================================
  /**
   * Get all budgets for the authenticated user.
   *
   * Returns every budget owned by the caller, ordered by start date
   * descending (newest first).
   *
   * @summary Get all budgets
   * @param request Express request with authenticated user
   * @returns List of user budgets
   */
  @Get()
  @SuccessResponse(200, 'List of user budgets')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  public async getBudgets(
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<Budget[]>> {
    const { user } = request.user as UserProfile;
    return this.success(await budgetRepo.getBudgetsForUser(user.user_id, request.log));
  }
  // ==========================================================================
  // CREATE BUDGET
  // ==========================================================================
  /**
   * Create a new budget for the authenticated user.
   *
   * The budget is attached to the caller's own user ID taken from the JWT;
   * the body cannot create budgets on behalf of other users.
   *
   * @summary Create budget
   * @param request Express request with authenticated user
   * @param body Budget creation data
   * @returns The newly created budget
   */
  @Post()
  @SuccessResponse(201, 'Budget created')
  @Response<ErrorResponse>(400, 'Validation error')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  public async createBudget(
    @Request() request: ExpressRequest,
    @Body() body: CreateBudgetRequest,
  ): Promise<SuccessResponseType<Budget>> {
    const { user } = request.user as UserProfile;
    const budget = await budgetRepo.createBudget(user.user_id, body, request.log);
    // 201 envelope via BaseController.created (ADR-028).
    return this.created(budget);
  }
  // ==========================================================================
  // UPDATE BUDGET
  // ==========================================================================
  /**
   * Update an existing budget.
   *
   * Applies a partial update; at least one field must be present in the
   * body. Ownership is enforced by passing the caller's user ID to the
   * repository alongside the budget ID.
   *
   * @summary Update budget
   * @param id Budget ID
   * @param request Express request with authenticated user
   * @param body Fields to update
   * @returns The updated budget
   */
  @Put('{id}')
  @SuccessResponse(200, 'Budget updated')
  @Response<ErrorResponse>(400, 'Validation error - at least one field required')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  @Response<ErrorResponse>(404, 'Budget not found')
  public async updateBudget(
    @Path() id: number,
    @Request() request: ExpressRequest,
    @Body() body: UpdateBudgetRequest,
  ): Promise<SuccessResponseType<Budget>> {
    const { user } = request.user as UserProfile;
    // An empty patch body is a client error, not a no-op.
    if (!Object.keys(body).length) {
      this.setStatus(400);
      throw new Error('At least one field to update must be provided.');
    }
    const updated = await budgetRepo.updateBudget(id, user.user_id, body, request.log);
    return this.success(updated);
  }
  // ==========================================================================
  // DELETE BUDGET
  // ==========================================================================
  /**
   * Delete a budget.
   *
   * Permanently removes the budget; ownership is enforced by the
   * repository via the caller's user ID.
   *
   * @summary Delete budget
   * @param id Budget ID
   * @param request Express request with authenticated user
   */
  @Delete('{id}')
  @SuccessResponse(204, 'Budget deleted')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  @Response<ErrorResponse>(404, 'Budget not found')
  public async deleteBudget(@Path() id: number, @Request() request: ExpressRequest): Promise<void> {
    const { user } = request.user as UserProfile;
    await budgetRepo.deleteBudget(id, user.user_id, request.log);
    return this.noContent();
  }
  // ==========================================================================
  // SPENDING ANALYSIS
  // ==========================================================================
  /**
   * Get spending analysis by category.
   *
   * Returns per-category spending totals for the caller over the given
   * date range, to help compare spending patterns against budgets.
   *
   * @summary Get spending analysis
   * @param startDate Start date in YYYY-MM-DD format
   * @param endDate End date in YYYY-MM-DD format
   * @param request Express request with authenticated user
   * @returns Spending breakdown by category
   */
  @Get('spending-analysis')
  @SuccessResponse(200, 'Spending breakdown by category')
  @Response<ErrorResponse>(400, 'Invalid date format')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  public async getSpendingAnalysis(
    @Query() startDate: string,
    @Query() endDate: string,
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<SpendingByCategory[]>> {
    const { user } = request.user as UserProfile;
    const breakdown = await budgetRepo.getSpendingByCategory(
      user.user_id,
      startDate,
      endDate,
      request.log,
    );
    return this.success(breakdown);
  }
}

View File

@@ -0,0 +1,333 @@
// src/controllers/category.controller.test.ts
// ============================================================================
// CATEGORY CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the CategoryController class. These tests verify controller
// logic in isolation by mocking the category database service.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Vitest hoists vi.mock() calls above the imports below, so by the time the
// controller module loads it receives these fakes instead of the real deps.
// Mock tsoa decorators and Controller class: each decorator becomes a no-op
// factory, and the base Controller stub only records the last status code.
vi.mock('tsoa', () => ({
  Controller: class Controller {
    protected setStatus(status: number): void {
      this._status = status;
    }
    // Last status set by the controller under test (default 200).
    private _status = 200;
  },
  Get: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Path: () => () => {},
  Query: () => () => {},
  Request: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock category database service with a spy per query method the
// controller calls; tests configure resolved values case by case.
vi.mock('../services/db/category.db', () => ({
  CategoryDbService: {
    getAllCategories: vi.fn(),
    getCategoryByName: vi.fn(),
    getCategoryById: vi.fn(),
  },
}));
// Import mocked modules after mock definitions
import { CategoryDbService } from '../services/db/category.db';
import { CategoryController } from './category.controller';
// Cast mocked modules for type-safe access
const mockedCategoryDbService = CategoryDbService as Mocked<typeof CategoryDbService>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a bare-bones Express request double. Only the members the
 * controller touches are stubbed: empty body/params/query plus a
 * pino-style logger whose methods are vi spies. Callers may override
 * or extend any field.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLog = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  return {
    body: {},
    params: {},
    query: {},
    log: stubLog,
    ...overrides,
  } as unknown as ExpressRequest;
}
/**
 * Builds a category fixture with sensible defaults ("Dairy & Eggs").
 * Individual fields can be replaced per test via `overrides`.
 */
function createMockCategory(overrides: Record<string, unknown> = {}) {
  const defaults = {
    category_id: 1,
    name: 'Dairy & Eggs',
    description: 'Milk, cheese, eggs, and dairy products',
    icon: 'dairy',
    created_at: '2024-01-01T00:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('CategoryController', () => {
  let controller: CategoryController;
  beforeEach(() => {
    // Fresh controller and pristine spies per test so call counts and
    // resolved values never leak between cases.
    vi.clearAllMocks();
    controller = new CategoryController();
  });
  afterEach(() => {
    // Defensive cleanup; no fake timers are installed in this suite.
    // NOTE(review): likely copied from a sibling suite — confirm it is needed.
    vi.useRealTimers();
  });
  // ==========================================================================
  // LIST CATEGORIES
  // ==========================================================================
  describe('getAllCategories()', () => {
    it('should return all categories', async () => {
      // Arrange
      const mockCategories = [
        createMockCategory(),
        createMockCategory({ category_id: 2, name: 'Produce' }),
        createMockCategory({ category_id: 3, name: 'Meat & Seafood' }),
      ];
      const request = createMockRequest();
      mockedCategoryDbService.getAllCategories.mockResolvedValue(mockCategories);
      // Act
      const result = await controller.getAllCategories(request);
      // Assert: `if (result.success)` narrows the envelope union for TS
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(3);
        expect(result.data[0].name).toBe('Dairy & Eggs');
      }
      expect(mockedCategoryDbService.getAllCategories).toHaveBeenCalledWith(expect.anything());
    });
    it('should return empty array when no categories exist', async () => {
      // Arrange
      const request = createMockRequest();
      mockedCategoryDbService.getAllCategories.mockResolvedValue([]);
      // Act
      const result = await controller.getAllCategories(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
  });
  // ==========================================================================
  // LOOKUP BY NAME
  // ==========================================================================
  describe('getCategoryByName()', () => {
    it('should return category when found by name', async () => {
      // Arrange
      const mockCategory = createMockCategory();
      const request = createMockRequest();
      mockedCategoryDbService.getCategoryByName.mockResolvedValue(mockCategory);
      // Act
      const result = await controller.getCategoryByName('Dairy & Eggs', request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.name).toBe('Dairy & Eggs');
        expect(result.data.category_id).toBe(1);
      }
      expect(mockedCategoryDbService.getCategoryByName).toHaveBeenCalledWith(
        'Dairy & Eggs',
        expect.anything(),
      );
    });
    it('should throw NotFoundError when category not found', async () => {
      // Arrange: a null from the service means "no such category"
      const request = createMockRequest();
      mockedCategoryDbService.getCategoryByName.mockResolvedValue(null);
      // Act & Assert
      await expect(controller.getCategoryByName('Nonexistent Category', request)).rejects.toThrow(
        "Category 'Nonexistent Category' not found",
      );
    });
    // Blank names short-circuit inside the controller and come back as an
    // error envelope (success: false) rather than a thrown error.
    it('should return error when name is empty', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getCategoryByName('', request);
      // Assert
      expect(result.success).toBe(false);
    });
    it('should return error when name is whitespace only', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getCategoryByName('   ', request);
      // Assert
      expect(result.success).toBe(false);
    });
  });
  // ==========================================================================
  // GET BY ID
  // ==========================================================================
  describe('getCategoryById()', () => {
    it('should return category when found by ID', async () => {
      // Arrange
      const mockCategory = createMockCategory();
      const request = createMockRequest();
      mockedCategoryDbService.getCategoryById.mockResolvedValue(mockCategory);
      // Act
      const result = await controller.getCategoryById(1, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.category_id).toBe(1);
        expect(result.data.name).toBe('Dairy & Eggs');
      }
      expect(mockedCategoryDbService.getCategoryById).toHaveBeenCalledWith(1, expect.anything());
    });
    it('should throw NotFoundError when category not found', async () => {
      // Arrange
      const request = createMockRequest();
      mockedCategoryDbService.getCategoryById.mockResolvedValue(null);
      // Act & Assert
      await expect(controller.getCategoryById(999, request)).rejects.toThrow(
        'Category with ID 999 not found',
      );
    });
    // Invalid IDs (zero, negative, NaN) are rejected by the controller's own
    // guard and surface as an error envelope, not an exception.
    it('should return error for invalid ID (zero)', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getCategoryById(0, request);
      // Assert
      expect(result.success).toBe(false);
    });
    it('should return error for invalid ID (negative)', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getCategoryById(-1, request);
      // Assert
      expect(result.success).toBe(false);
    });
    it('should return error for invalid ID (NaN)', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getCategoryById(NaN, request);
      // Assert
      expect(result.success).toBe(false);
    });
  });
  // ==========================================================================
  // PUBLIC ACCESS (NO AUTH REQUIRED)
  // ==========================================================================
  describe('Public access', () => {
    it('should work without user authentication', async () => {
      // Arrange: explicitly no `user` on the request — endpoints are public
      const mockCategories = [createMockCategory()];
      const request = createMockRequest({ user: undefined });
      mockedCategoryDbService.getAllCategories.mockResolvedValue(mockCategories);
      // Act
      const result = await controller.getAllCategories(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(1);
      }
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  // Verifies responses go through the BaseController envelope helpers
  // (ADR-028 response format).
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const mockCategories = [createMockCategory()];
      const request = createMockRequest();
      mockedCategoryDbService.getAllCategories.mockResolvedValue(mockCategories);
      // Act
      const result = await controller.getAllCategories(request);
      // Assert
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
    it('should use error helper for validation errors', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getCategoryByName('', request);
      // Assert
      expect(result).toHaveProperty('success', false);
    });
  });
});

View File

@@ -0,0 +1,137 @@
// src/controllers/category.controller.ts
// ============================================================================
// CATEGORY CONTROLLER
// ============================================================================
// Provides endpoints for retrieving grocery categories. Categories are
// predefined (e.g., "Dairy & Eggs", "Fruits & Vegetables") and are used
// to organize items throughout the application.
//
// All endpoints are public (no authentication required).
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import { Get, Route, Tags, Path, Query, Request, SuccessResponse, Response } from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController, NotFoundError } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import { CategoryDbService, type Category } from '../services/db/category.db';
// ============================================================================
// CATEGORY CONTROLLER
// ============================================================================
/**
 * Controller for retrieving grocery categories.
 *
 * Categories are system-defined and read-only; there are no mutation
 * endpoints, and nothing here requires authentication.
 */
@Route('categories')
@Tags('Categories')
export class CategoryController extends BaseController {
  // ==========================================================================
  // LIST CATEGORIES
  // ==========================================================================
  /**
   * List all available grocery categories.
   *
   * Returns every predefined category, ordered alphabetically by name —
   * suitable for populating category dropdowns in the UI.
   *
   * @summary List all available grocery categories
   * @param request Express request for logging
   * @returns List of categories ordered alphabetically by name
   */
  @Get()
  @SuccessResponse(200, 'List of categories ordered alphabetically by name')
  @Response<ErrorResponse>(500, 'Server error')
  public async getAllCategories(
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<Category[]>> {
    return this.success(await CategoryDbService.getAllCategories(request.log));
  }
  // ==========================================================================
  // LOOKUP BY NAME
  // ==========================================================================
  /**
   * Lookup category by name.
   *
   * Case-insensitive name lookup, offered as a migration aid for clients
   * moving from category names to category IDs.
   *
   * @summary Lookup category by name
   * @param name The category name to search for (case-insensitive)
   * @param request Express request for logging
   * @returns Category found
   */
  @Get('lookup')
  @SuccessResponse(200, 'Category found')
  @Response<ErrorResponse>(400, 'Missing or invalid query parameter')
  @Response<ErrorResponse>(404, 'Category not found')
  public async getCategoryByName(
    @Query() name: string,
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<Category>> {
    // Reject missing/blank names up front with a 400 error envelope.
    if (!name?.trim()) {
      this.setStatus(400);
      return this.error(
        this.ErrorCode.BAD_REQUEST,
        'Query parameter "name" is required and must be a non-empty string',
      ) as unknown as SuccessResponseType<Category>;
    }
    const match = await CategoryDbService.getCategoryByName(name, request.log);
    if (!match) {
      throw new NotFoundError(`Category '${name}' not found`);
    }
    return this.success(match);
  }
  // ==========================================================================
  // GET BY ID
  // ==========================================================================
  /**
   * Get a specific category by ID.
   *
   * Retrieves detailed information about a single category.
   *
   * @summary Get a specific category by ID
   * @param id The category ID
   * @param request Express request for logging
   * @returns Category details
   */
  @Get('{id}')
  @SuccessResponse(200, 'Category details')
  @Response<ErrorResponse>(400, 'Invalid category ID')
  @Response<ErrorResponse>(404, 'Category not found')
  public async getCategoryById(
    @Path() id: number,
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<Category>> {
    // IDs must be positive; NaN (e.g. an unparseable path segment) or a
    // non-positive value gets a 400 error envelope.
    if (Number.isNaN(id) || id <= 0) {
      this.setStatus(400);
      return this.error(
        this.ErrorCode.BAD_REQUEST,
        'Invalid category ID. Must be a positive integer.',
      ) as unknown as SuccessResponseType<Category>;
    }
    const match = await CategoryDbService.getCategoryById(id, request.log);
    if (!match) {
      throw new NotFoundError(`Category with ID ${id} not found`);
    }
    return this.success(match);
  }
}

View File

@@ -0,0 +1,254 @@
// src/controllers/deals.controller.test.ts
// ============================================================================
// DEALS CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the DealsController class. These tests verify controller
// logic in isolation by mocking the deals repository.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Vitest hoists vi.mock() calls above the imports below, so the controller
// module sees these fakes instead of the real dependencies.
// Mock tsoa decorators and Controller class: decorators become no-op
// factories; the base Controller stub just records the last status code.
vi.mock('tsoa', () => ({
  Controller: class Controller {
    protected setStatus(status: number): void {
      this._status = status;
    }
    // Last status set by the controller under test (default 200).
    private _status = 200;
  },
  Get: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Security: () => () => {},
  Request: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock deals repository — the single query method the controller uses.
vi.mock('../services/db/deals.db', () => ({
  dealsRepo: {
    findBestPricesForWatchedItems: vi.fn(),
  },
}));
// Import mocked modules after mock definitions
import { dealsRepo } from '../services/db/deals.db';
import { DealsController } from './deals.controller';
// Cast mocked modules for type-safe access
const mockedDealsRepo = dealsRepo as Mocked<typeof dealsRepo>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds an Express request double pre-populated with an authenticated
 * user profile and a pino-style spy logger. Any field (including `user`)
 * can be overridden per test.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLog = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  return {
    body: {},
    params: {},
    query: {},
    user: createMockUserProfile(),
    log: stubLog,
    ...overrides,
  } as unknown as ExpressRequest;
}
/**
 * Builds the user-profile shape the controller reads from `request.user`:
 * a display name, a role, and the nested `user` record with id/email.
 */
function createMockUserProfile() {
  const account = {
    user_id: 'test-user-id',
    email: 'test@example.com',
  };
  return {
    full_name: 'Test User',
    role: 'user' as const,
    user: account,
  };
}
/**
 * Builds a watched-item deal fixture (a discounted "Milk 2%" at
 * Superstore by default). Override any field via `overrides`.
 */
function createMockWatchedItemDeal(overrides: Record<string, unknown> = {}) {
  const defaults = {
    watched_item_id: 1,
    master_item_id: 100,
    item_name: 'Milk 2%',
    current_price_cents: 350,
    regular_price_cents: 450,
    discount_percent: 22.2,
    store_name: 'Superstore',
    store_location_id: 5,
    flyer_id: 10,
    flyer_valid_from: '2024-01-15',
    flyer_valid_to: '2024-01-21',
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('DealsController', () => {
  let controller: DealsController;
  beforeEach(() => {
    // Fresh controller and pristine spies per test for isolation.
    vi.clearAllMocks();
    controller = new DealsController();
  });
  afterEach(() => {
    // Defensive cleanup; no fake timers are installed in this suite.
    // NOTE(review): likely copied from a sibling suite — confirm it is needed.
    vi.useRealTimers();
  });
  // ==========================================================================
  // BEST WATCHED PRICES
  // ==========================================================================
  describe('getBestWatchedPrices()', () => {
    it('should return best prices for watched items', async () => {
      // Arrange
      const mockDeals = [
        createMockWatchedItemDeal(),
        createMockWatchedItemDeal({
          watched_item_id: 2,
          item_name: 'Bread',
          current_price_cents: 250,
        }),
      ];
      const request = createMockRequest();
      mockedDealsRepo.findBestPricesForWatchedItems.mockResolvedValue(mockDeals);
      // Act
      const result = await controller.getBestWatchedPrices(request);
      // Assert: `if (result.success)` narrows the envelope union for TS
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
        expect(result.data[0].item_name).toBe('Milk 2%');
        expect(result.data[0].current_price_cents).toBe(350);
      }
      expect(mockedDealsRepo.findBestPricesForWatchedItems).toHaveBeenCalledWith(
        'test-user-id',
        expect.anything(),
      );
    });
    it('should return empty array when user has no watched items', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDealsRepo.findBestPricesForWatchedItems.mockResolvedValue([]);
      // Act
      const result = await controller.getBestWatchedPrices(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
    it('should return empty array when no active deals exist', async () => {
      // Arrange: the repo collapses both "no watches" and "no active deals"
      // into an empty result, so the controller sees the same shape.
      const request = createMockRequest();
      mockedDealsRepo.findBestPricesForWatchedItems.mockResolvedValue([]);
      // Act
      const result = await controller.getBestWatchedPrices(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
    it('should log successful fetch', async () => {
      // Arrange: inject a dedicated logger so its spies can be asserted on
      const mockDeals = [createMockWatchedItemDeal()];
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      mockedDealsRepo.findBestPricesForWatchedItems.mockResolvedValue(mockDeals);
      // Act
      await controller.getBestWatchedPrices(request);
      // Assert: pino-style call — structured fields first, message second
      expect(mockLog.info).toHaveBeenCalledWith(
        { dealCount: 1 },
        'Successfully fetched best watched item deals.',
      );
    });
    it('should use user ID from authenticated profile', async () => {
      // Arrange
      const customProfile = {
        full_name: 'Custom User',
        role: 'user' as const,
        user: {
          user_id: 'custom-user-id',
          email: 'custom@example.com',
        },
      };
      const request = createMockRequest({ user: customProfile });
      mockedDealsRepo.findBestPricesForWatchedItems.mockResolvedValue([]);
      // Act
      await controller.getBestWatchedPrices(request);
      // Assert
      expect(mockedDealsRepo.findBestPricesForWatchedItems).toHaveBeenCalledWith(
        'custom-user-id',
        expect.anything(),
      );
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  // Verifies responses go through the BaseController envelope helpers
  // (ADR-028 response format).
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDealsRepo.findBestPricesForWatchedItems.mockResolvedValue([]);
      // Act
      const result = await controller.getBestWatchedPrices(request);
      // Assert
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
  });
});

View File

@@ -0,0 +1,62 @@
// src/controllers/deals.controller.ts
// ============================================================================
// DEALS CONTROLLER
// ============================================================================
// Provides endpoints for retrieving deal information, specifically the
// best prices for items that the user is watching.
//
// All endpoints require authentication.
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import { Get, Route, Tags, Security, Request, SuccessResponse, Response } from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import { dealsRepo } from '../services/db/deals.db';
import type { WatchedItemDeal, UserProfile } from '../types';
// ============================================================================
// DEALS CONTROLLER
// ============================================================================
/**
 * Controller for retrieving deal information.
 *
 * Every endpoint is JWT-protected; the user ID is read from the verified
 * token payload so queries are always scoped to the caller's own watches.
 */
@Route('deals')
@Tags('Deals')
@Security('bearerAuth')
export class DealsController extends BaseController {
  // ==========================================================================
  // BEST WATCHED PRICES
  // ==========================================================================
  /**
   * Get best prices for watched items.
   *
   * Fetches the best current sale price for each of the caller's watched
   * items, considering only currently active flyers (valid_to >= today).
   * On a price tie, the longest-running deal wins.
   *
   * @summary Get best prices for watched items
   * @param request Express request with authenticated user
   * @returns List of best prices for watched items
   */
  @Get('best-watched-prices')
  @SuccessResponse(200, 'List of best prices for watched items')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  public async getBestWatchedPrices(
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<WatchedItemDeal[]>> {
    const { user } = request.user as UserProfile;
    const bestDeals = await dealsRepo.findBestPricesForWatchedItems(user.user_id, request.log);
    request.log.info(
      { dealCount: bestDeals.length },
      'Successfully fetched best watched item deals.',
    );
    return this.success(bestDeals);
  }
}

View File

@@ -0,0 +1,590 @@
// src/controllers/flyer.controller.test.ts
// ============================================================================
// FLYER CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the FlyerController class. These tests verify controller
// logic in isolation by mocking external dependencies like database repositories.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Vitest hoists vi.mock() calls above the imports below, so the controller
// module sees these fakes instead of the real dependencies.
// Mock tsoa decorators and Controller class: decorators become no-op
// factories; the base Controller stub just records the last status code.
vi.mock('tsoa', () => ({
  Controller: class Controller {
    protected setStatus(status: number): void {
      this._status = status;
    }
    // Last status set by the controller under test (default 200).
    private _status = 200;
  },
  Get: () => () => {},
  Post: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Path: () => () => {},
  Query: () => () => {},
  Body: () => () => {},
  Request: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock database repositories — one spy per flyerRepo method the
// controller calls; tests configure resolved values case by case.
vi.mock('../services/db/index.db', () => ({
  flyerRepo: {
    getFlyers: vi.fn(),
    getFlyerById: vi.fn(),
    getFlyerItems: vi.fn(),
    getFlyerItemsForFlyers: vi.fn(),
    countFlyerItemsForFlyers: vi.fn(),
    trackFlyerItemInteraction: vi.fn(),
  },
}));
// Import mocked modules after mock definitions
import * as db from '../services/db/index.db';
import { FlyerController } from './flyer.controller';
// Cast mocked modules for type-safe access
const mockedDb = db as Mocked<typeof db>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a bare-bones Express request double: empty body/params/query and
 * a pino-style logger whose methods are vi spies. Callers may override
 * or extend any field.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLog = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  return {
    body: {},
    params: {},
    query: {},
    log: stubLog,
    ...overrides,
  } as unknown as ExpressRequest;
}
/**
 * Builds a fully-populated flyer fixture: processed status, an embedded
 * store record, and no locations. Override any field via `overrides`.
 */
function createMockFlyer(overrides: Record<string, unknown> = {}) {
  const defaults = {
    flyer_id: 1,
    file_name: 'test-flyer.jpg',
    image_url: '/uploads/flyers/test-flyer.jpg',
    icon_url: '/uploads/flyers/icons/test-flyer.jpg',
    checksum: 'abc123',
    store_id: 1,
    valid_from: '2024-01-01',
    valid_to: '2024-01-07',
    status: 'processed' as const,
    item_count: 10,
    uploaded_by: 'user-123',
    store: {
      store_id: 1,
      name: 'Test Store',
      logo_url: '/uploads/logos/store.jpg',
      created_at: '2024-01-01T00:00:00.000Z',
      updated_at: '2024-01-01T00:00:00.000Z',
    },
    locations: [],
    created_at: '2024-01-01T00:00:00.000Z',
    updated_at: '2024-01-01T00:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
/**
 * Builds a flyer-item fixture (a single deal within a flyer).
 *
 * Unspecified fields keep their defaults; any field can be replaced by
 * passing it in `overrides` (spread last, so it wins).
 */
function createMockFlyerItem(overrides: Record<string, unknown> = {}) {
  const defaults = {
    flyer_item_id: 1,
    flyer_id: 1,
    item: 'Test Product',
    price_display: '$2.99',
    price_in_cents: 299,
    quantity: '1',
    quantity_num: 1,
    master_item_id: 100,
    master_item_name: 'Test Master Item',
    category_id: 5,
    category_name: 'Dairy',
    unit_price: { value: 299, unit: 'each' },
    product_id: null,
    view_count: 0,
    click_count: 0,
    created_at: '2024-01-01T00:00:00.000Z',
    updated_at: '2024-01-01T00:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('FlyerController', () => {
  let controller: FlyerController;
  // Reset mock call history and build a fresh controller before every test so
  // expectations and stubbed return values never leak between cases.
  beforeEach(() => {
    vi.clearAllMocks();
    controller = new FlyerController();
  });
  // Defensive cleanup: restores real timers in case a test enables fake ones.
  afterEach(() => {
    vi.useRealTimers();
  });
  // ==========================================================================
  // LIST ENDPOINTS
  // ==========================================================================
  describe('getFlyers()', () => {
    it('should return flyers with default pagination', async () => {
      // Arrange
      const mockFlyers = [createMockFlyer(), createMockFlyer({ flyer_id: 2 })];
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyers.mockResolvedValue(mockFlyers);
      // Act
      const result = await controller.getFlyers(request);
      // Assert
      expect(result.success).toBe(true);
      // Narrowing guard: `data` exists only on the success variant.
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
      expect(mockedDb.flyerRepo.getFlyers).toHaveBeenCalledWith(
        expect.anything(),
        20, // default limit
        0, // default offset
      );
    });
    it('should respect custom pagination parameters', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyers.mockResolvedValue([]);
      // Act
      await controller.getFlyers(request, 50, 10);
      // Assert
      expect(mockedDb.flyerRepo.getFlyers).toHaveBeenCalledWith(expect.anything(), 50, 10);
    });
    // The next four tests pin the controller's pagination normalization rules:
    // limit is clamped to [1, 100], offset to >= 0, and both are floored.
    it('should cap limit at 100', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyers.mockResolvedValue([]);
      // Act
      await controller.getFlyers(request, 200);
      // Assert
      expect(mockedDb.flyerRepo.getFlyers).toHaveBeenCalledWith(expect.anything(), 100, 0);
    });
    it('should floor limit to minimum of 1', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyers.mockResolvedValue([]);
      // Act
      await controller.getFlyers(request, -5);
      // Assert
      expect(mockedDb.flyerRepo.getFlyers).toHaveBeenCalledWith(expect.anything(), 1, 0);
    });
    it('should normalize offset to 0 if negative', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyers.mockResolvedValue([]);
      // Act
      await controller.getFlyers(request, 20, -10);
      // Assert
      expect(mockedDb.flyerRepo.getFlyers).toHaveBeenCalledWith(expect.anything(), 20, 0);
    });
    it('should floor decimal pagination values', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyers.mockResolvedValue([]);
      // Act
      await controller.getFlyers(request, 15.9, 5.7);
      // Assert
      expect(mockedDb.flyerRepo.getFlyers).toHaveBeenCalledWith(expect.anything(), 15, 5);
    });
  });
  // ==========================================================================
  // SINGLE RESOURCE ENDPOINTS
  // ==========================================================================
  describe('getFlyerById()', () => {
    it('should return flyer by ID successfully', async () => {
      // Arrange
      const mockFlyer = createMockFlyer();
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyerById.mockResolvedValue(mockFlyer);
      // Act
      const result = await controller.getFlyerById(1, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.flyer_id).toBe(1);
        expect(result.data.file_name).toBe('test-flyer.jpg');
      }
      expect(mockedDb.flyerRepo.getFlyerById).toHaveBeenCalledWith(1);
    });
    it('should log successful retrieval', async () => {
      // Arrange
      // A local logger spy is injected so the debug call can be asserted on.
      const mockFlyer = createMockFlyer();
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      mockedDb.flyerRepo.getFlyerById.mockResolvedValue(mockFlyer);
      // Act
      await controller.getFlyerById(1, request);
      // Assert
      expect(mockLog.debug).toHaveBeenCalledWith({ flyerId: 1 }, 'Retrieved flyer by ID');
    });
  });
  describe('getFlyerItems()', () => {
    it('should return flyer items successfully', async () => {
      // Arrange
      const mockItems = [
        createMockFlyerItem(),
        createMockFlyerItem({ flyer_item_id: 2, item: 'Another Product' }),
      ];
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyerItems.mockResolvedValue(mockItems);
      // Act
      const result = await controller.getFlyerItems(1, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
        expect(result.data[0].item).toBe('Test Product');
      }
      expect(mockedDb.flyerRepo.getFlyerItems).toHaveBeenCalledWith(1, expect.anything());
    });
    it('should log item count', async () => {
      // Arrange
      const mockItems = [createMockFlyerItem()];
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      mockedDb.flyerRepo.getFlyerItems.mockResolvedValue(mockItems);
      // Act
      await controller.getFlyerItems(1, request);
      // Assert
      expect(mockLog.debug).toHaveBeenCalledWith(
        { flyerId: 1, itemCount: 1 },
        'Retrieved flyer items',
      );
    });
    it('should return empty array for flyer with no items', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyerItems.mockResolvedValue([]);
      // Act
      const result = await controller.getFlyerItems(999, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toEqual([]);
      }
    });
  });
  // ==========================================================================
  // BATCH ENDPOINTS
  // ==========================================================================
  describe('batchFetchItems()', () => {
    it('should return items for multiple flyers', async () => {
      // Arrange
      const mockItems = [
        createMockFlyerItem({ flyer_id: 1 }),
        createMockFlyerItem({ flyer_id: 2, flyer_item_id: 2 }),
      ];
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyerItemsForFlyers.mockResolvedValue(mockItems);
      // Act
      const result = await controller.batchFetchItems({ flyerIds: [1, 2, 3] }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
      expect(mockedDb.flyerRepo.getFlyerItemsForFlyers).toHaveBeenCalledWith(
        [1, 2, 3],
        expect.anything(),
      );
    });
    it('should log batch fetch details', async () => {
      // Arrange
      const mockItems = [createMockFlyerItem()];
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      mockedDb.flyerRepo.getFlyerItemsForFlyers.mockResolvedValue(mockItems);
      // Act
      await controller.batchFetchItems({ flyerIds: [1, 2] }, request);
      // Assert
      expect(mockLog.debug).toHaveBeenCalledWith(
        { flyerCount: 2, itemCount: 1 },
        'Batch fetched flyer items',
      );
    });
    it('should return empty array when no items found', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyerItemsForFlyers.mockResolvedValue([]);
      // Act
      const result = await controller.batchFetchItems({ flyerIds: [999, 1000] }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toEqual([]);
      }
    });
  });
  describe('batchCountItems()', () => {
    it('should return total item count for multiple flyers', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.countFlyerItemsForFlyers.mockResolvedValue(25);
      // Act
      const result = await controller.batchCountItems({ flyerIds: [1, 2, 3] }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.count).toBe(25);
      }
      expect(mockedDb.flyerRepo.countFlyerItemsForFlyers).toHaveBeenCalledWith(
        [1, 2, 3],
        expect.anything(),
      );
    });
    it('should log count details', async () => {
      // Arrange
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      mockedDb.flyerRepo.countFlyerItemsForFlyers.mockResolvedValue(10);
      // Act
      await controller.batchCountItems({ flyerIds: [1] }, request);
      // Assert
      expect(mockLog.debug).toHaveBeenCalledWith(
        { flyerCount: 1, totalItems: 10 },
        'Batch counted items',
      );
    });
    it('should return 0 for empty flyer list', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.countFlyerItemsForFlyers.mockResolvedValue(0);
      // Act
      const result = await controller.batchCountItems({ flyerIds: [] }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.count).toBe(0);
      }
    });
  });
  // ==========================================================================
  // TRACKING ENDPOINTS
  // ==========================================================================
  describe('trackItemInteraction()', () => {
    it('should accept view tracking (fire-and-forget)', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.trackFlyerItemInteraction.mockResolvedValue(undefined);
      // Act
      const result = await controller.trackItemInteraction(1, { type: 'view' }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Tracking accepted');
      }
    });
    it('should accept click tracking', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.trackFlyerItemInteraction.mockResolvedValue(undefined);
      // Act
      const result = await controller.trackItemInteraction(1, { type: 'click' }, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Tracking accepted');
      }
    });
    it('should log error but not fail on tracking failure', async () => {
      // Arrange
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      // Make tracking fail
      mockedDb.flyerRepo.trackFlyerItemInteraction.mockRejectedValue(new Error('Database error'));
      // Act
      const result = await controller.trackItemInteraction(1, { type: 'view' }, request);
      // Assert - should still return success (fire-and-forget)
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Tracking accepted');
      }
      // Wait for async error handling
      // The rejection settles on a microtask; a short real-timer sleep lets
      // the controller's .catch() handler run before we assert on the logger.
      await new Promise((resolve) => setTimeout(resolve, 10));
      // Error should be logged
      expect(mockLog.error).toHaveBeenCalledWith(
        expect.objectContaining({
          error: expect.any(Error),
          itemId: 1,
          interactionType: 'view',
        }),
        'Flyer item interaction tracking failed (fire-and-forget)',
      );
    });
    it('should call tracking with correct parameters', async () => {
      // Arrange
      const request = createMockRequest();
      mockedDb.flyerRepo.trackFlyerItemInteraction.mockResolvedValue(undefined);
      // Act
      await controller.trackItemInteraction(42, { type: 'click' }, request);
      // Assert
      expect(mockedDb.flyerRepo.trackFlyerItemInteraction).toHaveBeenCalledWith(
        42,
        'click',
        expect.anything(),
      );
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const mockFlyer = createMockFlyer();
      const request = createMockRequest();
      mockedDb.flyerRepo.getFlyerById.mockResolvedValue(mockFlyer);
      // Act
      const result = await controller.getFlyerById(1, request);
      // Assert
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
  });
});

View File

@@ -0,0 +1,282 @@
// src/controllers/flyer.controller.ts
// ============================================================================
// FLYER CONTROLLER
// ============================================================================
// Provides endpoints for managing flyers and flyer items.
// Implements endpoints for:
// - Listing flyers with pagination
// - Getting individual flyer details
// - Getting items for a flyer
// - Batch fetching items for multiple flyers
// - Batch counting items for multiple flyers
// - Tracking item interactions (fire-and-forget)
// ============================================================================
import {
Get,
Post,
Route,
Tags,
Path,
Query,
Body,
Request,
SuccessResponse,
Response,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import * as db from '../services/db/index.db';
import type { FlyerDto, FlyerItemDto } from '../dtos/common.dto';
// ============================================================================
// REQUEST/RESPONSE TYPES
// ============================================================================
// Types for request bodies and custom response shapes that will appear in
// the OpenAPI specification.
// ============================================================================
/**
 * Request body for batch fetching flyer items.
 *
 * NOTE: the TSDoc tags below (@minItems, @example) are consumed by tsoa's
 * OpenAPI generator and affect validation — do not remove them casually.
 */
interface BatchFetchRequest {
  /**
   * Array of flyer IDs to fetch items for.
   * @minItems 1
   * @example [1, 2, 3]
   */
  flyerIds: number[];
}
/**
 * Request body for batch counting flyer items.
 */
interface BatchCountRequest {
  /**
   * Array of flyer IDs to count items for.
   * An empty array is accepted here (no @minItems constraint).
   * @example [1, 2, 3]
   */
  flyerIds: number[];
}
/**
 * Request body for tracking item interactions.
 */
interface TrackInteractionRequest {
  /**
   * Type of interaction to track.
   * @example "view"
   */
  type: 'view' | 'click';
}
/**
 * Response for batch item count.
 */
interface BatchCountResponse {
  /**
   * Total number of items across all requested flyers.
   */
  count: number;
}
/**
 * Response for tracking confirmation.
 */
interface TrackingResponse {
  /**
   * Confirmation message.
   * @example "Tracking accepted"
   */
  message: string;
}
// ============================================================================
// FLYER CONTROLLER
// ============================================================================
/**
 * Controller for flyer management endpoints.
 *
 * Provides read-only access to flyers and flyer items for all users,
 * with analytics tracking capabilities.
 */
@Route('flyers')
@Tags('Flyers')
export class FlyerController extends BaseController {
  // ==========================================================================
  // LIST ENDPOINTS
  // ==========================================================================
  /**
   * Get all flyers.
   *
   * Returns a paginated list of all flyers, ordered by creation date (newest first).
   * Includes store information and location data for each flyer.
   *
   * @summary List all flyers
   * @param limit Maximum number of flyers to return (default: 20, clamped to [1, 100])
   * @param offset Number of flyers to skip for pagination (default: 0, clamped to >= 0)
   * @returns Array of flyer objects with store information
   */
  @Get()
  @SuccessResponse(200, 'List of flyers retrieved successfully')
  public async getFlyers(
    @Request() req: ExpressRequest,
    @Query() limit?: number,
    @Query() offset?: number,
  ): Promise<SuccessResponseType<FlyerDto[]>> {
    // Apply defaults and bounds for pagination: floor decimals, clamp limit to
    // [1, 100] and offset to >= 0 so malformed query values can't blow up the DB.
    // Note: Using offset-based pagination to match existing API behavior
    const normalizedLimit = Math.min(100, Math.max(1, Math.floor(limit ?? 20)));
    const normalizedOffset = Math.max(0, Math.floor(offset ?? 0));
    const flyers = await db.flyerRepo.getFlyers(req.log, normalizedLimit, normalizedOffset);
    // The Flyer type from the repository is structurally compatible with FlyerDto
    // (FlyerDto just omits the GeoJSONPoint type that tsoa can't handle)
    return this.success(flyers as unknown as FlyerDto[]);
  }
  // ==========================================================================
  // SINGLE RESOURCE ENDPOINTS
  // ==========================================================================
  /**
   * Get flyer by ID.
   *
   * Returns a single flyer with its full details, including store information
   * and all associated store locations.
   *
   * @summary Get a single flyer
   * @param id The unique identifier of the flyer
   * @returns The flyer object with full details
   */
  @Get('{id}')
  @SuccessResponse(200, 'Flyer retrieved successfully')
  @Response<ErrorResponse>(404, 'Flyer not found')
  public async getFlyerById(
    @Path() id: number,
    @Request() req: ExpressRequest,
  ): Promise<SuccessResponseType<FlyerDto>> {
    // getFlyerById throws NotFoundError if flyer doesn't exist
    // The global error handler converts this to a 404 response
    const flyer = await db.flyerRepo.getFlyerById(id);
    req.log.debug({ flyerId: id }, 'Retrieved flyer by ID');
    return this.success(flyer as unknown as FlyerDto);
  }
  /**
   * Get flyer items.
   *
   * Returns all items (deals) associated with a specific flyer.
   * Items are ordered by their position in the flyer.
   *
   * @summary Get items for a flyer
   * @param id The unique identifier of the flyer
   * @returns Array of flyer items with pricing and category information
   */
  @Get('{id}/items')
  @SuccessResponse(200, 'Flyer items retrieved successfully')
  @Response<ErrorResponse>(404, 'Flyer not found')
  public async getFlyerItems(
    @Path() id: number,
    @Request() req: ExpressRequest,
  ): Promise<SuccessResponseType<FlyerItemDto[]>> {
    const items = await db.flyerRepo.getFlyerItems(id, req.log);
    req.log.debug({ flyerId: id, itemCount: items.length }, 'Retrieved flyer items');
    return this.success(items as unknown as FlyerItemDto[]);
  }
  // ==========================================================================
  // BATCH ENDPOINTS
  // ==========================================================================
  /**
   * Batch fetch flyer items.
   *
   * Returns all items for multiple flyers in a single request.
   * This is more efficient than making separate requests for each flyer.
   * Items are ordered by flyer ID, then by item position within each flyer.
   *
   * @summary Batch fetch items for multiple flyers
   * @param body Request body containing array of flyer IDs
   * @returns Array of all flyer items for the requested flyers
   */
  @Post('items/batch-fetch')
  @SuccessResponse(200, 'Batch items retrieved successfully')
  public async batchFetchItems(
    @Body() body: BatchFetchRequest,
    @Request() req: ExpressRequest,
  ): Promise<SuccessResponseType<FlyerItemDto[]>> {
    const items = await db.flyerRepo.getFlyerItemsForFlyers(body.flyerIds, req.log);
    req.log.debug(
      { flyerCount: body.flyerIds.length, itemCount: items.length },
      'Batch fetched flyer items',
    );
    return this.success(items as unknown as FlyerItemDto[]);
  }
  /**
   * Batch count flyer items.
   *
   * Returns the total item count for multiple flyers.
   * Useful for displaying item counts without fetching all item data.
   *
   * @summary Batch count items for multiple flyers
   * @param body Request body containing array of flyer IDs
   * @returns Object with total count of items across all requested flyers
   */
  @Post('items/batch-count')
  @SuccessResponse(200, 'Batch count retrieved successfully')
  public async batchCountItems(
    @Body() body: BatchCountRequest,
    @Request() req: ExpressRequest,
  ): Promise<SuccessResponseType<BatchCountResponse>> {
    const count = await db.flyerRepo.countFlyerItemsForFlyers(body.flyerIds, req.log);
    req.log.debug({ flyerCount: body.flyerIds.length, totalItems: count }, 'Batch counted items');
    return this.success({ count });
  }
  // ==========================================================================
  // TRACKING ENDPOINTS
  // ==========================================================================
  /**
   * Track item interaction.
   *
   * Records a view or click interaction with a flyer item for analytics purposes.
   * This endpoint uses a fire-and-forget pattern: it returns immediately with a
   * 202 Accepted response while the tracking is processed asynchronously.
   *
   * This design ensures that tracking does not slow down the user experience,
   * and any tracking failures are logged but do not affect the client.
   *
   * @summary Track a flyer item interaction
   * @param itemId The unique identifier of the flyer item
   * @param body The interaction type (view or click)
   * @returns Confirmation that tracking was accepted
   */
  @Post('items/{itemId}/track')
  @SuccessResponse(202, 'Tracking accepted')
  public async trackItemInteraction(
    @Path() itemId: number,
    @Body() body: TrackInteractionRequest,
    @Request() req: ExpressRequest,
  ): Promise<SuccessResponseType<TrackingResponse>> {
    // Fire-and-forget: start the tracking operation but don't await it.
    // The `void` operator marks the promise as intentionally un-awaited
    // (satisfies @typescript-eslint/no-floating-promises); the .catch() below
    // handles errors so no unhandled rejection can escape.
    void db.flyerRepo.trackFlyerItemInteraction(itemId, body.type, req.log).catch((error) => {
      // Log the error but don't propagate it - this is intentional
      // as tracking failures should not impact user experience
      req.log.error(
        { error, itemId, interactionType: body.type },
        'Flyer item interaction tracking failed (fire-and-forget)',
      );
    });
    // Return immediately with 202 Accepted
    this.setStatus(202);
    return this.success({ message: 'Tracking accepted' });
  }
}

View File

@@ -0,0 +1,457 @@
// src/controllers/gamification.controller.test.ts
// ============================================================================
// GAMIFICATION CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the GamificationController class. These tests verify controller
// logic in isolation by mocking the gamification service.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class
// Same stand-in strategy as the other controller tests: decorators become
// no-op factories and the Controller base class only records the status code
// set via setStatus().
vi.mock('tsoa', () => ({
  Controller: class Controller {
    protected setStatus(status: number): void {
      this._status = status;
    }
    // Backing field for the recorded HTTP status; defaults to 200.
    private _status = 200;
  },
  // Decorator factories: each returns a decorator that does nothing.
  Get: () => () => {},
  Post: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Security: () => () => {},
  Query: () => () => {},
  Body: () => () => {},
  Request: () => () => {},
  Middlewares: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock gamification service
// Stubs every service method the controller calls; individual tests configure
// return values with mockResolvedValue and verify the arguments passed through.
vi.mock('../services/gamificationService', () => ({
  gamificationService: {
    getAllAchievements: vi.fn(),
    getLeaderboard: vi.fn(),
    getUserAchievements: vi.fn(),
    awardAchievement: vi.fn(),
  },
}));
// Mock rate limiters
// Each limiter becomes a pass-through middleware that immediately calls
// next(), so rate limiting never interferes with unit tests.
vi.mock('../config/rateLimiters', () => ({
  publicReadLimiter: (req: unknown, res: unknown, next: () => void) => next(),
  userReadLimiter: (req: unknown, res: unknown, next: () => void) => next(),
  adminTriggerLimiter: (req: unknown, res: unknown, next: () => void) => next(),
}));
// Import mocked modules after mock definitions
import { gamificationService } from '../services/gamificationService';
import { GamificationController } from './gamification.controller';
// Cast the auto-mocked service so tests get typed access to the mock helpers
// (mockResolvedValue, mock.calls, etc.) without resorting to `any`.
const mockedGamificationService = gamificationService as Mocked<typeof gamificationService>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds an Express request double that is authenticated by default.
 *
 * Carries a standard-user profile plus a pino-style logger whose methods are
 * vitest spies; any property (including `user`) can be replaced via
 * `overrides` (spread last, so it wins).
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLog = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const base = {
    body: {},
    params: {},
    query: {},
    user: createMockUserProfile(),
    log: stubLog,
  };
  // Double cast: the stub is intentionally much narrower than ExpressRequest.
  return { ...base, ...overrides } as unknown as ExpressRequest;
}
/**
 * Builds the default (non-admin) authenticated user profile fixture.
 */
function createMockUserProfile() {
  const user = {
    user_id: 'test-user-id',
    email: 'test@example.com',
  };
  return { full_name: 'Test User', role: 'user' as const, user };
}
/**
 * Builds an admin-role user profile fixture for admin-only endpoints.
 */
function createMockAdminProfile() {
  const user = {
    user_id: 'admin-user-id',
    email: 'admin@example.com',
  };
  return { full_name: 'Admin User', role: 'admin' as const, user };
}
/**
 * Builds an achievement fixture; any field can be overridden via `overrides`
 * (spread last, so it wins).
 */
function createMockAchievement(overrides: Record<string, unknown> = {}) {
  const defaults = {
    achievement_id: 1,
    name: 'First-Upload',
    description: 'Upload your first flyer',
    points: 10,
    icon: 'upload',
    category: 'contribution',
    created_at: '2024-01-01T00:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
/**
 * Builds a user-achievement (earned award) fixture; any field can be
 * overridden via `overrides` (spread last, so it wins).
 */
function createMockUserAchievement(overrides: Record<string, unknown> = {}) {
  const defaults = {
    user_achievement_id: 1,
    user_id: 'test-user-id',
    achievement_id: 1,
    achievement_name: 'First-Upload',
    achievement_description: 'Upload your first flyer',
    points: 10,
    earned_at: '2024-01-15T10:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
/**
 * Builds a leaderboard-row fixture; any field can be overridden via
 * `overrides` (spread last, so it wins).
 */
function createMockLeaderboardUser(overrides: Record<string, unknown> = {}) {
  const defaults = {
    user_id: 'user-1',
    display_name: 'Top User',
    total_points: 150,
    achievement_count: 8,
    rank: 1,
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('GamificationController', () => {
let controller: GamificationController;
beforeEach(() => {
vi.clearAllMocks();
controller = new GamificationController();
});
afterEach(() => {
vi.useRealTimers();
});
// ==========================================================================
// PUBLIC ENDPOINTS
// ==========================================================================
describe('getAllAchievements()', () => {
it('should return all achievements', async () => {
// Arrange
const mockAchievements = [
createMockAchievement(),
createMockAchievement({ achievement_id: 2, name: 'Deal-Hunter', points: 25 }),
];
const request = createMockRequest();
mockedGamificationService.getAllAchievements.mockResolvedValue(mockAchievements);
// Act
const result = await controller.getAllAchievements(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data).toHaveLength(2);
expect(result.data[0].name).toBe('First-Upload');
}
expect(mockedGamificationService.getAllAchievements).toHaveBeenCalledWith(expect.anything());
});
it('should return empty array when no achievements exist', async () => {
// Arrange
const request = createMockRequest();
mockedGamificationService.getAllAchievements.mockResolvedValue([]);
// Act
const result = await controller.getAllAchievements(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data).toHaveLength(0);
}
});
it('should work without user authentication', async () => {
// Arrange
const mockAchievements = [createMockAchievement()];
const request = createMockRequest({ user: undefined });
mockedGamificationService.getAllAchievements.mockResolvedValue(mockAchievements);
// Act
const result = await controller.getAllAchievements(request);
// Assert
expect(result.success).toBe(true);
});
});
describe('getLeaderboard()', () => {
it('should return leaderboard with default limit', async () => {
// Arrange
const mockLeaderboard = [
createMockLeaderboardUser(),
createMockLeaderboardUser({ user_id: 'user-2', rank: 2, total_points: 120 }),
];
const request = createMockRequest();
mockedGamificationService.getLeaderboard.mockResolvedValue(mockLeaderboard);
// Act
const result = await controller.getLeaderboard(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data).toHaveLength(2);
expect(result.data[0].rank).toBe(1);
}
expect(mockedGamificationService.getLeaderboard).toHaveBeenCalledWith(
10, // default limit
expect.anything(),
);
});
it('should use custom limit', async () => {
// Arrange
const mockLeaderboard = [createMockLeaderboardUser()];
const request = createMockRequest();
mockedGamificationService.getLeaderboard.mockResolvedValue(mockLeaderboard);
// Act
await controller.getLeaderboard(request, 25);
// Assert
expect(mockedGamificationService.getLeaderboard).toHaveBeenCalledWith(25, expect.anything());
});
it('should cap limit at 50', async () => {
// Arrange
const mockLeaderboard = [createMockLeaderboardUser()];
const request = createMockRequest();
mockedGamificationService.getLeaderboard.mockResolvedValue(mockLeaderboard);
// Act
await controller.getLeaderboard(request, 100);
// Assert
expect(mockedGamificationService.getLeaderboard).toHaveBeenCalledWith(50, expect.anything());
});
it('should floor limit at 1', async () => {
// Arrange
const mockLeaderboard = [createMockLeaderboardUser()];
const request = createMockRequest();
mockedGamificationService.getLeaderboard.mockResolvedValue(mockLeaderboard);
// Act
await controller.getLeaderboard(request, 0);
// Assert
expect(mockedGamificationService.getLeaderboard).toHaveBeenCalledWith(1, expect.anything());
});
it('should work without user authentication', async () => {
// Arrange
const mockLeaderboard = [createMockLeaderboardUser()];
const request = createMockRequest({ user: undefined });
mockedGamificationService.getLeaderboard.mockResolvedValue(mockLeaderboard);
// Act
const result = await controller.getLeaderboard(request);
// Assert
expect(result.success).toBe(true);
});
});
// ==========================================================================
// AUTHENTICATED USER ENDPOINTS
// ==========================================================================
describe('getMyAchievements()', () => {
it('should return user achievements', async () => {
// Arrange
const mockUserAchievements = [
createMockUserAchievement(),
createMockUserAchievement({ user_achievement_id: 2, achievement_name: 'Deal-Hunter' }),
];
const request = createMockRequest();
mockedGamificationService.getUserAchievements.mockResolvedValue(mockUserAchievements);
// Act
const result = await controller.getMyAchievements(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data).toHaveLength(2);
expect(result.data[0].achievement_name).toBe('First-Upload');
}
expect(mockedGamificationService.getUserAchievements).toHaveBeenCalledWith(
'test-user-id',
expect.anything(),
);
});
it('should return empty array when user has no achievements', async () => {
// Arrange
const request = createMockRequest();
mockedGamificationService.getUserAchievements.mockResolvedValue([]);
// Act
const result = await controller.getMyAchievements(request);
// Assert
expect(result.success).toBe(true);
if (result.success) {
expect(result.data).toHaveLength(0);
}
});
it('should use user ID from authenticated profile', async () => {
// Arrange
const customProfile = {
full_name: 'Custom User',
role: 'user' as const,
user: {
user_id: 'custom-user-id',
email: 'custom@example.com',
},
};
const request = createMockRequest({ user: customProfile });
mockedGamificationService.getUserAchievements.mockResolvedValue([]);
// Act
await controller.getMyAchievements(request);
// Assert
expect(mockedGamificationService.getUserAchievements).toHaveBeenCalledWith(
'custom-user-id',
expect.anything(),
);
});
});
// ==========================================================================
// ADMIN ENDPOINTS
// ==========================================================================
describe('awardAchievement()', () => {
  it('should award achievement to user (admin)', async () => {
    // Arrange: authenticated admin caller; service resolves silently.
    mockedGamificationService.awardAchievement.mockResolvedValue(undefined);
    const req = createMockRequest({ user: createMockAdminProfile() });

    // Act
    const result = await controller.awardAchievement(req, {
      userId: 'target-user-id',
      achievementName: 'First-Upload',
    });

    // Assert
    expect(result.success).toBe(true);
    if (result.success) {
      expect(result.data.message).toBe(
        "Successfully awarded 'First-Upload' to user target-user-id.",
      );
    }
    expect(mockedGamificationService.awardAchievement).toHaveBeenCalledWith(
      'target-user-id',
      'First-Upload',
      expect.anything(),
    );
  });

  it('should include achievement name in success message', async () => {
    // Arrange
    mockedGamificationService.awardAchievement.mockResolvedValue(undefined);
    const req = createMockRequest({ user: createMockAdminProfile() });

    // Act
    const result = await controller.awardAchievement(req, {
      userId: 'user-123',
      achievementName: 'Deal-Hunter',
    });

    // Assert: message interpolates both the achievement and the target user.
    expect(result.success).toBe(true);
    if (result.success) {
      expect(result.data.message).toContain('Deal-Hunter');
      expect(result.data.message).toContain('user-123');
    }
  });
});
// ==========================================================================
// BASE CONTROLLER INTEGRATION
// ==========================================================================
describe('BaseController integration', () => {
  it('should use success helper for consistent response format', async () => {
    // Arrange
    mockedGamificationService.getAllAchievements.mockResolvedValue([]);
    const req = createMockRequest();

    // Act
    const result = await controller.getAllAchievements(req);

    // Assert: the { success: true, data: ... } envelope is always present.
    expect(result).toHaveProperty('success', true);
    expect(result).toHaveProperty('data');
  });
});
});

View File

@@ -0,0 +1,190 @@
// src/controllers/gamification.controller.ts
// ============================================================================
// GAMIFICATION CONTROLLER
// ============================================================================
// Provides endpoints for the achievement and leaderboard system.
// Includes public endpoints for viewing achievements and leaderboard,
// authenticated endpoint for user's achievements, and admin endpoint
// for manually awarding achievements.
//
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import {
Get,
Post,
Route,
Tags,
Security,
Body,
Query,
Request,
SuccessResponse,
Response,
Middlewares,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import { gamificationService } from '../services/gamificationService';
import type { UserProfile, Achievement, UserAchievement, LeaderboardUser } from '../types';
import { publicReadLimiter, userReadLimiter, adminTriggerLimiter } from '../config/rateLimiters';
// ============================================================================
// REQUEST/RESPONSE TYPES
// ============================================================================
/**
 * Request body for awarding an achievement (admin only).
 *
 * Consumed by POST /achievements/award. The `@format` / `@example` tags
 * feed tsoa's generated OpenAPI spec and request validation.
 */
interface AwardAchievementRequest {
  /**
   * User ID to award the achievement to.
   * @format uuid
   */
  userId: string;
  /**
   * Name of the achievement to award.
   * @example "First-Upload"
   */
  achievementName: string;
}
/**
 * Response for successful achievement award.
 *
 * Returned inside the standard success envelope (ADR-028).
 */
interface AwardAchievementResponse {
  /** Human-readable success message naming the achievement and user. */
  message: string;
}
// ============================================================================
// GAMIFICATION CONTROLLER
// ============================================================================
/**
 * Controller for achievement and leaderboard system.
 *
 * Public endpoints:
 * - GET /achievements - List all available achievements
 * - GET /achievements/leaderboard - View top users by points
 *
 * Authenticated endpoints:
 * - GET /achievements/me - View user's earned achievements
 *
 * Admin endpoints:
 * - POST /achievements/award - Manually award an achievement
 */
@Route('achievements')
@Tags('Achievements')
export class GamificationController extends BaseController {
  // ==========================================================================
  // PUBLIC ENDPOINTS
  // ==========================================================================

  /**
   * Get all achievements.
   *
   * Returns the master list of all available achievements in the system.
   * This is a public endpoint.
   *
   * @summary Get all achievements
   * @param request Express request for logging
   * @returns List of all available achievements
   */
  @Get()
  @Middlewares(publicReadLimiter)
  @SuccessResponse(200, 'List of all achievements')
  public async getAllAchievements(
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<Achievement[]>> {
    const achievements = await gamificationService.getAllAchievements(request.log);
    return this.success(achievements);
  }

  /**
   * Get leaderboard.
   *
   * Returns the top users ranked by total points earned from achievements.
   * This is a public endpoint.
   *
   * @summary Get leaderboard
   * @param request Express request for logging
   * @param limit Maximum number of users to return (1-50, default: 10)
   * @returns Leaderboard entries with user points
   */
  @Get('leaderboard')
  @Middlewares(publicReadLimiter)
  @SuccessResponse(200, 'Leaderboard entries')
  public async getLeaderboard(
    @Request() request: ExpressRequest,
    @Query() limit?: number,
  ): Promise<SuccessResponseType<LeaderboardUser[]>> {
    // Normalize limit: default 10, clamp to [1, 50].
    // Guard against a non-finite value (NaN/Infinity) reaching this point:
    // Math.min/Math.max propagate NaN, so the previous
    // `Math.min(50, Math.max(1, Math.floor(limit ?? 10)))` would have
    // forwarded NaN to the service layer unchecked.
    const requested =
      typeof limit === 'number' && Number.isFinite(limit) ? Math.floor(limit) : 10;
    const normalizedLimit = Math.min(50, Math.max(1, requested));
    const leaderboard = await gamificationService.getLeaderboard(normalizedLimit, request.log);
    return this.success(leaderboard);
  }

  // ==========================================================================
  // AUTHENTICATED USER ENDPOINTS
  // ==========================================================================

  /**
   * Get my achievements.
   *
   * Returns all achievements earned by the authenticated user.
   *
   * @summary Get my achievements
   * @param request Express request with authenticated user
   * @returns List of user's earned achievements
   */
  @Get('me')
  @Security('bearerAuth')
  @Middlewares(userReadLimiter)
  @SuccessResponse(200, "List of user's earned achievements")
  @Response<ErrorResponse>(401, 'Unauthorized - JWT token missing or invalid')
  public async getMyAchievements(
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<UserAchievement[]>> {
    // The bearerAuth middleware attaches the verified profile to request.user.
    const userProfile = request.user as UserProfile;
    const userAchievements = await gamificationService.getUserAchievements(
      userProfile.user.user_id,
      request.log,
    );
    return this.success(userAchievements);
  }

  // ==========================================================================
  // ADMIN ENDPOINTS
  // ==========================================================================

  /**
   * Award achievement to user (Admin only).
   *
   * Manually award an achievement to a specific user. Requires admin role.
   *
   * @summary Award achievement to user (Admin only)
   * @param request Express request with authenticated admin user
   * @param body User ID and achievement name
   * @returns Success message
   */
  @Post('award')
  @Security('bearerAuth', ['admin'])
  @Middlewares(adminTriggerLimiter)
  @SuccessResponse(200, 'Achievement awarded successfully')
  @Response<ErrorResponse>(400, 'Invalid achievement name')
  @Response<ErrorResponse>(401, 'Unauthorized - JWT token missing or invalid')
  @Response<ErrorResponse>(403, 'Forbidden - User is not an admin')
  @Response<ErrorResponse>(404, 'User or achievement not found')
  public async awardAchievement(
    @Request() request: ExpressRequest,
    @Body() body: AwardAchievementRequest,
  ): Promise<SuccessResponseType<AwardAchievementResponse>> {
    // Failure cases surface via the error responses declared above
    // (presumably thrown by the service — see @Response annotations).
    await gamificationService.awardAchievement(body.userId, body.achievementName, request.log);
    return this.success({
      message: `Successfully awarded '${body.achievementName}' to user ${body.userId}.`,
    });
  }
}

View File

@@ -0,0 +1,769 @@
// src/controllers/health.controller.test.ts
// ============================================================================
// HEALTH CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the HealthController class. These tests verify controller
// logic in isolation by mocking external dependencies like database, Redis,
// and file system access.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock all external dependencies before importing the controller module.
// ============================================================================
// Mock tsoa decorators and Controller class (required before controller import)
// tsoa is used at compile-time for code generation but needs to be mocked for Vitest
// Mock tsoa decorators and Controller class (required before controller import)
// tsoa is used at compile-time for code generation but needs to be mocked for
// Vitest: all decorators become no-ops, and Controller supplies the
// setStatus() method the controller calls at runtime.
vi.mock('tsoa', () => ({
  Controller: class Controller {
    protected setStatus(_status: number): void {
      // Mock setStatus
    }
  },
  Get: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock database connection module (pool access, pool status, schema check).
vi.mock('../services/db/connection.db', () => ({
  checkTablesExist: vi.fn(),
  getPoolStatus: vi.fn(),
  getPool: vi.fn(),
}));
// Mock file system module — provides access() and the W_OK constant used for
// writability checks. Note: the default export is mocked because the
// controller imports `fs from 'node:fs/promises'`.
vi.mock('node:fs/promises', () => ({
  default: {
    access: vi.fn(),
    constants: { W_OK: 1 },
  },
}));
// Mock Redis connection from queue service: ping() for health probes,
// get() for worker heartbeat lookups.
vi.mock('../services/queueService.server', () => ({
  connection: {
    ping: vi.fn(),
    get: vi.fn(),
  },
}));
// Use vi.hoisted to create mock queue objects available during vi.mock hoisting
// (vi.mock factories are hoisted above ordinary const declarations, so a
// plain module-level const would not yet exist when the factory runs).
const { mockQueuesModule } = vi.hoisted(() => {
  // Each queue exposes getJobCounts() resolving to an all-zero count set.
  const createMockQueue = () => ({
    getJobCounts: vi.fn().mockResolvedValue({
      waiting: 0,
      active: 0,
      failed: 0,
      delayed: 0,
    }),
  });
  return {
    mockQueuesModule: {
      flyerQueue: createMockQueue(),
      emailQueue: createMockQueue(),
      analyticsQueue: createMockQueue(),
      weeklyAnalyticsQueue: createMockQueue(),
      cleanupQueue: createMockQueue(),
      tokenCleanupQueue: createMockQueue(),
      receiptQueue: createMockQueue(),
      expiryAlertQueue: createMockQueue(),
      barcodeQueue: createMockQueue(),
    },
  };
});
// Mock the queues.server module
vi.mock('../services/queues.server', () => mockQueuesModule);
// Import mocked modules after mock definitions
import * as dbConnection from '../services/db/connection.db';
import { connection as redisConnection } from '../services/queueService.server';
import fs from 'node:fs/promises';
import { HealthController } from './health.controller';
// Cast mocked modules for type-safe access
const mockedDbConnection = dbConnection as Mocked<typeof dbConnection>;
const mockedRedisConnection = redisConnection as Mocked<typeof redisConnection> & {
get: ReturnType<typeof vi.fn>;
};
const mockedFs = fs as Mocked<typeof fs>;
// Cast queues module for test assertions
const mockedQueues = mockQueuesModule as {
flyerQueue: { getJobCounts: ReturnType<typeof vi.fn> };
emailQueue: { getJobCounts: ReturnType<typeof vi.fn> };
analyticsQueue: { getJobCounts: ReturnType<typeof vi.fn> };
weeklyAnalyticsQueue: { getJobCounts: ReturnType<typeof vi.fn> };
cleanupQueue: { getJobCounts: ReturnType<typeof vi.fn> };
tokenCleanupQueue: { getJobCounts: ReturnType<typeof vi.fn> };
receiptQueue: { getJobCounts: ReturnType<typeof vi.fn> };
expiryAlertQueue: { getJobCounts: ReturnType<typeof vi.fn> };
barcodeQueue: { getJobCounts: ReturnType<typeof vi.fn> };
};
// ============================================================================
// TEST SUITE
// ============================================================================
describe('HealthController', () => {
// Fresh controller instance and clean mock state for every test.
let controller: HealthController;
beforeEach(() => {
  vi.clearAllMocks();
  controller = new HealthController();
});
afterEach(() => {
  // Some tests install fake timers (see time()); always restore real timers
  // so later tests are unaffected.
  vi.useRealTimers();
});
// ==========================================================================
// BASIC HEALTH CHECKS
// ==========================================================================
describe('ping()', () => {
  it('should return a pong response', async () => {
    const response = await controller.ping();

    expect(response.success).toBe(true);
    expect(response.data).toEqual({ message: 'pong' });
  });
});

// ==========================================================================
// KUBERNETES PROBES (ADR-020)
// ==========================================================================
describe('live()', () => {
  it('should return ok status with timestamp', async () => {
    const response = await controller.live();

    expect(response.success).toBe(true);
    expect(response.data.status).toBe('ok');
    expect(response.data.timestamp).toBeDefined();
    // The timestamp must be consumable by the Date constructor.
    expect(() => new Date(response.data.timestamp)).not.toThrow();
  });
});
describe('ready()', () => {
  // The readiness probe aggregates database, Redis, and storage checks.
  // Database and Redis failures drive an overall error result; storage is
  // non-critical (see the "still return healthy" test below).
  it('should return healthy status when all services are healthy', async () => {
    // Arrange: Mock all services as healthy
    const mockPool = { query: vi.fn().mockResolvedValue({ rows: [{ 1: 1 }] }) };
    mockedDbConnection.getPool.mockReturnValue(mockPool as never);
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 8,
      waitingCount: 1,
    });
    mockedRedisConnection.ping.mockResolvedValue('PONG');
    mockedFs.access.mockResolvedValue(undefined);
    // Act
    const result = await controller.ready();
    // Assert
    expect(result.success).toBe(true);
    if (result.success) {
      expect(result.data.status).toBe('healthy');
      expect(result.data.services.database.status).toBe('healthy');
      expect(result.data.services.redis.status).toBe('healthy');
      expect(result.data.services.storage.status).toBe('healthy');
      expect(result.data.uptime).toBeDefined();
      expect(result.data.timestamp).toBeDefined();
    }
  });
  it('should return degraded status when database pool has high waiting count', async () => {
    // Arrange: Mock database as degraded (waitingCount > 3)
    const mockPool = { query: vi.fn().mockResolvedValue({ rows: [{ 1: 1 }] }) };
    mockedDbConnection.getPool.mockReturnValue(mockPool as never);
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 2,
      waitingCount: 5,
    });
    mockedRedisConnection.ping.mockResolvedValue('PONG');
    mockedFs.access.mockResolvedValue(undefined);
    // Act
    const result = await controller.ready();
    // Assert: degraded is still a success response, not an error.
    expect(result.success).toBe(true);
    if (result.success) {
      expect(result.data.status).toBe('degraded');
      expect(result.data.services.database.status).toBe('degraded');
    }
  });
  it('should return unhealthy status when database is unavailable', async () => {
    // Arrange: Mock database as unhealthy
    const mockPool = { query: vi.fn().mockRejectedValue(new Error('Connection failed')) };
    mockedDbConnection.getPool.mockReturnValue(mockPool as never);
    mockedRedisConnection.ping.mockResolvedValue('PONG');
    mockedFs.access.mockResolvedValue(undefined);
    // Act
    const result = await controller.ready();
    // Assert: per-service detail is carried in error.details.
    expect(result.success).toBe(false);
    if (!result.success) {
      expect(result.error.message).toBe('Service unhealthy');
      const details = result.error.details as {
        status: string;
        services: { database: { status: string; message: string } };
      };
      expect(details.status).toBe('unhealthy');
      expect(details.services.database.status).toBe('unhealthy');
      expect(details.services.database.message).toBe('Connection failed');
    }
  });
  it('should return unhealthy status when Redis is unavailable', async () => {
    // Arrange: Mock Redis as unhealthy
    const mockPool = { query: vi.fn().mockResolvedValue({ rows: [{ 1: 1 }] }) };
    mockedDbConnection.getPool.mockReturnValue(mockPool as never);
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 8,
      waitingCount: 1,
    });
    mockedRedisConnection.ping.mockRejectedValue(new Error('Redis connection refused'));
    mockedFs.access.mockResolvedValue(undefined);
    // Act
    const result = await controller.ready();
    // Assert
    expect(result.success).toBe(false);
    if (!result.success) {
      const details = result.error.details as {
        status: string;
        services: { redis: { status: string; message: string } };
      };
      expect(details.status).toBe('unhealthy');
      expect(details.services.redis.status).toBe('unhealthy');
      expect(details.services.redis.message).toBe('Redis connection refused');
    }
  });
  it('should return unhealthy when Redis returns unexpected ping response', async () => {
    // Arrange: ping resolves, but not with the expected 'PONG'.
    const mockPool = { query: vi.fn().mockResolvedValue({ rows: [{ 1: 1 }] }) };
    mockedDbConnection.getPool.mockReturnValue(mockPool as never);
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 8,
      waitingCount: 1,
    });
    mockedRedisConnection.ping.mockResolvedValue('UNEXPECTED');
    mockedFs.access.mockResolvedValue(undefined);
    // Act
    const result = await controller.ready();
    // Assert
    expect(result.success).toBe(false);
    if (!result.success) {
      const details = result.error.details as {
        services: { redis: { status: string; message: string } };
      };
      expect(details.services.redis.status).toBe('unhealthy');
      expect(details.services.redis.message).toContain('Unexpected ping response');
    }
  });
  it('should still return healthy when storage is unhealthy but critical services are healthy', async () => {
    // Arrange: Storage unhealthy, but db and redis healthy
    const mockPool = { query: vi.fn().mockResolvedValue({ rows: [{ 1: 1 }] }) };
    mockedDbConnection.getPool.mockReturnValue(mockPool as never);
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 8,
      waitingCount: 1,
    });
    mockedRedisConnection.ping.mockResolvedValue('PONG');
    mockedFs.access.mockRejectedValue(new Error('Permission denied'));
    // Act
    const result = await controller.ready();
    // Assert: Storage is not critical, so should still be healthy/200
    expect(result.success).toBe(true);
    if (result.success) {
      expect(result.data.services.storage.status).toBe('unhealthy');
    }
  });
  it('should handle database error with non-Error object', async () => {
    // Arrange: rejection with a bare string, not an Error instance — the
    // controller substitutes a generic message in that case.
    const mockPool = { query: vi.fn().mockRejectedValue('String error') };
    mockedDbConnection.getPool.mockReturnValue(mockPool as never);
    mockedRedisConnection.ping.mockResolvedValue('PONG');
    mockedFs.access.mockResolvedValue(undefined);
    // Act
    const result = await controller.ready();
    // Assert
    expect(result.success).toBe(false);
    if (!result.success) {
      const details = result.error.details as { services: { database: { message: string } } };
      expect(details.services.database.message).toBe('Database connection failed');
    }
  });
});
describe('startup()', () => {
  it('should return started status when database is healthy', async () => {
    // Arrange: the pool query resolves and the pool is comfortably idle.
    const pool = { query: vi.fn().mockResolvedValue({ rows: [{ 1: 1 }] }) };
    mockedDbConnection.getPool.mockReturnValue(pool as never);
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 8,
      waitingCount: 1,
    });

    // Act
    const response = await controller.startup();

    // Assert
    expect(response.success).toBe(true);
    if (response.success) {
      expect(response.data.status).toBe('started');
      expect(response.data.database.status).toBe('healthy');
      expect(response.data.timestamp).toBeDefined();
    }
  });

  it('should return error when database is unhealthy during startup', async () => {
    // Arrange: the pool query rejects.
    const pool = { query: vi.fn().mockRejectedValue(new Error('Database not ready')) };
    mockedDbConnection.getPool.mockReturnValue(pool as never);

    // Act
    const response = await controller.startup();

    // Assert: the probe reports "starting" with database detail attached.
    expect(response.success).toBe(false);
    if (!response.success) {
      expect(response.error.message).toBe('Waiting for database connection');
      const details = response.error.details as {
        status: string;
        database: { status: string; message: string };
      };
      expect(details.status).toBe('starting');
      expect(details.database.status).toBe('unhealthy');
      expect(details.database.message).toBe('Database not ready');
    }
  });

  it('should return started with degraded database when pool has high waiting count', async () => {
    // Arrange: query succeeds but many clients are waiting on the pool.
    const pool = { query: vi.fn().mockResolvedValue({ rows: [{ 1: 1 }] }) };
    mockedDbConnection.getPool.mockReturnValue(pool as never);
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 2,
      waitingCount: 5,
    });

    // Act
    const response = await controller.startup();

    // Assert: degraded is not unhealthy, so startup still succeeds.
    expect(response.success).toBe(true);
    if (response.success) {
      expect(response.data.status).toBe('started');
      expect(response.data.database.status).toBe('degraded');
    }
  });
});
// ==========================================================================
// INDIVIDUAL SERVICE HEALTH CHECKS
// ==========================================================================
describe('dbSchema()', () => {
  it('should return success when all tables exist', async () => {
    // Arrange: no missing tables reported.
    mockedDbConnection.checkTablesExist.mockResolvedValue([]);

    // Act
    const response = await controller.dbSchema();

    // Assert
    expect(response.success).toBe(true);
    if (response.success) {
      expect(response.data.message).toBe('All required database tables exist.');
    }
  });

  it('should return error when tables are missing', async () => {
    // Arrange
    mockedDbConnection.checkTablesExist.mockResolvedValue(['missing_table_1', 'missing_table_2']);

    // Act
    const response = await controller.dbSchema();

    // Assert: the missing table names are surfaced in the error message.
    expect(response.success).toBe(false);
    if (!response.success) {
      expect(response.error.message).toContain('Missing tables: missing_table_1, missing_table_2');
    }
  });
});

describe('storage()', () => {
  it('should return success when storage is accessible', async () => {
    // Arrange
    mockedFs.access.mockResolvedValue(undefined);

    // Act
    const response = await controller.storage();

    // Assert
    expect(response.success).toBe(true);
    if (response.success) {
      expect(response.data.message).toContain('is accessible and writable');
    }
  });

  it('should return error when storage is not accessible', async () => {
    // Arrange
    mockedFs.access.mockRejectedValue(new Error('EACCES: permission denied'));

    // Act
    const response = await controller.storage();

    // Assert
    expect(response.success).toBe(false);
    if (!response.success) {
      expect(response.error.message).toContain('Storage check failed');
    }
  });
});
describe('dbPool()', () => {
  it('should return success for a healthy pool status', async () => {
    // Arrange
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 10,
      idleCount: 8,
      waitingCount: 1,
    });

    // Act
    const response = await controller.dbPool();

    // Assert: counts are echoed both in the message and as fields.
    expect(response.success).toBe(true);
    if (response.success) {
      expect(response.data.message).toContain('Pool Status: 10 total, 8 idle, 1 waiting');
      expect(response.data.totalCount).toBe(10);
      expect(response.data.idleCount).toBe(8);
      expect(response.data.waitingCount).toBe(1);
    }
  });

  it('should return error for an unhealthy pool status', async () => {
    // Arrange: far more waiters than idle connections.
    mockedDbConnection.getPoolStatus.mockReturnValue({
      totalCount: 20,
      idleCount: 5,
      waitingCount: 15,
    });

    // Act
    const response = await controller.dbPool();

    // Assert
    expect(response.success).toBe(false);
    if (!response.success) {
      expect(response.error.message).toContain('Pool may be under stress');
      expect(response.error.message).toContain('Pool Status: 20 total, 5 idle, 15 waiting');
    }
  });
});

describe('time()', () => {
  it('should return current server time, year, and week', async () => {
    // Arrange: pin the clock so the year/week assertions are deterministic.
    // (afterEach restores real timers.)
    vi.useFakeTimers();
    vi.setSystemTime(new Date('2024-03-15T10:30:00.000Z'));

    // Act
    const response = await controller.time();

    // Assert
    expect(response.success).toBe(true);
    if (response.success) {
      expect(response.data.currentTime).toBe('2024-03-15T10:30:00.000Z');
      expect(response.data.year).toBe(2024);
      expect(response.data.week).toBe(11);
    }
  });
});

describe('redis()', () => {
  it('should return success when Redis ping is successful', async () => {
    // Arrange
    mockedRedisConnection.ping.mockResolvedValue('PONG');

    // Act
    const response = await controller.redis();

    // Assert
    expect(response.success).toBe(true);
    if (response.success) {
      expect(response.data.message).toBe('Redis connection is healthy.');
    }
  });

  it('should return error when Redis ping fails', async () => {
    // Arrange
    mockedRedisConnection.ping.mockRejectedValue(new Error('Connection timed out'));

    // Act
    const response = await controller.redis();

    // Assert
    expect(response.success).toBe(false);
    if (!response.success) {
      expect(response.error.message).toBe('Connection timed out');
    }
  });

  it('should return error when Redis returns unexpected response', async () => {
    // Arrange: a resolved ping with anything other than 'PONG' is a failure.
    mockedRedisConnection.ping.mockResolvedValue('OK');

    // Act
    const response = await controller.redis();

    // Assert
    expect(response.success).toBe(false);
    if (!response.success) {
      expect(response.error.message).toContain('Unexpected Redis ping response: OK');
    }
  });
});
// ==========================================================================
// QUEUE HEALTH MONITORING (ADR-053)
// ==========================================================================
describe('queues()', () => {
  // Helper function to set all queue mocks to the same job counts — the
  // endpoint polls every queue, so each mock must be primed.
  const setAllQueueMocks = (jobCounts: {
    waiting: number;
    active: number;
    failed: number;
    delayed: number;
  }) => {
    mockedQueues.flyerQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.emailQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.analyticsQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.weeklyAnalyticsQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.cleanupQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.tokenCleanupQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.receiptQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.expiryAlertQueue.getJobCounts.mockResolvedValue(jobCounts);
    mockedQueues.barcodeQueue.getJobCounts.mockResolvedValue(jobCounts);
  };
  it('should return healthy status when all queues and workers are healthy', async () => {
    // Arrange
    setAllQueueMocks({ waiting: 5, active: 2, failed: 1, delayed: 0 });
    // Mock Redis heartbeat responses (all healthy): a heartbeat is a JSON
    // blob of { timestamp, pid, host } stored per worker.
    const recentTimestamp = new Date(Date.now() - 10000).toISOString();
    const heartbeatValue = JSON.stringify({
      timestamp: recentTimestamp,
      pid: 1234,
      host: 'test-host',
    });
    mockedRedisConnection.get.mockResolvedValue(heartbeatValue);
    // Act
    const result = await controller.queues();
    // Assert
    expect(result.success).toBe(true);
    if (result.success) {
      expect(result.data.status).toBe('healthy');
      expect(result.data.queues['flyer-processing']).toEqual({
        waiting: 5,
        active: 2,
        failed: 1,
        delayed: 0,
      });
      expect(result.data.workers['flyer-processing']).toEqual({
        alive: true,
        lastSeen: recentTimestamp,
        pid: 1234,
        host: 'test-host',
      });
    }
  });
  it('should return unhealthy status when a queue is unavailable', async () => {
    // Arrange: flyerQueue fails, others succeed
    mockedQueues.flyerQueue.getJobCounts.mockRejectedValue(new Error('Redis connection lost'));
    const healthyJobCounts = { waiting: 0, active: 0, failed: 0, delayed: 0 };
    mockedQueues.emailQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedQueues.analyticsQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedQueues.weeklyAnalyticsQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedQueues.cleanupQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedQueues.tokenCleanupQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedQueues.receiptQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedQueues.expiryAlertQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedQueues.barcodeQueue.getJobCounts.mockResolvedValue(healthyJobCounts);
    mockedRedisConnection.get.mockResolvedValue(null);
    // Act
    const result = await controller.queues();
    // Assert: the failing queue's error message is surfaced per-queue.
    expect(result.success).toBe(false);
    if (!result.success) {
      expect(result.error.message).toBe('One or more queues or workers unavailable');
      const details = result.error.details as {
        status: string;
        queues: Record<string, { error?: string }>;
      };
      expect(details.status).toBe('unhealthy');
      expect(details.queues['flyer-processing']).toEqual({ error: 'Redis connection lost' });
    }
  });
  it('should return unhealthy status when a worker heartbeat is stale', async () => {
    // Arrange
    const healthyJobCounts = { waiting: 0, active: 0, failed: 0, delayed: 0 };
    setAllQueueMocks(healthyJobCounts);
    // Stale heartbeat (> 60s ago)
    const staleTimestamp = new Date(Date.now() - 120000).toISOString();
    const staleHeartbeat = JSON.stringify({
      timestamp: staleTimestamp,
      pid: 1234,
      host: 'test-host',
    });
    // Only the first heartbeat lookup (flyer-processing) returns the stale
    // blob; the rest return null (missing).
    let callCount = 0;
    mockedRedisConnection.get.mockImplementation(() => {
      callCount++;
      return Promise.resolve(callCount === 1 ? staleHeartbeat : null);
    });
    // Act
    const result = await controller.queues();
    // Assert
    expect(result.success).toBe(false);
    if (!result.success) {
      const details = result.error.details as {
        status: string;
        workers: Record<string, { alive: boolean }>;
      };
      expect(details.status).toBe('unhealthy');
      expect(details.workers['flyer-processing']).toEqual({ alive: false });
    }
  });
  it('should return unhealthy status when worker heartbeat is missing', async () => {
    // Arrange
    const healthyJobCounts = { waiting: 0, active: 0, failed: 0, delayed: 0 };
    setAllQueueMocks(healthyJobCounts);
    mockedRedisConnection.get.mockResolvedValue(null);
    // Act
    const result = await controller.queues();
    // Assert
    expect(result.success).toBe(false);
    if (!result.success) {
      const details = result.error.details as {
        status: string;
        workers: Record<string, { alive: boolean }>;
      };
      expect(details.status).toBe('unhealthy');
      expect(details.workers['flyer-processing']).toEqual({ alive: false });
    }
  });
  it('should handle Redis connection errors gracefully for heartbeat checks', async () => {
    // Arrange
    const healthyJobCounts = { waiting: 0, active: 0, failed: 0, delayed: 0 };
    setAllQueueMocks(healthyJobCounts);
    mockedRedisConnection.get.mockRejectedValue(new Error('Redis connection lost'));
    // Act
    const result = await controller.queues();
    // Assert: Heartbeat fetch errors are treated as non-critical
    expect(result.success).toBe(true);
    if (result.success) {
      expect(result.data.status).toBe('healthy');
      expect(result.data.workers['flyer-processing']).toEqual({
        alive: false,
        error: 'Redis connection lost',
      });
    }
  });
});
// ==========================================================================
// BASE CONTROLLER INTEGRATION
// ==========================================================================
describe('BaseController integration', () => {
  it('should use success helper for consistent response format', async () => {
    const response = await controller.ping();

    expect(response).toHaveProperty('success', true);
    expect(response).toHaveProperty('data');
  });

  it('should use error helper for consistent error format', async () => {
    // Arrange: a missing table makes dbSchema() return an error envelope.
    mockedDbConnection.checkTablesExist.mockResolvedValue(['missing_table']);

    // Act
    const response = await controller.dbSchema();

    // Assert: error envelope always carries code + message.
    expect(response).toHaveProperty('success', false);
    expect(response).toHaveProperty('error');
    if (!response.success) {
      expect(response.error).toHaveProperty('code');
      expect(response.error).toHaveProperty('message');
    }
  });

  it('should set HTTP status codes via setStatus', async () => {
    // Arrange: make the startup probe fail.
    const pool = { query: vi.fn().mockRejectedValue(new Error('No database')) };
    mockedDbConnection.getPool.mockReturnValue(pool as never);

    // Act
    const response = await controller.startup();

    // Assert: the controller called setStatus(503) internally; here we can
    // only observe that the result is an error structure.
    expect(response.success).toBe(false);
  });
});
});

View File

@@ -0,0 +1,673 @@
// src/controllers/health.controller.ts
// ============================================================================
// HEALTH CONTROLLER
// ============================================================================
// Provides health check endpoints for monitoring the application state,
// implementing ADR-020: Health Checks and Liveness/Readiness Probes.
//
// This controller exposes endpoints for:
// - Liveness probe (/live) - Is the server process running?
// - Readiness probe (/ready) - Is the server ready to accept traffic?
// - Startup probe (/startup) - Has the server completed initialization?
// - Individual service health checks (db, redis, storage, queues)
// ============================================================================
import { Get, Route, Tags, SuccessResponse, Response } from 'tsoa';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse, ServiceHealth } from './types';
import { getPoolStatus, getPool, checkTablesExist } from '../services/db/connection.db';
import { connection as redisConnection } from '../services/queueService.server';
import {
flyerQueue,
emailQueue,
analyticsQueue,
weeklyAnalyticsQueue,
cleanupQueue,
tokenCleanupQueue,
receiptQueue,
expiryAlertQueue,
barcodeQueue,
} from '../services/queues.server';
import { getSimpleWeekAndYear } from '../utils/dateUtils';
import fs from 'node:fs/promises';
// ============================================================================
// RESPONSE TYPES
// ============================================================================
// Types for health check responses that will appear in the OpenAPI spec.
// ============================================================================
/**
 * Simple ping response.
 */
interface PingResponse {
  /** Human-readable reply; the controller always returns 'pong'. */
  message: string;
}
/**
 * Liveness probe response.
 */
interface LivenessResponse {
  /** Always 'ok' — liveness has no degraded state. */
  status: 'ok';
  /** ISO-8601 timestamp of when the probe was answered. */
  timestamp: string;
}
/**
 * Readiness probe response with service status.
 */
interface ReadinessResponse {
  /** Overall status derived from the individual service checks. */
  status: 'healthy' | 'degraded' | 'unhealthy';
  /** ISO-8601 timestamp of when the probe was answered. */
  timestamp: string;
  /** Process uptime in seconds (from process.uptime()). */
  uptime: number;
  /** Per-dependency health; database and redis are the critical ones. */
  services: {
    database: ServiceHealth;
    redis: ServiceHealth;
    storage: ServiceHealth;
  };
}
/**
 * Startup probe response.
 */
interface StartupResponse {
  /** 'started' once the database is reachable; 'starting' otherwise. */
  status: 'started' | 'starting';
  /** ISO-8601 timestamp of when the probe was answered. */
  timestamp: string;
  /** Database health — the only dependency checked at startup. */
  database: ServiceHealth;
}
/**
 * Database schema check response.
 */
interface DbSchemaResponse {
  /** Confirmation message when all required tables exist. */
  message: string;
}
/**
 * Storage check response.
 */
interface StorageResponse {
  /** Confirmation message when the storage path is writable. */
  message: string;
}
/**
 * Database pool status response.
 */
interface DbPoolResponse {
  /** Human-readable summary of the pool counters below. */
  message: string;
  /** Total connections currently in the pool. */
  totalCount: number;
  /** Connections sitting idle in the pool. */
  idleCount: number;
  /** Clients waiting for a free connection. */
  waitingCount: number;
}
/**
 * Server time response.
 */
interface TimeResponse {
  /** Current server time as an ISO-8601 string. */
  currentTime: string;
  /** Year component from getSimpleWeekAndYear. */
  year: number;
  /** Week number component from getSimpleWeekAndYear. */
  week: number;
}
/**
 * Redis health check response.
 */
interface RedisHealthResponse {
  /** Confirmation message when Redis answers PING with PONG. */
  message: string;
}
/**
 * Queue job counts.
 */
interface QueueJobCounts {
  waiting: number;
  active: number;
  failed: number;
  delayed: number;
}
/**
 * Worker heartbeat status.
 */
interface WorkerHeartbeat {
  /** True when the last heartbeat was seen less than 60 seconds ago. */
  alive: boolean;
  /** ISO-8601 timestamp of the last heartbeat, when one was found. */
  lastSeen?: string;
  /** Process id reported by the worker's heartbeat. */
  pid?: number;
  /** Hostname reported by the worker's heartbeat. */
  host?: string;
  /** Populated when the heartbeat lookup itself failed. */
  error?: string;
}
/**
 * Queue health response with metrics and worker heartbeats.
 */
interface QueuesHealthResponse {
  status: 'healthy' | 'unhealthy';
  timestamp: string;
  /** Per-queue job counts, or an error message if the queue was unreachable. */
  queues: Record<string, QueueJobCounts | { error: string }>;
  /** Per-worker heartbeat status keyed by queue name. */
  workers: Record<string, WorkerHeartbeat>;
}
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
// Reusable functions for checking service health.
// ============================================================================
/**
 * Checks database connectivity with timing.
 *
 * Runs a trivial `SELECT 1` against the pool and then inspects the pool
 * counters. The check reports 'degraded' (rather than 'unhealthy') when
 * more than 3 clients are waiting for a connection, since queries still
 * succeed in that state.
 *
 * @returns ServiceHealth object with database status, latency in ms, and
 *          pool connection counts in `details`
 */
async function checkDatabase(): Promise<ServiceHealth> {
  const start = Date.now();
  try {
    const pool = getPool();
    await pool.query('SELECT 1');
    const latency = Date.now() - start;
    const poolStatus = getPoolStatus();
    // Consider degraded if waiting connections > 3
    const status = poolStatus.waitingCount > 3 ? 'degraded' : 'healthy';
    // Declared type instead of an `as` assertion so the compiler actually
    // checks the literal against the expected shape.
    const details: Record<string, unknown> = {
      totalConnections: poolStatus.totalCount,
      idleConnections: poolStatus.idleCount,
      waitingConnections: poolStatus.waitingCount,
    };
    return { status, latency, details };
  } catch (error) {
    return {
      status: 'unhealthy',
      latency: Date.now() - start,
      message: error instanceof Error ? error.message : 'Database connection failed',
    };
  }
}
/**
 * Checks Redis connectivity with timing.
 *
 * Issues a PING and expects the literal 'PONG' back; any other reply, or a
 * thrown connection error, is reported as unhealthy.
 *
 * @returns ServiceHealth object with Redis status and latency
 */
async function checkRedis(): Promise<ServiceHealth> {
  const startedAt = Date.now();
  const elapsed = () => Date.now() - startedAt;
  try {
    const reply = await redisConnection.ping();
    if (reply !== 'PONG') {
      return {
        status: 'unhealthy',
        latency: elapsed(),
        message: `Unexpected ping response: ${reply}`,
      };
    }
    return { status: 'healthy', latency: elapsed() };
  } catch (error) {
    const message = error instanceof Error ? error.message : 'Redis connection failed';
    return { status: 'unhealthy', latency: elapsed(), message };
  }
}
/**
 * Checks storage accessibility with timing.
 *
 * Verifies that the flyer-image directory (STORAGE_PATH env var, falling
 * back to the production default path) is writable by this process.
 *
 * @returns ServiceHealth object with storage status and latency
 */
async function checkStorage(): Promise<ServiceHealth> {
  const storagePath =
    process.env.STORAGE_PATH || '/var/www/flyer-crawler.projectium.com/flyer-images';
  const startedAt = Date.now();
  let writable = true;
  try {
    await fs.access(storagePath, fs.constants.W_OK);
  } catch {
    writable = false;
  }
  const latency = Date.now() - startedAt;
  if (writable) {
    return { status: 'healthy', latency, details: { path: storagePath } };
  }
  return {
    status: 'unhealthy',
    latency,
    message: `Storage not accessible: ${storagePath}`,
  };
}
// ============================================================================
// HEALTH CONTROLLER
// ============================================================================
/**
* Health check controller for monitoring application state.
*
* Provides endpoints for Kubernetes liveness/readiness/startup probes
* and individual service health checks per ADR-020.
*/
@Route('health')
@Tags('Health')
export class HealthController extends BaseController {
// ==========================================================================
// BASIC HEALTH CHECKS
// ==========================================================================
/**
* Simple ping endpoint.
*
* Returns a pong response to verify server is responsive.
* Use this for basic connectivity checks.
*
* @summary Simple ping endpoint
* @returns A pong response confirming the server is alive
*/
@Get('ping')
@SuccessResponse(200, 'Server is responsive')
public async ping(): Promise<SuccessResponseType<PingResponse>> {
return this.success({ message: 'pong' });
}
// ==========================================================================
// KUBERNETES PROBES (ADR-020)
// ==========================================================================
/**
* Liveness probe.
*
* Returns 200 OK if the server process is running.
* If this fails, the orchestrator should restart the container.
* This endpoint is intentionally simple and has no external dependencies.
*
* @summary Liveness probe
* @returns Status indicating the server process is alive
*/
@Get('live')
@SuccessResponse(200, 'Server process is alive')
public async live(): Promise<SuccessResponseType<LivenessResponse>> {
return this.success({
status: 'ok',
timestamp: new Date().toISOString(),
});
}
/**
* Readiness probe.
*
* Returns 200 OK if the server is ready to accept traffic.
* Checks all critical dependencies (database, Redis, storage).
* If this fails, the orchestrator should remove the container from the load balancer.
*
* @summary Readiness probe
* @returns Service health status for all critical dependencies
*/
@Get('ready')
@SuccessResponse(200, 'Server is ready to accept traffic')
@Response<ErrorResponse>(503, 'Service is unhealthy and should not receive traffic')
public async ready(): Promise<SuccessResponseType<ReadinessResponse> | ErrorResponse> {
// Check all services in parallel for speed
const [database, redis, storage] = await Promise.all([
checkDatabase(),
checkRedis(),
checkStorage(),
]);
// Determine overall status
// - 'healthy' if all critical services (db, redis) are healthy
// - 'degraded' if any service is degraded but none unhealthy
// - 'unhealthy' if any critical service is unhealthy
const criticalServices = [database, redis];
const allServices = [database, redis, storage];
let overallStatus: 'healthy' | 'degraded' | 'unhealthy' = 'healthy';
if (criticalServices.some((s) => s.status === 'unhealthy')) {
overallStatus = 'unhealthy';
} else if (allServices.some((s) => s.status === 'degraded')) {
overallStatus = 'degraded';
}
const response: ReadinessResponse = {
status: overallStatus,
timestamp: new Date().toISOString(),
uptime: process.uptime(),
services: {
database,
redis,
storage,
},
};
// Return appropriate HTTP status code
// 200 = healthy or degraded (can still handle traffic)
// 503 = unhealthy (should not receive traffic)
if (overallStatus === 'unhealthy') {
this.setStatus(503);
return this.error(this.ErrorCode.SERVICE_UNAVAILABLE, 'Service unhealthy', response);
}
return this.success(response);
}
/**
* Startup probe.
*
* Similar to readiness but used during container startup.
* The orchestrator will not send liveness/readiness probes until this succeeds.
* This allows for longer initialization times without triggering restarts.
*
* @summary Startup probe for container orchestration
* @returns Startup status with database health
*/
@Get('startup')
@SuccessResponse(200, 'Server has started successfully')
@Response<ErrorResponse>(503, 'Server is still starting')
public async startup(): Promise<SuccessResponseType<StartupResponse> | ErrorResponse> {
// For startup, we only check database connectivity
// Redis and storage can be checked later in readiness
const database = await checkDatabase();
if (database.status === 'unhealthy') {
this.setStatus(503);
return this.error(this.ErrorCode.SERVICE_UNAVAILABLE, 'Waiting for database connection', {
status: 'starting',
database,
});
}
return this.success({
status: 'started',
timestamp: new Date().toISOString(),
database,
});
}
// ==========================================================================
// INDIVIDUAL SERVICE HEALTH CHECKS
// ==========================================================================
/**
* Database schema check.
*
* Checks if all essential database tables exist.
* This is a critical check to ensure the database schema is correctly set up.
*
* @summary Check database schema
* @returns Message confirming all required tables exist
*/
@Get('db-schema')
@SuccessResponse(200, 'All required database tables exist')
@Response<ErrorResponse>(500, 'Database schema check failed')
public async dbSchema(): Promise<SuccessResponseType<DbSchemaResponse> | ErrorResponse> {
const requiredTables = ['users', 'profiles', 'flyers', 'flyer_items', 'stores'];
const missingTables = await checkTablesExist(requiredTables);
if (missingTables.length > 0) {
this.setStatus(500);
return this.error(
this.ErrorCode.INTERNAL_ERROR,
`Database schema check failed. Missing tables: ${missingTables.join(', ')}.`,
);
}
return this.success({ message: 'All required database tables exist.' });
}
/**
* Storage health check.
*
* Verifies that the application's file storage path is accessible and writable.
* This is important for features like file uploads.
*
* @summary Check storage accessibility
* @returns Message confirming storage is accessible
*/
@Get('storage')
@SuccessResponse(200, 'Storage is accessible and writable')
@Response<ErrorResponse>(500, 'Storage check failed')
public async storage(): Promise<SuccessResponseType<StorageResponse> | ErrorResponse> {
const storagePath =
process.env.STORAGE_PATH || '/var/www/flyer-crawler.projectium.com/flyer-images';
try {
await fs.access(storagePath, fs.constants.W_OK);
return this.success({
message: `Storage directory '${storagePath}' is accessible and writable.`,
});
} catch {
this.setStatus(500);
return this.error(
this.ErrorCode.INTERNAL_ERROR,
`Storage check failed. Ensure the directory '${storagePath}' exists and is writable by the application.`,
);
}
}
/**
* Database pool status check.
*
* Checks the status of the database connection pool.
* This helps diagnose issues related to database connection saturation.
*
* @summary Check database connection pool status
* @returns Pool status with connection counts
*/
@Get('db-pool')
@SuccessResponse(200, 'Database pool is healthy')
@Response<ErrorResponse>(500, 'Database pool may be under stress')
public async dbPool(): Promise<SuccessResponseType<DbPoolResponse> | ErrorResponse> {
const status = getPoolStatus();
const isHealthy = status.waitingCount < 5;
const message = `Pool Status: ${status.totalCount} total, ${status.idleCount} idle, ${status.waitingCount} waiting.`;
if (isHealthy) {
return this.success({
message,
totalCount: status.totalCount,
idleCount: status.idleCount,
waitingCount: status.waitingCount,
});
}
this.setStatus(500);
return this.error(
this.ErrorCode.INTERNAL_ERROR,
`Pool may be under stress. ${message}`,
status,
);
}
/**
* Server time check.
*
* Returns the server's current time, year, and week number.
* Useful for verifying time synchronization and for features dependent on week numbers.
*
* @summary Get server time and week number
* @returns Current server time with year and week number
*/
@Get('time')
@SuccessResponse(200, 'Server time retrieved')
public async time(): Promise<SuccessResponseType<TimeResponse>> {
const now = new Date();
const { year, week } = getSimpleWeekAndYear(now);
return this.success({
currentTime: now.toISOString(),
year,
week,
});
}
/**
* Redis health check.
*
* Checks the health of the Redis connection.
*
* @summary Check Redis connectivity
* @returns Message confirming Redis is healthy
*/
@Get('redis')
@SuccessResponse(200, 'Redis connection is healthy')
@Response<ErrorResponse>(500, 'Redis health check failed')
public async redis(): Promise<SuccessResponseType<RedisHealthResponse> | ErrorResponse> {
try {
const reply = await redisConnection.ping();
if (reply === 'PONG') {
return this.success({ message: 'Redis connection is healthy.' });
}
throw new Error(`Unexpected Redis ping response: ${reply}`);
} catch (error) {
this.setStatus(500);
const message = error instanceof Error ? error.message : 'Redis health check failed';
return this.error(this.ErrorCode.INTERNAL_ERROR, message);
}
}
// ==========================================================================
// QUEUE HEALTH MONITORING (ADR-053)
// ==========================================================================
/**
* Queue health and metrics with worker heartbeats.
*
* Returns job counts for all BullMQ queues and worker heartbeat status.
* Use this endpoint to monitor queue depths and detect stuck/frozen workers.
* Implements ADR-053: Worker Health Checks and Stalled Job Monitoring.
*
* @summary Queue health and metrics
* @returns Queue metrics and worker heartbeat status
*/
@Get('queues')
@SuccessResponse(200, 'Queue metrics retrieved successfully')
@Response<ErrorResponse>(503, 'One or more queues or workers unavailable')
public async queues(): Promise<SuccessResponseType<QueuesHealthResponse> | ErrorResponse> {
// Define all queues to monitor
const queues = [
{ name: 'flyer-processing', queue: flyerQueue },
{ name: 'email-sending', queue: emailQueue },
{ name: 'analytics-reporting', queue: analyticsQueue },
{ name: 'weekly-analytics-reporting', queue: weeklyAnalyticsQueue },
{ name: 'file-cleanup', queue: cleanupQueue },
{ name: 'token-cleanup', queue: tokenCleanupQueue },
{ name: 'receipt-processing', queue: receiptQueue },
{ name: 'expiry-alerts', queue: expiryAlertQueue },
{ name: 'barcode-detection', queue: barcodeQueue },
];
// Fetch job counts for all queues in parallel
const queueMetrics = await Promise.all(
queues.map(async ({ name, queue }) => {
try {
const counts = await queue.getJobCounts();
return {
name,
counts: {
waiting: counts.waiting || 0,
active: counts.active || 0,
failed: counts.failed || 0,
delayed: counts.delayed || 0,
},
};
} catch (error) {
// If individual queue fails, return error state
return {
name,
error: error instanceof Error ? error.message : 'Unknown error',
};
}
}),
);
// Fetch worker heartbeats in parallel
const workerNames = queues.map((q) => q.name);
const workerHeartbeats = await Promise.all(
workerNames.map(async (name) => {
try {
const key = `worker:heartbeat:${name}`;
const value = await redisConnection.get(key);
if (!value) {
return { name, alive: false };
}
const heartbeat = JSON.parse(value) as {
timestamp: string;
pid: number;
host: string;
};
const lastSeenMs = new Date(heartbeat.timestamp).getTime();
const nowMs = Date.now();
const ageSeconds = (nowMs - lastSeenMs) / 1000;
// Consider alive if last heartbeat < 60 seconds ago
const alive = ageSeconds < 60;
return {
name,
alive,
lastSeen: heartbeat.timestamp,
pid: heartbeat.pid,
host: heartbeat.host,
};
} catch (error) {
// If heartbeat check fails, mark as unknown
return {
name,
alive: false,
error: error instanceof Error ? error.message : 'Unknown error',
};
}
}),
);
// Build response objects
const queuesData: Record<string, QueueJobCounts | { error: string }> = {};
const workersData: Record<string, WorkerHeartbeat> = {};
let hasErrors = false;
for (const metric of queueMetrics) {
if ('error' in metric && metric.error) {
queuesData[metric.name] = { error: metric.error };
hasErrors = true;
} else if ('counts' in metric && metric.counts) {
queuesData[metric.name] = metric.counts;
}
}
for (const heartbeat of workerHeartbeats) {
if ('error' in heartbeat && heartbeat.error) {
workersData[heartbeat.name] = { alive: false, error: heartbeat.error };
} else if (!heartbeat.alive) {
workersData[heartbeat.name] = { alive: false };
hasErrors = true;
} else {
workersData[heartbeat.name] = {
alive: heartbeat.alive,
lastSeen: heartbeat.lastSeen,
pid: heartbeat.pid,
host: heartbeat.host,
};
}
}
const response: QueuesHealthResponse = {
status: hasErrors ? 'unhealthy' : 'healthy',
timestamp: new Date().toISOString(),
queues: queuesData,
workers: workersData,
};
if (hasErrors) {
this.setStatus(503);
return this.error(
this.ErrorCode.SERVICE_UNAVAILABLE,
'One or more queues or workers unavailable',
response,
);
}
return this.success(response);
}
}

View File

@@ -0,0 +1,616 @@
// src/controllers/inventory.controller.test.ts
// ============================================================================
// INVENTORY CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the InventoryController class. These tests verify controller
// logic in isolation by mocking the expiry service.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class.
// NOTE: vitest hoists vi.mock() calls above the module imports below, so
// these factories run before inventory.controller.ts is loaded.
vi.mock('tsoa', () => ({
  // Minimal stand-in for tsoa's Controller base: it only records the HTTP
  // status that the controller under test sets via setStatus().
  Controller: class Controller {
    protected setStatus(status: number): void {
      this._status = status;
    }
    private _status = 200;
  },
  // All tsoa decorators become no-op decorator factories.
  Get: () => () => {},
  Post: () => () => {},
  Put: () => () => {},
  Delete: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Security: () => () => {},
  Path: () => () => {},
  Query: () => () => {},
  Body: () => () => {},
  Request: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock expiry service — every exported function becomes a vi.fn() stub that
// individual tests configure with mockResolvedValue.
vi.mock('../services/expiryService.server', () => ({
  getInventory: vi.fn(),
  addInventoryItem: vi.fn(),
  getExpiringItemsGrouped: vi.fn(),
  getExpiringItems: vi.fn(),
  getExpiredItems: vi.fn(),
  getAlertSettings: vi.fn(),
  updateAlertSettings: vi.fn(),
  getRecipeSuggestionsForExpiringItems: vi.fn(),
  getInventoryItemById: vi.fn(),
  updateInventoryItem: vi.fn(),
  deleteInventoryItem: vi.fn(),
  markItemConsumed: vi.fn(),
}));
// Import mocked modules after mock definitions
import * as expiryService from '../services/expiryService.server';
import { InventoryController } from './inventory.controller';
// Cast mocked modules for type-safe access to mockResolvedValue etc.
const mockedExpiryService = expiryService as Mocked<typeof expiryService>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Creates a mock Express request object with an authenticated user.
 *
 * Supplies empty body/params/query, a default user profile, and a stubbed
 * pino-style logger so controller logging calls can be asserted. Any field
 * can be replaced via `overrides`.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLogger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const base = {
    body: {},
    params: {},
    query: {},
    user: createMockUserProfile(),
    log: stubLogger,
  };
  return { ...base, ...overrides } as unknown as ExpressRequest;
}
/**
 * Creates a mock user profile for testing.
 *
 * Shape matches what the auth middleware attaches to the request: a profile
 * wrapper with the nested account record under `user`.
 */
function createMockUserProfile() {
  const account = {
    user_id: 'test-user-id',
    email: 'test@example.com',
  };
  return {
    full_name: 'Test User',
    role: 'user' as const,
    user: account,
  };
}
/**
 * Creates a mock inventory item.
 *
 * Returns a fully-populated item; individual fields can be replaced via
 * `overrides`, which are spread last and therefore win.
 */
function createMockInventoryItem(overrides: Record<string, unknown> = {}) {
  const defaults = {
    inventory_id: 1,
    user_id: 'test-user-id',
    item_name: 'Milk',
    quantity: 1,
    unit: 'L',
    purchase_date: '2024-01-01',
    expiry_date: '2024-01-15',
    source: 'manual_entry' as const,
    location: 'refrigerator' as const,
    is_consumed: false,
    created_at: '2024-01-01T00:00:00.000Z',
    updated_at: '2024-01-01T00:00:00.000Z',
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('InventoryController', () => {
  let controller: InventoryController;
  beforeEach(() => {
    // Reset mock call history so each test's assertions start from zero.
    vi.clearAllMocks();
    controller = new InventoryController();
  });
  afterEach(() => {
    // Restore real timers in case a test switched to fake timers.
    vi.useRealTimers();
  });
  // ==========================================================================
  // INVENTORY ITEM ENDPOINTS
  // ==========================================================================
  describe('getInventory()', () => {
    it('should return inventory items with default pagination', async () => {
      // Arrange
      const mockResult = {
        items: [createMockInventoryItem()],
        total: 1,
      };
      const request = createMockRequest();
      mockedExpiryService.getInventory.mockResolvedValue(mockResult);
      // Act
      const result = await controller.getInventory(request);
      // Assert: defaults of limit=50 / offset=0 are applied by the controller
      expect(result.success).toBe(true);
      expect(mockedExpiryService.getInventory).toHaveBeenCalledWith(
        expect.objectContaining({
          user_id: 'test-user-id',
          limit: 50,
          offset: 0,
        }),
        expect.anything(),
      );
    });
    it('should cap limit at 100', async () => {
      // Arrange
      const mockResult = { items: [], total: 0 };
      const request = createMockRequest();
      mockedExpiryService.getInventory.mockResolvedValue(mockResult);
      // Act: request a limit of 200, above the maximum
      await controller.getInventory(request, 200);
      // Assert
      expect(mockedExpiryService.getInventory).toHaveBeenCalledWith(
        expect.objectContaining({ limit: 100 }),
        expect.anything(),
      );
    });
    it('should support filtering by location', async () => {
      // Arrange
      const mockResult = { items: [], total: 0 };
      const request = createMockRequest();
      mockedExpiryService.getInventory.mockResolvedValue(mockResult);
      // Act: positional args mirror the controller's query-parameter order
      await controller.getInventory(request, 50, 0, 'refrigerator');
      // Assert
      expect(mockedExpiryService.getInventory).toHaveBeenCalledWith(
        expect.objectContaining({ location: 'refrigerator' }),
        expect.anything(),
      );
    });
    it('should support search parameter', async () => {
      // Arrange
      const mockResult = { items: [], total: 0 };
      const request = createMockRequest();
      mockedExpiryService.getInventory.mockResolvedValue(mockResult);
      // Act: search is the 8th positional argument; intermediate query
      // parameters are left undefined
      await controller.getInventory(
        request,
        50,
        0,
        undefined,
        undefined,
        undefined,
        undefined,
        'milk',
      );
      // Assert
      expect(mockedExpiryService.getInventory).toHaveBeenCalledWith(
        expect.objectContaining({ search: 'milk' }),
        expect.anything(),
      );
    });
  });
  describe('addInventoryItem()', () => {
    it('should add an inventory item', async () => {
      // Arrange
      const mockItem = createMockInventoryItem();
      const request = createMockRequest();
      mockedExpiryService.addInventoryItem.mockResolvedValue(mockItem);
      // Act
      const result = await controller.addInventoryItem(request, {
        item_name: 'Milk',
        source: 'manual_entry',
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.item_name).toBe('Milk');
      }
    });
    it('should log item addition', async () => {
      // Arrange: inject a logger we can assert on
      const mockItem = createMockInventoryItem();
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      mockedExpiryService.addInventoryItem.mockResolvedValue(mockItem);
      // Act
      await controller.addInventoryItem(request, {
        item_name: 'Milk',
        source: 'manual_entry',
      });
      // Assert: pino-style call — structured context object first, message second
      expect(mockLog.info).toHaveBeenCalledWith(
        { userId: 'test-user-id', itemName: 'Milk' },
        'Adding item to inventory',
      );
    });
  });
  // ==========================================================================
  // EXPIRING ITEMS ENDPOINTS
  // ==========================================================================
  describe('getExpiringSummary()', () => {
    it('should return expiring items grouped by urgency', async () => {
      // Arrange
      const mockResult = {
        expired: [],
        expiring_today: [],
        expiring_this_week: [createMockInventoryItem()],
        expiring_this_month: [],
      };
      const request = createMockRequest();
      mockedExpiryService.getExpiringItemsGrouped.mockResolvedValue(mockResult);
      // Act
      const result = await controller.getExpiringSummary(request);
      // Assert
      expect(result.success).toBe(true);
      expect(mockedExpiryService.getExpiringItemsGrouped).toHaveBeenCalledWith(
        'test-user-id',
        expect.anything(),
      );
    });
  });
  describe('getExpiringItems()', () => {
    it('should return expiring items with default 7 days', async () => {
      // Arrange
      const mockItems = [createMockInventoryItem()];
      const request = createMockRequest();
      mockedExpiryService.getExpiringItems.mockResolvedValue(mockItems);
      // Act
      const result = await controller.getExpiringItems(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.total).toBe(1);
      }
      expect(mockedExpiryService.getExpiringItems).toHaveBeenCalledWith(
        'test-user-id',
        7,
        expect.anything(),
      );
    });
    it('should cap days at 90', async () => {
      // Arrange
      const request = createMockRequest();
      mockedExpiryService.getExpiringItems.mockResolvedValue([]);
      // Act: 200 days requested, above the maximum
      await controller.getExpiringItems(request, 200);
      // Assert
      expect(mockedExpiryService.getExpiringItems).toHaveBeenCalledWith(
        'test-user-id',
        90,
        expect.anything(),
      );
    });
    it('should floor days at 1', async () => {
      // Arrange
      const request = createMockRequest();
      mockedExpiryService.getExpiringItems.mockResolvedValue([]);
      // Act: 0 days requested, below the minimum
      await controller.getExpiringItems(request, 0);
      // Assert
      expect(mockedExpiryService.getExpiringItems).toHaveBeenCalledWith(
        'test-user-id',
        1,
        expect.anything(),
      );
    });
  });
  describe('getExpiredItems()', () => {
    it('should return expired items', async () => {
      // Arrange
      const mockItems = [createMockInventoryItem({ expiry_date: '2023-12-01' })];
      const request = createMockRequest();
      mockedExpiryService.getExpiredItems.mockResolvedValue(mockItems);
      // Act
      const result = await controller.getExpiredItems(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.items).toHaveLength(1);
      }
    });
  });
  // ==========================================================================
  // ALERT SETTINGS ENDPOINTS
  // ==========================================================================
  describe('getAlertSettings()', () => {
    it('should return alert settings', async () => {
      // Arrange
      const mockSettings = [
        { alert_method: 'email', days_before_expiry: 3, is_enabled: true },
        { alert_method: 'push', days_before_expiry: 1, is_enabled: true },
      ];
      const request = createMockRequest();
      mockedExpiryService.getAlertSettings.mockResolvedValue(mockSettings);
      // Act
      const result = await controller.getAlertSettings(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
    });
  });
  describe('updateAlertSettings()', () => {
    it('should update alert settings', async () => {
      // Arrange
      const mockUpdated = { alert_method: 'email', days_before_expiry: 5, is_enabled: true };
      const request = createMockRequest();
      mockedExpiryService.updateAlertSettings.mockResolvedValue(mockUpdated);
      // Act: alert method comes from the path, the new value from the body
      const result = await controller.updateAlertSettings('email', request, {
        days_before_expiry: 5,
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.days_before_expiry).toBe(5);
      }
    });
  });
  // ==========================================================================
  // RECIPE SUGGESTIONS
  // ==========================================================================
  describe('getRecipeSuggestions()', () => {
    it('should return recipe suggestions for expiring items', async () => {
      // Arrange
      const mockResult = {
        recipes: [{ recipe_id: 1, name: 'Test Recipe' }],
        total: 1,
        considered_items: [createMockInventoryItem()],
      };
      const request = createMockRequest();
      mockedExpiryService.getRecipeSuggestionsForExpiringItems.mockResolvedValue(mockResult);
      // Act
      const result = await controller.getRecipeSuggestions(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.recipes).toHaveLength(1);
      }
    });
    it('should normalize pagination parameters', async () => {
      // Arrange
      const mockResult = { recipes: [], total: 0, considered_items: [] };
      const request = createMockRequest();
      mockedExpiryService.getRecipeSuggestionsForExpiringItems.mockResolvedValue(mockResult);
      // Act: all three numeric params (days, limit, offset) set to 100
      await controller.getRecipeSuggestions(request, 100, 100, 100);
      // Assert
      expect(mockedExpiryService.getRecipeSuggestionsForExpiringItems).toHaveBeenCalledWith(
        'test-user-id',
        90, // days capped at 90
        expect.anything(),
        { limit: 50, offset: 100 }, // limit capped at 50
      );
    });
  });
  // ==========================================================================
  // INVENTORY ITEM BY ID ENDPOINTS
  // ==========================================================================
  describe('getInventoryItemById()', () => {
    it('should return an inventory item by ID', async () => {
      // Arrange
      const mockItem = createMockInventoryItem();
      const request = createMockRequest();
      mockedExpiryService.getInventoryItemById.mockResolvedValue(mockItem);
      // Act
      const result = await controller.getInventoryItemById(1, request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.inventory_id).toBe(1);
      }
    });
  });
  describe('updateInventoryItem()', () => {
    it('should update an inventory item', async () => {
      // Arrange
      const mockItem = createMockInventoryItem({ quantity: 2 });
      const request = createMockRequest();
      mockedExpiryService.updateInventoryItem.mockResolvedValue(mockItem);
      // Act
      const result = await controller.updateInventoryItem(1, request, { quantity: 2 });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.quantity).toBe(2);
      }
    });
    it('should reject update with no fields provided', async () => {
      // Arrange
      const request = createMockRequest();
      // Act & Assert: an empty patch body is rejected before the service runs
      await expect(controller.updateInventoryItem(1, request, {})).rejects.toThrow(
        'At least one field to update must be provided.',
      );
    });
  });
  describe('deleteInventoryItem()', () => {
    it('should delete an inventory item', async () => {
      // Arrange
      const request = createMockRequest();
      mockedExpiryService.deleteInventoryItem.mockResolvedValue(undefined);
      // Act
      const result = await controller.deleteInventoryItem(1, request);
      // Assert: 204-style endpoint — no body returned
      expect(result).toBeUndefined();
      expect(mockedExpiryService.deleteInventoryItem).toHaveBeenCalledWith(
        1,
        'test-user-id',
        expect.anything(),
      );
    });
  });
  describe('markItemConsumed()', () => {
    it('should mark an item as consumed', async () => {
      // Arrange
      const request = createMockRequest();
      mockedExpiryService.markItemConsumed.mockResolvedValue(undefined);
      // Act
      const result = await controller.markItemConsumed(1, request);
      // Assert: 204-style endpoint — no body returned
      expect(result).toBeUndefined();
      expect(mockedExpiryService.markItemConsumed).toHaveBeenCalledWith(
        1,
        'test-user-id',
        expect.anything(),
      );
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const mockItem = createMockInventoryItem();
      const request = createMockRequest();
      mockedExpiryService.getInventoryItemById.mockResolvedValue(mockItem);
      // Act
      const result = await controller.getInventoryItemById(1, request);
      // Assert: ADR-028 envelope — { success: true, data: ... }
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
    it('should use created helper for 201 responses', async () => {
      // Arrange
      const mockItem = createMockInventoryItem();
      const request = createMockRequest();
      mockedExpiryService.addInventoryItem.mockResolvedValue(mockItem);
      // Act
      const result = await controller.addInventoryItem(request, {
        item_name: 'Test',
        source: 'manual_entry',
      });
      // Assert
      expect(result.success).toBe(true);
    });
    it('should use noContent helper for 204 responses', async () => {
      // Arrange
      const request = createMockRequest();
      mockedExpiryService.deleteInventoryItem.mockResolvedValue(undefined);
      // Act
      const result = await controller.deleteInventoryItem(1, request);
      // Assert
      expect(result).toBeUndefined();
    });
  });
});

View File

@@ -0,0 +1,535 @@
// src/controllers/inventory.controller.ts
// ============================================================================
// INVENTORY CONTROLLER
// ============================================================================
// Provides endpoints for managing pantry inventory, expiry tracking, and alerts.
// All endpoints require authentication.
//
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import {
Get,
Post,
Put,
Delete,
Route,
Tags,
Security,
Body,
Path,
Query,
Request,
SuccessResponse,
Response,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import * as expiryService from '../services/expiryService.server';
import type { UserProfile } from '../types';
import type {
UserInventoryItem,
StorageLocation,
InventorySource,
ExpiryAlertSettings,
ExpiringItemsResponse,
AlertMethod,
} from '../types/expiry';
// ============================================================================
// DTO TYPES FOR OPENAPI
// ============================================================================
/**
 * Request body for adding an inventory item.
 * Consumed by `POST /inventory` (InventoryController.addInventoryItem).
 */
interface AddInventoryItemRequest {
  /** Link to products table */
  product_id?: number;
  /** Link to master grocery items */
  master_item_id?: number;
  /**
   * Item name (required)
   * @minLength 1
   * @maxLength 255
   */
  item_name: string;
  /** Quantity of item (default: 1) */
  quantity?: number;
  /**
   * Unit of measurement, e.g. "kg" or "cans"
   * @maxLength 50
   */
  unit?: string;
  /** When the item was purchased (YYYY-MM-DD format) */
  purchase_date?: string;
  /** Expected expiry date (YYYY-MM-DD format) */
  expiry_date?: string;
  /** How the item is being added */
  source: InventorySource;
  /** Where the item will be stored */
  location?: StorageLocation;
  /**
   * User notes
   * @maxLength 500
   */
  notes?: string;
}
/**
 * Request body for updating an inventory item.
 * Consumed by `PUT /inventory/{inventoryId}`.
 * At least one field must be provided; the controller rejects an empty body
 * with a 400 before calling the service.
 */
interface UpdateInventoryItemRequest {
  /** Updated quantity */
  quantity?: number;
  /**
   * Updated unit
   * @maxLength 50
   */
  unit?: string;
  /** Updated expiry date (YYYY-MM-DD format) */
  expiry_date?: string;
  /** Updated storage location */
  location?: StorageLocation;
  /**
   * Updated notes
   * @maxLength 500
   */
  notes?: string;
  /** Mark as consumed */
  is_consumed?: boolean;
}
/**
 * Request body for updating alert settings.
 * Consumed by `PUT /inventory/alerts/{alertMethod}`.
 */
interface UpdateAlertSettingsRequest {
  /**
   * Days before expiry to send alert
   * @minimum 1
   * @maximum 30
   */
  days_before_expiry?: number;
  /** Whether this alert type is enabled */
  is_enabled?: boolean;
}
/**
 * Response for expiring items list.
 * Returned by `GET /inventory/expiring` and `GET /inventory/expired`.
 */
interface ExpiringItemsListResponse {
  /** Array of expiring items */
  items: UserInventoryItem[];
  /** Total count of items (equals items.length — these endpoints are not paginated) */
  total: number;
}
/**
 * Response for recipe suggestions.
 * Returned by `GET /inventory/recipes/suggestions`.
 */
interface RecipeSuggestionsResponse {
  /** Recipes that use expiring items (shape defined by the recipe service) */
  recipes: unknown[];
  /** Total count for pagination */
  total: number;
  /** Items considered for matching */
  considered_items: UserInventoryItem[];
}
// ============================================================================
// INVENTORY CONTROLLER
// ============================================================================
/**
* Controller for managing pantry inventory and expiry tracking.
*
* All endpoints require JWT authentication. Users can only access
* their own inventory - the user ID is extracted from the JWT token.
*/
@Route('inventory')
@Tags('Inventory')
@Security('bearerAuth')
export class InventoryController extends BaseController {
// ==========================================================================
// INVENTORY ITEM ENDPOINTS
// ==========================================================================
/**
* Get inventory items.
*
* Retrieves the user's pantry inventory with optional filtering and pagination.
*
* @summary Get inventory items
* @param request Express request with authenticated user
* @param limit Maximum number of items to return (default: 50, max: 100)
* @param offset Number of items to skip for pagination (default: 0)
* @param location Filter by storage location
* @param is_consumed Filter by consumed status
* @param expiring_within_days Filter items expiring within N days
* @param category_id Filter by category ID
* @param search Search by item name
* @param sort_by Sort field
* @param sort_order Sort direction
* @returns List of inventory items
*/
@Get()
@SuccessResponse(200, 'Inventory items retrieved')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async getInventory(
@Request() request: ExpressRequest,
@Query() limit?: number,
@Query() offset?: number,
@Query() location?: StorageLocation,
@Query() is_consumed?: boolean,
@Query() expiring_within_days?: number,
@Query() category_id?: number,
@Query() search?: string,
@Query() sort_by?: 'expiry_date' | 'purchase_date' | 'item_name' | 'created_at',
@Query() sort_order?: 'asc' | 'desc',
): Promise<SuccessResponseType<unknown>> {
const userProfile = request.user as UserProfile;
// Normalize pagination parameters
const normalizedLimit = Math.min(100, Math.max(1, Math.floor(limit ?? 50)));
const normalizedOffset = Math.max(0, Math.floor(offset ?? 0));
const result = await expiryService.getInventory(
{
user_id: userProfile.user.user_id,
location,
is_consumed,
expiring_within_days,
category_id,
search,
limit: normalizedLimit,
offset: normalizedOffset,
sort_by,
sort_order,
},
request.log,
);
return this.success(result);
}
/**
* Add inventory item.
*
* Add a new item to the user's pantry inventory.
*
* @summary Add inventory item
* @param request Express request with authenticated user
* @param body Item data
* @returns The created inventory item
*/
@Post()
@SuccessResponse(201, 'Item added to inventory')
@Response<ErrorResponse>(400, 'Validation error')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async addInventoryItem(
@Request() request: ExpressRequest,
@Body() body: AddInventoryItemRequest,
): Promise<SuccessResponseType<UserInventoryItem>> {
const userProfile = request.user as UserProfile;
request.log.info(
{ userId: userProfile.user.user_id, itemName: body.item_name },
'Adding item to inventory',
);
const item = await expiryService.addInventoryItem(userProfile.user.user_id, body, request.log);
return this.created(item);
}
// ==========================================================================
// EXPIRING ITEMS ENDPOINTS
// ==========================================================================
/**
* Get expiring items summary.
*
* Get items grouped by expiry urgency (today, this week, this month, expired).
*
* @summary Get expiring items summary
* @param request Express request with authenticated user
* @returns Expiring items grouped by urgency with counts
*/
@Get('expiring/summary')
@SuccessResponse(200, 'Expiring items grouped by urgency')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async getExpiringSummary(
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<ExpiringItemsResponse>> {
const userProfile = request.user as UserProfile;
const result = await expiryService.getExpiringItemsGrouped(
userProfile.user.user_id,
request.log,
);
return this.success(result);
}
/**
* Get expiring items.
*
* Get items expiring within a specified number of days.
*
* @summary Get expiring items
* @param request Express request with authenticated user
* @param days Number of days to look ahead (1-90, default: 7)
* @returns List of expiring items
*/
@Get('expiring')
@SuccessResponse(200, 'Expiring items retrieved')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async getExpiringItems(
@Request() request: ExpressRequest,
@Query() days?: number,
): Promise<SuccessResponseType<ExpiringItemsListResponse>> {
const userProfile = request.user as UserProfile;
// Normalize days parameter: default 7, min 1, max 90
const normalizedDays = Math.min(90, Math.max(1, Math.floor(days ?? 7)));
const items = await expiryService.getExpiringItems(
userProfile.user.user_id,
normalizedDays,
request.log,
);
return this.success({ items, total: items.length });
}
/**
* Get expired items.
*
* Get all items that have already expired.
*
* @summary Get expired items
* @param request Express request with authenticated user
* @returns List of expired items
*/
@Get('expired')
@SuccessResponse(200, 'Expired items retrieved')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async getExpiredItems(
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<ExpiringItemsListResponse>> {
const userProfile = request.user as UserProfile;
const items = await expiryService.getExpiredItems(userProfile.user.user_id, request.log);
return this.success({ items, total: items.length });
}
// ==========================================================================
// ALERT SETTINGS ENDPOINTS
// ==========================================================================
/**
* Get alert settings.
*
* Get the user's expiry alert settings for all notification methods.
*
* @summary Get alert settings
* @param request Express request with authenticated user
* @returns Alert settings for all methods
*/
@Get('alerts')
@SuccessResponse(200, 'Alert settings retrieved')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async getAlertSettings(
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<ExpiryAlertSettings[]>> {
const userProfile = request.user as UserProfile;
const settings = await expiryService.getAlertSettings(userProfile.user.user_id, request.log);
return this.success(settings);
}
/**
* Update alert settings.
*
* Update alert settings for a specific notification method.
*
* @summary Update alert settings
* @param alertMethod The notification method to update (email, push, in_app)
* @param request Express request with authenticated user
* @param body Settings to update
* @returns Updated alert settings
*/
@Put('alerts/{alertMethod}')
@SuccessResponse(200, 'Alert settings updated')
@Response<ErrorResponse>(400, 'Validation error')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async updateAlertSettings(
@Path() alertMethod: AlertMethod,
@Request() request: ExpressRequest,
@Body() body: UpdateAlertSettingsRequest,
): Promise<SuccessResponseType<ExpiryAlertSettings>> {
const userProfile = request.user as UserProfile;
const settings = await expiryService.updateAlertSettings(
userProfile.user.user_id,
alertMethod,
body,
request.log,
);
return this.success(settings);
}
// ==========================================================================
// RECIPE SUGGESTIONS ENDPOINT
// ==========================================================================
/**
* Get recipe suggestions for expiring items.
*
* Get recipes that use items expiring soon to reduce food waste.
*
* @summary Get recipe suggestions for expiring items
* @param request Express request with authenticated user
* @param days Consider items expiring within this many days (1-90, default: 7)
* @param limit Maximum number of recipes to return (1-50, default: 10)
* @param offset Number of recipes to skip for pagination (default: 0)
* @returns Recipe suggestions with matching expiring items
*/
@Get('recipes/suggestions')
@SuccessResponse(200, 'Recipe suggestions retrieved')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
public async getRecipeSuggestions(
@Request() request: ExpressRequest,
@Query() days?: number,
@Query() limit?: number,
@Query() offset?: number,
): Promise<SuccessResponseType<RecipeSuggestionsResponse>> {
const userProfile = request.user as UserProfile;
// Normalize parameters
const normalizedDays = Math.min(90, Math.max(1, Math.floor(days ?? 7)));
const normalizedLimit = Math.min(50, Math.max(1, Math.floor(limit ?? 10)));
const normalizedOffset = Math.max(0, Math.floor(offset ?? 0));
const result = await expiryService.getRecipeSuggestionsForExpiringItems(
userProfile.user.user_id,
normalizedDays,
request.log,
{ limit: normalizedLimit, offset: normalizedOffset },
);
return this.success(result);
}
// ==========================================================================
// INVENTORY ITEM BY ID ENDPOINTS
// ==========================================================================
/**
* Get inventory item by ID.
*
* Retrieve a specific inventory item.
*
* @summary Get inventory item by ID
* @param inventoryId The unique identifier of the inventory item
* @param request Express request with authenticated user
* @returns The inventory item
*/
@Get('{inventoryId}')
@SuccessResponse(200, 'Inventory item retrieved')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
@Response<ErrorResponse>(404, 'Item not found')
public async getInventoryItemById(
@Path() inventoryId: number,
@Request() request: ExpressRequest,
): Promise<SuccessResponseType<UserInventoryItem>> {
const userProfile = request.user as UserProfile;
const item = await expiryService.getInventoryItemById(
inventoryId,
userProfile.user.user_id,
request.log,
);
return this.success(item);
}
/**
* Update inventory item.
*
* Update an existing inventory item. At least one field must be provided.
*
* @summary Update inventory item
* @param inventoryId The unique identifier of the inventory item
* @param request Express request with authenticated user
* @param body Fields to update
* @returns The updated inventory item
*/
@Put('{inventoryId}')
@SuccessResponse(200, 'Item updated')
@Response<ErrorResponse>(400, 'Validation error - at least one field required')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
@Response<ErrorResponse>(404, 'Item not found')
public async updateInventoryItem(
@Path() inventoryId: number,
@Request() request: ExpressRequest,
@Body() body: UpdateInventoryItemRequest,
): Promise<SuccessResponseType<UserInventoryItem>> {
const userProfile = request.user as UserProfile;
// Validate at least one field is provided
if (Object.keys(body).length === 0) {
this.setStatus(400);
throw new Error('At least one field to update must be provided.');
}
const item = await expiryService.updateInventoryItem(
inventoryId,
userProfile.user.user_id,
body,
request.log,
);
return this.success(item);
}
/**
* Delete inventory item.
*
* Remove an item from the user's inventory.
*
* @summary Delete inventory item
* @param inventoryId The unique identifier of the inventory item
* @param request Express request with authenticated user
*/
@Delete('{inventoryId}')
@SuccessResponse(204, 'Item deleted')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
@Response<ErrorResponse>(404, 'Item not found')
public async deleteInventoryItem(
@Path() inventoryId: number,
@Request() request: ExpressRequest,
): Promise<void> {
const userProfile = request.user as UserProfile;
await expiryService.deleteInventoryItem(inventoryId, userProfile.user.user_id, request.log);
return this.noContent();
}
/**
* Mark item as consumed.
*
* Mark an inventory item as consumed.
*
* @summary Mark item as consumed
* @param inventoryId The unique identifier of the inventory item
* @param request Express request with authenticated user
*/
@Post('{inventoryId}/consume')
@SuccessResponse(204, 'Item marked as consumed')
@Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
@Response<ErrorResponse>(404, 'Item not found')
public async markItemConsumed(
@Path() inventoryId: number,
@Request() request: ExpressRequest,
): Promise<void> {
const userProfile = request.user as UserProfile;
await expiryService.markItemConsumed(inventoryId, userProfile.user.user_id, request.log);
return this.noContent();
}
}

View File

@@ -0,0 +1,476 @@
// src/controllers/personalization.controller.test.ts
// ============================================================================
// PERSONALIZATION CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the PersonalizationController class. These tests verify
// controller logic in isolation by mocking the personalization repository.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class
// NOTE: Vitest hoists vi.mock() calls above all imports, so these factories
// run before the controller module under test is loaded. They must not
// reference variables declared later in this file.
vi.mock('tsoa', () => ({
  // Minimal stand-in for tsoa's Controller base class: it only records the
  // HTTP status set via setStatus(), with no framework wiring.
  Controller: class Controller {
    protected setStatus(status: number): void {
      this._status = status;
    }
    private _status = 200;
  },
  // Every tsoa decorator used by the controller becomes a no-op factory.
  Get: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Query: () => () => {},
  Request: () => () => {},
  Middlewares: () => () => {},
  SuccessResponse: () => () => {},
}));
// Mock personalization repository
vi.mock('../services/db/index.db', () => ({
  personalizationRepo: {
    getAllMasterItems: vi.fn(),
    getDietaryRestrictions: vi.fn(),
    getAppliances: vi.fn(),
  },
}));
// Mock rate limiters
// The limiter is replaced with pass-through middleware so unit tests are
// never throttled.
vi.mock('../config/rateLimiters', () => ({
  publicReadLimiter: (req: unknown, res: unknown, next: () => void) => next(),
}));
// Import mocked modules after mock definitions
import * as db from '../services/db/index.db';
import { PersonalizationController } from './personalization.controller';
// Cast mocked modules for type-safe access
const mockedPersonalizationRepo = db.personalizationRepo as Mocked<typeof db.personalizationRepo>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a minimal Express request double: empty body/params/query, a
 * spy-backed `res.set` for cache-header assertions, and a stubbed
 * pino-style logger. Any property can be replaced via `overrides`.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const defaults = {
    body: {},
    params: {},
    query: {},
    res: { set: vi.fn() },
    log: { debug: vi.fn(), info: vi.fn(), warn: vi.fn(), error: vi.fn() },
  };
  // Overrides are spread last so callers can swap out res/log wholesale.
  return { ...defaults, ...overrides } as unknown as ExpressRequest;
}
/**
 * Builds a mock master grocery item; fields in `overrides` replace the
 * defaults (spread last).
 */
function createMockMasterItem(overrides: Record<string, unknown> = {}) {
  const defaults = {
    master_item_id: 1,
    name: 'Milk 2%',
    category_id: 1,
    category_name: 'Dairy & Eggs',
    typical_shelf_life_days: 14,
    storage_recommendation: 'refrigerator',
  };
  return { ...defaults, ...overrides };
}
/**
 * Builds a mock dietary restriction; fields in `overrides` replace the
 * defaults (spread last).
 */
function createMockDietaryRestriction(overrides: Record<string, unknown> = {}) {
  const defaults = {
    restriction_id: 1,
    name: 'Vegetarian',
    description: 'No meat or fish',
    icon: 'leaf',
  };
  return { ...defaults, ...overrides };
}
/**
 * Builds a mock kitchen appliance; fields in `overrides` replace the
 * defaults (spread last).
 */
function createMockAppliance(overrides: Record<string, unknown> = {}) {
  const defaults = {
    appliance_id: 1,
    name: 'Air Fryer',
    icon: 'air-fryer',
    category: 'cooking',
  };
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
describe('PersonalizationController', () => {
  let controller: PersonalizationController;
  beforeEach(() => {
    // Fresh controller and clean mock call history for every test.
    vi.clearAllMocks();
    controller = new PersonalizationController();
  });
  afterEach(() => {
    // Defensive: restore real timers in case a test ever enables fake ones.
    vi.useRealTimers();
  });
  // ==========================================================================
  // MASTER ITEMS ENDPOINT
  // ==========================================================================
  describe('getMasterItems()', () => {
    it('should return master items without pagination', async () => {
      // Arrange
      const mockResult = {
        items: [createMockMasterItem(), createMockMasterItem({ master_item_id: 2, name: 'Bread' })],
        total: 2,
      };
      const request = createMockRequest();
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      const result = await controller.getMasterItems(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.items).toHaveLength(2);
        expect(result.data.total).toBe(2);
      }
      // Omitted limit is passed through as undefined ("return all items").
      expect(mockedPersonalizationRepo.getAllMasterItems).toHaveBeenCalledWith(
        expect.anything(),
        undefined, // no limit
        0, // default offset
      );
    });
    it('should support pagination with limit and offset', async () => {
      // Arrange
      const mockResult = {
        items: [createMockMasterItem()],
        total: 100,
      };
      const request = createMockRequest();
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      await controller.getMasterItems(request, 50, 100);
      // Assert: in-range values reach the repository unchanged.
      expect(mockedPersonalizationRepo.getAllMasterItems).toHaveBeenCalledWith(
        expect.anything(),
        50,
        100,
      );
    });
    it('should cap limit at 500', async () => {
      // Arrange
      const mockResult = { items: [], total: 0 };
      const request = createMockRequest();
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      await controller.getMasterItems(request, 1000, 0);
      // Assert: limit is clamped to the documented maximum.
      expect(mockedPersonalizationRepo.getAllMasterItems).toHaveBeenCalledWith(
        expect.anything(),
        500,
        0,
      );
    });
    it('should floor limit at 1', async () => {
      // Arrange
      const mockResult = { items: [], total: 0 };
      const request = createMockRequest();
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      await controller.getMasterItems(request, 0, 0);
      // Assert: a non-positive limit is raised to the minimum of 1.
      expect(mockedPersonalizationRepo.getAllMasterItems).toHaveBeenCalledWith(
        expect.anything(),
        1,
        0,
      );
    });
    it('should floor offset at 0', async () => {
      // Arrange
      const mockResult = { items: [], total: 0 };
      const request = createMockRequest();
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      await controller.getMasterItems(request, 50, -10);
      // Assert: negative offsets are clamped to 0.
      expect(mockedPersonalizationRepo.getAllMasterItems).toHaveBeenCalledWith(
        expect.anything(),
        50,
        0,
      );
    });
    it('should set cache control header', async () => {
      // Arrange
      const mockSet = vi.fn();
      const request = createMockRequest({
        res: { set: mockSet } as unknown as ExpressRequest['res'],
      });
      const mockResult = { items: [], total: 0 };
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      await controller.getMasterItems(request);
      // Assert: reference data is publicly cacheable for one hour.
      expect(mockSet).toHaveBeenCalledWith('Cache-Control', 'public, max-age=3600');
    });
    it('should log request details', async () => {
      // Arrange
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      const mockResult = { items: [], total: 0 };
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      await controller.getMasterItems(request, 100, 50);
      // Assert: the normalized pagination values are logged with the call.
      expect(mockLog.info).toHaveBeenCalledWith(
        expect.objectContaining({
          limit: 100,
          offset: 50,
        }),
        'Fetching master items list from database...',
      );
    });
  });
  // ==========================================================================
  // DIETARY RESTRICTIONS ENDPOINT
  // ==========================================================================
  describe('getDietaryRestrictions()', () => {
    it('should return dietary restrictions', async () => {
      // Arrange
      const mockRestrictions = [
        createMockDietaryRestriction(),
        createMockDietaryRestriction({ restriction_id: 2, name: 'Vegan' }),
        createMockDietaryRestriction({ restriction_id: 3, name: 'Gluten-Free' }),
      ];
      const request = createMockRequest();
      mockedPersonalizationRepo.getDietaryRestrictions.mockResolvedValue(mockRestrictions);
      // Act
      const result = await controller.getDietaryRestrictions(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(3);
        expect(result.data[0].name).toBe('Vegetarian');
      }
      expect(mockedPersonalizationRepo.getDietaryRestrictions).toHaveBeenCalledWith(
        expect.anything(),
      );
    });
    it('should return empty array when no restrictions exist', async () => {
      // Arrange
      const request = createMockRequest();
      mockedPersonalizationRepo.getDietaryRestrictions.mockResolvedValue([]);
      // Act
      const result = await controller.getDietaryRestrictions(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
    it('should set cache control header', async () => {
      // Arrange
      const mockSet = vi.fn();
      const request = createMockRequest({
        res: { set: mockSet } as unknown as ExpressRequest['res'],
      });
      mockedPersonalizationRepo.getDietaryRestrictions.mockResolvedValue([]);
      // Act
      await controller.getDietaryRestrictions(request);
      // Assert
      expect(mockSet).toHaveBeenCalledWith('Cache-Control', 'public, max-age=3600');
    });
  });
  // ==========================================================================
  // APPLIANCES ENDPOINT
  // ==========================================================================
  describe('getAppliances()', () => {
    it('should return appliances', async () => {
      // Arrange
      const mockAppliances = [
        createMockAppliance(),
        createMockAppliance({ appliance_id: 2, name: 'Instant Pot' }),
        createMockAppliance({ appliance_id: 3, name: 'Stand Mixer' }),
      ];
      const request = createMockRequest();
      mockedPersonalizationRepo.getAppliances.mockResolvedValue(mockAppliances);
      // Act
      const result = await controller.getAppliances(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(3);
        expect(result.data[0].name).toBe('Air Fryer');
      }
      expect(mockedPersonalizationRepo.getAppliances).toHaveBeenCalledWith(expect.anything());
    });
    it('should return empty array when no appliances exist', async () => {
      // Arrange
      const request = createMockRequest();
      mockedPersonalizationRepo.getAppliances.mockResolvedValue([]);
      // Act
      const result = await controller.getAppliances(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
    it('should set cache control header', async () => {
      // Arrange
      const mockSet = vi.fn();
      const request = createMockRequest({
        res: { set: mockSet } as unknown as ExpressRequest['res'],
      });
      mockedPersonalizationRepo.getAppliances.mockResolvedValue([]);
      // Act
      await controller.getAppliances(request);
      // Assert
      expect(mockSet).toHaveBeenCalledWith('Cache-Control', 'public, max-age=3600');
    });
  });
  // ==========================================================================
  // PUBLIC ACCESS (NO AUTH REQUIRED)
  // ==========================================================================
  // These endpoints never read request.user, so an unauthenticated request
  // must behave exactly like an authenticated one.
  describe('Public access', () => {
    it('should work without user authentication for master items', async () => {
      // Arrange
      const mockResult = { items: [createMockMasterItem()], total: 1 };
      const request = createMockRequest({ user: undefined });
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      const result = await controller.getMasterItems(request);
      // Assert
      expect(result.success).toBe(true);
    });
    it('should work without user authentication for dietary restrictions', async () => {
      // Arrange
      const request = createMockRequest({ user: undefined });
      mockedPersonalizationRepo.getDietaryRestrictions.mockResolvedValue([]);
      // Act
      const result = await controller.getDietaryRestrictions(request);
      // Assert
      expect(result.success).toBe(true);
    });
    it('should work without user authentication for appliances', async () => {
      // Arrange
      const request = createMockRequest({ user: undefined });
      mockedPersonalizationRepo.getAppliances.mockResolvedValue([]);
      // Act
      const result = await controller.getAppliances(request);
      // Assert
      expect(result.success).toBe(true);
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const mockResult = { items: [], total: 0 };
      const request = createMockRequest();
      mockedPersonalizationRepo.getAllMasterItems.mockResolvedValue(mockResult);
      // Act
      const result = await controller.getMasterItems(request);
      // Assert: ADR-028 envelope — { success: true, data: ... }.
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
  });
});

View File

@@ -0,0 +1,150 @@
// src/controllers/personalization.controller.ts
// ============================================================================
// PERSONALIZATION CONTROLLER
// ============================================================================
// Provides endpoints for personalization data including master grocery items,
// dietary restrictions, and kitchen appliances. These are public endpoints
// used by the frontend for dropdown/selection components.
//
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import { Get, Route, Tags, Query, Request, SuccessResponse, Middlewares } from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType } from './types';
import * as db from '../services/db/index.db';
import type { MasterGroceryItem, DietaryRestriction, Appliance } from '../types';
import { publicReadLimiter } from '../config/rateLimiters';
// ============================================================================
// RESPONSE TYPES
// ============================================================================
/**
 * Response for paginated master items list.
 * Returned by `GET /personalization/master-items`.
 */
interface MasterItemsResponse {
  /** Array of master grocery items (the requested page when paginated) */
  items: MasterGroceryItem[];
  /** Total count of all items, independent of pagination */
  total: number;
}
// ============================================================================
// PERSONALIZATION CONTROLLER
// ============================================================================
/**
 * Controller for personalization reference data.
 *
 * All endpoints are public and do not require authentication.
 * Data is used for dropdown/selection components in the UI.
 *
 * Responses are cached for 1 hour (Cache-Control header) as this
 * reference data changes infrequently.
 */
@Route('personalization')
@Tags('Personalization')
export class PersonalizationController extends BaseController {
  /**
   * Get master items list.
   *
   * Returns the master list of all grocery items, optionally paginated.
   * The response carries a 1-hour Cache-Control header because this
   * reference data changes infrequently.
   *
   * @summary Get master items list
   * @param request Express request for logging and response headers
   * @param limit Maximum number of items to return (max: 500). If omitted, returns all items.
   * @param offset Number of items to skip for pagination (default: 0)
   * @returns Paginated list of master grocery items with total count
   */
  @Get('master-items')
  @Middlewares(publicReadLimiter)
  @SuccessResponse(200, 'List of master grocery items with total count')
  public async getMasterItems(
    @Request() request: ExpressRequest,
    @Query() limit?: number,
    @Query() offset?: number,
  ): Promise<SuccessResponseType<MasterItemsResponse>> {
    // Clamp inputs: limit (when present) into [1, 500]; offset to >= 0.
    let cappedLimit: number | undefined;
    if (limit !== undefined) {
      cappedLimit = Math.min(500, Math.max(1, Math.floor(limit)));
    }
    const safeOffset = Math.max(0, Math.floor(offset ?? 0));

    // Trace the database call with the normalized pagination values.
    request.log.info(
      { limit: cappedLimit, offset: safeOffset },
      'Fetching master items list from database...',
    );

    // Reference data changes rarely, so let clients/proxies cache for 1 hour.
    request.res?.set('Cache-Control', 'public, max-age=3600');

    const page = await db.personalizationRepo.getAllMasterItems(
      request.log,
      cappedLimit,
      safeOffset,
    );
    return this.success(page);
  }

  /**
   * Get dietary restrictions.
   *
   * Returns the master list of all available dietary restrictions.
   * Cached for 1 hour via Cache-Control header.
   *
   * @summary Get dietary restrictions
   * @param request Express request for logging and response headers
   * @returns List of all dietary restrictions
   */
  @Get('dietary-restrictions')
  @Middlewares(publicReadLimiter)
  @SuccessResponse(200, 'List of all dietary restrictions')
  public async getDietaryRestrictions(
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<DietaryRestriction[]>> {
    // Rarely-changing reference data: cacheable for 1 hour.
    request.res?.set('Cache-Control', 'public, max-age=3600');
    const list = await db.personalizationRepo.getDietaryRestrictions(request.log);
    return this.success(list);
  }

  /**
   * Get kitchen appliances.
   *
   * Returns the master list of all available kitchen appliances.
   * Cached for 1 hour via Cache-Control header.
   *
   * @summary Get kitchen appliances
   * @param request Express request for logging and response headers
   * @returns List of all kitchen appliances
   */
  @Get('appliances')
  @Middlewares(publicReadLimiter)
  @SuccessResponse(200, 'List of all kitchen appliances')
  public async getAppliances(
    @Request() request: ExpressRequest,
  ): Promise<SuccessResponseType<Appliance[]>> {
    // Rarely-changing reference data: cacheable for 1 hour.
    request.res?.set('Cache-Control', 'public, max-age=3600');
    const found = await db.personalizationRepo.getAppliances(request.log);
    return this.success(found);
  }
}

View File

@@ -0,0 +1,43 @@
/**
* Placeholder controller for tsoa configuration verification.
*
* This minimal controller exists only to verify that tsoa is correctly configured.
* It should be removed once actual controllers are implemented.
*
* @see ADR-055 for the OpenAPI specification migration plan
*/
import { Controller, Get, Route, Tags } from 'tsoa';
/**
 * Placeholder response type for configuration verification.
 */
interface PlaceholderResponse {
  /** Human-readable confirmation message. */
  message: string;
  /** True when the tsoa routing pipeline reached this handler. */
  configured: boolean;
}
/**
 * Placeholder controller for verifying tsoa configuration.
 *
 * Temporary: delete once real API controllers exist. It is kept only so
 * the tsoa route/spec generation pipeline has at least one decorated class.
 */
@Route('_tsoa')
@Tags('Internal')
export class PlaceholderController extends Controller {
  /**
   * Verify tsoa configuration is working.
   *
   * Exists purely for configuration verification and should be removed
   * before production use.
   *
   * @returns A simple message confirming tsoa is configured
   */
  @Get('verify')
  public async verify(): Promise<PlaceholderResponse> {
    const response: PlaceholderResponse = {
      message: 'tsoa is correctly configured',
      configured: true,
    };
    return response;
  }
}

View File

@@ -0,0 +1,393 @@
// src/controllers/price.controller.test.ts
// ============================================================================
// PRICE CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the PriceController class. These tests verify controller
// logic in isolation by mocking the price repository.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class.
// NOTE: vi.mock calls are hoisted by Vitest above the imports below, so the
// controller module receives these stand-ins when it is loaded. Every
// decorator becomes a no-op factory; Controller only records a status code.
vi.mock('tsoa', () => ({
  Controller: class Controller {
    // Captures the HTTP status a controller sets; not asserted directly here.
    protected setStatus(status: number): void {
      this._status = status;
    }
    private _status = 200;
  },
  Post: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Security: () => () => {},
  Body: () => () => {},
  Request: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock price repository so no database is touched during these unit tests.
vi.mock('../services/db/price.db', () => ({
  priceRepo: {
    getPriceHistory: vi.fn(),
  },
}));
// Import mocked modules after mock definitions
import { priceRepo } from '../services/db/price.db';
import { PriceController } from './price.controller';
// Cast mocked modules for type-safe access to mock helpers (mockResolvedValue etc.)
const mockedPriceRepo = priceRepo as Mocked<typeof priceRepo>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a minimal Express request double with an authenticated user and a
 * spy-backed logger. Individual fields can be overridden per test.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const stubLogger = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const base = {
    body: {},
    params: {},
    query: {},
    user: createMockUserProfile(),
    log: stubLogger,
  };
  // Overrides win over the defaults, exactly like the object-spread order implies.
  return { ...base, ...overrides } as unknown as ExpressRequest;
}
/**
 * Returns a canned authenticated user profile shared across these tests.
 */
function createMockUserProfile() {
  const user = {
    user_id: 'test-user-id',
    email: 'test@example.com',
  };
  return {
    full_name: 'Test User',
    role: 'user' as const,
    user,
  };
}
/**
 * Builds one historical price data point; pass overrides to vary fields.
 */
function createMockPriceHistoryData(overrides: Record<string, unknown> = {}) {
  const defaults = {
    master_item_id: 1,
    price_cents: 350,
    flyer_start_date: '2024-01-15',
    flyer_id: 10,
    store_name: 'Superstore',
  };
  // Later spread wins, so overrides replace matching default fields.
  return { ...defaults, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
/**
 * Unit tests for PriceController. The repository is mocked (see vi.mock
 * above), so these tests pin controller-level behavior only: validation,
 * pagination normalization, logging, and the ADR-028 response envelope.
 */
describe('PriceController', () => {
  let controller: PriceController;
  beforeEach(() => {
    vi.clearAllMocks();
    controller = new PriceController();
  });
  afterEach(() => {
    // Defensive reset — no test in this suite currently installs fake timers.
    vi.useRealTimers();
  });
  // ==========================================================================
  // GET PRICE HISTORY
  // ==========================================================================
  describe('getPriceHistory()', () => {
    it('should return price history for specified items', async () => {
      // Arrange
      const mockPriceHistory = [
        createMockPriceHistoryData(),
        createMockPriceHistoryData({ flyer_start_date: '2024-01-08', price_cents: 399 }),
        createMockPriceHistoryData({ master_item_id: 2, price_cents: 450 }),
      ];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: [1, 2],
      });
      // Assert — the `if (result.success)` guard narrows the union for TS.
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(3);
        expect(result.data[0].price_cents).toBe(350);
      }
      expect(mockedPriceRepo.getPriceHistory).toHaveBeenCalledWith(
        [1, 2],
        expect.anything(),
        1000, // default limit
        0, // default offset
      );
    });
    it('should use default limit and offset when not provided', async () => {
      // Arrange
      const mockPriceHistory = [createMockPriceHistoryData()];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      await controller.getPriceHistory(request, {
        masterItemIds: [1],
      });
      // Assert
      expect(mockedPriceRepo.getPriceHistory).toHaveBeenCalledWith([1], expect.anything(), 1000, 0);
    });
    it('should use custom limit and offset', async () => {
      // Arrange
      const mockPriceHistory = [createMockPriceHistoryData()];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      await controller.getPriceHistory(request, {
        masterItemIds: [1],
        limit: 500,
        offset: 100,
      });
      // Assert
      expect(mockedPriceRepo.getPriceHistory).toHaveBeenCalledWith(
        [1],
        expect.anything(),
        500,
        100,
      );
    });
    it('should return error when masterItemIds is empty', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: [],
      });
      // Assert
      expect(result.success).toBe(false);
    });
    it('should return error when masterItemIds is not an array', async () => {
      // Arrange — the double cast deliberately bypasses TS to exercise the
      // controller's runtime Array.isArray guard.
      const request = createMockRequest();
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: null as unknown as number[],
      });
      // Assert
      expect(result.success).toBe(false);
    });
    it('should normalize limit to at least 1', async () => {
      // Arrange
      const mockPriceHistory = [createMockPriceHistoryData()];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      await controller.getPriceHistory(request, {
        masterItemIds: [1],
        limit: 0,
      });
      // Assert
      expect(mockedPriceRepo.getPriceHistory).toHaveBeenCalledWith(
        [1],
        expect.anything(),
        1, // floored to 1
        0,
      );
    });
    it('should normalize offset to at least 0', async () => {
      // Arrange
      const mockPriceHistory = [createMockPriceHistoryData()];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      await controller.getPriceHistory(request, {
        masterItemIds: [1],
        offset: -10,
      });
      // Assert
      expect(mockedPriceRepo.getPriceHistory).toHaveBeenCalledWith(
        [1],
        expect.anything(),
        1000,
        0, // floored to 0
      );
    });
    it('should log request details', async () => {
      // Arrange — uses an explicit mockLog so the info spy can be asserted on.
      const mockPriceHistory = [createMockPriceHistoryData()];
      const mockLog = {
        debug: vi.fn(),
        info: vi.fn(),
        warn: vi.fn(),
        error: vi.fn(),
      };
      const request = createMockRequest({ log: mockLog });
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      await controller.getPriceHistory(request, {
        masterItemIds: [1, 2, 3],
        limit: 100,
        offset: 50,
      });
      // Assert
      expect(mockLog.info).toHaveBeenCalledWith(
        expect.objectContaining({
          itemCount: 3,
          limit: 100,
          offset: 50,
        }),
        '[API /price-history] Received request for historical price data.',
      );
    });
    it('should return empty array when no price history exists', async () => {
      // Arrange
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue([]);
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: [1],
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
    it('should handle single item request', async () => {
      // Arrange
      const mockPriceHistory = [
        createMockPriceHistoryData(),
        createMockPriceHistoryData({ flyer_start_date: '2024-01-08' }),
      ];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: [1],
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
      expect(mockedPriceRepo.getPriceHistory).toHaveBeenCalledWith([1], expect.anything(), 1000, 0);
    });
    it('should handle multiple items request', async () => {
      // Arrange — the repo may legitimately return fewer distinct items than
      // were requested; the controller passes the full ID list through.
      const mockPriceHistory = [
        createMockPriceHistoryData({ master_item_id: 1 }),
        createMockPriceHistoryData({ master_item_id: 2 }),
        createMockPriceHistoryData({ master_item_id: 3 }),
      ];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: [1, 2, 3, 4, 5],
      });
      // Assert
      expect(result.success).toBe(true);
      expect(mockedPriceRepo.getPriceHistory).toHaveBeenCalledWith(
        [1, 2, 3, 4, 5],
        expect.anything(),
        1000,
        0,
      );
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const mockPriceHistory = [createMockPriceHistoryData()];
      const request = createMockRequest();
      mockedPriceRepo.getPriceHistory.mockResolvedValue(mockPriceHistory);
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: [1],
      });
      // Assert — ADR-028 envelope: { success, data }
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
    it('should use error helper for validation errors', async () => {
      // Arrange
      const request = createMockRequest();
      // Act
      const result = await controller.getPriceHistory(request, {
        masterItemIds: [],
      });
      // Assert
      expect(result).toHaveProperty('success', false);
    });
  });
});

View File

@@ -0,0 +1,113 @@
// src/controllers/price.controller.ts
// ============================================================================
// PRICE CONTROLLER
// ============================================================================
// Provides endpoints for retrieving historical price data for grocery items.
// Used for price trend analysis and charting.
//
// All endpoints require authentication.
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import { Post, Route, Tags, Security, Body, Request, SuccessResponse, Response } from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import { priceRepo } from '../services/db/price.db';
import type { PriceHistoryData } from '../types';
// ============================================================================
// REQUEST/RESPONSE TYPES
// ============================================================================
/**
 * Request body for fetching price history.
 *
 * NOTE(review): the handler floors `limit` to a minimum of 1 but applies no
 * server-side maximum cap — confirm whether an upper bound is desired.
 */
interface PriceHistoryRequest {
  /**
   * Array of master item IDs to get price history for.
   * Must be a non-empty array of positive integers.
   */
  masterItemIds: number[];
  /**
   * Maximum number of price points to return.
   * @default 1000
   */
  limit?: number;
  /**
   * Number of price points to skip.
   * @default 0
   */
  offset?: number;
}
// ============================================================================
// PRICE CONTROLLER
// ============================================================================
/**
 * Controller for retrieving price history data.
 *
 * All endpoints require JWT authentication. Price history is fetched
 * for specified master grocery items, useful for trend analysis and charting.
 */
@Route('price-history')
@Tags('Price')
@Security('bearerAuth')
export class PriceController extends BaseController {
  /**
   * Get price history for specified items.
   *
   * Fetches historical price data for a given list of master item IDs:
   * the price in cents plus the start date of each flyer where the item
   * appeared, ordered by master_item_id and date ascending.
   *
   * POST (not GET) is used because the ID list can be large enough to
   * exceed URL length limits as query parameters.
   *
   * @summary Get price history
   * @param request Express request with authenticated user
   * @param body Request body with master item IDs and optional pagination
   * @returns Historical price data for specified items
   */
  @Post()
  @SuccessResponse(200, 'Historical price data for specified items')
  @Response<ErrorResponse>(400, 'Validation error - masterItemIds must be a non-empty array')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  public async getPriceHistory(
    @Request() request: ExpressRequest,
    @Body() body: PriceHistoryRequest,
  ): Promise<SuccessResponseType<PriceHistoryData[]>> {
    const { masterItemIds: ids, limit = 1000, offset = 0 } = body;

    // Guard: reject anything that is not a non-empty array of IDs.
    if (!Array.isArray(ids) || ids.length === 0) {
      this.setStatus(400);
      // The double cast papers over the return type so tsoa's generated 200
      // schema stays SuccessResponseType-only; the 400 shape is declared via
      // the @Response decorator above.
      return this.error(
        this.ErrorCode.VALIDATION_ERROR,
        'masterItemIds must be a non-empty array of positive integers.',
      ) as unknown as SuccessResponseType<PriceHistoryData[]>;
    }

    // Floor both pagination values: limit stays >= 1, offset stays >= 0.
    const safeLimit = Math.max(1, Math.floor(limit));
    const safeOffset = Math.max(0, Math.floor(offset));

    request.log.info(
      { itemCount: ids.length, limit: safeLimit, offset: safeOffset },
      '[API /price-history] Received request for historical price data.',
    );

    const rows = await priceRepo.getPriceHistory(ids, request.log, safeLimit, safeOffset);
    return this.success(rows);
  }
}

View File

@@ -0,0 +1,531 @@
// src/controllers/reactions.controller.test.ts
// ============================================================================
// REACTIONS CONTROLLER UNIT TESTS
// ============================================================================
// Unit tests for the ReactionsController class. These tests verify controller
// logic in isolation by mocking the reaction repository.
// ============================================================================
import { describe, it, expect, vi, beforeEach, afterEach, type Mocked } from 'vitest';
import type { Request as ExpressRequest } from 'express';
// ============================================================================
// MOCK SETUP
// ============================================================================
// Mock tsoa decorators and Controller class.
// NOTE: vi.mock calls are hoisted by Vitest above the imports below, so the
// controller module receives these stand-ins when it is loaded. Every
// decorator becomes a no-op factory; Controller only records a status code.
vi.mock('tsoa', () => ({
  Controller: class Controller {
    // Captures the HTTP status a controller sets; not asserted directly here.
    protected setStatus(status: number): void {
      this._status = status;
    }
    private _status = 200;
  },
  Get: () => () => {},
  Post: () => () => {},
  Route: () => () => {},
  Tags: () => () => {},
  Security: () => () => {},
  Query: () => () => {},
  Body: () => () => {},
  Request: () => () => {},
  Middlewares: () => () => {},
  SuccessResponse: () => () => {},
  Response: () => () => {},
}));
// Mock reaction repository so no database is touched during these unit tests.
vi.mock('../services/db/index.db', () => ({
  reactionRepo: {
    getReactions: vi.fn(),
    getReactionSummary: vi.fn(),
    toggleReaction: vi.fn(),
  },
}));
// Mock rate limiters as pass-through middleware so they never block tests.
vi.mock('../config/rateLimiters', () => ({
  publicReadLimiter: (req: unknown, res: unknown, next: () => void) => next(),
  reactionToggleLimiter: (req: unknown, res: unknown, next: () => void) => next(),
}));
// Import mocked modules after mock definitions
import { reactionRepo } from '../services/db/index.db';
import { ReactionsController } from './reactions.controller';
// Cast mocked modules for type-safe access to mock helpers (mockResolvedValue etc.)
const mockedReactionRepo = reactionRepo as Mocked<typeof reactionRepo>;
// ============================================================================
// HELPER FUNCTIONS
// ============================================================================
/**
 * Builds a fake Express request carrying an authenticated user plus a
 * logger whose methods are vi spies. Fields may be overridden per test.
 */
function createMockRequest(overrides: Partial<ExpressRequest> = {}): ExpressRequest {
  const spyLog = {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  };
  const defaults = {
    body: {},
    params: {},
    query: {},
    user: createMockUserProfile(),
    log: spyLog,
  };
  // Spread order means caller-supplied overrides replace the defaults.
  return { ...defaults, ...overrides } as unknown as ExpressRequest;
}
/**
 * Returns the canned authenticated user profile used throughout this suite.
 */
function createMockUserProfile() {
  const account = {
    user_id: 'test-user-id',
    email: 'test@example.com',
  };
  return {
    full_name: 'Test User',
    role: 'user' as const,
    user: account,
  };
}
/**
 * Builds a user reaction row; pass overrides to vary individual fields.
 */
function createMockReaction(overrides: Record<string, unknown> = {}) {
  const base = {
    reaction_id: 1,
    user_id: 'test-user-id',
    entity_type: 'recipe',
    entity_id: '123',
    reaction_type: 'like',
    created_at: '2024-01-15T10:00:00.000Z',
  };
  // Later spread wins, so overrides replace matching base fields.
  return { ...base, ...overrides };
}
/**
 * Builds one reaction-summary entry (type + count); overrides vary fields.
 */
function createMockReactionSummary(overrides: Record<string, unknown> = {}) {
  const base = {
    reaction_type: 'like',
    count: 10,
  };
  return { ...base, ...overrides };
}
// ============================================================================
// TEST SUITE
// ============================================================================
/**
 * Unit tests for ReactionsController. The repository and rate limiters are
 * mocked (see vi.mock above), so these tests pin controller-level behavior:
 * filter pass-through, toggle add/remove semantics, and response envelopes.
 */
describe('ReactionsController', () => {
  let controller: ReactionsController;
  beforeEach(() => {
    vi.clearAllMocks();
    controller = new ReactionsController();
  });
  afterEach(() => {
    // Defensive reset — no test in this suite currently installs fake timers.
    vi.useRealTimers();
  });
  // ==========================================================================
  // PUBLIC ENDPOINTS
  // ==========================================================================
  describe('getReactions()', () => {
    it('should return reactions without filters', async () => {
      // Arrange
      const mockReactions = [
        createMockReaction(),
        createMockReaction({ reaction_id: 2, reaction_type: 'love' }),
      ];
      const request = createMockRequest();
      mockedReactionRepo.getReactions.mockResolvedValue(mockReactions);
      // Act
      const result = await controller.getReactions(request);
      // Assert — the `if (result.success)` guard narrows the union for TS.
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
      }
      expect(mockedReactionRepo.getReactions).toHaveBeenCalledWith(
        { userId: undefined, entityType: undefined, entityId: undefined },
        expect.anything(),
      );
    });
    it('should filter by userId', async () => {
      // Arrange
      const mockReactions = [createMockReaction()];
      const request = createMockRequest();
      mockedReactionRepo.getReactions.mockResolvedValue(mockReactions);
      // Act
      await controller.getReactions(request, 'user-123');
      // Assert
      expect(mockedReactionRepo.getReactions).toHaveBeenCalledWith(
        { userId: 'user-123', entityType: undefined, entityId: undefined },
        expect.anything(),
      );
    });
    it('should filter by entityType', async () => {
      // Arrange
      const mockReactions = [createMockReaction()];
      const request = createMockRequest();
      mockedReactionRepo.getReactions.mockResolvedValue(mockReactions);
      // Act
      await controller.getReactions(request, undefined, 'recipe');
      // Assert
      expect(mockedReactionRepo.getReactions).toHaveBeenCalledWith(
        { userId: undefined, entityType: 'recipe', entityId: undefined },
        expect.anything(),
      );
    });
    it('should filter by entityId', async () => {
      // Arrange
      const mockReactions = [createMockReaction()];
      const request = createMockRequest();
      mockedReactionRepo.getReactions.mockResolvedValue(mockReactions);
      // Act
      await controller.getReactions(request, undefined, undefined, '123');
      // Assert
      expect(mockedReactionRepo.getReactions).toHaveBeenCalledWith(
        { userId: undefined, entityType: undefined, entityId: '123' },
        expect.anything(),
      );
    });
    it('should support multiple filters', async () => {
      // Arrange
      const mockReactions = [createMockReaction()];
      const request = createMockRequest();
      mockedReactionRepo.getReactions.mockResolvedValue(mockReactions);
      // Act
      await controller.getReactions(request, 'user-123', 'recipe', '456');
      // Assert
      expect(mockedReactionRepo.getReactions).toHaveBeenCalledWith(
        { userId: 'user-123', entityType: 'recipe', entityId: '456' },
        expect.anything(),
      );
    });
    it('should return empty array when no reactions exist', async () => {
      // Arrange
      const request = createMockRequest();
      mockedReactionRepo.getReactions.mockResolvedValue([]);
      // Act
      const result = await controller.getReactions(request);
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
    it('should work without user authentication', async () => {
      // Arrange — public endpoint: request carries no user profile.
      const mockReactions = [createMockReaction()];
      const request = createMockRequest({ user: undefined });
      mockedReactionRepo.getReactions.mockResolvedValue(mockReactions);
      // Act
      const result = await controller.getReactions(request);
      // Assert
      expect(result.success).toBe(true);
    });
  });
  describe('getReactionSummary()', () => {
    it('should return reaction summary for an entity', async () => {
      // Arrange
      const mockSummary = [
        createMockReactionSummary(),
        createMockReactionSummary({ reaction_type: 'love', count: 5 }),
      ];
      const request = createMockRequest();
      mockedReactionRepo.getReactionSummary.mockResolvedValue(mockSummary);
      // Act
      const result = await controller.getReactionSummary(request, 'recipe', '123');
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(2);
        expect(result.data[0].reaction_type).toBe('like');
        expect(result.data[0].count).toBe(10);
      }
      expect(mockedReactionRepo.getReactionSummary).toHaveBeenCalledWith(
        'recipe',
        '123',
        expect.anything(),
      );
    });
    it('should return empty array when no reactions exist for entity', async () => {
      // Arrange
      const request = createMockRequest();
      mockedReactionRepo.getReactionSummary.mockResolvedValue([]);
      // Act
      const result = await controller.getReactionSummary(request, 'recipe', '999');
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data).toHaveLength(0);
      }
    });
    it('should work with different entity types', async () => {
      // Arrange
      const mockSummary = [createMockReactionSummary()];
      const request = createMockRequest();
      mockedReactionRepo.getReactionSummary.mockResolvedValue(mockSummary);
      // Act
      await controller.getReactionSummary(request, 'comment', '456');
      // Assert
      expect(mockedReactionRepo.getReactionSummary).toHaveBeenCalledWith(
        'comment',
        '456',
        expect.anything(),
      );
    });
    it('should work without user authentication', async () => {
      // Arrange — public endpoint: request carries no user profile.
      const mockSummary = [createMockReactionSummary()];
      const request = createMockRequest({ user: undefined });
      mockedReactionRepo.getReactionSummary.mockResolvedValue(mockSummary);
      // Act
      const result = await controller.getReactionSummary(request, 'recipe', '123');
      // Assert
      expect(result.success).toBe(true);
    });
  });
  // ==========================================================================
  // AUTHENTICATED ENDPOINTS
  // ==========================================================================
  describe('toggleReaction()', () => {
    it('should add reaction when it does not exist', async () => {
      // Arrange — repo returning a row means the toggle ADDED a reaction.
      const mockReaction = createMockReaction();
      const request = createMockRequest();
      mockedReactionRepo.toggleReaction.mockResolvedValue(mockReaction);
      // Act
      const result = await controller.toggleReaction(request, {
        entity_type: 'recipe',
        entity_id: '123',
        reaction_type: 'like',
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Reaction added.');
        expect((result.data as { reaction: typeof mockReaction }).reaction).toEqual(mockReaction);
      }
      expect(mockedReactionRepo.toggleReaction).toHaveBeenCalledWith(
        {
          user_id: 'test-user-id',
          entity_type: 'recipe',
          entity_id: '123',
          reaction_type: 'like',
        },
        expect.anything(),
      );
    });
    it('should remove reaction when it already exists', async () => {
      // Arrange — repo returning null means the toggle REMOVED a reaction.
      const request = createMockRequest();
      mockedReactionRepo.toggleReaction.mockResolvedValue(null);
      // Act
      const result = await controller.toggleReaction(request, {
        entity_type: 'recipe',
        entity_id: '123',
        reaction_type: 'like',
      });
      // Assert
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Reaction removed.');
      }
    });
    it('should use user ID from authenticated profile', async () => {
      // Arrange
      const customProfile = {
        full_name: 'Custom User',
        role: 'user' as const,
        user: {
          user_id: 'custom-user-id',
          email: 'custom@example.com',
        },
      };
      const request = createMockRequest({ user: customProfile });
      mockedReactionRepo.toggleReaction.mockResolvedValue(null);
      // Act
      await controller.toggleReaction(request, {
        entity_type: 'recipe',
        entity_id: '123',
        reaction_type: 'like',
      });
      // Assert
      expect(mockedReactionRepo.toggleReaction).toHaveBeenCalledWith(
        expect.objectContaining({ user_id: 'custom-user-id' }),
        expect.anything(),
      );
    });
    it('should support different reaction types', async () => {
      // Arrange
      const mockReaction = createMockReaction({ reaction_type: 'love' });
      const request = createMockRequest();
      mockedReactionRepo.toggleReaction.mockResolvedValue(mockReaction);
      // Act
      const result = await controller.toggleReaction(request, {
        entity_type: 'recipe',
        entity_id: '123',
        reaction_type: 'love',
      });
      // Assert
      expect(result.success).toBe(true);
      expect(mockedReactionRepo.toggleReaction).toHaveBeenCalledWith(
        expect.objectContaining({ reaction_type: 'love' }),
        expect.anything(),
      );
    });
    it('should support different entity types', async () => {
      // Arrange
      const mockReaction = createMockReaction({ entity_type: 'comment' });
      const request = createMockRequest();
      mockedReactionRepo.toggleReaction.mockResolvedValue(mockReaction);
      // Act
      await controller.toggleReaction(request, {
        entity_type: 'comment',
        entity_id: '456',
        reaction_type: 'like',
      });
      // Assert
      expect(mockedReactionRepo.toggleReaction).toHaveBeenCalledWith(
        expect.objectContaining({ entity_type: 'comment', entity_id: '456' }),
        expect.anything(),
      );
    });
  });
  // ==========================================================================
  // BASE CONTROLLER INTEGRATION
  // ==========================================================================
  describe('BaseController integration', () => {
    it('should use success helper for consistent response format', async () => {
      // Arrange
      const request = createMockRequest();
      mockedReactionRepo.getReactions.mockResolvedValue([]);
      // Act
      const result = await controller.getReactions(request);
      // Assert — ADR-028 envelope: { success, data }
      expect(result).toHaveProperty('success', true);
      expect(result).toHaveProperty('data');
    });
    it('should set 201 status when reaction is added', async () => {
      // Arrange
      const mockReaction = createMockReaction();
      const request = createMockRequest();
      mockedReactionRepo.toggleReaction.mockResolvedValue(mockReaction);
      // Act
      const result = await controller.toggleReaction(request, {
        entity_type: 'recipe',
        entity_id: '123',
        reaction_type: 'like',
      });
      // Assert — NOTE(review): status is asserted indirectly via the message;
      // the mocked Controller does not expose the status it recorded.
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Reaction added.');
      }
    });
    it('should set 200 status when reaction is removed', async () => {
      // Arrange
      const request = createMockRequest();
      mockedReactionRepo.toggleReaction.mockResolvedValue(null);
      // Act
      const result = await controller.toggleReaction(request, {
        entity_type: 'recipe',
        entity_id: '123',
        reaction_type: 'like',
      });
      // Assert — see note above: the status itself is not directly observable.
      expect(result.success).toBe(true);
      if (result.success) {
        expect(result.data.message).toBe('Reaction removed.');
      }
    });
  });
});

View File

@@ -0,0 +1,204 @@
// src/controllers/reactions.controller.ts
// ============================================================================
// REACTIONS CONTROLLER
// ============================================================================
// Provides endpoints for user reactions on content (recipes, comments, etc.).
// Includes public endpoints for viewing reactions and authenticated endpoint
// for toggling reactions.
//
// Implements ADR-028 (API Response Format) via BaseController.
// ============================================================================
import {
Get,
Post,
Route,
Tags,
Security,
Body,
Query,
Request,
SuccessResponse,
Response,
Middlewares,
} from 'tsoa';
import type { Request as ExpressRequest } from 'express';
import { BaseController } from './base.controller';
import type { SuccessResponse as SuccessResponseType, ErrorResponse } from './types';
import { reactionRepo } from '../services/db/index.db';
import type { UserProfile, UserReaction } from '../types';
import { publicReadLimiter, reactionToggleLimiter } from '../config/rateLimiters';
// ============================================================================
// REQUEST/RESPONSE TYPES
// ============================================================================
/**
* Request body for toggling a reaction.
*/
interface ToggleReactionRequest {
/**
* Entity type (e.g., 'recipe', 'comment')
* @minLength 1
*/
entity_type: string;
/**
* Entity ID
* @minLength 1
*/
entity_id: string;
/**
* Type of reaction (e.g., 'like', 'love')
* @minLength 1
*/
reaction_type: string;
}
/**
 * Toggle response variant returned (HTTP 201) when the reaction did not
 * previously exist and was created.
 */
interface ReactionAddedResponse {
  /** Human-readable confirmation ('Reaction added.') */
  message: string;
  /** The newly created reaction row */
  reaction: UserReaction;
}
/**
 * Toggle response variant returned (HTTP 200) when an existing reaction
 * was found and deleted. Carries no reaction payload.
 */
interface ReactionRemovedResponse {
  /** Human-readable confirmation ('Reaction removed.') */
  message: string;
}
/**
 * One row of the per-entity reaction summary: a reaction type paired with
 * how many users reacted with it.
 */
interface ReactionSummaryEntry {
  /** Reaction type (e.g., 'like', 'love') */
  reaction_type: string;
  /** Number of reactions of this type on the entity */
  count: number;
}
// ============================================================================
// REACTIONS CONTROLLER
// ============================================================================
/**
 * Controller for user reactions on content (recipes, comments, etc.).
 *
 * Public (rate-limited by publicReadLimiter):
 * - GET /reactions          - list reactions, optionally filtered
 * - GET /reactions/summary  - per-type counts for one entity
 *
 * Authenticated (bearerAuth, rate-limited by reactionToggleLimiter):
 * - POST /reactions/toggle  - add the reaction if absent, remove it if present
 *
 * Response envelopes follow ADR-028 via BaseController.success().
 */
@Route('reactions')
@Tags('Reactions')
export class ReactionsController extends BaseController {
  // ==========================================================================
  // PUBLIC ENDPOINTS
  // ==========================================================================
  /**
   * Get reactions.
   *
   * Lists reactions matching the supplied filters. Every filter is optional;
   * omitting all of them returns whatever the repository's unfiltered query
   * yields.
   *
   * @summary Get reactions
   * @param request Express request (provides the per-request logger)
   * @param userId Filter by user ID (UUID format)
   * @param entityType Filter by entity type (e.g., 'recipe', 'comment')
   * @param entityId Filter by entity ID
   * @returns List of reactions matching filters
   */
  @Get()
  @Middlewares(publicReadLimiter)
  @SuccessResponse(200, 'List of reactions matching filters')
  public async getReactions(
    @Request() request: ExpressRequest,
    @Query() userId?: string,
    @Query() entityType?: string,
    @Query() entityId?: string,
  ): Promise<SuccessResponseType<UserReaction[]>> {
    const filters = { userId, entityType, entityId };
    const rows = await reactionRepo.getReactions(filters, request.log);
    return this.success(rows);
  }
  /**
   * Get reaction summary.
   *
   * Returns the count of each reaction type on a single entity. Both query
   * parameters are required; tsoa rejects requests missing either one with
   * a 400 before this method runs.
   *
   * @summary Get reaction summary
   * @param request Express request (provides the per-request logger)
   * @param entityType Entity type (e.g., 'recipe', 'comment') - required
   * @param entityId Entity ID - required
   * @returns Reaction summary with counts by type
   */
  @Get('summary')
  @Middlewares(publicReadLimiter)
  @SuccessResponse(200, 'Reaction summary with counts by type')
  @Response<ErrorResponse>(400, 'Missing required query parameters')
  public async getReactionSummary(
    @Request() request: ExpressRequest,
    @Query() entityType: string,
    @Query() entityId: string,
  ): Promise<SuccessResponseType<ReactionSummaryEntry[]>> {
    return this.success(await reactionRepo.getReactionSummary(entityType, entityId, request.log));
  }
  // ==========================================================================
  // AUTHENTICATED ENDPOINTS
  // ==========================================================================
  /**
   * Toggle reaction.
   *
   * Adds the caller's reaction to an entity if it does not exist, or removes
   * it if it does. The repository signals which branch occurred by returning
   * the created row (added) or null (removed).
   *
   * @summary Toggle reaction
   * @param request Express request carrying the authenticated user profile
   * @param body Reaction details
   * @returns Reaction added (201) or removed (200) confirmation
   */
  @Post('toggle')
  @Security('bearerAuth')
  @Middlewares(reactionToggleLimiter)
  @SuccessResponse(200, 'Reaction removed')
  @Response<ErrorResponse>(401, 'Unauthorized - invalid or missing token')
  public async toggleReaction(
    @Request() request: ExpressRequest,
    @Body() body: ToggleReactionRequest,
  ): Promise<SuccessResponseType<ReactionAddedResponse | ReactionRemovedResponse>> {
    // request.user is populated by the bearerAuth middleware.
    const profile = request.user as UserProfile;
    const payload = {
      user_id: profile.user.user_id,
      entity_type: body.entity_type,
      entity_id: body.entity_id,
      reaction_type: body.reaction_type,
    };
    const toggled = await reactionRepo.toggleReaction(payload, request.log);
    if (!toggled) {
      // Null result: an existing reaction was deleted (default 200).
      return this.success({ message: 'Reaction removed.' });
    }
    // Non-null result: a new reaction row was inserted.
    this.setStatus(201);
    return this.success({ message: 'Reaction added.', reaction: toggled });
  }
}

Some files were not shown because too many files have changed in this diff Show More