Compare commits

...

8 Commits

| Author | SHA1 | Message | CI Status | Date |
| ------ | ---- | ------- | --------- | ---- |
| Gitea Actions | 639313485a | ci: Bump version to 0.9.71 [skip ci] | | 2026-01-09 19:00:01 +05:00 |
| | 4a04e478c4 | integration test fixes - claude for the win? try 4 - i have a good feeling | Some checks failed: Deploy to Test Environment / deploy-to-test (push), failing after 16m58s | 2026-01-09 05:56:19 -08:00 |
| Gitea Actions | 1814469eb4 | ci: Bump version to 0.9.70 [skip ci] | | 2026-01-09 18:19:13 +05:00 |
| | b777430ff7 | integration test fixes - claude for the win? try 4 - i have a good feeling | Some checks failed: Deploy to Test Environment / deploy-to-test (push), cancelled | 2026-01-09 05:18:19 -08:00 |
| Gitea Actions | 23830c0d4e | ci: Bump version to 0.9.69 [skip ci] | | 2026-01-09 17:24:00 +05:00 |
| | ef42fee982 | integration test fixes - claude for the win? try 3 | All checks successful: Deploy to Test Environment / deploy-to-test (push), successful in 32m3s | 2026-01-09 04:23:23 -08:00 |
| Gitea Actions | 65cb54500c | ci: Bump version to 0.9.68 [skip ci] | | 2026-01-09 16:42:51 +05:00 |
| | 664ad291be | integration test fixes - claude for the win? try 3 | All checks successful: Deploy to Test Environment / deploy-to-test (push), successful in 30m3s | 2026-01-09 03:41:57 -08:00 |
56 changed files with 4679 additions and 662 deletions

View File

@@ -56,7 +56,14 @@
       "mcp__memory__delete_entities",
       "mcp__sequential-thinking__sequentialthinking",
       "mcp__filesystem__list_directory",
-      "mcp__filesystem__read_multiple_files"
+      "mcp__filesystem__read_multiple_files",
+      "mcp__filesystem__directory_tree",
+      "mcp__filesystem__read_text_file",
+      "Bash(wc:*)",
+      "Bash(npm install:*)",
+      "Bash(git grep:*)",
+      "Bash(findstr:*)",
+      "Bash(git add:*)"
     ]
   }
 }

View File

@@ -1,61 +1,66 @@
 {
-  "mcpServers": {
-    "markitdown": {
-      "command": "C:\\Users\\games3\\.local\\bin\\uvx.exe",
-      "args": [
-        "markitdown-mcp"
-      ]
-    },
-    "gitea-torbonium": {
-      "command": "d:\\gitea-mcp\\gitea-mcp.exe",
-      "args": ["run", "-t", "stdio"],
-      "env": {
-        "GITEA_HOST": "https://gitea.torbonium.com",
-        "GITEA_ACCESS_TOKEN": "391c9ddbe113378bc87bb8184800ba954648fcf8"
-      }
-    },
-    "gitea-lan": {
-      "command": "d:\\gitea-mcp\\gitea-mcp.exe",
-      "args": ["run", "-t", "stdio"],
-      "env": {
-        "GITEA_HOST": "https://gitea.torbolan.com",
-        "GITEA_ACCESS_TOKEN": "REPLACE_WITH_NEW_TOKEN"
-      }
-    },
-    "gitea-projectium": {
-      "command": "d:\\gitea-mcp\\gitea-mcp.exe",
-      "args": ["run", "-t", "stdio"],
-      "env": {
-        "GITEA_HOST": "https://gitea.projectium.com",
-        "GITEA_ACCESS_TOKEN": "c72bc0f14f623fec233d3c94b3a16397fe3649ef"
-      }
-    },
-    "podman": {
-      "command": "D:\\nodejs\\npx.cmd",
-      "args": ["-y", "podman-mcp-server@latest"],
-      "env": {
-        "DOCKER_HOST": "npipe:////./pipe/podman-machine-default"
-      }
-    },
-    "filesystem": {
-      "command": "D:\\nodejs\\npx.cmd",
-      "args": [
-        "-y",
-        "@modelcontextprotocol/server-filesystem",
-        "D:\\gitea\\flyer-crawler.projectium.com\\flyer-crawler.projectium.com"
-      ]
-    },
-    "fetch": {
-      "command": "D:\\nodejs\\npx.cmd",
-      "args": ["-y", "@modelcontextprotocol/server-fetch"]
-    },
-    "sequential-thinking": {
-      "command": "D:\\nodejs\\npx.cmd",
-      "args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
-    },
-    "memory": {
-      "command": "D:\\nodejs\\npx.cmd",
-      "args": ["-y", "@modelcontextprotocol/server-memory"]
-    }
-  }
-}
+  "mcpServers": {
+    "gitea-projectium": {
+      "command": "d:\\gitea-mcp\\gitea-mcp.exe",
+      "args": ["run", "-t", "stdio"],
+      "env": {
+        "GITEA_HOST": "https://gitea.projectium.com",
+        "GITEA_ACCESS_TOKEN": "c72bc0f14f623fec233d3c94b3a16397fe3649ef"
+      }
+    },
+    "gitea-torbonium": {
+      "command": "d:\\gitea-mcp\\gitea-mcp.exe",
+      "args": ["run", "-t", "stdio"],
+      "env": {
+        "GITEA_HOST": "https://gitea.torbonium.com",
+        "GITEA_ACCESS_TOKEN": "391c9ddbe113378bc87bb8184800ba954648fcf8"
+      }
+    },
+    "gitea-lan": {
+      "command": "d:\\gitea-mcp\\gitea-mcp.exe",
+      "args": ["run", "-t", "stdio"],
+      "env": {
+        "GITEA_HOST": "https://gitea.torbolan.com",
+        "GITEA_ACCESS_TOKEN": "YOUR_LAN_TOKEN_HERE"
+      },
+      "disabled": true
+    },
+    "podman": {
+      "command": "D:\\nodejs\\npx.cmd",
+      "args": ["-y", "podman-mcp-server@latest"],
+      "env": {
+        "DOCKER_HOST": "npipe:////./pipe/podman-machine-default"
+      }
+    },
+    "filesystem": {
+      "command": "d:\\nodejs\\node.exe",
+      "args": [
+        "c:\\Users\\games3\\AppData\\Roaming\\npm\\node_modules\\@modelcontextprotocol\\server-filesystem\\dist\\index.js",
+        "d:\\gitea\\flyer-crawler.projectium.com\\flyer-crawler.projectium.com"
+      ]
+    },
+    "fetch": {
+      "command": "D:\\nodejs\\npx.cmd",
+      "args": ["-y", "@modelcontextprotocol/server-fetch"]
+    },
+    "io.github.ChromeDevTools/chrome-devtools-mcp": {
+      "type": "stdio",
+      "command": "npx",
+      "args": ["chrome-devtools-mcp@0.12.1"],
+      "gallery": "https://api.mcp.github.com",
+      "version": "0.12.1"
+    },
+    "markitdown": {
+      "command": "C:\\Users\\games3\\.local\\bin\\uvx.exe",
+      "args": ["markitdown-mcp"]
+    },
+    "sequential-thinking": {
+      "command": "D:\\nodejs\\npx.cmd",
+      "args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
+    },
+    "memory": {
+      "command": "D:\\nodejs\\npx.cmd",
+      "args": ["-y", "@modelcontextprotocol/server-memory"]
+    }
+  }
+}

View File

@@ -117,7 +117,8 @@ jobs:
           DB_USER: ${{ secrets.DB_USER }}
           DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
           DB_NAME: ${{ secrets.DB_DATABASE_PROD }}
-          REDIS_URL: 'redis://localhost:6379'
+          # Explicitly use database 0 for production (test uses database 1)
+          REDIS_URL: 'redis://localhost:6379/0'
           REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
           FRONTEND_URL: 'https://flyer-crawler.projectium.com'
           JWT_SECRET: ${{ secrets.JWT_SECRET }}

View File

@@ -96,6 +96,24 @@ jobs:
           # It prevents the accumulation of duplicate processes from previous test runs.
           node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.name && p.name.endsWith('-test')) { console.log('Deleting test process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id, e.message); } } }); console.log('✅ Test process cleanup complete.'); } catch (e) { if (e.stdout.toString().includes('No process found')) { console.log('No PM2 processes running, cleanup not needed.'); } else { console.error('Error cleaning up test processes:', e.message); } }" || true
+      - name: Flush Redis Test Database Before Tests
+        # CRITICAL: Clear Redis database 1 (test database) to remove stale BullMQ jobs.
+        # This prevents old jobs with outdated error messages from polluting test results.
+        # NOTE: We use database 1 for tests to isolate from production (database 0).
+        env:
+          REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
+        run: |
+          echo "--- Flushing Redis database 1 (test database) to remove stale jobs ---"
+          if [ -z "$REDIS_PASSWORD" ]; then
+            echo "⚠️ REDIS_PASSWORD_TEST not set, attempting flush without password..."
+            redis-cli -n 1 FLUSHDB || echo "Redis flush failed (no password)"
+          else
+            redis-cli -a "$REDIS_PASSWORD" -n 1 FLUSHDB 2>/dev/null && echo "✅ Redis database 1 (test) flushed successfully." || echo "⚠️ Redis flush failed"
+          fi
+          # Verify the flush worked by checking key count on database 1
+          KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 1 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
+          echo "Redis database 1 key count after flush: $KEY_COUNT"
       - name: Run All Tests and Generate Merged Coverage Report
         # This single step runs both unit and integration tests, then merges their
         # coverage data into a single report. It combines the environment variables
@@ -109,7 +127,9 @@
           DB_NAME: 'flyer-crawler-test' # Explicitly set for tests
           # --- Redis credentials for the test suite ---
-          REDIS_URL: 'redis://localhost:6379'
+          # CRITICAL: Use Redis database 1 to isolate tests from production (which uses db 0).
+          # This prevents the production worker from picking up test jobs.
+          REDIS_URL: 'redis://localhost:6379/1'
           REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
           # --- Integration test specific variables ---
@@ -384,8 +404,8 @@
           DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
           DB_NAME: ${{ secrets.DB_DATABASE_TEST }}
-          # Redis Credentials
-          REDIS_URL: 'redis://localhost:6379'
+          # Redis Credentials (use database 1 to isolate from production)
+          REDIS_URL: 'redis://localhost:6379/1'
           REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
           # Application Secrets

View File

@@ -116,7 +116,8 @@
           DB_USER: ${{ secrets.DB_USER }}
           DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
           DB_NAME: ${{ secrets.DB_DATABASE_PROD }}
-          REDIS_URL: 'redis://localhost:6379'
+          # Explicitly use database 0 for production (test uses database 1)
+          REDIS_URL: 'redis://localhost:6379/0'
           REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
           FRONTEND_URL: 'https://flyer-crawler.projectium.com'
           JWT_SECRET: ${{ secrets.JWT_SECRET }}

View File

@@ -0,0 +1,167 @@
# .gitea/workflows/manual-redis-flush-prod.yml
#
# DANGER: This workflow is DESTRUCTIVE and intended for manual execution only.
# It will completely FLUSH the PRODUCTION Redis database (db 0).
# This will clear all BullMQ queues, sessions, caches, and any other Redis data.
#
name: Manual - Flush Production Redis

on:
  workflow_dispatch:
    inputs:
      confirmation:
        description: 'DANGER: This will FLUSH production Redis. Type "flush-production-redis" to confirm.'
        required: true
        default: 'do-not-run'
      flush_type:
        description: 'What to flush?'
        required: true
        type: choice
        options:
          - 'queues-only'
          - 'entire-database'
        default: 'queues-only'

jobs:
  flush-redis:
    runs-on: projectium.com # This job runs on your self-hosted Gitea runner.
    env:
      REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
    steps:
      - name: Checkout Code
        uses: actions/checkout@v3

      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '20'
          cache: 'npm'
          cache-dependency-path: '**/package-lock.json'

      - name: Install Dependencies
        run: npm ci

      - name: Validate Secrets
        run: |
          if [ -z "$REDIS_PASSWORD" ]; then
            echo "ERROR: REDIS_PASSWORD_PROD secret is not set in Gitea repository settings."
            exit 1
          fi
          echo "✅ Redis password secret is present."

      - name: Verify Confirmation Phrase
        run: |
          if [ "${{ gitea.event.inputs.confirmation }}" != "flush-production-redis" ]; then
            echo "ERROR: Confirmation phrase did not match. Aborting Redis flush."
            exit 1
          fi
          echo "✅ Confirmation accepted. Proceeding with Redis flush."

      - name: Show Current Redis State
        run: |
          echo "--- Current Redis Database 0 (Production) State ---"
          redis-cli -a "$REDIS_PASSWORD" -n 0 INFO keyspace 2>/dev/null || echo "Could not get keyspace info"
          echo ""
          echo "--- Key Count ---"
          KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 0 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
          echo "Production Redis (db 0) key count: $KEY_COUNT"
          echo ""
          echo "--- BullMQ Queue Keys ---"
          redis-cli -a "$REDIS_PASSWORD" -n 0 KEYS "bull:*" 2>/dev/null | head -20 || echo "No BullMQ keys found"

      - name: 🚨 FINAL WARNING & PAUSE 🚨
        run: |
          echo "*********************************************************************"
          echo "WARNING: YOU ARE ABOUT TO FLUSH PRODUCTION REDIS DATA."
          echo "Flush type: ${{ gitea.event.inputs.flush_type }}"
          echo ""
          if [ "${{ gitea.event.inputs.flush_type }}" = "entire-database" ]; then
            echo "This will DELETE ALL Redis data including sessions, caches, and queues!"
          else
            echo "This will DELETE ALL BullMQ queue data (pending jobs, failed jobs, etc.)"
          fi
          echo ""
          echo "This action is IRREVERSIBLE. Press Ctrl+C in the runner terminal NOW to cancel."
          echo "Sleeping for 10 seconds..."
          echo "*********************************************************************"
          sleep 10

      - name: Flush BullMQ Queues Only
        if: ${{ gitea.event.inputs.flush_type == 'queues-only' }}
        env:
          REDIS_URL: 'redis://localhost:6379/0'
        run: |
          echo "--- Obliterating BullMQ queues using Node.js ---"
          node -e "
          const { Queue } = require('bullmq');
          const IORedis = require('ioredis');
          const connection = new IORedis(process.env.REDIS_URL, {
            maxRetriesPerRequest: null,
            password: process.env.REDIS_PASSWORD,
          });
          const queueNames = [
            'flyer-processing',
            'email-sending',
            'analytics-reporting',
            'weekly-analytics-reporting',
            'file-cleanup',
            'token-cleanup'
          ];
          (async () => {
            for (const name of queueNames) {
              try {
                const queue = new Queue(name, { connection });
                const counts = await queue.getJobCounts();
                console.log('Queue \"' + name + '\" before obliterate:', JSON.stringify(counts));
                await queue.obliterate({ force: true });
                console.log('✅ Obliterated queue: ' + name);
                await queue.close();
              } catch (err) {
                console.error('⚠️ Failed to obliterate queue ' + name + ':', err.message);
              }
            }
            await connection.quit();
            console.log('✅ All BullMQ queues obliterated.');
          })();
          "

      - name: Flush Entire Redis Database
        if: ${{ gitea.event.inputs.flush_type == 'entire-database' }}
        run: |
          echo "--- Flushing entire Redis database 0 (production) ---"
          redis-cli -a "$REDIS_PASSWORD" -n 0 FLUSHDB 2>/dev/null && echo "✅ Redis database 0 flushed successfully." || echo "❌ Redis flush failed"

      - name: Verify Flush Results
        run: |
          echo "--- Redis Database 0 (Production) State After Flush ---"
          KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 0 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
          echo "Production Redis (db 0) key count after flush: $KEY_COUNT"
          echo ""
          echo "--- Remaining BullMQ Queue Keys ---"
          BULL_KEYS=$(redis-cli -a "$REDIS_PASSWORD" -n 0 KEYS "bull:*" 2>/dev/null | wc -l || echo "0")
          echo "BullMQ key count: $BULL_KEYS"
          if [ "${{ gitea.event.inputs.flush_type }}" = "queues-only" ] && [ "$BULL_KEYS" -gt 0 ]; then
            echo "⚠️ Warning: Some BullMQ keys may still exist. This can happen if new jobs were added during the flush."
          fi

      - name: Summary
        run: |
          echo ""
          echo "=========================================="
          echo "PRODUCTION REDIS FLUSH COMPLETE"
          echo "=========================================="
          echo "Flush type: ${{ gitea.event.inputs.flush_type }}"
          echo "Timestamp: $(date -u '+%Y-%m-%d %H:%M:%S UTC')"
          echo ""
          echo "NOTE: If you flushed queues, any pending jobs (flyer processing,"
          echo "emails, analytics, etc.) have been permanently deleted."
          echo ""
          echo "The production workers will automatically start processing"
          echo "new jobs as they are added to the queues."
          echo "=========================================="

.husky/pre-commit (new file, 1 line added)
View File

@@ -0,0 +1 @@
npx lint-staged

.lintstagedrc.json (new file, 4 lines added)
View File

@@ -0,0 +1,4 @@
{
  "*.{js,jsx,ts,tsx}": ["eslint --fix", "prettier --write"],
  "*.{json,md,css,html,yml,yaml}": ["prettier --write"]
}

.prettierignore (new file, 41 lines added)
View File

@@ -0,0 +1,41 @@
# Dependencies
node_modules/

# Build output
dist/
build/
.cache/

# Coverage reports
coverage/
.coverage/

# IDE and editor configs
.idea/
.vscode/
*.swp
*.swo

# Logs
*.log
logs/

# Environment files (may contain secrets)
.env*
!.env.example

# Lock files (managed by package managers)
package-lock.json
pnpm-lock.yaml
yarn.lock

# Generated files
*.min.js
*.min.css

# Git directory
.git/
.gitea/

# Test artifacts
__snapshots__/

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
-**Status**: Proposed
+**Status**: Accepted
## Context
@@ -16,3 +16,82 @@ We will implement a dedicated background job processing system using a task queu
**Positive**: Decouples the API from heavy processing, allows for retries on failure, and enables scaling the processing workers independently. Increases application reliability and resilience.
**Negative**: Introduces a new dependency (Redis) into the infrastructure. Requires refactoring of the flyer processing logic to work within a job queue structure.
## Implementation Details
### Queue Infrastructure
The implementation uses **BullMQ v5.65.1** with **ioredis v5.8.2** for Redis connectivity. Six distinct queues handle different job types:
| Queue Name | Purpose | Retry Attempts | Backoff Strategy |
| ---------------------------- | --------------------------- | -------------- | ---------------------- |
| `flyer-processing` | OCR/AI processing of flyers | 3 | Exponential (5s base) |
| `email-sending` | Email delivery | 5 | Exponential (10s base) |
| `analytics-reporting` | Daily report generation | 2 | Exponential (60s base) |
| `weekly-analytics-reporting` | Weekly report generation | 2 | Exponential (1h base) |
| `file-cleanup` | Temporary file cleanup | 3 | Exponential (30s base) |
| `token-cleanup` | Expired token removal | 2 | Exponential (1h base) |
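For illustration, the first queue in the table might be declared as follows. This is a minimal sketch under assumed connection settings, not the project's actual `queues.server.ts`:
```typescript
import { Queue } from 'bullmq';

// Hypothetical connection options; the real service reads REDIS_URL from config.
const connection = { host: 'localhost', port: 6379 };

export const flyerProcessingQueue = new Queue('flyer-processing', {
  connection,
  defaultJobOptions: {
    attempts: 3, // retry attempts from the table above
    backoff: { type: 'exponential', delay: 5_000 }, // 5s exponential base
  },
});
```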
### Key Files
- `src/services/queues.server.ts` - Queue definitions and configuration
- `src/services/workers.server.ts` - Worker implementations with configurable concurrency
- `src/services/redis.server.ts` - Redis connection management
- `src/services/queueService.server.ts` - Queue lifecycle and graceful shutdown
- `src/services/flyerProcessingService.server.ts` - 5-stage flyer processing pipeline
- `src/types/job-data.ts` - TypeScript interfaces for all job data types
### API Design
Endpoints for long-running tasks return **202 Accepted** immediately with a job ID:
```text
POST /api/ai/upload-and-process → 202 { jobId: "..." }
GET /api/ai/jobs/:jobId/status → { state: "...", progress: ... }
```
### Worker Configuration
Workers are configured via environment variables:
- `WORKER_CONCURRENCY` - Flyer processing parallelism (default: 1)
- `EMAIL_WORKER_CONCURRENCY` - Email worker parallelism (default: 10)
- `ANALYTICS_WORKER_CONCURRENCY` - Analytics worker parallelism (default: 1)
- `CLEANUP_WORKER_CONCURRENCY` - Cleanup worker parallelism (default: 10)
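A sketch of how these variables could drive a worker's parallelism; the queue name, handler body, and connection options are illustrative:
```typescript
import { Worker } from 'bullmq';

const concurrency = parseInt(process.env.EMAIL_WORKER_CONCURRENCY ?? '10', 10);

export const emailWorker = new Worker(
  'email-sending',
  async (job) => {
    // ... deliver the email described by job.data ...
  },
  { connection: { host: 'localhost', port: 6379 }, concurrency },
);
```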
### Monitoring
- **Bull Board UI** available at `/api/admin/jobs` for admin users
- Worker status endpoint: `GET /api/admin/workers/status`
- Queue status endpoint: `GET /api/admin/queues/status`
### Graceful Shutdown
Both API and worker processes implement graceful shutdown with a 30-second timeout, ensuring in-flight jobs complete before process termination.
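A hedged sketch of that shutdown sequence; the registration helper and force-exit fallback are assumptions, but `Worker.close()` waiting for active jobs is standard BullMQ behaviour:
```typescript
import type { Worker } from 'bullmq';

const SHUTDOWN_TIMEOUT_MS = 30_000; // the 30-second budget described above

export function registerGracefulShutdown(workers: Worker[]) {
  const shutdown = async (signal: string) => {
    console.log(`${signal} received, waiting for in-flight jobs...`);
    // Force-exit if close() does not finish within the budget.
    const timer = setTimeout(() => process.exit(1), SHUTDOWN_TIMEOUT_MS);
    // Worker.close() resolves once active jobs have completed.
    await Promise.all(workers.map((w) => w.close()));
    clearTimeout(timer);
    process.exit(0);
  };
  process.on('SIGTERM', () => void shutdown('SIGTERM'));
  process.on('SIGINT', () => void shutdown('SIGINT'));
}
```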
## Compliance Notes
### Deprecated Synchronous Endpoints
The following endpoints process flyers synchronously and are **deprecated**:
- `POST /api/ai/upload-legacy` - For integration testing only
- `POST /api/ai/flyers/process` - Legacy workflow, should migrate to queue-based approach
New integrations MUST use `POST /api/ai/upload-and-process` for queue-based processing.
### Email Handling
- **Bulk emails** (deal notifications): Enqueued via `emailQueue`
- **Transactional emails** (password reset): Sent synchronously for immediate user feedback
## Future Enhancements
Potential improvements for consideration:
1. **Dead Letter Queue (DLQ)**: Move permanently failed jobs to a dedicated queue for analysis
2. **Job Priority Levels**: Allow priority-based processing for different job types
3. **Real-time Progress**: WebSocket/SSE for live job progress updates to clients
4. **Per-Queue Rate Limiting**: Throttle job processing based on external API limits
5. **Job Dependencies**: Support for jobs that depend on completion of other jobs
6. **Prometheus Metrics**: Export queue metrics for observability dashboards

View File

@@ -2,7 +2,9 @@
**Date**: 2025-12-12
-**Status**: Proposed
+**Status**: Accepted
+**Implemented**: 2026-01-09
## Context
@@ -16,3 +18,216 @@ We will introduce a centralized, schema-validated configuration service. We will
**Positive**: Improves application reliability and developer experience by catching configuration errors at startup rather than at runtime. Provides a single source of truth for all required configuration.
**Negative**: Adds a small amount of boilerplate for defining the configuration schema. Requires a one-time effort to refactor all `process.env` access points to use the new configuration service.
## Implementation Status
### What's Implemented
- ✅ **Centralized Configuration Schema** - Zod-based validation in `src/config/env.ts`
- ✅ **Type-Safe Access** - Full TypeScript types for all configuration
- ✅ **Fail-Fast Startup** - Clear error messages for missing/invalid config
- ✅ **Environment Helpers** - `isProduction`, `isTest`, `isDevelopment` exports
- ✅ **Service Configuration Helpers** - `isSmtpConfigured`, `isAiConfigured`, etc.
### Migration Status
- ⏳ Gradual migration of `process.env` access to `config.*` in progress
- Legacy `process.env` access still works during transition
## Implementation Details
### Configuration Schema
The configuration is organized into logical groups:
```typescript
import { config, isProduction, isTest } from './config/env';
// Database
config.database.host; // DB_HOST
config.database.port; // DB_PORT (default: 5432)
config.database.user; // DB_USER
config.database.password; // DB_PASSWORD
config.database.name; // DB_NAME
// Redis
config.redis.url; // REDIS_URL
config.redis.password; // REDIS_PASSWORD (optional)
// Authentication
config.auth.jwtSecret; // JWT_SECRET (min 32 chars)
config.auth.jwtSecretPrevious; // JWT_SECRET_PREVIOUS (for rotation)
// SMTP (all optional - email degrades gracefully)
config.smtp.host; // SMTP_HOST
config.smtp.port; // SMTP_PORT (default: 587)
config.smtp.user; // SMTP_USER
config.smtp.pass; // SMTP_PASS
config.smtp.secure; // SMTP_SECURE (default: false)
config.smtp.fromEmail; // SMTP_FROM_EMAIL
// AI Services
config.ai.geminiApiKey; // GEMINI_API_KEY
config.ai.geminiRpm; // GEMINI_RPM (default: 5)
config.ai.priceQualityThreshold; // AI_PRICE_QUALITY_THRESHOLD (default: 0.5)
// Google Services
config.google.mapsApiKey; // GOOGLE_MAPS_API_KEY (optional)
config.google.clientId; // GOOGLE_CLIENT_ID (optional)
config.google.clientSecret; // GOOGLE_CLIENT_SECRET (optional)
// Worker Configuration
config.worker.concurrency; // WORKER_CONCURRENCY (default: 1)
config.worker.lockDuration; // WORKER_LOCK_DURATION (default: 30000)
config.worker.emailConcurrency; // EMAIL_WORKER_CONCURRENCY (default: 10)
config.worker.analyticsConcurrency; // ANALYTICS_WORKER_CONCURRENCY (default: 1)
config.worker.cleanupConcurrency; // CLEANUP_WORKER_CONCURRENCY (default: 10)
config.worker.weeklyAnalyticsConcurrency; // WEEKLY_ANALYTICS_WORKER_CONCURRENCY (default: 1)
// Server
config.server.nodeEnv; // NODE_ENV (development/production/test)
config.server.port; // PORT (default: 3001)
config.server.frontendUrl; // FRONTEND_URL
config.server.baseUrl; // BASE_URL
config.server.storagePath; // STORAGE_PATH (default: /var/www/.../flyer-images)
```
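For illustration only, a schema covering a few of these groups might be declared with Zod roughly as below; the real `src/config/env.ts` may differ in names, nesting, and defaults:
```typescript
import { z } from 'zod';

// Illustrative subset of the full schema.
const envSchema = z.object({
  DB_HOST: z.string().min(1, 'DB_HOST is required'),
  DB_PORT: z.coerce.number().default(5432),
  REDIS_URL: z.string().url(),
  JWT_SECRET: z.string().min(32, 'JWT_SECRET must be at least 32 characters for security'),
  NODE_ENV: z.enum(['development', 'production', 'test']).default('development'),
});

export type Env = z.infer<typeof envSchema>;
```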
### Convenience Helpers
```typescript
import { isProduction, isTest, isDevelopment, isSmtpConfigured } from './config/env';

// Environment checks
if (isProduction) {
  // Production-only logic
}

// Service availability checks
if (isSmtpConfigured) {
  await sendEmail(...);
} else {
  logger.warn('Email not configured, skipping notification');
}
```
### Fail-Fast Error Messages
When configuration is invalid, the application exits with a clear error:
```text
╔════════════════════════════════════════════════════════════════╗
║ CONFIGURATION ERROR - APPLICATION STARTUP ║
╚════════════════════════════════════════════════════════════════╝
The following environment variables are missing or invalid:
- database.host: DB_HOST is required
- auth.jwtSecret: JWT_SECRET must be at least 32 characters for security
Please check your .env file or environment configuration.
See ADR-007 for the complete list of required environment variables.
```
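Sketched against the illustrative `envSchema` above, the fail-fast behaviour amounts to one `safeParse` at startup followed by a formatted report and `process.exit(1)`:
```typescript
const parsed = envSchema.safeParse(process.env);
if (!parsed.success) {
  console.error('CONFIGURATION ERROR - APPLICATION STARTUP');
  for (const issue of parsed.error.issues) {
    // e.g. "- JWT_SECRET: JWT_SECRET must be at least 32 characters for security"
    console.error(`  - ${issue.path.join('.')}: ${issue.message}`);
  }
  process.exit(1);
}
export const env = parsed.data; // fully typed from here on
```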
### Usage Example
```typescript
// Before (direct process.env access)
const pool = new Pool({
  host: process.env.DB_HOST,
  port: parseInt(process.env.DB_PORT || '5432', 10),
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
});

// After (type-safe config access)
import { config } from './config/env';

const pool = new Pool({
  host: config.database.host,
  port: config.database.port,
  user: config.database.user,
  password: config.database.password,
  database: config.database.name,
});
```
## Required Environment Variables
### Critical (Application will not start without these)
| Variable | Description |
| ------------- | ----------------------------------------------------- |
| `DB_HOST` | PostgreSQL database host |
| `DB_USER` | PostgreSQL database user |
| `DB_PASSWORD` | PostgreSQL database password |
| `DB_NAME` | PostgreSQL database name |
| `REDIS_URL` | Redis connection URL (e.g., `redis://localhost:6379`) |
| `JWT_SECRET` | JWT signing secret (minimum 32 characters) |
### Optional with Defaults
| Variable | Default | Description |
| ---------------------------- | ------------------------- | ------------------------------- |
| `DB_PORT` | 5432 | PostgreSQL port |
| `PORT` | 3001 | Server HTTP port |
| `NODE_ENV` | development | Environment mode |
| `STORAGE_PATH` | /var/www/.../flyer-images | File upload directory |
| `SMTP_PORT` | 587 | SMTP server port |
| `SMTP_SECURE` | false | Use TLS for SMTP |
| `GEMINI_RPM` | 5 | Gemini API requests per minute |
| `AI_PRICE_QUALITY_THRESHOLD` | 0.5 | AI extraction quality threshold |
| `WORKER_CONCURRENCY` | 1 | Flyer processing concurrency |
| `WORKER_LOCK_DURATION` | 30000 | Worker lock duration (ms) |
### Optional (Feature-specific)
| Variable | Description |
| --------------------- | ------------------------------------------- |
| `GEMINI_API_KEY` | Google Gemini API key (enables AI features) |
| `GOOGLE_MAPS_API_KEY` | Google Maps API key (enables geocoding) |
| `SMTP_HOST` | SMTP server (enables email notifications) |
| `SMTP_USER` | SMTP authentication username |
| `SMTP_PASS` | SMTP authentication password |
| `SMTP_FROM_EMAIL` | Sender email address |
| `FRONTEND_URL` | Frontend URL for email links |
| `JWT_SECRET_PREVIOUS` | Previous JWT secret for rotation (ADR-029) |
## Key Files
- `src/config/env.ts` - Configuration schema and validation
- `.env.example` - Template for required environment variables
## Migration Guide
To migrate existing `process.env` usage:
1. Import the config:
```typescript
import { config, isProduction } from '../config/env';
```
2. Replace direct access:
```typescript
// Before
process.env.DB_HOST;
process.env.NODE_ENV === 'production';
parseInt(process.env.PORT || '3001', 10);
// After
config.database.host;
isProduction;
config.server.port;
```
3. Use service helpers for optional features:
```typescript
import { isSmtpConfigured, isAiConfigured } from '../config/env';

if (isSmtpConfigured) {
  // Email is available
}
```

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
-**Status**: Proposed
+**Status**: Accepted
## Context
@@ -20,3 +20,107 @@ We will implement a multi-layered caching strategy using an in-memory data store
**Positive**: Directly addresses application performance and scalability. Reduces database load and improves API response times for common requests.
**Negative**: Introduces Redis as a dependency if not already used. Adds complexity to the data-fetching logic and requires careful management of cache invalidation to prevent stale data.
## Implementation Details
### Cache Service
A centralized cache service (`src/services/cacheService.server.ts`) provides reusable caching functionality:
- **`getOrSet<T>(key, fetcher, options)`**: Cache-aside pattern implementation
- **`get<T>(key)`**: Retrieve cached value
- **`set<T>(key, value, ttl)`**: Store value with TTL
- **`del(key)`**: Delete specific key
- **`invalidatePattern(pattern)`**: Delete keys matching a pattern
All cache operations are fail-safe - cache failures do not break the application.
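As an illustration of the cache-aside pattern, `getOrSet` could look roughly like this sketch (an `ioredis` client is assumed; the real `cacheService.server.ts` may differ):
```typescript
import IORedis from 'ioredis';

const redis = new IORedis(process.env.REDIS_URL ?? 'redis://localhost:6379');

export async function getOrSet<T>(
  key: string,
  fetcher: () => Promise<T>,
  options: { ttlSeconds: number },
): Promise<T> {
  try {
    const cached = await redis.get(key);
    if (cached !== null) return JSON.parse(cached) as T;
  } catch {
    // Fail-safe: a cache read error falls through to the fetcher.
  }
  const value = await fetcher();
  try {
    await redis.set(key, JSON.stringify(value), 'EX', options.ttlSeconds);
  } catch {
    // Fail-safe: a cache write error never breaks the request.
  }
  return value;
}
```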
### TTL Configuration
Different data types use different TTL values based on volatility:
| Data Type         | TTL        | Rationale                             |
| ----------------- | ---------- | ------------------------------------- |
| Brands/Stores     | 1 hour     | Rarely changes, safe to cache longer  |
| Flyer lists       | 5 minutes  | Changes when new flyers are added     |
| Individual flyers | 10 minutes | Stable once created                   |
| Flyer items       | 10 minutes | Stable once created                   |
| Statistics        | 5 minutes  | Can be slightly stale                 |
| Frequent sales    | 15 minutes | Aggregated data, updated periodically |
| Categories        | 1 hour     | Rarely changes                        |
### Cache Key Strategy
Cache keys follow a consistent prefix pattern for pattern-based invalidation:
- `cache:brands` - All brands list
- `cache:flyers:{limit}:{offset}` - Paginated flyer lists
- `cache:flyer:{id}` - Individual flyer data
- `cache:flyer-items:{flyerId}` - Items for a specific flyer
- `cache:stats:*` - Statistics data
- `geocode:{address}` - Geocoding results (30-day TTL)
### Cached Endpoints
The following repository methods implement server-side caching:
| Method | Cache Key Pattern | TTL |
| ------ | ----------------- | --- |
| `FlyerRepository.getAllBrands()` | `cache:brands` | 1 hour |
| `FlyerRepository.getFlyers()` | `cache:flyers:{limit}:{offset}` | 5 minutes |
| `FlyerRepository.getFlyerItems()` | `cache:flyer-items:{flyerId}` | 10 minutes |
### Cache Invalidation
**Event-based invalidation** is triggered on write operations:
- **Flyer creation** (`FlyerPersistenceService.saveFlyer`): Invalidates all `cache:flyers*` keys
- **Flyer deletion** (`FlyerRepository.deleteFlyer`): Invalidates specific flyer and flyer items cache, plus flyer lists
**Manual invalidation** via admin endpoints:
- `POST /api/admin/system/clear-cache` - Clears all application cache (flyers, brands, stats)
- `POST /api/admin/system/clear-geocode-cache` - Clears geocoding cache
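A sketch of how `invalidatePattern` might be implemented without blocking Redis, using `SCAN` rather than `KEYS` (an `ioredis` client is assumed):
```typescript
import IORedis from 'ioredis';

const redis = new IORedis(process.env.REDIS_URL ?? 'redis://localhost:6379');

export async function invalidatePattern(pattern: string): Promise<void> {
  let cursor = '0';
  do {
    // SCAN walks the keyspace incrementally instead of blocking like KEYS.
    const [next, keys] = await redis.scan(cursor, 'MATCH', pattern, 'COUNT', 100);
    if (keys.length > 0) await redis.del(...keys);
    cursor = next;
  } while (cursor !== '0');
}

// e.g. after saving a flyer: await invalidatePattern('cache:flyers*');
```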
### Client-Side Caching
TanStack React Query provides client-side caching with configurable stale times:
| Query Type | Stale Time |
| ----------------- | ----------- |
| Categories | 1 hour |
| Master Items | 10 minutes |
| Flyer Items | 5 minutes |
| Flyers | 2 minutes |
| Shopping Lists | 1 minute |
| Activity Log | 30 seconds |
### Multi-Layer Cache Architecture
```text
Client Request
      ↓
[TanStack React Query]    ← Client-side cache (staleTime-based)
      ↓
[Express API]
      ↓
[CacheService.getOrSet()] ← Server-side Redis cache (TTL-based)
      ↓
[PostgreSQL Database]
```
## Key Files
- `src/services/cacheService.server.ts` - Centralized cache service
- `src/services/db/flyer.db.ts` - Repository with caching for brands, flyers, flyer items
- `src/services/flyerPersistenceService.server.ts` - Cache invalidation on flyer creation
- `src/routes/admin.routes.ts` - Admin cache management endpoints
- `src/config/queryClient.ts` - Client-side query cache configuration
## Future Enhancements
1. **Recipe caching**: Add caching to expensive recipe queries (by-sale-percentage, etc.)
2. **Cache warming**: Pre-populate cache on startup for frequently accessed static data
3. **Cache metrics**: Add hit/miss rate monitoring for observability
4. **Conditional caching**: Skip cache for authenticated user-specific data
5. **Cache compression**: Compress large cached payloads to reduce Redis memory usage

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
-**Status**: Proposed
+**Status**: Accepted
## Context
@@ -14,9 +14,305 @@ We will formalize the testing pyramid for the project, defining the role of each
1. **Unit Tests (Vitest)**: For isolated functions, components, and repository methods with mocked dependencies. High coverage is expected.
2. **Integration Tests (Supertest)**: For API routes, testing the interaction between controllers, services, and mocked database layers. Focus on contract and middleware correctness.
-3. **End-to-End (E2E) Tests (Playwright/Cypress)**: For critical user flows (e.g., login, flyer upload, checkout), running against a real browser and a test database to ensure the entire system works together.
+3. **End-to-End (E2E) Tests (Vitest + Supertest)**: For critical user flows (e.g., login, flyer upload, checkout), running against a real test server and database to ensure the entire system works together.
## Consequences
**Positive**: Ensures a consistent and comprehensive approach to quality assurance. Gives developers confidence when refactoring or adding new features. Clearly defines "done" for a new feature.
**Negative**: May require investment in setting up and maintaining the E2E testing environment. Can slightly increase the time required to develop a feature if all test layers are required.
## Implementation Details
### Testing Framework Stack
| Tool | Version | Purpose |
| ---- | ------- | ------- |
| Vitest | 4.0.15 | Test runner for all test types |
| @testing-library/react | 16.3.0 | React component testing |
| @testing-library/jest-dom | 6.9.1 | DOM assertion matchers |
| supertest | 7.1.4 | HTTP assertion library for API testing |
| msw | 2.12.3 | Mock Service Worker for network mocking |
| testcontainers | 11.8.1 | Database containerization (optional) |
| c8 + nyc | 10.1.3 / 17.1.0 | Coverage reporting |
### Test File Organization
```text
src/
├── components/
│ └── *.test.tsx # Component unit tests (colocated)
├── hooks/
│ └── *.test.ts # Hook unit tests (colocated)
├── services/
│ └── *.test.ts # Service unit tests (colocated)
├── routes/
│ └── *.test.ts # Route handler unit tests (colocated)
├── utils/
│ └── *.test.ts # Utility function tests (colocated)
└── tests/
├── setup/ # Test configuration and setup files
├── utils/ # Test utilities, factories, helpers
├── assets/ # Test fixtures (images, files)
├── integration/ # Integration test files (*.test.ts)
└── e2e/ # End-to-end test files (*.e2e.test.ts)
```
**Naming Convention**: `{filename}.test.ts` or `{filename}.test.tsx` for unit/integration, `{filename}.e2e.test.ts` for E2E.
### Configuration Files
| Config | Environment | Purpose |
| ------ | ----------- | ------- |
| `vite.config.ts` | jsdom | Unit tests (React components, hooks) |
| `vitest.config.integration.ts` | node | Integration tests (API routes) |
| `vitest.config.e2e.ts` | node | E2E tests (full user flows) |
| `vitest.workspace.ts` | - | Orchestrates all test projects |
### Test Pyramid
```text
┌─────────────┐
│ E2E │ 5 test files
│ Tests │ Critical user flows
├─────────────┤
│ Integration │ 17 test files
│ Tests │ API contracts + middleware
┌───┴─────────────┴───┐
│ Unit Tests │ 185 test files
│ Components, Hooks, │ Isolated functions
│ Services, Utils │ Mocked dependencies
└─────────────────────┘
```
### Unit Tests
**Purpose**: Test isolated functions, components, and modules with mocked dependencies.
**Environment**: jsdom (browser-like)
**Key Patterns**:
```typescript
// Component testing with providers
import { renderWithProviders, screen } from '@/tests/utils/renderWithProviders';

describe('MyComponent', () => {
  it('renders correctly', () => {
    renderWithProviders(<MyComponent />);
    expect(screen.getByText('Hello')).toBeInTheDocument();
  });
});
```

```typescript
// Hook testing
import { renderHook, waitFor } from '@testing-library/react';
import { useMyHook } from './useMyHook';

describe('useMyHook', () => {
  it('returns expected value', async () => {
    const { result } = renderHook(() => useMyHook());
    await waitFor(() => expect(result.current.data).toBeDefined());
  });
});
```
**Global Mocks** (automatically applied via `tests-setup-unit.ts`):
- Database connections (`pg.Pool`)
- AI services (`@google/genai`)
- Authentication (`jsonwebtoken`, `bcrypt`)
- Logging (`logger.server`, `logger.client`)
- Notifications (`notificationService`)
### Integration Tests
**Purpose**: Test API routes with real service interactions and database.
**Environment**: node
**Setup**: Real Express server on port 3001, real PostgreSQL database
```typescript
// API route testing pattern
import supertest from 'supertest';
import { createAndLoginUser } from '@/tests/utils/testHelpers';

describe('Auth API', () => {
  let request: ReturnType<typeof supertest>;
  let authToken: string;

  beforeAll(async () => {
    const app = (await import('../../../server')).default;
    request = supertest(app);
    const { token } = await createAndLoginUser(request);
    authToken = token;
  });

  it('GET /api/auth/me returns user profile', async () => {
    const response = await request
      .get('/api/auth/me')
      .set('Authorization', `Bearer ${authToken}`);
    expect(response.status).toBe(200);
    expect(response.body.user.email).toBeDefined();
  });
});
```
**Database Cleanup**:
```typescript
import { cleanupDb } from '@/tests/utils/cleanup';

afterAll(async () => {
  await cleanupDb({ users: [testUserId] });
});
```
### E2E Tests
**Purpose**: Test complete user journeys through the application.
**Timeout**: 120 seconds (for long-running flows)
**Current E2E Tests**:
- `auth.e2e.test.ts` - Registration, login, password reset
- `flyer-upload.e2e.test.ts` - Complete flyer upload pipeline
- `user-journey.e2e.test.ts` - Full user workflow
- `admin-authorization.e2e.test.ts` - Admin-specific flows
- `admin-dashboard.e2e.test.ts` - Admin dashboard functionality
### Mock Factories
The project uses comprehensive mock factories (`src/tests/utils/mockFactories.ts`, 1553 lines) for creating test data:
```typescript
import {
  createMockUser,
  createMockFlyer,
  createMockFlyerItem,
  createMockRecipe,
  resetMockIds,
} from '@/tests/utils/mockFactories';

beforeEach(() => {
  resetMockIds(); // Ensure deterministic IDs
});

it('creates flyer with items', () => {
  const flyer = createMockFlyer({ store_name: 'TestMart' });
  const items = [createMockFlyerItem({ flyer_id: flyer.flyer_id })];
  // ...
});
```
**Factory Coverage**: 90+ factory functions for all domain entities including users, flyers, recipes, shopping lists, budgets, achievements, etc.
### Test Utilities
| Utility | Purpose |
| ------- | ------- |
| `renderWithProviders()` | Wrap components with AppProviders + Router |
| `createAndLoginUser()` | Create user and return auth token |
| `cleanupDb()` | Database cleanup respecting FK constraints |
| `createTestApp()` | Create Express app for route testing |
| `poll()` | Polling utility for async operations |
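The `poll()` helper's shape, sketched for illustration (the project's actual signature may differ):
```typescript
export async function poll<T>(
  fn: () => Promise<T>,
  predicate: (value: T) => boolean,
  { intervalMs = 500, timeoutMs = 30_000 } = {},
): Promise<T> {
  const deadline = Date.now() + timeoutMs;
  while (Date.now() < deadline) {
    const value = await fn();
    if (predicate(value)) return value; // condition met, stop polling
    await new Promise((resolve) => setTimeout(resolve, intervalMs));
  }
  throw new Error('poll() timed out');
}
```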
### Coverage Configuration
**Coverage Provider**: v8 (built-in Vitest)
**Report Directories**:
- `.coverage/unit/` - Unit test coverage
- `.coverage/integration/` - Integration test coverage
- `.coverage/e2e/` - E2E test coverage
**Excluded from Coverage**:
- `src/index.tsx`, `src/main.tsx` (entry points)
- `src/tests/**` (test files themselves)
- `src/**/*.d.ts` (type declarations)
- `src/components/icons/**` (icon components)
- `src/db/seed*.ts` (database seeding scripts)
### npm Scripts
```bash
# Run all tests
npm run test
# Run by level
npm run test:unit # Unit tests only (jsdom)
npm run test:integration # Integration tests only (node)
# With coverage
npm run test:coverage # Unit + Integration with reports
# Clean coverage directories
npm run clean
```
### Test Timeouts
| Test Type | Timeout | Rationale |
| --------- | ------- | --------- |
| Unit | 5 seconds | Fast, isolated tests |
| Integration | 60 seconds | AI service calls, DB operations |
| E2E | 120 seconds | Full user flow with multiple API calls |
## Best Practices
### When to Write Each Test Type
1. **Unit Tests** (required):
- Pure functions and utilities
- React components (rendering, user interactions)
- Custom hooks
- Service methods with mocked dependencies
- Repository methods
2. **Integration Tests** (required for API changes):
- New API endpoints
- Authentication/authorization flows
- Middleware behavior
- Database query correctness
3. **E2E Tests** (for critical paths):
- User registration and login
- Core business flows (flyer upload, shopping lists)
- Admin operations
### Test Isolation Guidelines
1. **Reset mock IDs**: Call `resetMockIds()` in `beforeEach()`
2. **Unique test data**: Use timestamps or UUIDs for emails/usernames
3. **Clean up after tests**: Use `cleanupDb()` in `afterAll()`
4. **Don't share state**: Each test should be independent
### Mocking Guidelines
1. **Unit tests**: Mock external dependencies (DB, APIs, services)
2. **Integration tests**: Mock only external APIs (AI services)
3. **E2E tests**: Minimal mocking, use real services where possible
## Key Files
- `vite.config.ts` - Unit test configuration
- `vitest.config.integration.ts` - Integration test configuration
- `vitest.config.e2e.ts` - E2E test configuration
- `vitest.workspace.ts` - Workspace orchestration
- `src/tests/setup/tests-setup-unit.ts` - Global mocks (488 lines)
- `src/tests/setup/integration-global-setup.ts` - Server + DB setup
- `src/tests/utils/mockFactories.ts` - Mock factories (1553 lines)
- `src/tests/utils/testHelpers.ts` - Test utilities
## Future Enhancements
1. **Browser E2E Tests**: Consider adding Playwright for actual browser testing
2. **Visual Regression**: Screenshot comparison for UI components
3. **Performance Testing**: Add benchmarks for critical paths
4. **Mutation Testing**: Verify test quality with mutation testing tools
5. **Coverage Thresholds**: Define minimum coverage requirements per module

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
-**Status**: Proposed
+**Status**: Partially Implemented
## Context
@@ -16,3 +16,255 @@ We will establish a formal Design System and Component Library. This will involv
- **Positive**: Ensures a consistent and high-quality user interface. Accelerates frontend development by providing reusable, well-documented components. Improves maintainability and reduces technical debt.
- **Negative**: Requires an initial investment in setting up Storybook and migrating existing components. Adds a new dependency and a new workflow for frontend development.
## Implementation Status
### What's Implemented
The codebase has a solid foundation for a design system:
- ✅ **Tailwind CSS v4.1.17** as the styling solution
- ✅ **Dark mode** fully implemented with system preference detection
- ✅ **55 custom icon components** for consistent iconography
- ✅ **Component organization** with shared vs. feature-specific separation
- ✅ **Accessibility patterns** with ARIA attributes and focus management
### What's Not Yet Implemented
- ❌ **Storybook** is not yet installed or configured
- ❌ **Formal design token documentation** (colors, typography, spacing)
- ❌ **Visual regression testing** for component changes
## Implementation Details
### Component Library Structure
```text
src/
├── components/ # 30+ shared UI components
│ ├── icons/ # 55 SVG icon components
│ ├── Header.tsx
│ ├── Footer.tsx
│ ├── LoadingSpinner.tsx
│ ├── ErrorDisplay.tsx
│ ├── ConfirmationModal.tsx
│ ├── DarkModeToggle.tsx
│ ├── StatCard.tsx
│ ├── PasswordInput.tsx
│ └── ...
├── features/ # Feature-specific components
│ ├── charts/ # PriceChart, PriceHistoryChart
│ ├── flyer/ # FlyerDisplay, FlyerList, FlyerUploader
│ ├── shopping/ # ShoppingListComponent, WatchedItemsList
│ └── voice-assistant/ # VoiceAssistant
├── layouts/ # Page layouts
│ └── MainLayout.tsx
├── pages/ # Page components
│ └── admin/components/ # Admin-specific components
└── providers/ # Context providers
```
### Styling Approach
**Tailwind CSS** with utility-first classes:
```typescript
// Component example with consistent styling patterns
<button
  className="px-4 py-2 bg-brand-primary text-white rounded-lg
             hover:bg-brand-dark transition-colors duration-200
             focus:outline-none focus:ring-2 focus:ring-brand-primary
             focus:ring-offset-2 dark:focus:ring-offset-gray-800"
>
  Click me
</button>
```
**Common Utility Patterns**:
| Pattern | Classes |
| ------- | ------- |
| Card container | `bg-white dark:bg-gray-800 rounded-lg shadow-md p-6` |
| Primary button | `bg-brand-primary hover:bg-brand-dark text-white rounded-lg px-4 py-2` |
| Secondary button | `bg-gray-100 dark:bg-gray-700 text-gray-700 dark:text-gray-200` |
| Input field | `border border-gray-300 dark:border-gray-600 rounded-md px-3 py-2` |
| Focus ring | `focus:outline-none focus:ring-2 focus:ring-brand-primary` |
### Color System
**Brand Colors** (Tailwind theme extensions):
- `brand-primary` - Primary brand color (blue/teal)
- `brand-light` - Lighter variant
- `brand-dark` - Darker variant for hover states
- `brand-secondary` - Secondary accent color
**Semantic Colors**:
- Gray scale: `gray-50` through `gray-950`
- Error: `red-500`, `red-600`
- Success: `green-500`, `green-600`
- Warning: `yellow-500`, `orange-500`
- Info: `blue-500`, `blue-600`
### Dark Mode Implementation
Dark mode is fully implemented using Tailwind's `dark:` variant:
```typescript
// Initialization in useAppInitialization hook
const initializeDarkMode = () => {
  // Priority: user profile > localStorage > system preference
  const stored = localStorage.getItem('darkMode');
  const systemPreference = window.matchMedia('(prefers-color-scheme: dark)').matches;
  const isDarkMode = stored ? stored === 'true' : systemPreference;
  document.documentElement.classList.toggle('dark', isDarkMode);
  return isDarkMode;
};
```
**Usage in components**:
```typescript
<div className="bg-white dark:bg-gray-800 text-gray-900 dark:text-white">
Content adapts to theme
</div>
```
### Icon System
**55 custom SVG icon components** in `src/components/icons/`:
```typescript
// Icon component pattern
interface IconProps extends React.SVGProps<SVGSVGElement> {
  title?: string;
}

export const CheckCircleIcon: React.FC<IconProps> = ({ title, ...props }) => (
  <svg {...props} fill="currentColor" viewBox="0 0 24 24">
    {title && <title>{title}</title>}
    <path d="..." />
  </svg>
);
```
**Usage**:
```typescript
<CheckCircleIcon className="w-5 h-5 text-green-500" title="Success" />
```
**External icons**: Lucide React (`lucide-react` v0.555.0) used for additional icons.
### Accessibility Patterns
**ARIA Attributes**:
```typescript
// Modal pattern
<div role="dialog" aria-modal="true" aria-labelledby="modal-title">
  <h2 id="modal-title">Modal Title</h2>
</div>

// Button with label
<button aria-label="Close modal">
  <XMarkIcon aria-hidden="true" />
</button>

// Loading state
<div role="status" aria-live="polite">
  <LoadingSpinner />
</div>
```
**Focus Management**:
- Consistent focus rings: `focus:ring-2 focus:ring-brand-primary focus:ring-offset-2`
- Dark mode offset: `dark:focus:ring-offset-gray-800`
- No outline: `focus:outline-none` (using ring instead)
### State Management
**Context Providers** (see ADR-005):
| Provider | Purpose |
| -------- | ------- |
| `AuthProvider` | Authentication state |
| `ModalProvider` | Modal open/close state |
| `FlyersProvider` | Flyer data |
| `MasterItemsProvider` | Grocery items |
| `UserDataProvider` | User-specific data |
**Provider Hierarchy** in `AppProviders.tsx`:
```typescript
<QueryClientProvider>
  <ModalProvider>
    <AuthProvider>
      <FlyersProvider>
        <MasterItemsProvider>
          <UserDataProvider>{children}</UserDataProvider>
        </MasterItemsProvider>
      </FlyersProvider>
    </AuthProvider>
  </ModalProvider>
</QueryClientProvider>
```
## Key Files
- `tailwind.config.js` - Tailwind CSS configuration
- `src/index.css` - Tailwind CSS entry point
- `src/components/` - Shared UI components
- `src/components/icons/` - Icon component library (55 icons)
- `src/providers/AppProviders.tsx` - Context provider composition
- `src/hooks/useAppInitialization.ts` - Dark mode initialization
## Component Guidelines
### When to Create Shared Components
Create a shared component in `src/components/` when:
1. Used in 3+ places across the application
2. Represents a reusable UI pattern (buttons, cards, modals)
3. Has consistent styling/behavior requirements
### Naming Conventions
- **Components**: PascalCase (`LoadingSpinner.tsx`)
- **Icons**: PascalCase with `Icon` suffix (`CheckCircleIcon.tsx`)
- **Hooks**: camelCase with `use` prefix (`useModal.ts`)
- **Contexts**: PascalCase with `Context` suffix (`AuthContext.tsx`)
### Styling Guidelines
1. Use Tailwind utility classes exclusively
2. Include dark mode variants for all colors: `bg-white dark:bg-gray-800`
3. Add focus states for interactive elements
4. Use semantic color names from the design system
## Future Enhancements (Storybook Setup)
To complete ADR-012 implementation:
1. **Install Storybook**:
```bash
npx storybook@latest init
```
2. **Create stories for core components**:
- Button variants
- Form inputs (PasswordInput, etc.)
- Modal components
- Loading states
- Icon showcase
3. **Add visual regression testing** with Chromatic or Percy
4. **Document design tokens** formally in Storybook
5. **Create component composition guidelines**

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
-**Status**: Proposed
+**Status**: Accepted
## Context
@@ -20,3 +20,197 @@ We will implement a multi-layered security approach for the API:
- **Positive**: Significantly improves the application's security posture against common web vulnerabilities like XSS, clickjacking, and brute-force attacks.
- **Negative**: Requires careful configuration of CORS and rate limits to avoid blocking legitimate traffic. Content-Security-Policy can be complex to configure correctly.
## Implementation Status
### What's Implemented
- ✅ **Helmet** - Security headers middleware with CSP, HSTS, and more
- ✅ **Rate Limiting** - Comprehensive implementation with 17+ specific limiters
- ✅ **Input Validation** - Zod-based request validation on all routes
- ✅ **File Upload Security** - MIME type validation, size limits, filename sanitization
- ✅ **Error Handling** - Production-safe error responses (no sensitive data leakage)
- ✅ **Request Timeout** - 5-minute timeout protection
- ✅ **Secure Cookies** - httpOnly and secure flags for authentication cookies
### Not Required
- **CORS** - Not needed (API and frontend are same-origin)
## Implementation Details
### Helmet Security Headers
Using **helmet v8.x** configured in `server.ts` as the first middleware after app initialization.
**Security Headers Applied**:
| Header | Configuration | Purpose |
| ------ | ------------- | ------- |
| Content-Security-Policy | Custom directives | Prevents XSS, code injection |
| Strict-Transport-Security | 1 year, includeSubDomains, preload | Forces HTTPS connections |
| X-Content-Type-Options | nosniff | Prevents MIME type sniffing |
| X-Frame-Options | DENY | Prevents clickjacking |
| X-XSS-Protection | 0 (disabled) | Deprecated, CSP preferred |
| Referrer-Policy | strict-origin-when-cross-origin | Controls referrer information |
| Cross-Origin-Resource-Policy | cross-origin | Allows external resource loading |
**Content Security Policy Directives**:
```typescript
contentSecurityPolicy: {
  directives: {
    defaultSrc: ["'self'"],
    scriptSrc: ["'self'", "'unsafe-inline'"], // React inline scripts
    styleSrc: ["'self'", "'unsafe-inline'"], // Tailwind inline styles
    imgSrc: ["'self'", 'data:', 'blob:', 'https:'], // External images
    fontSrc: ["'self'", 'https:', 'data:'],
    connectSrc: ["'self'", 'https:', 'wss:'], // API + WebSocket
    frameSrc: ["'none'"], // No iframes
    objectSrc: ["'none'"], // No plugins
    upgradeInsecureRequests: [], // Production only
  },
}
```
**HSTS Configuration**:
- Max-age: 1 year (31536000 seconds)
- Includes subdomains
- Preload-ready for browser HSTS lists
### Rate Limiting
Using **express-rate-limit v8.2.1** with a centralized configuration in `src/config/rateLimiters.ts`.
**Standard Configuration**:
```typescript
const standardConfig = {
  standardHeaders: true, // Sends RateLimit-* headers
  legacyHeaders: false,
  skip: shouldSkipRateLimit, // Disabled in test environment
};
```
**Rate Limiters by Category**:
| Category | Limiter | Window | Max Requests |
| -------- | ------- | ------ | ------------ |
| **Authentication** | loginLimiter | 15 min | 5 |
| | registerLimiter | 1 hour | 5 |
| | forgotPasswordLimiter | 15 min | 5 |
| | resetPasswordLimiter | 15 min | 10 |
| | refreshTokenLimiter | 15 min | 20 |
| | logoutLimiter | 15 min | 10 |
| **Public/User Read** | publicReadLimiter | 15 min | 100 |
| | userReadLimiter | 15 min | 100 |
| | userUpdateLimiter | 15 min | 100 |
| **Sensitive Operations** | userSensitiveUpdateLimiter | 1 hour | 5 |
| | adminTriggerLimiter | 15 min | 30 |
| **AI/Costly** | aiGenerationLimiter | 15 min | 20 |
| | geocodeLimiter | 1 hour | 100 |
| | priceHistoryLimiter | 15 min | 50 |
| **Uploads** | adminUploadLimiter | 15 min | 20 |
| | aiUploadLimiter | 15 min | 10 |
| | batchLimiter | 15 min | 50 |
| **Tracking** | trackingLimiter | 15 min | 200 |
| | reactionToggleLimiter | 15 min | 150 |
**Test Environment Handling**:
Rate limiting is automatically disabled in test environment via `shouldSkipRateLimit` utility (`src/utils/rateLimit.ts`). Tests can opt-in to rate limiting by setting the `x-test-rate-limit-enable: true` header.
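For illustration, the first limiter in the table could be declared like this, with the window and limit taken from the table (the export name and import path are assumptions):
```typescript
import rateLimit from 'express-rate-limit';
import { shouldSkipRateLimit } from '../utils/rateLimit';

export const loginLimiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15-minute window
  limit: 5, // 5 requests per window per client
  standardHeaders: true, // sends RateLimit-* headers
  legacyHeaders: false,
  skip: shouldSkipRateLimit, // disabled in the test environment
});

// Usage: router.post('/api/auth/login', loginLimiter, loginHandler);
```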
### Input Validation
**Zod Schema Validation** (`src/middleware/validation.middleware.ts`):
- Type-safe parsing and coercion for params, query, and body
- Applied to all API routes via `validateRequest()` middleware
- Returns structured validation errors with field-level details
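A minimal sketch of what such a middleware might look like; the project's actual shape and error format may differ:
```typescript
import type { NextFunction, Request, Response } from 'express';
import type { ZodSchema } from 'zod';

export const validateRequest =
  (schema: ZodSchema) => (req: Request, res: Response, next: NextFunction) => {
    const result = schema.safeParse({ params: req.params, query: req.query, body: req.body });
    if (!result.success) {
      // Field-level details, e.g. { path: ['body', 'email'], message: 'Invalid email' }
      return res.status(400).json({ errors: result.error.issues });
    }
    next();
  };
```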
**Filename Sanitization** (`src/utils/stringUtils.ts`):
```typescript
// Removes dangerous characters from uploaded filenames
sanitizeFilename(filename: string): string
```
### File Upload Security
**Multer Configuration** (`src/middleware/multer.middleware.ts`):
- MIME type validation via `imageFileFilter` (only image/* allowed)
- File size limits (2MB for logos, configurable per upload type)
- Unique filenames using timestamps + random suffixes
- User-scoped storage paths
### Error Handling
**Production-Safe Responses** (`src/middleware/errorHandler.ts`):
- Production mode: Returns generic error message with tracking ID
- Development mode: Returns detailed error information
- Sensitive error details are logged but never exposed to clients
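Sketched for illustration, a handler with these properties might look as follows (console stands in for the project logger, and the exact response shape is an assumption):
```typescript
import crypto from 'node:crypto';
import type { NextFunction, Request, Response } from 'express';

export function errorHandler(err: Error, req: Request, res: Response, _next: NextFunction) {
  const errorId = crypto.randomUUID(); // tracking ID returned to the client
  // Full details are logged server-side only.
  console.error({ errorId, message: err.message, stack: err.stack, url: req.originalUrl });
  const isProd = process.env.NODE_ENV === 'production';
  res.status(500).json(
    isProd
      ? { message: 'An unexpected error occurred.', errorId } // generic, safe for clients
      : { message: err.message, stack: err.stack, errorId }, // detailed in development
  );
}
```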
### Request Security
**Timeout Protection** (`server.ts`):
- 5-minute request timeout via `connect-timeout` middleware
- Prevents resource exhaustion from long-running requests
**Secure Cookies**:
```typescript
// Cookie configuration for auth tokens
{
  httpOnly: true,
  secure: process.env.NODE_ENV === 'production',
  sameSite: 'strict',
  maxAge: 7 * 24 * 60 * 60 * 1000, // 7 days for refresh token
}
```
### Request Logging
Per-request structured logging (ADR-004):
- Request ID tracking
- User ID and IP address logging
- Failed request details (4xx+) logged with headers and body
- Unhandled errors assigned unique error IDs
## Key Files
- `server.ts` - Helmet middleware configuration (security headers)
- `src/config/rateLimiters.ts` - Rate limiter definitions (17+ limiters)
- `src/utils/rateLimit.ts` - Rate limit skip logic for testing
- `src/middleware/validation.middleware.ts` - Zod-based request validation
- `src/middleware/errorHandler.ts` - Production-safe error handling
- `src/middleware/multer.middleware.ts` - Secure file upload configuration
- `src/utils/stringUtils.ts` - Filename sanitization
## Future Enhancements
1. **Configure CORS** (if needed for cross-origin access):
```bash
npm install cors @types/cors
```
Add to `server.ts`:
```typescript
import cors from 'cors';

app.use(
  cors({
    origin: process.env.ALLOWED_ORIGINS?.split(',') || 'http://localhost:3000',
    credentials: true,
  }),
);
```
2. **Redis-backed rate limiting**: For distributed deployments, use `rate-limit-redis` store
3. **CSP Nonce**: Generate per-request nonces for stricter script-src policy
4. **Report-Only CSP**: Add `Content-Security-Policy-Report-Only` header for testing policy changes

View File

@@ -2,7 +2,9 @@
**Date**: 2025-12-12
-**Status**: Proposed
+**Status**: Accepted
+**Implemented**: 2026-01-09
## Context
@@ -20,3 +22,195 @@ We will implement dedicated health check endpoints in the Express application.
- **Positive**: Enables robust, automated application lifecycle management in a containerized environment. Prevents traffic from being sent to unhealthy or uninitialized application instances.
- **Negative**: Adds a small amount of code for the health check endpoints. Requires configuration in the container orchestration layer.
## Implementation Status
### What's Implemented
- ✅ **Liveness Probe** (`/api/health/live`) - Simple process health check
- ✅ **Readiness Probe** (`/api/health/ready`) - Comprehensive dependency health check
- ✅ **Startup Probe** (`/api/health/startup`) - Initial startup verification
- ✅ **Individual Service Checks** - Database, Redis, Storage endpoints
- ✅ **Detailed Health Response** - Service latency, status, and details
## Implementation Details
### Probe Endpoints
| Endpoint | Purpose | Checks | HTTP Status |
| --------------------- | --------------- | ------------------ | ----------------------------- |
| `/api/health/live` | Liveness probe | Process running | 200 = alive |
| `/api/health/ready` | Readiness probe | DB, Redis, Storage | 200 = ready, 503 = not ready |
| `/api/health/startup` | Startup probe | Database only | 200 = started, 503 = starting |
### Liveness Probe
The liveness probe is intentionally simple with no external dependencies:
```typescript
// GET /api/health/live
{
  "status": "ok",
  "timestamp": "2026-01-09T12:00:00.000Z"
}
```
**Usage**: If this endpoint fails to respond, the container should be restarted.
### Readiness Probe
The readiness probe checks all critical dependencies:
```typescript
// GET /api/health/ready
{
  "status": "healthy", // healthy | degraded | unhealthy
  "timestamp": "2026-01-09T12:00:00.000Z",
  "uptime": 3600.5,
  "services": {
    "database": {
      "status": "healthy",
      "latency": 5,
      "details": {
        "totalConnections": 10,
        "idleConnections": 8,
        "waitingConnections": 0
      }
    },
    "redis": {
      "status": "healthy",
      "latency": 2
    },
    "storage": {
      "status": "healthy",
      "latency": 1,
      "details": {
        "path": "/var/www/.../flyer-images"
      }
    }
  }
}
```
**Status Logic**:
- `healthy` - All critical services (database, Redis) are healthy
- `degraded` - Some non-critical issues (high connection wait, storage issues)
- `unhealthy` - Critical service unavailable (returns 503)
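That aggregation can be sketched as a small pure function (the per-service check helpers are assumed to return objects of this shape):
```typescript
type ServiceStatus = 'healthy' | 'degraded' | 'unhealthy';

function overallStatus(services: Record<string, { status: ServiceStatus }>): ServiceStatus {
  // Database and Redis are the critical services.
  const critical = [services.database, services.redis];
  if (critical.some((s) => s?.status === 'unhealthy')) return 'unhealthy';
  if (Object.values(services).some((s) => s.status !== 'healthy')) return 'degraded';
  return 'healthy';
}

// The route responds 503 when overallStatus(...) === 'unhealthy', 200 otherwise.
```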
### Startup Probe
The startup probe is used during container initialization:
```typescript
// GET /api/health/startup

// Success (200):
{
  "status": "started",
  "timestamp": "2026-01-09T12:00:00.000Z",
  "database": { "status": "healthy", "latency": 5 }
}

// Still starting (503):
{
  "status": "starting",
  "message": "Waiting for database connection",
  "database": { "status": "unhealthy", "message": "..." }
}
```
### Individual Service Endpoints
For detailed diagnostics:
| Endpoint | Purpose |
| ----------------------- | ------------------------------- |
| `/api/health/ping` | Simple server responsiveness |
| `/api/health/db-schema` | Verify database tables exist |
| `/api/health/db-pool` | Database connection pool status |
| `/api/health/redis` | Redis connectivity |
| `/api/health/storage` | File storage accessibility |
| `/api/health/time` | Server time synchronization |
## Kubernetes Configuration Example
```yaml
apiVersion: v1
kind: Pod
spec:
  containers:
    - name: flyer-crawler
      livenessProbe:
        httpGet:
          path: /api/health/live
          port: 3001
        initialDelaySeconds: 10
        periodSeconds: 15
        failureThreshold: 3
      readinessProbe:
        httpGet:
          path: /api/health/ready
          port: 3001
        initialDelaySeconds: 5
        periodSeconds: 10
        failureThreshold: 3
      startupProbe:
        httpGet:
          path: /api/health/startup
          port: 3001
        initialDelaySeconds: 0
        periodSeconds: 5
        failureThreshold: 30 # Allow up to 150 seconds for startup
```
## Docker Compose Configuration Example
```yaml
services:
  api:
    image: flyer-crawler:latest
    healthcheck:
      test: ['CMD', 'curl', '-f', 'http://localhost:3001/api/health/ready']
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
```
## PM2 Configuration Example
For non-containerized deployments using PM2. Note that core PM2 does not ship a built-in HTTP health check, so the `health_check` block below assumes an external monitor or wrapper that polls the endpoint and restarts the process on failure:
```javascript
// ecosystem.config.js
module.exports = {
  apps: [
    {
      name: 'flyer-crawler',
      script: 'dist/server.js',
      // Polled by an external monitor (not a native PM2 option),
      // which restarts the process if the check fails.
      health_check: {
        url: 'http://localhost:3001/api/health/ready',
        interval: 30000,
        timeout: 10000,
      },
    },
  ],
};
```
## Key Files
- `src/routes/health.routes.ts` - Health check endpoint implementations
- `server.ts` - Health routes mounted at `/api/health`
## Service Health Thresholds
| Service | Healthy | Degraded | Unhealthy |
| -------- | ---------------------- | ----------------------- | ------------------- |
| Database | Responds to `SELECT 1` | > 3 waiting connections | Connection fails |
| Redis | `PING` returns `PONG` | N/A | Connection fails |
| Storage | Write access to path | N/A | Path not accessible |

View File

@@ -2,7 +2,9 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Accepted
**Implemented**: 2026-01-09
## Context
@@ -10,10 +12,171 @@ The project contains both frontend (React) and backend (Node.js) code. While lin
## Decision
We will mandate the use of **Prettier** for automated code formatting and a unified **ESLint** configuration for code quality rules across both frontend and backend. This will be enforced automatically using a pre-commit hook managed by a tool like **Husky**.
We will mandate the use of **Prettier** for automated code formatting and a unified **ESLint** configuration for code quality rules across both frontend and backend. This will be enforced automatically using a pre-commit hook managed by **Husky** and **lint-staged**.
## Consequences
**Positive**: Improves developer experience and team velocity by automating code consistency. Reduces time spent on stylistic code review comments. Enhances code readability and maintainability.
**Negative**: Requires an initial setup and configuration of Prettier, ESLint, and Husky. May require a one-time reformatting of the entire codebase.
## Implementation Status
### What's Implemented
- ✅ **Prettier Configuration** - `.prettierrc` with consistent settings
- ✅ **Prettier Ignore** - `.prettierignore` to exclude generated files
- ✅ **ESLint Configuration** - `eslint.config.js` with TypeScript and React support
- ✅ **ESLint + Prettier Integration** - `eslint-config-prettier` to avoid conflicts
- ✅ **Husky Pre-commit Hooks** - Automatic enforcement on commit
- ✅ **lint-staged** - Run linters only on staged files for performance
## Implementation Details
### Prettier Configuration
The project uses a consistent Prettier configuration in `.prettierrc`:
```json
{
  "semi": true,
  "trailingComma": "all",
  "singleQuote": true,
  "printWidth": 100,
  "tabWidth": 2,
  "useTabs": false,
  "endOfLine": "auto"
}
```
### ESLint Configuration
ESLint is configured with:
- TypeScript support via `typescript-eslint`
- React hooks rules via `eslint-plugin-react-hooks`
- React Refresh support for HMR
- Prettier compatibility via `eslint-config-prettier`
```javascript
// eslint.config.js (ESLint v9 flat config)
import globals from 'globals';
import tseslint from 'typescript-eslint';
import pluginReact from 'eslint-plugin-react';
import pluginReactHooks from 'eslint-plugin-react-hooks';
import pluginReactRefresh from 'eslint-plugin-react-refresh';
import eslintConfigPrettier from 'eslint-config-prettier';
export default tseslint.config(
  // ... configurations
  eslintConfigPrettier, // Must be last to override formatting rules
);
```
### Pre-commit Hook
The pre-commit hook runs lint-staged automatically:
```bash
# .husky/pre-commit
npx lint-staged
```
### lint-staged Configuration
lint-staged runs appropriate tools based on file type:
```json
{
  "*.{js,jsx,ts,tsx}": ["eslint --fix", "prettier --write"],
  "*.{json,md,css,html,yml,yaml}": ["prettier --write"]
}
```
### NPM Scripts
| Script | Description |
| ------------------ | ---------------------------------------------- |
| `npm run format` | Format all files with Prettier |
| `npm run lint` | Run ESLint on all TypeScript/JavaScript files |
| `npm run validate` | Run Prettier check + TypeScript check + ESLint |
## Key Files
| File | Purpose |
| -------------------- | -------------------------------- |
| `.prettierrc` | Prettier configuration |
| `.prettierignore` | Files to exclude from formatting |
| `eslint.config.js` | ESLint flat configuration (v9) |
| `.husky/pre-commit` | Pre-commit hook script |
| `.lintstagedrc.json` | lint-staged configuration |
## Developer Workflow
### Automatic Formatting on Commit
When you commit changes:
1. Husky intercepts the commit
2. lint-staged identifies staged files
3. ESLint fixes auto-fixable issues
4. Prettier formats the code
5. Changes are automatically staged
6. Commit proceeds if no errors
### Manual Formatting
```bash
# Format entire codebase
npm run format
# Check formatting without changes
npx prettier --check .
# Run ESLint
npm run lint
# Run all validation checks
npm run validate
```
### IDE Integration
For the best experience, configure your IDE:
**VS Code** - Install extensions:
- Prettier - Code formatter
- ESLint
Add to `.vscode/settings.json`:
```json
{
  "editor.defaultFormatter": "esbenp.prettier-vscode",
  "editor.formatOnSave": true,
  "editor.codeActionsOnSave": {
    "source.fixAll.eslint": "explicit"
  }
}
```
## Troubleshooting
### "eslint --fix failed"
ESLint may fail on unfixable errors. Review the output and manually fix the issues.
### "prettier --write failed"
Check for syntax errors in the file that prevent parsing.
### Bypassing Hooks (Emergency)
In rare cases, you may need to bypass hooks:
```bash
git commit --no-verify -m "emergency fix"
```
Use sparingly - the CI pipeline will still catch formatting issues.

View File

@@ -0,0 +1,149 @@
# ADR-028: API Response Standardization and Envelope Pattern
**Date**: 2026-01-09
**Status**: Proposed
## Context
The API currently has inconsistent response formats across different endpoints:
1. Some endpoints return raw data arrays (`[{...}, {...}]`)
2. Some return wrapped objects (`{ data: [...] }`)
3. Pagination is handled inconsistently (some use `page`/`limit`, others use `offset`/`count`)
4. Error responses vary in structure between middleware and route handlers
5. No standard for including metadata (pagination info, request timing, etc.)
This inconsistency creates friction for:
- Frontend developers who must handle multiple response formats
- API documentation and client SDK generation
- Implementing consistent error handling across the application
- Future API versioning transitions
## Decision
We will adopt a standardized response envelope pattern for all API responses.
### Success Response Format
```typescript
interface ApiSuccessResponse<T> {
  success: true;
  data: T;
  meta?: {
    // Pagination (when applicable)
    pagination?: {
      page: number;
      limit: number;
      total: number;
      totalPages: number;
      hasNextPage: boolean;
      hasPrevPage: boolean;
    };
    // Timing
    requestId?: string;
    timestamp?: string;
    duration?: number;
  };
}
```
### Error Response Format
```typescript
interface ApiErrorResponse {
  success: false;
  error: {
    code: string; // Machine-readable error code (e.g., 'VALIDATION_ERROR')
    message: string; // Human-readable message
    details?: unknown; // Additional context (validation errors, etc.)
  };
  meta?: {
    requestId?: string;
    timestamp?: string;
  };
}
```
### Implementation Approach
1. **Response Helper Functions**: Create utility functions in `src/utils/apiResponse.ts` (sketched after this list):
- `sendSuccess(res, data, meta?)`
- `sendPaginated(res, data, pagination)`
- `sendError(res, code, message, details?, statusCode?)`
2. **Error Handler Integration**: Update `errorHandler.ts` to use the standard error format
3. **Gradual Migration**: Apply to new endpoints immediately, migrate existing endpoints incrementally
4. **TypeScript Types**: Export response types for frontend consumption
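A minimal sketch of the proposed helpers, matching the envelope shapes above (illustrative only; this file does not exist yet):
```typescript
// src/utils/apiResponse.ts (proposed)
import { Response } from 'express';

interface PaginationInput {
  page: number;
  limit: number;
  total: number;
}

export function sendSuccess<T>(res: Response, data: T, meta?: Record<string, unknown>) {
  return res.json({ success: true, data, ...(meta ? { meta } : {}) });
}

export function sendPaginated<T>(res: Response, data: T[], { page, limit, total }: PaginationInput) {
  const totalPages = Math.max(1, Math.ceil(total / limit));
  return res.json({
    success: true,
    data,
    meta: {
      pagination: {
        page,
        limit,
        total,
        totalPages,
        hasNextPage: page < totalPages,
        hasPrevPage: page > 1,
      },
      timestamp: new Date().toISOString(),
    },
  });
}

export function sendError(
  res: Response,
  code: string,
  message: string,
  details?: unknown,
  statusCode = 400,
) {
  return res.status(statusCode).json({
    success: false,
    error: { code, message, ...(details !== undefined ? { details } : {}) },
    meta: { timestamp: new Date().toISOString() },
  });
}
```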
## Consequences
### Positive
- **Consistency**: All responses follow a predictable structure
- **Type Safety**: Frontend can rely on consistent types
- **Debugging**: Request IDs and timestamps aid in issue investigation
- **Pagination**: Standardized pagination metadata reduces frontend complexity
- **API Evolution**: Envelope pattern makes it easier to add fields without breaking changes
### Negative
- **Verbosity**: Responses are slightly larger due to envelope overhead
- **Migration Effort**: Existing endpoints need updating
- **Learning Curve**: Developers must learn and use the helper functions
## Implementation Status
### What's Implemented
- ❌ Not yet implemented
### What Needs To Be Done
1. Create `src/utils/apiResponse.ts` with helper functions
2. Create `src/types/api.ts` with response type definitions
3. Update `errorHandler.ts` to use standard error format
4. Create migration guide for existing endpoints
5. Update 2-3 routes as examples
6. Document pattern in this ADR
## Example Usage
```typescript
// In a route handler
router.get('/flyers', async (req, res, next) => {
  try {
    // Query parameters arrive as strings; coerce them before use.
    const page = Number(req.query.page ?? 1);
    const limit = Number(req.query.limit ?? 20);
    const { flyers, total } = await flyerService.getFlyers({ page, limit });
    return sendPaginated(res, flyers, {
      page,
      limit,
      total,
    });
  } catch (error) {
    next(error);
  }
});

// Response:
// {
//   "success": true,
//   "data": [...],
//   "meta": {
//     "pagination": {
//       "page": 1,
//       "limit": 20,
//       "total": 150,
//       "totalPages": 8,
//       "hasNextPage": true,
//       "hasPrevPage": false
//     },
//     "requestId": "abc-123",
//     "timestamp": "2026-01-09T12:00:00.000Z"
//   }
// }
```

View File

@@ -0,0 +1,147 @@
# ADR-029: Secret Rotation and Key Management Strategy
**Date**: 2026-01-09
**Status**: Proposed
## Context
While ADR-007 covers configuration validation at startup, it does not address the lifecycle management of secrets:
1. **JWT Secrets**: If the JWT_SECRET is rotated, all existing user sessions are immediately invalidated
2. **Database Credentials**: No documented procedure for rotating database passwords without downtime
3. **API Keys**: External service API keys (AI services, geocoding) have no rotation strategy
4. **Emergency Revocation**: No process for immediately invalidating compromised credentials
Current risks:
- Long-lived secrets that never change become high-value targets
- No ability to rotate secrets without application restart
- No audit trail of when secrets were last rotated
- Compromised keys could remain active indefinitely
## Decision
We will implement a comprehensive secret rotation and key management strategy.
### 1. JWT Secret Rotation with Dual-Key Support
Support multiple JWT secrets simultaneously to enable zero-downtime rotation:
```typescript
// Environment variables:
//   JWT_SECRET=current_secret
//   JWT_SECRET_PREVIOUS=old_secret  (optional, kept only during the transition period)

// Token verification tries the current secret first, then falls back to the previous one.
const verifyToken = (token: string) => {
  try {
    return jwt.verify(token, process.env.JWT_SECRET!);
  } catch {
    if (process.env.JWT_SECRET_PREVIOUS) {
      try {
        return jwt.verify(token, process.env.JWT_SECRET_PREVIOUS);
      } catch {
        // fall through to the shared error below
      }
    }
    throw new AuthenticationError('Invalid token');
  }
};
```
### 2. Database Credential Rotation
Document and implement a procedure for PostgreSQL credential rotation:
1. Create new database user with identical permissions
2. Update application configuration to use new credentials
3. Restart application instances (rolling restart)
4. Remove old database user after all instances updated
5. Log rotation event for audit purposes
### 3. API Key Management
For external service API keys (Google AI, geocoding services):
1. **Naming Convention**: `{SERVICE}_API_KEY` and `{SERVICE}_API_KEY_PREVIOUS`
2. **Fallback Logic**: Try primary key, fall back to previous on 401/403 (see the sketch after this list)
3. **Health Checks**: Validate API keys on startup
4. **Usage Logging**: Track which key is being used for each request
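Item 2 might be implemented with a small wrapper along these lines (a sketch; `logger` stands in for the app's pino instance, and the error's `status` field depends on the HTTP client in use):
```typescript
declare const logger: { warn: (msg: string) => void }; // app's pino instance (assumed)

async function callWithKeyFallback<T>(
  invoke: (apiKey: string) => Promise<T>,
  primaryKey: string,
  previousKey?: string,
): Promise<T> {
  try {
    return await invoke(primaryKey);
  } catch (err) {
    const status = (err as { status?: number }).status;
    // Only fall back on auth failures; other errors propagate unchanged.
    if (previousKey && (status === 401 || status === 403)) {
      logger.warn('Primary API key rejected; retrying with previous key');
      return invoke(previousKey);
    }
    throw err;
  }
}
```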
### 4. Emergency Revocation Procedures
Document emergency procedures for:
- **JWT Compromise**: Set new JWT_SECRET, clear all refresh tokens from database
- **Database Compromise**: Rotate credentials immediately, audit access logs
- **API Key Compromise**: Regenerate at provider, update environment, restart
### 5. Secret Audit Trail
Track secret lifecycle events:
- When secrets were last rotated
- Who initiated the rotation
- Which instances are using which secrets
## Implementation Approach
### Phase 1: Dual JWT Secret Support
- Modify token verification to support fallback secret
- Add JWT_SECRET_PREVIOUS to configuration schema
- Update documentation
### Phase 2: Rotation Scripts
- Create `scripts/rotate-jwt-secret.sh`
- Create `scripts/rotate-db-credentials.sh`
- Add rotation instructions to operations runbook
### Phase 3: API Key Fallback
- Wrap external API clients with fallback logic
- Add key validation to health checks
- Implement key usage logging
## Consequences
### Positive
- **Zero-Downtime Rotation**: Secrets can be rotated without invalidating all sessions
- **Reduced Risk**: Regular rotation limits exposure window for compromised credentials
- **Audit Trail**: Clear record of when secrets were changed
- **Emergency Response**: Documented procedures for security incidents
### Negative
- **Complexity**: Dual-key logic adds code complexity
- **Operations Overhead**: Regular rotation requires operational discipline
- **Testing**: Rotation procedures need to be tested periodically
## Implementation Status
### What's Implemented
- ❌ Not yet implemented
### What Needs To Be Done
1. Implement dual JWT secret verification
2. Create rotation scripts
3. Document emergency procedures
4. Add secret validation to health checks
5. Create rotation schedule recommendations
## Key Files (To Be Created)
- `src/utils/secretManager.ts` - Secret rotation utilities
- `scripts/rotate-jwt-secret.sh` - JWT rotation script
- `scripts/rotate-db-credentials.sh` - Database credential rotation
- `docs/operations/secret-rotation.md` - Operations runbook
## Rotation Schedule Recommendations
| Secret Type | Rotation Frequency | Grace Period |
| ------------------ | -------------------------- | ----------------- |
| JWT_SECRET | 90 days | 7 days (dual-key) |
| Database Passwords | 180 days | Rolling restart |
| AI API Keys | On suspicion of compromise | Immediate |
| Refresh Tokens | 7-day max age | N/A (per-token) |

View File

@@ -0,0 +1,150 @@
# ADR-030: Graceful Degradation and Circuit Breaker Pattern
**Date**: 2026-01-09
**Status**: Proposed
## Context
The application depends on several external services:
1. **AI Services** (Google Gemini) - For flyer item extraction
2. **Redis** - For caching, rate limiting, and job queues
3. **PostgreSQL** - Primary data store
4. **Geocoding APIs** - For location services
Currently, when these services fail:
- AI failures may cause the entire upload to fail
- Redis unavailability could crash the application or bypass rate limiting
- No circuit breakers prevent repeated calls to failing services
- No fallback behaviors are defined
This creates fragility where a single service outage can cascade into application-wide failures.
## Decision
We will implement a graceful degradation strategy with circuit breakers for external service dependencies.
### 1. Circuit Breaker Pattern
Implement circuit breakers for external service calls using a library like `opossum`:
```typescript
import CircuitBreaker from 'opossum';
const aiCircuitBreaker = new CircuitBreaker(callAiService, {
  timeout: 30000, // 30 second timeout
  errorThresholdPercentage: 50, // Open circuit at 50% failures
  resetTimeout: 30000, // Try again after 30 seconds
  volumeThreshold: 5, // Minimum calls before calculating error %
});

aiCircuitBreaker.on('open', () => {
  logger.warn('AI service circuit breaker opened');
});

aiCircuitBreaker.on('halfOpen', () => {
  logger.info('AI service circuit breaker half-open, testing...');
});
```
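opossum also supports a registered fallback, which pairs naturally with the queue-for-retry behavior described in the next section (`queueForRetry` is an assumed helper, not existing project code):
```typescript
declare function queueForRetry(payload: unknown): Promise<void>; // assumed helper backed by the job queue
declare const flyerPayload: unknown;

// When the circuit is open or the call fails, degrade gracefully:
// return a partial result and queue the extraction for a later retry.
aiCircuitBreaker.fallback(async (payload: unknown) => {
  await queueForRetry(payload);
  return { status: 'queued', items: [] };
});

const result = await aiCircuitBreaker.fire(flyerPayload);
```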
### 2. Fallback Behaviors by Service
| Service | Fallback Behavior |
| ---------------------- | ---------------------------------------- |
| **Redis (Cache)** | Skip cache, query database directly |
| **Redis (Rate Limit)** | Log warning, allow request (fail-open) |
| **Redis (Queues)** | Queue to memory, process synchronously |
| **AI Service** | Return partial results, queue for retry |
| **Geocoding** | Return null location, allow manual entry |
| **PostgreSQL** | No fallback - critical dependency |
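The rate-limit row deserves emphasis, since failing closed would reject all traffic during a Redis outage. A fail-open sketch (assuming the app's `ioredis` client and pino logger):
```typescript
import { Redis } from 'ioredis';

declare const logger: { warn: (obj: unknown, msg: string) => void }; // app's pino instance (assumed)
const redis = new Redis(process.env.REDIS_URL!);

// Fixed-window counter that fails open when Redis is unreachable.
async function allowRequest(key: string, limit = 100, windowSeconds = 60): Promise<boolean> {
  try {
    const count = await redis.incr(key);
    if (count === 1) await redis.expire(key, windowSeconds);
    return count <= limit;
  } catch (err) {
    logger.warn({ err }, 'Redis unavailable; rate limiting failing open');
    return true; // degraded mode: allow traffic rather than rejecting everything
  }
}
```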
### 3. Health Status Aggregation
Extend health checks (ADR-020) to report service-level health:
```typescript
// GET /api/health/ready response
{
  "status": "degraded", // healthy | degraded | unhealthy
  "services": {
    "database": { "status": "healthy", "latency": 5 },
    "redis": { "status": "healthy", "latency": 2 },
    "ai": { "status": "degraded", "circuitState": "half-open" },
    "geocoding": { "status": "healthy", "latency": 150 }
  }
}
```
### 4. Retry Strategies
Define retry policies for transient failures:
```typescript
const retryConfig = {
  ai: { maxRetries: 3, backoff: 'exponential', initialDelay: 1000 },
  geocoding: { maxRetries: 2, backoff: 'linear', initialDelay: 500 },
  database: { maxRetries: 3, backoff: 'exponential', initialDelay: 100 },
};
```
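A generic helper matching that config shape could look like this (a sketch, not existing project code):
```typescript
// Retries fn up to maxRetries times, waiting between attempts according to the policy.
async function withRetry<T>(
  fn: () => Promise<T>,
  opts: { maxRetries: number; backoff: 'exponential' | 'linear'; initialDelay: number },
): Promise<T> {
  let lastError: unknown;
  for (let attempt = 0; attempt <= opts.maxRetries; attempt++) {
    try {
      return await fn();
    } catch (err) {
      lastError = err;
      if (attempt === opts.maxRetries) break;
      const delay =
        opts.backoff === 'exponential'
          ? opts.initialDelay * 2 ** attempt
          : opts.initialDelay * (attempt + 1);
      await new Promise((resolve) => setTimeout(resolve, delay));
    }
  }
  throw lastError;
}
```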
## Implementation Approach
### Phase 1: Redis Fallbacks
- Wrap cache operations with try-catch (already partially done in cacheService)
- Add fail-open for rate limiting when Redis is down
- Log degraded state
### Phase 2: AI Circuit Breaker
- Wrap AI service calls with circuit breaker
- Implement queue-for-retry on circuit open
- Add manual fallback UI for failed extractions
### Phase 3: Health Aggregation
- Update health endpoints with service status
- Add Prometheus-compatible metrics
- Create dashboard for service health
## Consequences
### Positive
- **Resilience**: Application continues functioning during partial outages
- **User Experience**: Degraded but functional is better than complete failure
- **Observability**: Clear visibility into service health
- **Protection**: Circuit breakers prevent cascading failures
### Negative
- **Complexity**: Additional code for fallback logic
- **Testing**: Requires testing failure scenarios
- **Consistency**: Some operations may have different results during degradation
## Implementation Status
### What's Implemented
- ✅ Cache operations fail gracefully (cacheService.server.ts)
- ❌ Circuit breakers for AI services
- ❌ Rate limit fail-open behavior
- ❌ Health aggregation endpoint
- ❌ Retry strategies with backoff
### What Needs To Be Done
1. Install and configure `opossum` circuit breaker library
2. Wrap AI service calls with circuit breaker
3. Add fail-open to rate limiting
4. Extend health endpoints with service status
5. Document degraded mode behaviors
## Key Files
- `src/utils/circuitBreaker.ts` - Circuit breaker configurations (to create)
- `src/services/cacheService.server.ts` - Already has graceful fallbacks
- `src/routes/health.routes.ts` - Health check endpoints (to extend)
- `src/services/aiService.server.ts` - AI service wrapper (to wrap)

View File

@@ -0,0 +1,199 @@
# ADR-031: Data Retention and Privacy Compliance (GDPR/CCPA)
**Date**: 2026-01-09
**Status**: Proposed
## Context
The application stores various types of user data:
1. **User Accounts**: Email, password hash, profile information
2. **Shopping Lists**: Personal shopping preferences and history
3. **Watch Lists**: Tracked items and price alerts
4. **Activity Logs**: User actions for analytics and debugging
5. **Tracking Data**: Page views, interactions, feature usage
Current gaps in privacy compliance:
- **No Data Retention Policies**: Activity logs accumulate indefinitely
- **No User Data Export**: Users cannot export their data (GDPR Article 20)
- **No User Data Deletion**: No self-service account deletion (GDPR Article 17)
- **No Cookie Consent**: Cookie usage not disclosed or consented
- **No Privacy Policy Enforcement**: Privacy commitments not enforced in code
These gaps create legal exposure under the GDPR (for users in the EU) and the CCPA (for users in California).
## Decision
We will implement comprehensive data retention and privacy compliance features.
### 1. Data Retention Policies
| Data Type | Retention Period | Deletion Method |
| ------------------------- | ------------------------ | ------------------------ |
| **Activity Logs** | 90 days | Automated cleanup job |
| **Tracking Events** | 30 days | Automated cleanup job |
| **Deleted User Data** | 30 days (soft delete) | Hard delete after period |
| **Expired Sessions** | 7 days after expiry | Token cleanup job |
| **Failed Login Attempts** | 24 hours | Automated cleanup |
| **Flyer Data** | Indefinite (public data) | N/A |
| **User Shopping Lists** | Until account deletion | With account |
| **User Watch Lists** | Until account deletion | With account |
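As a sketch, the activity-log row could be enforced by a scheduled job like this (table and column names are assumptions):
```typescript
import { Pool } from 'pg';

declare const logger: { info: (obj: unknown, msg: string) => void }; // app's pino instance (assumed)

// Run daily from the background job service (ADR-006).
async function cleanupActivityLogs(pool: Pool): Promise<void> {
  const result = await pool.query(
    `DELETE FROM activity_log WHERE created_at < NOW() - INTERVAL '90 days'`,
  );
  logger.info({ deleted: result.rowCount }, 'activity_log retention cleanup complete');
}
```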
### 2. User Data Export (Right to Portability)
Implement `GET /api/users/me/export` endpoint:
```typescript
interface UserDataExport {
  exportDate: string;
  user: {
    email: string;
    created_at: string;
    profile: ProfileData;
  };
  shoppingLists: ShoppingList[];
  watchedItems: WatchedItem[];
  priceAlerts: PriceAlert[];
  achievements: Achievement[];
  // Exclude: password hash, internal IDs, admin flags
}
```
Export formats: JSON (primary), CSV (optional)
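A route-level sketch of the export endpoint (the middleware names and aggregation helper are assumptions):
```typescript
import { Router, Request, Response, NextFunction, RequestHandler } from 'express';

declare const requireAuth: RequestHandler; // existing auth middleware (assumed name)
declare const exportLimiter: RequestHandler; // rate limit: 1 export per 24h (assumed)
declare function buildUserDataExport(userId: number): Promise<unknown>;

const router = Router();

router.get(
  '/me/export',
  requireAuth,
  exportLimiter,
  async (req: Request, res: Response, next: NextFunction) => {
    try {
      const userId = (req as Request & { user: { id: number } }).user.id;
      const exportData = await buildUserDataExport(userId);
      res.setHeader('Content-Disposition', 'attachment; filename="user-data-export.json"');
      res.json(exportData);
    } catch (err) {
      next(err);
    }
  },
);
```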
### 3. User Data Deletion (Right to Erasure)
Implement `DELETE /api/users/me` endpoint:
1. **Soft Delete**: Mark account as deleted, anonymize PII
2. **Grace Period**: 30 days to restore account
3. **Hard Delete**: Permanently remove all user data after grace period
4. **Audit Log**: Record deletion request (anonymized)
Deletion cascade:
- User account → Anonymize email/name
- Shopping lists → Delete
- Watch lists → Delete
- Achievements → Delete
- Activity logs → Anonymize user_id
- Sessions/tokens → Delete immediately
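A sketch of the anonymization step in that cascade (column names are assumptions):
```typescript
import { Pool } from 'pg';

// Marks the account deleted and strips PII while keeping the row for the
// 30-day grace period; the hard-delete job removes it afterwards.
async function softDeleteUser(pool: Pool, userId: number): Promise<void> {
  await pool.query(
    `UPDATE users
        SET email = 'deleted-' || id || '@example.invalid',
            name = 'Deleted User',
            deleted_at = NOW()
      WHERE id = $1`,
    [userId],
  );
}
```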
### 4. Cookie Consent
Implement cookie consent banner:
```typescript
// Cookie categories
enum CookieCategory {
  ESSENTIAL = 'essential', // Always allowed (auth, CSRF)
  FUNCTIONAL = 'functional', // Dark mode, preferences
  ANALYTICS = 'analytics', // Usage tracking
}

// Store consent in localStorage and server-side
interface CookieConsent {
  essential: true; // Cannot be disabled
  functional: boolean;
  analytics: boolean;
  consentDate: string;
  consentVersion: string;
}
```
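Gating analytics on that consent object could look like this (`getConsent` and `analyticsClient` are assumed helpers):
```typescript
declare function getConsent(): CookieConsent | null;
declare const analyticsClient: { send: (event: string, props?: Record<string, unknown>) => void };

function trackEvent(event: string, props?: Record<string, unknown>): void {
  const consent = getConsent();
  if (!consent?.analytics) return; // no analytics consent, no tracking
  analyticsClient.send(event, props);
}
```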
### 5. Privacy Policy Enforcement
Enforce privacy commitments in code:
- Email addresses never logged in plaintext
- Passwords never logged (already in pino redact config)
- IP addresses anonymized after 7 days
- Third-party data sharing requires explicit consent
## Implementation Approach
### Phase 1: Data Retention Jobs
- Create retention cleanup job in background job service
- Add activity_log retention (90 days)
- Add tracking_events retention (30 days)
### Phase 2: User Data Export
- Create export endpoint
- Implement data aggregation query
- Add rate limiting (1 export per 24h)
### Phase 3: Account Deletion
- Implement soft delete with anonymization
- Create hard delete cleanup job
- Add account recovery endpoint
### Phase 4: Cookie Consent
- Create consent banner component
- Store consent preferences
- Gate analytics based on consent
## Consequences
### Positive
- **Legal Compliance**: Meets GDPR and CCPA requirements
- **User Trust**: Demonstrates commitment to privacy
- **Data Hygiene**: Automatic cleanup prevents data bloat
- **Reduced Liability**: Less data = less risk
### Negative
- **Implementation Effort**: Significant feature development
- **Operational Complexity**: Deletion jobs need monitoring
- **Feature Limitations**: Some features may be limited without consent
## Implementation Status
### What's Implemented
- ✅ Token cleanup job exists (tokenCleanupQueue)
- ❌ Activity log retention
- ❌ User data export endpoint
- ❌ Account deletion endpoint
- ❌ Cookie consent banner
- ❌ Data anonymization functions
### What Needs To Be Done
1. Add activity_log cleanup to background jobs
2. Create `/api/users/me/export` endpoint
3. Create `/api/users/me` DELETE endpoint with soft delete
4. Implement cookie consent UI component
5. Document data retention in privacy policy
6. Add anonymization utility functions
## Key Files (To Be Created/Modified)
- `src/services/backgroundJobService.ts` - Add retention jobs
- `src/routes/user.routes.ts` - Add export/delete endpoints
- `src/services/privacyService.server.ts` - Data export/deletion logic
- `src/components/CookieConsent.tsx` - Consent banner
- `src/utils/anonymize.ts` - Data anonymization utilities
## Compliance Checklist
### GDPR Requirements
- [ ] Article 15: Right of Access (data export)
- [ ] Article 17: Right to Erasure (account deletion)
- [ ] Article 20: Right to Data Portability (JSON export)
- [ ] Article 7: Conditions for Consent (cookie consent)
- [ ] Article 13: Information to be Provided (privacy policy)
### CCPA Requirements
- [ ] Right to Know (data export)
- [ ] Right to Delete (account deletion)
- [ ] Right to Opt-Out (cookie consent for analytics)
- [ ] Non-Discrimination (no feature penalty for privacy choices)

View File

@@ -4,49 +4,55 @@ This directory contains a log of the architectural decisions made for the Flyer
## 1. Foundational / Core Infrastructure
**[ADR-002](./0002-standardized-transaction-management.md)**: Standardized Transaction Management and Unit of Work Pattern (Proposed)
**[ADR-007](./0007-configuration-and-secrets-management.md)**: Configuration and Secrets Management (Proposed)
**[ADR-020](./0020-health-checks-and-liveness-readiness-probes.md)**: Health Checks and Liveness/Readiness Probes (Proposed)
**[ADR-002](./0002-standardized-transaction-management.md)**: Standardized Transaction Management and Unit of Work Pattern (Accepted)
**[ADR-007](./0007-configuration-and-secrets-management.md)**: Configuration and Secrets Management (Accepted)
**[ADR-020](./0020-health-checks-and-liveness-readiness-probes.md)**: Health Checks and Liveness/Readiness Probes (Accepted)
**[ADR-030](./0030-graceful-degradation-and-circuit-breaker.md)**: Graceful Degradation and Circuit Breaker Pattern (Proposed)
## 2. Data Management
**[ADR-009](./0009-caching-strategy-for-read-heavy-operations.md)**: Caching Strategy for Read-Heavy Operations (Proposed)
**[ADR-009](./0009-caching-strategy-for-read-heavy-operations.md)**: Caching Strategy for Read-Heavy Operations (Partially Implemented)
**[ADR-013](./0013-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Proposed)
**[ADR-019](./0019-data-backup-and-recovery-strategy.md)**: Data Backup and Recovery Strategy (Proposed)
**[ADR-023](./0023-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Proposed)
**[ADR-031](./0031-data-retention-and-privacy-compliance.md)**: Data Retention and Privacy Compliance (Proposed)
## 3. API & Integration
**[ADR-003](./0003-standardized-input-validation-using-middleware.md)**: Standardized Input Validation using Middleware (Proposed)
**[ADR-003](./0003-standardized-input-validation-using-middleware.md)**: Standardized Input Validation using Middleware (Accepted)
**[ADR-008](./0008-api-versioning-strategy.md)**: API Versioning Strategy (Proposed)
**[ADR-018](./0018-api-documentation-strategy.md)**: API Documentation Strategy (Proposed)
**[ADR-022](./0022-real-time-notification-system.md)**: Real-time Notification System (Proposed)
**[ADR-028](./0028-api-response-standardization.md)**: API Response Standardization and Envelope Pattern (Proposed)
## 4. Security & Compliance
**[ADR-001](./0001-standardized-error-handling.md)**: Standardized Error Handling for Service and Repository Layers (Accepted)
**[ADR-011](./0011-advanced-authorization-and-access-control-strategy.md)**: Advanced Authorization and Access Control Strategy (Proposed)
**[ADR-016](./0016-api-security-hardening.md)**: API Security Hardening (Proposed)
**[ADR-016](./0016-api-security-hardening.md)**: API Security Hardening (Accepted)
**[ADR-029](./0029-secret-rotation-and-key-management.md)**: Secret Rotation and Key Management Strategy (Proposed)
## 5. Observability & Monitoring
**[ADR-004](./0004-standardized-application-wide-structured-logging.md)**: Standardized Application-Wide Structured Logging (Proposed)
**[ADR-004](./0004-standardized-application-wide-structured-logging.md)**: Standardized Application-Wide Structured Logging (Accepted)
**[ADR-015](./0015-application-performance-monitoring-and-error-tracking.md)**: Application Performance Monitoring (APM) and Error Tracking (Proposed)
## 6. Deployment & Operations
**[ADR-006](./0006-background-job-processing-and-task-queues.md)**: Background Job Processing and Task Queues (Proposed)
**[ADR-006](./0006-background-job-processing-and-task-queues.md)**: Background Job Processing and Task Queues (Partially Implemented)
**[ADR-014](./0014-containerization-and-deployment-strategy.md)**: Containerization and Deployment Strategy (Proposed)
**[ADR-017](./0017-ci-cd-and-branching-strategy.md)**: CI/CD and Branching Strategy (Proposed)
**[ADR-024](./0024-feature-flagging-strategy.md)**: Feature Flagging Strategy (Proposed)
## 7. Frontend / User Interface
**[ADR-005](./0005-frontend-state-management-and-server-cache-strategy.md)**: Frontend State Management and Server Cache Strategy (Proposed)
**[ADR-012](./0012-frontend-component-library-and-design-system.md)**: Frontend Component Library and Design System (Proposed)
**[ADR-005](./0005-frontend-state-management-and-server-cache-strategy.md)**: Frontend State Management and Server Cache Strategy (Accepted)
**[ADR-012](./0012-frontend-component-library-and-design-system.md)**: Frontend Component Library and Design System (Partially Implemented)
**[ADR-025](./0025-internationalization-and-localization-strategy.md)**: Internationalization (i18n) and Localization (l10n) Strategy (Proposed)
**[ADR-026](./0026-standardized-client-side-structured-logging.md)**: Standardized Client-Side Structured Logging (Proposed)
## 8. Development Workflow & Quality
**[ADR-010](./0010-testing-strategy-and-standards.md)**: Testing Strategy and Standards (Proposed)
**[ADR-021](./0021-code-formatting-and-linting-unification.md)**: Code Formatting and Linting Unification (Proposed)
**[ADR-010](./0010-testing-strategy-and-standards.md)**: Testing Strategy and Standards (Accepted)
**[ADR-021](./0021-code-formatting-and-linting-unification.md)**: Code Formatting and Linting Unification (Accepted)
**[ADR-027](./0027-standardized-naming-convention-for-ai-and-database-types.md)**: Standardized Naming Convention for AI and Database Types (Accepted)

View File

@@ -3,6 +3,7 @@ import tseslint from 'typescript-eslint';
import pluginReact from 'eslint-plugin-react';
import pluginReactHooks from 'eslint-plugin-react-hooks';
import pluginReactRefresh from 'eslint-plugin-react-refresh';
import eslintConfigPrettier from 'eslint-config-prettier';
export default tseslint.config(
{
@@ -29,4 +30,6 @@ export default tseslint.config(
},
// TypeScript files
...tseslint.configs.recommended,
// Prettier compatibility - must be last to override other formatting rules
eslintConfigPrettier,
);

607
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "flyer-crawler",
"version": "0.9.67",
"version": "0.9.71",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "flyer-crawler",
"version": "0.9.67",
"version": "0.9.71",
"dependencies": {
"@bull-board/api": "^6.14.2",
"@bull-board/express": "^6.14.2",
@@ -22,6 +22,7 @@
"express": "^5.1.0",
"express-list-endpoints": "^7.1.1",
"express-rate-limit": "^8.2.1",
"helmet": "^8.1.0",
"ioredis": "^5.8.2",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",
@@ -92,8 +93,10 @@
"eslint-plugin-react-refresh": "^0.4.24",
"glob": "^13.0.0",
"globals": "16.5.0",
"husky": "^9.1.7",
"istanbul-reports": "^3.2.0",
"jsdom": "^27.2.0",
"lint-staged": "^16.2.7",
"msw": "^2.12.3",
"nyc": "^17.1.0",
"pino-pretty": "^13.1.3",
@@ -6144,6 +6147,22 @@
"url": "https://github.com/sponsors/epoberezkin"
}
},
"node_modules/ansi-escapes": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.2.0.tgz",
"integrity": "sha512-g6LhBsl+GBPRWGWsBtutpzBYuIIdBkLEvad5C/va/74Db018+5TZiyA26cZJAr3Rft5lprVqOIPxf5Vid6tqAw==",
"dev": true,
"license": "MIT",
"dependencies": {
"environment": "^1.0.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/ansi-regex": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
@@ -6952,6 +6971,19 @@
"balanced-match": "^1.0.0"
}
},
"node_modules/braces": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
"dev": true,
"license": "MIT",
"dependencies": {
"fill-range": "^7.1.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/browserslist": {
"version": "4.28.1",
"resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz",
@@ -7286,6 +7318,85 @@
"node": ">=6"
}
},
"node_modules/cli-cursor": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz",
"integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==",
"dev": true,
"license": "MIT",
"dependencies": {
"restore-cursor": "^5.0.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/cli-truncate": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-5.1.1.tgz",
"integrity": "sha512-SroPvNHxUnk+vIW/dOSfNqdy1sPEFkrTk6TUtqLCnBlo3N7TNYYkzzN7uSD6+jVjrdO4+p8nH7JzH6cIvUem6A==",
"dev": true,
"license": "MIT",
"dependencies": {
"slice-ansi": "^7.1.0",
"string-width": "^8.0.0"
},
"engines": {
"node": ">=20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/cli-truncate/node_modules/ansi-regex": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
"integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/cli-truncate/node_modules/string-width": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-8.1.0.tgz",
"integrity": "sha512-Kxl3KJGb/gxkaUMOjRsQ8IrXiGW75O4E3RPjFIINOVH8AMl2SQ/yWdTzWwF3FevIX9LcMAjJW+GRwAlAbTSXdg==",
"dev": true,
"license": "MIT",
"dependencies": {
"get-east-asian-width": "^1.3.0",
"strip-ansi": "^7.1.0"
},
"engines": {
"node": ">=20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/cli-truncate/node_modules/strip-ansi": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
"integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/cli-width": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz",
@@ -7394,6 +7505,16 @@
"node": ">= 0.8"
}
},
"node_modules/commander": {
"version": "14.0.2",
"resolved": "https://registry.npmjs.org/commander/-/commander-14.0.2.tgz",
"integrity": "sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=20"
}
},
"node_modules/commondir": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
@@ -8344,6 +8465,19 @@
"url": "https://github.com/fb55/entities?sponsor=1"
}
},
"node_modules/environment": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/environment/-/environment-1.1.0.tgz",
"integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/es-abstract": {
"version": "1.24.1",
"resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.1.tgz",
@@ -9292,6 +9426,19 @@
"node": ">=10"
}
},
"node_modules/fill-range": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
"dev": true,
"license": "MIT",
"dependencies": {
"to-regex-range": "^5.0.1"
},
"engines": {
"node": ">=8"
}
},
"node_modules/finalhandler": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz",
@@ -9816,6 +9963,19 @@
"node": "6.* || 8.* || >= 10.*"
}
},
"node_modules/get-east-asian-width": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz",
"integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/get-intrinsic": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
@@ -10193,6 +10353,15 @@
"dev": true,
"license": "MIT"
},
"node_modules/helmet": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/helmet/-/helmet-8.1.0.tgz",
"integrity": "sha512-jOiHyAZsmnr8LqoPGmCjYAaiuWwjAPLgY8ZX2XrmHawt99/u1y6RgrZMTeoPfpUbV96HOalYgz1qzkRbw54Pmg==",
"license": "MIT",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/help-me": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/help-me/-/help-me-5.0.0.tgz",
@@ -10307,6 +10476,22 @@
"node": ">= 6"
}
},
"node_modules/husky": {
"version": "9.1.7",
"resolved": "https://registry.npmjs.org/husky/-/husky-9.1.7.tgz",
"integrity": "sha512-5gs5ytaNjBrh5Ow3zrvdUUY+0VxIuWVL4i9irt6friV+BqdCfmV11CQTWMiBYWHbXhco+J1kHfTOUkePhCDvMA==",
"dev": true,
"license": "MIT",
"bin": {
"husky": "bin.js"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/typicode"
}
},
"node_modules/iconv-lite": {
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.1.tgz",
@@ -10720,6 +10905,16 @@
"dev": true,
"license": "MIT"
},
"node_modules/is-number": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.12.0"
}
},
"node_modules/is-number-object": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/is-number-object/-/is-number-object-1.1.1.tgz",
@@ -11755,6 +11950,134 @@
"url": "https://opencollective.com/parcel"
}
},
"node_modules/lint-staged": {
"version": "16.2.7",
"resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-16.2.7.tgz",
"integrity": "sha512-lDIj4RnYmK7/kXMya+qJsmkRFkGolciXjrsZ6PC25GdTfWOAWetR0ZbsNXRAj1EHHImRSalc+whZFg56F5DVow==",
"dev": true,
"license": "MIT",
"dependencies": {
"commander": "^14.0.2",
"listr2": "^9.0.5",
"micromatch": "^4.0.8",
"nano-spawn": "^2.0.0",
"pidtree": "^0.6.0",
"string-argv": "^0.3.2",
"yaml": "^2.8.1"
},
"bin": {
"lint-staged": "bin/lint-staged.js"
},
"engines": {
"node": ">=20.17"
},
"funding": {
"url": "https://opencollective.com/lint-staged"
}
},
"node_modules/listr2": {
"version": "9.0.5",
"resolved": "https://registry.npmjs.org/listr2/-/listr2-9.0.5.tgz",
"integrity": "sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==",
"dev": true,
"license": "MIT",
"dependencies": {
"cli-truncate": "^5.0.0",
"colorette": "^2.0.20",
"eventemitter3": "^5.0.1",
"log-update": "^6.1.0",
"rfdc": "^1.4.1",
"wrap-ansi": "^9.0.0"
},
"engines": {
"node": ">=20.0.0"
}
},
"node_modules/listr2/node_modules/ansi-regex": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
"integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/listr2/node_modules/ansi-styles": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
"integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/listr2/node_modules/emoji-regex": {
"version": "10.6.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz",
"integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==",
"dev": true,
"license": "MIT"
},
"node_modules/listr2/node_modules/string-width": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
"integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^10.3.0",
"get-east-asian-width": "^1.0.0",
"strip-ansi": "^7.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/listr2/node_modules/strip-ansi": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
"integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/listr2/node_modules/wrap-ansi": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
"integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^6.2.1",
"string-width": "^7.0.0",
"strip-ansi": "^7.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/locate-path": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
@@ -11852,6 +12175,111 @@
"integrity": "sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==",
"license": "MIT"
},
"node_modules/log-update": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/log-update/-/log-update-6.1.0.tgz",
"integrity": "sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-escapes": "^7.0.0",
"cli-cursor": "^5.0.0",
"slice-ansi": "^7.1.0",
"strip-ansi": "^7.1.0",
"wrap-ansi": "^9.0.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/log-update/node_modules/ansi-regex": {
"version": "6.2.2",
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
"integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
"node_modules/log-update/node_modules/ansi-styles": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
"integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/log-update/node_modules/emoji-regex": {
"version": "10.6.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz",
"integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==",
"dev": true,
"license": "MIT"
},
"node_modules/log-update/node_modules/string-width": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
"integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"emoji-regex": "^10.3.0",
"get-east-asian-width": "^1.0.0",
"strip-ansi": "^7.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/log-update/node_modules/strip-ansi": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
"integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-regex": "^6.0.1"
},
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
"node_modules/log-update/node_modules/wrap-ansi": {
"version": "9.0.2",
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
"integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^6.2.1",
"string-width": "^7.0.0",
"strip-ansi": "^7.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
"node_modules/long": {
"version": "5.3.2",
"resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz",
@@ -12004,6 +12432,33 @@
"node": ">= 0.6"
}
},
"node_modules/micromatch": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
"integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
"dev": true,
"license": "MIT",
"dependencies": {
"braces": "^3.0.3",
"picomatch": "^2.3.1"
},
"engines": {
"node": ">=8.6"
}
},
"node_modules/micromatch/node_modules/picomatch": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8.6"
},
"funding": {
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/mime": {
"version": "2.6.0",
"resolved": "https://registry.npmjs.org/mime/-/mime-2.6.0.tgz",
@@ -12042,6 +12497,19 @@
"url": "https://opencollective.com/express"
}
},
"node_modules/mimic-function": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz",
"integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/min-indent": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/min-indent/-/min-indent-1.0.1.tgz",
@@ -12320,6 +12788,19 @@
"license": "MIT",
"optional": true
},
"node_modules/nano-spawn": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/nano-spawn/-/nano-spawn-2.0.0.tgz",
"integrity": "sha512-tacvGzUY5o2D8CBh2rrwxyNojUsZNU2zjNTzKQrkgGJQTbGAfArVWXSKMBokBeeg6C7OLRGUEyoFlYbfeWQIqw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=20.17"
},
"funding": {
"url": "https://github.com/sindresorhus/nano-spawn?sponsor=1"
}
},
"node_modules/nanoid": {
"version": "3.3.11",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz",
@@ -12953,6 +13434,22 @@
"wrappy": "1"
}
},
"node_modules/onetime": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz",
"integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"mimic-function": "^5.0.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/optionator": {
"version": "0.9.4",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
@@ -13408,6 +13905,19 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/pidtree": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/pidtree/-/pidtree-0.6.0.tgz",
"integrity": "sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==",
"dev": true,
"license": "MIT",
"bin": {
"pidtree": "bin/pidtree.js"
},
"engines": {
"node": ">=0.10"
}
},
"node_modules/piexifjs": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/piexifjs/-/piexifjs-1.0.6.tgz",
@@ -14358,6 +14868,23 @@
"url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1"
}
},
"node_modules/restore-cursor": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz",
"integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==",
"dev": true,
"license": "MIT",
"dependencies": {
"onetime": "^7.0.0",
"signal-exit": "^4.1.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/retry": {
"version": "0.12.0",
"resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz",
@@ -14375,6 +14902,13 @@
"dev": true,
"license": "MIT"
},
"node_modules/rfdc": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz",
"integrity": "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==",
"dev": true,
"license": "MIT"
},
"node_modules/rimraf": {
"version": "6.1.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-6.1.2.tgz",
@@ -14957,6 +15491,52 @@
"node": ">=18"
}
},
"node_modules/slice-ansi": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz",
"integrity": "sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w==",
"dev": true,
"license": "MIT",
"dependencies": {
"ansi-styles": "^6.2.1",
"is-fullwidth-code-point": "^5.0.0"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/chalk/slice-ansi?sponsor=1"
}
},
"node_modules/slice-ansi/node_modules/ansi-styles": {
"version": "6.2.3",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
"integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/slice-ansi/node_modules/is-fullwidth-code-point": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz",
"integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"get-east-asian-width": "^1.3.1"
},
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/sonic-boom": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/sonic-boom/-/sonic-boom-4.2.0.tgz",
@@ -15230,6 +15810,16 @@
"safe-buffer": "~5.2.0"
}
},
"node_modules/string-argv": {
"version": "0.3.2",
"resolved": "https://registry.npmjs.org/string-argv/-/string-argv-0.3.2.tgz",
"integrity": "sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.6.19"
}
},
"node_modules/string-width": {
"version": "4.2.3",
"resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
@@ -15790,6 +16380,19 @@
"node": ">=14.14"
}
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-number": "^7.0.0"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/toidentifier": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",

View File

@@ -1,7 +1,7 @@
{
"name": "flyer-crawler",
"private": true,
"version": "0.9.67",
"version": "0.9.71",
"type": "module",
"scripts": {
"dev": "concurrently \"npm:start:dev\" \"vite\"",
@@ -24,7 +24,8 @@
"start:test": "NODE_ENV=test NODE_V8_COVERAGE=.coverage/tmp/integration-server tsx server.ts",
"db:reset:dev": "NODE_ENV=development tsx src/db/seed.ts",
"db:reset:test": "NODE_ENV=test tsx src/db/seed.ts",
"worker:prod": "NODE_ENV=production tsx src/services/queueService.server.ts"
"worker:prod": "NODE_ENV=production tsx src/services/queueService.server.ts",
"prepare": "husky"
},
"dependencies": {
"@bull-board/api": "^6.14.2",
@@ -41,6 +42,7 @@
"express": "^5.1.0",
"express-list-endpoints": "^7.1.1",
"express-rate-limit": "^8.2.1",
"helmet": "^8.1.0",
"ioredis": "^5.8.2",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",
@@ -111,8 +113,10 @@
"eslint-plugin-react-refresh": "^0.4.24",
"glob": "^13.0.0",
"globals": "16.5.0",
"husky": "^9.1.7",
"istanbul-reports": "^3.2.0",
"jsdom": "^27.2.0",
"lint-staged": "^16.2.7",
"msw": "^2.12.3",
"nyc": "^17.1.0",
"pino-pretty": "^13.1.3",

View File

@@ -1,6 +1,7 @@
// server.ts
import express, { Request, Response, NextFunction } from 'express';
import { randomUUID } from 'crypto';
import helmet from 'helmet';
import timeout from 'connect-timeout';
import cookieParser from 'cookie-parser';
import listEndpoints from 'express-list-endpoints';
@@ -62,6 +63,38 @@ logger.info('-----------------------------------------------\n');
const app = express();
// --- Security Headers Middleware (ADR-016) ---
// Helmet sets various HTTP headers to help protect the app from common web vulnerabilities.
// Must be applied early in the middleware chain, before any routes.
app.use(
  helmet({
    // Content Security Policy - configured for API + SPA frontend
    contentSecurityPolicy: {
      directives: {
        defaultSrc: ["'self'"],
        scriptSrc: ["'self'", "'unsafe-inline'"], // Allow inline scripts for React
        styleSrc: ["'self'", "'unsafe-inline'"], // Allow inline styles for Tailwind
        imgSrc: ["'self'", 'data:', 'blob:', 'https:'], // Allow images from various sources
        fontSrc: ["'self'", 'https:', 'data:'],
        connectSrc: ["'self'", 'https:', 'wss:'], // Allow API and WebSocket connections
        frameSrc: ["'none'"], // Disallow iframes
        objectSrc: ["'none'"], // Disallow plugins
        upgradeInsecureRequests: process.env.NODE_ENV === 'production' ? [] : null,
      },
    },
    // Cross-Origin settings for API
    crossOriginEmbedderPolicy: false, // Disabled to allow loading external images
    crossOriginResourcePolicy: { policy: 'cross-origin' }, // Allow cross-origin resource loading
    // Additional security headers
    hsts: {
      maxAge: 31536000, // 1 year in seconds
      includeSubDomains: true,
      preload: true,
    },
    referrerPolicy: { policy: 'strict-origin-when-cross-origin' },
  }),
);
// --- Core Middleware ---
// Increase the limit for JSON and URL-encoded bodies. This is crucial for handling large file uploads
// that are part of multipart/form-data requests, as the overall request size is checked.

303
src/config/env.ts Normal file
View File

@@ -0,0 +1,303 @@
// src/config/env.ts
/**
 * @file Centralized, schema-validated configuration service.
 * Implements ADR-007: Configuration and Secrets Management.
 *
 * This module parses and validates all environment variables at application startup.
 * If any required configuration is missing or invalid, the application will fail fast
 * with a clear error message.
 *
 * Usage:
 *   import { config } from './config/env';
 *   console.log(config.database.host);
 */
import { z } from 'zod';
// --- Schema Definitions ---
/**
* Helper to parse string to integer with default.
* Handles empty strings by treating them as undefined.
*/
const intWithDefault = (defaultValue: number) =>
z
.string()
.optional()
.transform((val) => (val && val.trim() !== '' ? parseInt(val, 10) : defaultValue))
.pipe(z.number().int());
/**
* Helper to parse string to float with default.
*/
const floatWithDefault = (defaultValue: number) =>
z
.string()
.optional()
.transform((val) => (val && val.trim() !== '' ? parseFloat(val) : defaultValue))
.pipe(z.number());
/**
* Helper to parse string 'true'/'false' to boolean.
*/
const booleanString = (defaultValue: boolean) =>
z
.string()
.optional()
.transform((val) => (val === undefined ? defaultValue : val === 'true'));
/**
* Database configuration schema.
*/
const databaseSchema = z.object({
host: z.string().min(1, 'DB_HOST is required'),
port: intWithDefault(5432),
user: z.string().min(1, 'DB_USER is required'),
password: z.string().min(1, 'DB_PASSWORD is required'),
name: z.string().min(1, 'DB_NAME is required'),
});
/**
* Redis configuration schema.
*/
const redisSchema = z.object({
url: z.string().url('REDIS_URL must be a valid URL'),
password: z.string().optional(),
});
/**
* Authentication configuration schema.
*/
const authSchema = z.object({
jwtSecret: z.string().min(32, 'JWT_SECRET must be at least 32 characters for security'),
jwtSecretPrevious: z.string().optional(), // For secret rotation (ADR-029)
});
/**
* SMTP/Email configuration schema.
* All fields are optional - email service degrades gracefully if not configured.
*/
const smtpSchema = z.object({
host: z.string().optional(),
port: intWithDefault(587),
user: z.string().optional(),
pass: z.string().optional(),
secure: booleanString(false),
fromEmail: z.string().email().optional(),
});
/**
* AI/Gemini configuration schema.
*/
const aiSchema = z.object({
geminiApiKey: z.string().optional(),
geminiRpm: intWithDefault(5),
priceQualityThreshold: floatWithDefault(0.5),
});
/**
* Google services configuration schema.
*/
const googleSchema = z.object({
mapsApiKey: z.string().optional(),
clientId: z.string().optional(),
clientSecret: z.string().optional(),
});
/**
* Worker concurrency configuration schema.
*/
const workerSchema = z.object({
concurrency: intWithDefault(1),
lockDuration: intWithDefault(30000),
emailConcurrency: intWithDefault(10),
analyticsConcurrency: intWithDefault(1),
cleanupConcurrency: intWithDefault(10),
weeklyAnalyticsConcurrency: intWithDefault(1),
});
/**
* Server configuration schema.
*/
const serverSchema = z.object({
nodeEnv: z.enum(['development', 'production', 'test']).default('development'),
port: intWithDefault(3001),
frontendUrl: z.string().url().optional(),
baseUrl: z.string().optional(),
storagePath: z.string().default('/var/www/flyer-crawler.projectium.com/flyer-images'),
});
/**
* Complete environment configuration schema.
*/
const envSchema = z.object({
database: databaseSchema,
redis: redisSchema,
auth: authSchema,
smtp: smtpSchema,
ai: aiSchema,
google: googleSchema,
worker: workerSchema,
server: serverSchema,
});
export type EnvConfig = z.infer<typeof envSchema>;
// --- Configuration Loading ---
/**
* Maps environment variables to the configuration structure.
* This is the single source of truth for which env vars map to which config keys.
*/
function loadEnvVars(): unknown {
return {
database: {
host: process.env.DB_HOST,
port: process.env.DB_PORT,
user: process.env.DB_USER,
password: process.env.DB_PASSWORD,
name: process.env.DB_NAME,
},
redis: {
url: process.env.REDIS_URL,
password: process.env.REDIS_PASSWORD,
},
auth: {
jwtSecret: process.env.JWT_SECRET,
jwtSecretPrevious: process.env.JWT_SECRET_PREVIOUS,
},
smtp: {
host: process.env.SMTP_HOST,
port: process.env.SMTP_PORT,
user: process.env.SMTP_USER,
pass: process.env.SMTP_PASS,
secure: process.env.SMTP_SECURE,
fromEmail: process.env.SMTP_FROM_EMAIL,
},
ai: {
geminiApiKey: process.env.GEMINI_API_KEY,
geminiRpm: process.env.GEMINI_RPM,
priceQualityThreshold: process.env.AI_PRICE_QUALITY_THRESHOLD,
},
google: {
mapsApiKey: process.env.GOOGLE_MAPS_API_KEY,
clientId: process.env.GOOGLE_CLIENT_ID,
clientSecret: process.env.GOOGLE_CLIENT_SECRET,
},
worker: {
concurrency: process.env.WORKER_CONCURRENCY,
lockDuration: process.env.WORKER_LOCK_DURATION,
emailConcurrency: process.env.EMAIL_WORKER_CONCURRENCY,
analyticsConcurrency: process.env.ANALYTICS_WORKER_CONCURRENCY,
cleanupConcurrency: process.env.CLEANUP_WORKER_CONCURRENCY,
weeklyAnalyticsConcurrency: process.env.WEEKLY_ANALYTICS_WORKER_CONCURRENCY,
},
server: {
nodeEnv: process.env.NODE_ENV,
port: process.env.PORT,
frontendUrl: process.env.FRONTEND_URL,
baseUrl: process.env.BASE_URL,
storagePath: process.env.STORAGE_PATH,
},
};
}
/**
* Validates and parses environment configuration.
* Throws a descriptive error if validation fails.
*/
function parseConfig(): EnvConfig {
const rawConfig = loadEnvVars();
const result = envSchema.safeParse(rawConfig);
if (!result.success) {
const errors = result.error.issues.map((issue) => {
const path = issue.path.join('.');
return ` - ${path}: ${issue.message}`;
});
const errorMessage = [
'',
'╔════════════════════════════════════════════════════════════════╗',
'║ CONFIGURATION ERROR - APPLICATION STARTUP ║',
'╚════════════════════════════════════════════════════════════════╝',
'',
'The following environment variables are missing or invalid:',
'',
...errors,
'',
'Please check your .env file or environment configuration.',
'See ADR-007 for the complete list of required environment variables.',
'',
].join('\n');
// In test environment, throw instead of exiting to allow test frameworks to catch
if (process.env.NODE_ENV === 'test') {
throw new Error(errorMessage);
}
console.error(errorMessage);
process.exit(1);
}
return result.data;
}
// --- Exported Configuration ---
/**
* The validated application configuration.
* This is a singleton that is parsed once at module load time.
*
* @example
* ```typescript
* import { config } from './config/env';
*
* // Access database config
* const pool = new Pool({
* host: config.database.host,
* port: config.database.port,
* user: config.database.user,
* password: config.database.password,
* database: config.database.name,
* });
*
* // Check environment
 * if (config.server.nodeEnv === 'production') {
* // production-only logic
* }
* ```
*/
export const config: EnvConfig = parseConfig();
// --- Convenience Helpers ---
/**
* Returns true if running in production environment.
*/
export const isProduction = config.server.nodeEnv === 'production';
/**
* Returns true if running in test environment.
*/
export const isTest = config.server.nodeEnv === 'test';
/**
* Returns true if running in development environment.
*/
export const isDevelopment = config.server.nodeEnv === 'development';
/**
* Returns true if SMTP is configured (all required fields present).
*/
export const isSmtpConfigured =
!!config.smtp.host && !!config.smtp.user && !!config.smtp.pass && !!config.smtp.fromEmail;
/**
* Returns true if AI services are configured.
*/
export const isAiConfigured = !!config.ai.geminiApiKey;
/**
* Returns true if Google Maps is configured.
*/
export const isGoogleMapsConfigured = !!config.google.mapsApiKey;
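To make the fail-fast contract concrete, here is a sketch of a consumer module; it mirrors the JSDoc example above, and the empty-string behaviour follows from the `intWithDefault` transform (an empty `DB_PORT` falls back to 5432 instead of producing `NaN`):

```typescript
import { Pool } from 'pg';
import { config, isProduction, isSmtpConfigured } from './config/env';

// Importing the module triggers parseConfig(); a missing JWT_SECRET or DB_HOST
// stops the process here with the boxed error message rather than at first query.
const pool = new Pool({
  host: config.database.host,
  port: config.database.port, // '' or unset DB_PORT -> 5432
  user: config.database.user,
  password: config.database.password,
  database: config.database.name,
});

if (isProduction && !isSmtpConfigured) {
  // smtpSchema is all-optional, so email degrades gracefully instead of failing startup.
  console.warn('SMTP not configured; email features are disabled.');
}

export { pool };
```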

View File

@@ -8,6 +8,7 @@ import { z } from 'zod';
import * as db from '../services/db/index.db';
import type { UserProfile } from '../types';
import { geocodingService } from '../services/geocodingService.server';
import { cacheService } from '../services/cacheService.server';
import { requireFileUpload } from '../middleware/fileUpload.middleware'; // This was a duplicate, fixed.
import {
createUploadMiddleware,
@@ -635,6 +636,44 @@ router.post(
},
);
/**
* POST /api/admin/system/clear-cache - Clears the application data cache.
* Clears cached flyers, brands, and stats data from Redis.
* Requires admin privileges.
*/
router.post(
'/system/clear-cache',
adminTriggerLimiter,
validateRequest(emptySchema),
async (req: Request, res: Response, next: NextFunction) => {
const userProfile = req.user as UserProfile;
req.log.info(
`[Admin] Manual cache clear request received from user: ${userProfile.user.user_id}`,
);
try {
const [flyersDeleted, brandsDeleted, statsDeleted] = await Promise.all([
cacheService.invalidateFlyers(req.log),
cacheService.invalidateBrands(req.log),
cacheService.invalidateStats(req.log),
]);
const totalDeleted = flyersDeleted + brandsDeleted + statsDeleted;
res.status(200).json({
message: `Successfully cleared the application cache. ${totalDeleted} keys were removed.`,
details: {
flyers: flyersDeleted,
brands: brandsDeleted,
stats: statsDeleted,
},
});
} catch (error) {
req.log.error({ error }, '[Admin] Failed to clear application cache.');
next(error);
}
},
);
/* Catches errors from multer (e.g., file size, file filter) */
router.use(handleMulterError);
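For reference, a hedged sketch of exercising the new endpoint from a script (the base URL and token acquisition are assumptions; the response shape matches the handler above):

```typescript
async function clearApplicationCache(baseUrl: string, adminToken: string) {
  const res = await fetch(`${baseUrl}/api/admin/system/clear-cache`, {
    method: 'POST',
    headers: {
      Authorization: `Bearer ${adminToken}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({}), // validated against emptySchema
  });
  if (!res.ok) throw new Error(`Cache clear failed: ${res.status}`);
  // e.g. { message: 'Successfully cleared the application cache. 12 keys were removed.',
  //        details: { flyers: 8, brands: 1, stats: 3 } }
  return res.json();
}
```

Note the route also sits behind `adminTriggerLimiter`, so repeated calls in quick succession may be rate-limited.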

View File

@@ -234,6 +234,9 @@ router.post(
* POST /api/ai/upload-legacy - Process a flyer upload from a legacy client.
* This is an authenticated route that processes the flyer synchronously.
* This is used for integration testing the legacy upload flow.
*
* @deprecated Use POST /api/ai/upload-and-process instead for async queue-based processing (ADR-0006).
* This synchronous endpoint is retained only for integration testing purposes.
*/
router.post(
'/upload-legacy',
@@ -282,9 +285,12 @@ router.get(
);
/**
* This endpoint saves the processed flyer data to the database. It is the final step
* in the flyer upload workflow after the AI has extracted the data.
* POST /api/ai/flyers/process - Saves the processed flyer data to the database.
* This is the final step in the flyer upload workflow after the AI has extracted the data.
* It uses `optionalAuth` to handle submissions from both anonymous and authenticated users.
*
* @deprecated Use POST /api/ai/upload-and-process instead for async queue-based processing (ADR-0006).
* This synchronous endpoint processes flyers inline and should be migrated to the queue-based approach.
*/
router.post(
'/flyers/process',

View File

@@ -1,21 +1,125 @@
// src/routes/health.routes.ts
// All route handlers now use req.log (request-scoped logger) as per ADR-004
/**
* @file Health check endpoints implementing ADR-020: Health Checks and Liveness/Readiness Probes.
*
* Provides endpoints for:
* - Liveness probe (/live) - Is the server process running?
* - Readiness probe (/ready) - Is the server ready to accept traffic?
* - Individual service health checks (db, redis, storage)
*/
import { Router, Request, Response, NextFunction } from 'express';
// All route handlers now use req.log (request-scoped logger) as per ADR-004
import { z } from 'zod';
// All route handlers now use req.log (request-scoped logger) as per ADR-004
import { checkTablesExist, getPoolStatus } from '../services/db/connection.db';
// Removed: import { logger } from '../services/logger.server';
// All route handlers now use req.log (request-scoped logger) as per ADR-004
import { checkTablesExist, getPoolStatus, getPool } from '../services/db/connection.db';
import { connection as redisConnection } from '../services/queueService.server';
import fs from 'node:fs/promises';
// All route handlers now use req.log (request-scoped logger) as per ADR-004
import { getSimpleWeekAndYear } from '../utils/dateUtils';
// All route handlers now use req.log (request-scoped logger) as per ADR-004
import { validateRequest } from '../middleware/validation.middleware';
const router = Router();
// --- Types for Health Check Response ---
interface ServiceHealth {
status: 'healthy' | 'degraded' | 'unhealthy';
latency?: number;
message?: string;
details?: Record<string, unknown>;
}
interface ReadinessResponse {
status: 'healthy' | 'degraded' | 'unhealthy';
timestamp: string;
uptime: number;
services: {
database: ServiceHealth;
redis: ServiceHealth;
storage: ServiceHealth;
};
}
// --- Helper Functions ---
/**
* Checks database connectivity with timing.
*/
async function checkDatabase(): Promise<ServiceHealth> {
const start = Date.now();
try {
const pool = getPool();
await pool.query('SELECT 1');
const latency = Date.now() - start;
const poolStatus = getPoolStatus();
// Consider degraded if waiting connections > 3
const status = poolStatus.waitingCount > 3 ? 'degraded' : 'healthy';
return {
status,
latency,
details: {
totalConnections: poolStatus.totalCount,
idleConnections: poolStatus.idleCount,
waitingConnections: poolStatus.waitingCount,
},
};
} catch (error) {
return {
status: 'unhealthy',
latency: Date.now() - start,
message: error instanceof Error ? error.message : 'Database connection failed',
};
}
}
/**
* Checks Redis connectivity with timing.
*/
async function checkRedis(): Promise<ServiceHealth> {
const start = Date.now();
try {
const reply = await redisConnection.ping();
const latency = Date.now() - start;
if (reply === 'PONG') {
return { status: 'healthy', latency };
}
return {
status: 'unhealthy',
latency,
message: `Unexpected ping response: ${reply}`,
};
} catch (error) {
return {
status: 'unhealthy',
latency: Date.now() - start,
message: error instanceof Error ? error.message : 'Redis connection failed',
};
}
}
/**
* Checks storage accessibility.
*/
async function checkStorage(): Promise<ServiceHealth> {
const storagePath =
process.env.STORAGE_PATH || '/var/www/flyer-crawler.projectium.com/flyer-images';
const start = Date.now();
try {
await fs.access(storagePath, fs.constants.W_OK);
return {
status: 'healthy',
latency: Date.now() - start,
details: { path: storagePath },
};
} catch {
return {
status: 'unhealthy',
latency: Date.now() - start,
message: `Storage not accessible: ${storagePath}`,
};
}
}
// --- Zod Schemas for Health Routes (as per ADR-003) ---
// These routes do not expect any input, so we define empty schemas
// to maintain a consistent validation pattern across the application.
@@ -28,6 +132,104 @@ router.get('/ping', validateRequest(emptySchema), (_req: Request, res: Response)
res.status(200).send('pong');
});
// =============================================================================
// KUBERNETES PROBES (ADR-020)
// =============================================================================
/**
* GET /api/health/live - Liveness probe for container orchestration.
*
* Returns 200 OK if the server process is running.
* If this fails, the orchestrator should restart the container.
*
* This endpoint is intentionally simple and has no external dependencies.
* It only checks that the Node.js process can handle HTTP requests.
*/
router.get('/live', validateRequest(emptySchema), (_req: Request, res: Response) => {
res.status(200).json({
status: 'ok',
timestamp: new Date().toISOString(),
});
});
/**
* GET /api/health/ready - Readiness probe for container orchestration.
*
* Returns 200 OK if the server is ready to accept traffic.
* Checks all critical dependencies (database, Redis).
* If this fails, the orchestrator should remove the container from the load balancer.
*
* Response includes detailed status of each service for debugging.
*/
router.get('/ready', validateRequest(emptySchema), async (req: Request, res: Response) => {
// Check all services in parallel for speed
const [database, redis, storage] = await Promise.all([
checkDatabase(),
checkRedis(),
checkStorage(),
]);
// Determine overall status
// - 'healthy' if all critical services (db, redis) are healthy
// - 'degraded' if any service is degraded but none unhealthy
// - 'unhealthy' if any critical service is unhealthy
const criticalServices = [database, redis];
const allServices = [database, redis, storage];
let overallStatus: 'healthy' | 'degraded' | 'unhealthy' = 'healthy';
if (criticalServices.some((s) => s.status === 'unhealthy')) {
overallStatus = 'unhealthy';
} else if (allServices.some((s) => s.status === 'degraded')) {
overallStatus = 'degraded';
}
const response: ReadinessResponse = {
status: overallStatus,
timestamp: new Date().toISOString(),
uptime: process.uptime(),
services: {
database,
redis,
storage,
},
};
// Return appropriate HTTP status code
// 200 = healthy or degraded (can still handle traffic)
// 503 = unhealthy (should not receive traffic)
const httpStatus = overallStatus === 'unhealthy' ? 503 : 200;
return res.status(httpStatus).json(response);
});
/**
* GET /api/health/startup - Startup probe for container orchestration.
*
* Similar to readiness but used during container startup.
* The orchestrator will not send liveness/readiness probes until this succeeds.
* This allows for longer initialization times without triggering restarts.
*/
router.get('/startup', validateRequest(emptySchema), async (req: Request, res: Response) => {
// For startup, we only check database connectivity
// Redis and storage can be checked later in readiness
const database = await checkDatabase();
if (database.status === 'unhealthy') {
return res.status(503).json({
status: 'starting',
message: 'Waiting for database connection',
database,
});
}
return res.status(200).json({
status: 'started',
timestamp: new Date().toISOString(),
database,
});
});
/**
* GET /api/health/db-schema - Checks if all essential database tables exist.
* This is a critical check to ensure the database schema is correctly set up.
@@ -49,7 +251,8 @@ router.get('/db-schema', validateRequest(emptySchema), async (req, res, next: Ne
return next(error);
}
const message =
(error as any)?.message || 'An unknown error occurred during DB schema check.';
(error as { message?: string })?.message ||
'An unknown error occurred during DB schema check.';
return next(new Error(message));
}
});
@@ -59,16 +262,15 @@ router.get('/db-schema', validateRequest(emptySchema), async (req, res, next: Ne
* This is important for features like file uploads.
*/
router.get('/storage', validateRequest(emptySchema), async (req, res, next: NextFunction) => {
const storagePath = process.env.STORAGE_PATH || '/var/www/flyer-crawler.projectium.com/flyer-images';
const storagePath =
process.env.STORAGE_PATH || '/var/www/flyer-crawler.projectium.com/flyer-images';
try {
await fs.access(storagePath, fs.constants.W_OK); // Use fs.promises
return res
.status(200)
.json({
success: true,
message: `Storage directory '${storagePath}' is accessible and writable.`,
});
} catch (error: unknown) {
return res.status(200).json({
success: true,
message: `Storage directory '${storagePath}' is accessible and writable.`,
});
} catch {
next(
new Error(
`Storage check failed. Ensure the directory '${storagePath}' exists and is writable by the application.`,
@@ -103,7 +305,8 @@ router.get(
return next(error);
}
const message =
(error as any)?.message || 'An unknown error occurred during DB pool check.';
(error as { message?: string })?.message ||
'An unknown error occurred during DB pool check.';
return next(new Error(message));
}
},
@@ -141,7 +344,8 @@ router.get(
return next(error);
}
const message =
(error as any)?.message || 'An unknown error occurred during Redis health check.';
(error as { message?: string })?.message ||
'An unknown error occurred during Redis health check.';
return next(new Error(message));
}
},
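A worked example of the readiness aggregation above (illustrative only, mirroring the `/ready` handler's logic): storage is deliberately non-critical, so an unhealthy storage check alone still yields HTTP 200.

```typescript
type Status = 'healthy' | 'degraded' | 'unhealthy';

function overallStatus(db: Status, redis: Status, storage: Status): Status {
  const critical = [db, redis];
  const all = [db, redis, storage];
  if (critical.some((s) => s === 'unhealthy')) return 'unhealthy';
  if (all.some((s) => s === 'degraded')) return 'degraded';
  return 'healthy';
}

overallStatus('healthy', 'healthy', 'unhealthy'); // 'healthy'   -> 200 (storage is non-critical)
overallStatus('degraded', 'healthy', 'healthy'); //  'degraded' -> 200, details flag the pool
overallStatus('healthy', 'unhealthy', 'healthy'); // 'unhealthy' -> 503, pulled from rotation
```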

View File

@@ -0,0 +1,226 @@
// src/services/cacheService.server.ts
/**
* @file Centralized caching service implementing the Cache-Aside pattern.
* This service provides a reusable wrapper around Redis for caching read-heavy operations.
* See ADR-009 for the caching strategy documentation.
*/
import type { Logger } from 'pino';
import { connection as redis } from './redis.server';
import { logger as globalLogger } from './logger.server';
/**
* TTL values in seconds for different cache types.
* These can be tuned based on data volatility and freshness requirements.
*/
export const CACHE_TTL = {
/** Brand/store list - rarely changes, safe to cache for 1 hour */
BRANDS: 60 * 60,
/** Flyer list - changes when new flyers are added, cache for 5 minutes */
FLYERS: 5 * 60,
/** Individual flyer data - cache for 10 minutes */
FLYER: 10 * 60,
/** Flyer items - cache for 10 minutes */
FLYER_ITEMS: 10 * 60,
/** Statistics - can be slightly stale, cache for 5 minutes */
STATS: 5 * 60,
/** Most frequent sales - aggregated data, cache for 15 minutes */
FREQUENT_SALES: 15 * 60,
/** Categories - rarely changes, cache for 1 hour */
CATEGORIES: 60 * 60,
} as const;
/**
* Cache key prefixes for different data types.
* Using consistent prefixes allows for pattern-based invalidation.
*/
export const CACHE_PREFIX = {
BRANDS: 'cache:brands',
FLYERS: 'cache:flyers',
FLYER: 'cache:flyer',
FLYER_ITEMS: 'cache:flyer-items',
STATS: 'cache:stats',
FREQUENT_SALES: 'cache:frequent-sales',
CATEGORIES: 'cache:categories',
} as const;
export interface CacheOptions {
/** Time-to-live in seconds */
ttl: number;
/** Optional logger for this operation */
logger?: Logger;
}
/**
* Centralized cache service implementing the Cache-Aside pattern.
* All cache operations are fail-safe - cache failures do not break the application.
*/
class CacheService {
/**
* Retrieves a value from cache.
* @param key The cache key
* @param logger Optional logger for this operation
* @returns The cached value or null if not found/error
*/
async get<T>(key: string, logger: Logger = globalLogger): Promise<T | null> {
try {
const cached = await redis.get(key);
if (cached) {
logger.debug({ cacheKey: key }, 'Cache hit');
return JSON.parse(cached) as T;
}
logger.debug({ cacheKey: key }, 'Cache miss');
return null;
} catch (error) {
logger.warn({ err: error, cacheKey: key }, 'Redis GET failed, proceeding without cache');
return null;
}
}
/**
* Stores a value in cache with TTL.
* @param key The cache key
* @param value The value to cache (will be JSON stringified)
* @param ttl Time-to-live in seconds
* @param logger Optional logger for this operation
*/
async set<T>(key: string, value: T, ttl: number, logger: Logger = globalLogger): Promise<void> {
try {
await redis.set(key, JSON.stringify(value), 'EX', ttl);
logger.debug({ cacheKey: key, ttl }, 'Value cached');
} catch (error) {
logger.warn({ err: error, cacheKey: key }, 'Redis SET failed, value not cached');
}
}
/**
* Deletes a specific key from cache.
* @param key The cache key to delete
* @param logger Optional logger for this operation
*/
async del(key: string, logger: Logger = globalLogger): Promise<void> {
try {
await redis.del(key);
logger.debug({ cacheKey: key }, 'Cache key deleted');
} catch (error) {
logger.warn({ err: error, cacheKey: key }, 'Redis DEL failed');
}
}
/**
* Invalidates all cache keys matching a pattern.
* Uses SCAN for safe iteration over large key sets.
* @param pattern The pattern to match (e.g., 'cache:flyers*')
* @param logger Optional logger for this operation
* @returns The number of keys deleted
*/
async invalidatePattern(pattern: string, logger: Logger = globalLogger): Promise<number> {
let cursor = '0';
let totalDeleted = 0;
try {
do {
const [nextCursor, keys] = await redis.scan(cursor, 'MATCH', pattern, 'COUNT', 100);
cursor = nextCursor;
if (keys.length > 0) {
const deletedCount = await redis.del(...keys);
totalDeleted += deletedCount;
}
} while (cursor !== '0');
logger.info({ pattern, totalDeleted }, 'Cache invalidation completed');
return totalDeleted;
} catch (error) {
logger.error({ err: error, pattern }, 'Cache invalidation failed');
throw error;
}
}
/**
* Implements the Cache-Aside pattern: try cache first, fall back to fetcher, cache result.
* This is the primary method for adding caching to existing repository methods.
*
* @param key The cache key
* @param fetcher Function that retrieves data from the source (e.g., database)
* @param options Cache options including TTL
* @returns The data (from cache or fetcher)
*
* @example
* ```typescript
* const brands = await cacheService.getOrSet(
* CACHE_PREFIX.BRANDS,
* () => this.db.query('SELECT * FROM stores'),
* { ttl: CACHE_TTL.BRANDS, logger }
* );
* ```
*/
async getOrSet<T>(
key: string,
fetcher: () => Promise<T>,
options: CacheOptions,
): Promise<T> {
const logger = options.logger ?? globalLogger;
// Try to get from cache first
const cached = await this.get<T>(key, logger);
if (cached !== null) {
return cached;
}
// Cache miss - fetch from source
const data = await fetcher();
// Cache the result (fire-and-forget, don't await)
this.set(key, data, options.ttl, logger).catch(() => {
// Error already logged in set()
});
return data;
}
// --- Convenience methods for specific cache types ---
/**
* Invalidates all brand-related cache entries.
*/
async invalidateBrands(logger: Logger = globalLogger): Promise<number> {
return this.invalidatePattern(`${CACHE_PREFIX.BRANDS}*`, logger);
}
/**
* Invalidates all flyer-related cache entries.
*/
async invalidateFlyers(logger: Logger = globalLogger): Promise<number> {
const patterns = [
`${CACHE_PREFIX.FLYERS}*`,
`${CACHE_PREFIX.FLYER}*`,
`${CACHE_PREFIX.FLYER_ITEMS}*`,
];
let total = 0;
for (const pattern of patterns) {
total += await this.invalidatePattern(pattern, logger);
}
return total;
}
/**
* Invalidates cache for a specific flyer and its items.
*/
async invalidateFlyer(flyerId: number, logger: Logger = globalLogger): Promise<void> {
await Promise.all([
this.del(`${CACHE_PREFIX.FLYER}:${flyerId}`, logger),
this.del(`${CACHE_PREFIX.FLYER_ITEMS}:${flyerId}`, logger),
// Also invalidate the flyers list since it may contain this flyer
this.invalidatePattern(`${CACHE_PREFIX.FLYERS}*`, logger),
]);
}
/**
* Invalidates all statistics cache entries.
*/
async invalidateStats(logger: Logger = globalLogger): Promise<number> {
return this.invalidatePattern(`${CACHE_PREFIX.STATS}*`, logger);
}
}
export const cacheService = new CacheService();
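A usage sketch tying the service to the repository changes later in this diff (the `fetchFromDb` callback below is hypothetical; the real call sites are in `flyer.db.ts`):

```typescript
import type { Logger } from 'pino';
import { cacheService, CACHE_PREFIX, CACHE_TTL } from './cacheService.server';

async function getFlyersCached(
  fetchFromDb: (limit: number, offset: number) => Promise<unknown[]>,
  logger: Logger,
  limit = 20,
  offset = 0,
) {
  // Key mirrors flyer.db.ts: one cache entry per (limit, offset) page.
  return cacheService.getOrSet(
    `${CACHE_PREFIX.FLYERS}:${limit}:${offset}`,
    () => fetchFromDb(limit, offset),
    { ttl: CACHE_TTL.FLYERS, logger },
  );
}

// After any write, SCAN-based invalidation wipes every cached page in one pass:
// await cacheService.invalidatePattern(`${CACHE_PREFIX.FLYERS}*`, logger);
```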

View File

@@ -18,6 +18,7 @@ describe('Address DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockDb.query.mockReset();
addressRepo = new AddressRepository(mockDb);
});

View File

@@ -40,6 +40,7 @@ describe('Admin DB Service', () => {
beforeEach(() => {
// Reset the global mock's call history before each test.
vi.clearAllMocks();
mockDb.query.mockReset();
// Reset the withTransaction mock before each test
vi.mocked(withTransaction).mockImplementation(async (callback) => {

View File

@@ -47,6 +47,7 @@ describe('Budget DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockDb.query.mockReset();
// Instantiate the repository with the minimal mock db for each test
budgetRepo = new BudgetRepository(mockDb);
});

View File

@@ -28,6 +28,7 @@ import { logger as mockLogger } from '../logger.server';
describe('Conversion DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Make getPool return our mock instance for each test
vi.mocked(getPool).mockReturnValue(mockPoolInstance as any);
});

View File

@@ -34,6 +34,16 @@ vi.mock('../logger.server', () => ({
}));
import { logger as mockLogger } from '../logger.server';
// Mock cacheService to bypass caching logic during tests
vi.mock('../cacheService.server', () => ({
cacheService: {
getOrSet: vi.fn(async (_key, callback) => callback()),
invalidateFlyer: vi.fn(),
},
CACHE_TTL: { BRANDS: 3600, FLYERS: 300, FLYER_ITEMS: 600 },
CACHE_PREFIX: { BRANDS: 'brands', FLYERS: 'flyers', FLYER_ITEMS: 'flyer_items' },
}));
// Mock the withTransaction helper
vi.mock('./connection.db', async (importOriginal) => {
const actual = await importOriginal<typeof import('./connection.db')>();
@@ -46,6 +56,7 @@ describe('Flyer DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// In a transaction, `pool.connect()` returns a client. That client has a `release` method.
// For these tests, we simulate this by having `connect` resolve to the pool instance itself,
// and we ensure the `release` method is mocked on that instance.
@@ -586,18 +597,6 @@ describe('Flyer DB Service', () => {
});
describe('getFlyers', () => {
const expectedQuery = `
SELECT
f.*,
json_build_object(
'store_id', s.store_id,
'name', s.name,
'logo_url', s.logo_url
) as store
FROM public.flyers f
JOIN public.stores s ON f.store_id = s.store_id
ORDER BY f.created_at DESC LIMIT $1 OFFSET $2`;
it('should use default limit and offset when none are provided', async () => {
console.log('[TEST DEBUG] Running test: getFlyers > should use default limit and offset');
const mockFlyers: Flyer[] = [createMockFlyer({ flyer_id: 1 })];
@@ -611,7 +610,7 @@ describe('Flyer DB Service', () => {
);
expect(mockPoolInstance.query).toHaveBeenCalledWith(
expectedQuery,
expect.stringContaining('FROM public.flyers f'),
[20, 0], // Default values
);
});
@@ -629,7 +628,7 @@ describe('Flyer DB Service', () => {
);
expect(mockPoolInstance.query).toHaveBeenCalledWith(
expectedQuery,
expect.stringContaining('FROM public.flyers f'),
[10, 5], // Provided values
);
});
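Two related changes in this spec are worth calling out: the `cacheService` mock passes `getOrSet` straight through to its fetcher, so the unit tests keep exercising the SQL paths without a live Redis, and the exact-string query assertion was relaxed to `expect.stringContaining('FROM public.flyers f')` because the SQL literal now lives inside the cache callback, where its formatting can shift without changing behaviour.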

View File

@@ -3,6 +3,7 @@ import type { Pool, PoolClient } from 'pg';
import { getPool, withTransaction } from './connection.db';
import type { Logger } from 'pino';
import { UniqueConstraintError, NotFoundError, handleDbError } from './errors.db';
import { cacheService, CACHE_TTL, CACHE_PREFIX } from '../cacheService.server';
import type {
Flyer,
FlyerItem,
@@ -229,22 +230,31 @@ export class FlyerRepository {
/**
* Retrieves all distinct brands from the stores table.
* Uses cache-aside pattern with 1-hour TTL (brands rarely change).
* @returns A promise that resolves to an array of Brand objects.
*/
async getAllBrands(logger: Logger): Promise<Brand[]> {
try {
const query = `
SELECT s.store_id as brand_id, s.name, s.logo_url, s.created_at, s.updated_at
FROM public.stores s
ORDER BY s.name;
`;
const res = await this.db.query<Brand>(query);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getAllBrands', {}, {
defaultMessage: 'Failed to retrieve brands from database.',
});
}
const cacheKey = CACHE_PREFIX.BRANDS;
return cacheService.getOrSet<Brand[]>(
cacheKey,
async () => {
try {
const query = `
SELECT s.store_id as brand_id, s.name, s.logo_url, s.created_at, s.updated_at
FROM public.stores s
ORDER BY s.name;
`;
const res = await this.db.query<Brand>(query);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getAllBrands', {}, {
defaultMessage: 'Failed to retrieve brands from database.',
});
}
},
{ ttl: CACHE_TTL.BRANDS, logger },
);
}
/**
@@ -262,49 +272,67 @@ export class FlyerRepository {
/**
* Retrieves all flyers from the database, ordered by creation date.
* Uses cache-aside pattern with 5-minute TTL.
* @param limit The maximum number of flyers to return.
* @param offset The number of flyers to skip.
* @returns A promise that resolves to an array of Flyer objects.
*/
async getFlyers(logger: Logger, limit: number = 20, offset: number = 0): Promise<Flyer[]> {
try {
const query = `
SELECT
f.*,
json_build_object(
'store_id', s.store_id,
'name', s.name,
'logo_url', s.logo_url
) as store
FROM public.flyers f
JOIN public.stores s ON f.store_id = s.store_id
ORDER BY f.created_at DESC LIMIT $1 OFFSET $2`;
const res = await this.db.query<Flyer>(query, [limit, offset]);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyers', { limit, offset }, {
defaultMessage: 'Failed to retrieve flyers from database.',
});
}
const cacheKey = `${CACHE_PREFIX.FLYERS}:${limit}:${offset}`;
return cacheService.getOrSet<Flyer[]>(
cacheKey,
async () => {
try {
const query = `
SELECT
f.*,
json_build_object(
'store_id', s.store_id,
'name', s.name,
'logo_url', s.logo_url
) as store
FROM public.flyers f
JOIN public.stores s ON f.store_id = s.store_id
ORDER BY f.created_at DESC LIMIT $1 OFFSET $2`;
const res = await this.db.query<Flyer>(query, [limit, offset]);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyers', { limit, offset }, {
defaultMessage: 'Failed to retrieve flyers from database.',
});
}
},
{ ttl: CACHE_TTL.FLYERS, logger },
);
}
/**
* Retrieves all items for a specific flyer.
* Uses cache-aside pattern with 10-minute TTL.
* @param flyerId The ID of the flyer.
* @returns A promise that resolves to an array of FlyerItem objects.
*/
async getFlyerItems(flyerId: number, logger: Logger): Promise<FlyerItem[]> {
try {
const res = await this.db.query<FlyerItem>(
'SELECT * FROM public.flyer_items WHERE flyer_id = $1 ORDER BY flyer_item_id ASC',
[flyerId],
);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyerItems', { flyerId }, {
defaultMessage: 'Failed to retrieve flyer items from database.',
});
}
const cacheKey = `${CACHE_PREFIX.FLYER_ITEMS}:${flyerId}`;
return cacheService.getOrSet<FlyerItem[]>(
cacheKey,
async () => {
try {
const res = await this.db.query<FlyerItem>(
'SELECT * FROM public.flyer_items WHERE flyer_id = $1 ORDER BY flyer_item_id ASC',
[flyerId],
);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyerItems', { flyerId }, {
defaultMessage: 'Failed to retrieve flyer items from database.',
});
}
},
{ ttl: CACHE_TTL.FLYER_ITEMS, logger },
);
}
/**
@@ -399,6 +427,7 @@ export class FlyerRepository {
/**
* Deletes a flyer and all its associated items in a transaction.
* This should typically be an admin-only action.
* Invalidates related cache entries after successful deletion.
* @param flyerId The ID of the flyer to delete.
*/
async deleteFlyer(flyerId: number, logger: Logger): Promise<void> {
@@ -413,6 +442,9 @@ export class FlyerRepository {
}
logger.info(`Successfully deleted flyer with ID: ${flyerId}`);
});
// Invalidate cache after successful deletion
await cacheService.invalidateFlyer(flyerId, logger);
} catch (error) {
handleDbError(error, logger, 'Database transaction error in deleteFlyer', { flyerId }, {
defaultMessage: 'Failed to delete flyer.',

View File

@@ -29,6 +29,7 @@ describe('Gamification DB Service', () => {
beforeEach(() => {
// Reset the global mock's call history before each test.
vi.clearAllMocks();
mockDb.query.mockReset();
// Instantiate the repository with the mock pool for each test
gamificationRepo = new GamificationRepository(mockDb);

View File

@@ -30,6 +30,7 @@ describe('Notification DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Instantiate the repository with the mock pool for each test
notificationRepo = new NotificationRepository(mockPoolInstance as unknown as Pool);

View File

@@ -35,6 +35,7 @@ describe('Personalization DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockQuery.mockReset();
// Reset the withTransaction mock before each test
vi.mocked(withTransaction).mockImplementation(async (callback) => {
const mockClient = { query: vi.fn() };

View File

@@ -27,6 +27,7 @@ import { logger as mockLogger } from '../logger.server';
describe('Price DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Make getPool return our mock instance for each test
vi.mocked(getPool).mockReturnValue(mockPoolInstance as any);
});

View File

@@ -34,6 +34,7 @@ describe('Reaction DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockDb.query.mockReset();
reactionRepo = new ReactionRepository(mockDb);
});

View File

@@ -28,6 +28,7 @@ describe('Recipe DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockQuery.mockReset();
// Instantiate the repository with the mock pool for each test
recipeRepo = new RecipeRepository(mockPoolInstance as unknown as Pool);
});

View File

@@ -36,6 +36,7 @@ describe('Shopping DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Instantiate the repository with the mock pool for each test
shoppingRepo = new ShoppingRepository(mockPoolInstance as unknown as Pool);
});

View File

@@ -62,6 +62,7 @@ describe('User DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
userRepo = new UserRepository(mockPoolInstance as unknown as PoolClient);
// Provide a default mock implementation for withTransaction for all tests.
vi.mocked(withTransaction).mockImplementation(

View File

@@ -4,12 +4,13 @@ import { withTransaction } from './db/connection.db';
import { createFlyerAndItems } from './db/flyer.db';
import { AdminRepository } from './db/admin.db';
import { GamificationRepository } from './db/gamification.db';
import { cacheService } from './cacheService.server';
import type { FlyerInsert, FlyerItemInsert, Flyer } from '../types';
export class FlyerPersistenceService {
/**
* Saves the flyer and its items to the database within a transaction.
* Also logs the activity.
* Also logs the activity and invalidates related cache entries.
*/
async saveFlyer(
flyerData: FlyerInsert,
@@ -17,7 +18,7 @@ export class FlyerPersistenceService {
userId: string | undefined,
logger: Logger,
): Promise<Flyer> {
return withTransaction(async (client) => {
const flyer = await withTransaction(async (client) => {
const { flyer, items } = await createFlyerAndItems(flyerData, itemsForDb, logger, client);
logger.info(
@@ -43,5 +44,12 @@ export class FlyerPersistenceService {
}
return flyer;
});
// Invalidate flyer list cache after successful creation (fire-and-forget)
cacheService.invalidateFlyers(logger).catch(() => {
// Error already logged in invalidateFlyers
});
return flyer;
}
}
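The reshuffle above is deliberate: `invalidateFlyers` now runs only after `withTransaction` resolves, so a rolled-back insert can never evict warm cache entries; and because the invalidation is fire-and-forget, a Redis hiccup cannot fail an otherwise successful save.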

View File

@@ -2,6 +2,7 @@
import { describe, it, expect, afterAll } from 'vitest';
import crypto from 'crypto';
import * as apiClient from '../../services/apiClient';
import { getPool } from '../../services/db/connection.db';
import path from 'path';
import fs from 'fs';
import { cleanupDb } from '../utils/cleanup';
@@ -19,12 +20,14 @@ describe('E2E Flyer Upload and Processing Workflow', () => {
let authToken: string;
let userId: string | null = null;
let flyerId: number | null = null;
let storeId: number | null = null;
afterAll(async () => {
// Use the centralized cleanup utility for robustness.
await cleanupDb({
userIds: [userId],
flyerIds: [flyerId],
storeIds: [storeId],
});
});
@@ -98,5 +101,13 @@ describe('E2E Flyer Upload and Processing Workflow', () => {
expect(jobStatus.state).toBe('completed');
flyerId = jobStatus.returnValue?.flyerId;
expect(flyerId).toBeTypeOf('number');
// Fetch the store_id associated with the created flyer for robust cleanup
if (flyerId) {
const flyerRes = await getPool().query('SELECT store_id FROM public.flyers WHERE flyer_id = $1', [flyerId]);
if (flyerRes.rows.length > 0) {
storeId = flyerRes.rows[0].store_id;
}
}
}, 240000); // Extended timeout for AI processing
});
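Both this spec and the queue tests below poll job status through a shared `poll` utility. Its implementation is not part of this diff; a minimal sketch of the contract the call sites assume (a hypothetical reconstruction of `src/tests/utils/poll.ts`):

```typescript
export async function poll<T>(
  fn: () => Promise<T>,
  isDone: (value: T) => boolean,
  opts: { timeout: number; interval: number; description?: string },
): Promise<T> {
  const deadline = Date.now() + opts.timeout;
  for (;;) {
    const value = await fn();
    if (isDone(value)) return value;
    if (Date.now() >= deadline) {
      throw new Error(`poll timed out waiting for ${opts.description ?? 'condition'}`);
    }
    await new Promise((resolve) => setTimeout(resolve, opts.interval));
  }
}
```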

View File

@@ -18,6 +18,8 @@ describe('Admin API Routes Integration Tests', () => {
let regularUserToken: string;
const createdUserIds: string[] = [];
const createdStoreIds: number[] = [];
const createdCorrectionIds: number[] = [];
const createdFlyerIds: number[] = [];
beforeAll(async () => {
vi.stubEnv('FRONTEND_URL', 'https://example.com');
@@ -47,6 +49,8 @@ describe('Admin API Routes Integration Tests', () => {
await cleanupDb({
userIds: createdUserIds,
storeIds: createdStoreIds,
suggestedCorrectionIds: createdCorrectionIds,
flyerIds: createdFlyerIds,
});
});
@@ -174,6 +178,7 @@ describe('Admin API Routes Integration Tests', () => {
[testStoreId, `checksum-${Date.now()}-${Math.random()}`.padEnd(64, '0')],
);
const flyerId = flyerRes.rows[0].flyer_id;
createdFlyerIds.push(flyerId);
const flyerItemRes = await getPool().query(
`INSERT INTO public.flyer_items (flyer_id, item, price_display, price_in_cents, quantity)
@@ -188,6 +193,7 @@ describe('Admin API Routes Integration Tests', () => {
[testFlyerItemId, adminUser.user.user_id],
);
testCorrectionId = correctionRes.rows[0].suggested_correction_id;
createdCorrectionIds.push(testCorrectionId);
});
it('should allow an admin to approve a correction', async () => {

View File

@@ -4,10 +4,9 @@ import supertest from 'supertest';
import fs from 'node:fs/promises';
import path from 'path';
import * as db from '../../services/db/index.db';
import { getPool } from '../../services/db/connection.db';
import { generateFileChecksum } from '../../utils/checksum';
import { logger } from '../../services/logger.server';
import type { UserProfile, ExtractedFlyerItem } from '../../types';
import type { UserProfile } from '../../types';
import { createAndLoginUser } from '../utils/testHelpers';
import { cleanupDb } from '../utils/cleanup';
import { poll } from '../utils/poll';
@@ -15,11 +14,12 @@ import { cleanupFiles } from '../utils/cleanupFiles';
import piexif from 'piexifjs';
import exifParser from 'exif-parser';
import sharp from 'sharp';
import * as imageProcessor from '../../utils/imageProcessor';
// Mock the image processor to ensure safe filenames for DB constraints
vi.mock('../../utils/imageProcessor', async () => {
const actual = await vi.importActual<typeof import('../../utils/imageProcessor')>('../../utils/imageProcessor');
const actual = await vi.importActual<typeof import('../../utils/imageProcessor')>(
'../../utils/imageProcessor',
);
return {
...actual,
generateFlyerIcon: vi.fn().mockResolvedValue('mock-icon-safe.webp'),
@@ -28,34 +28,61 @@ vi.mock('../../utils/imageProcessor', async () => {
// FIX: Mock storageService to return valid URLs (for DB) and write files to disk (for test verification)
vi.mock('../../services/storage/storageService', () => {
const fs = require('node:fs/promises');
const path = require('path');
// eslint-disable-next-line @typescript-eslint/no-require-imports
const fsModule = require('node:fs/promises');
// eslint-disable-next-line @typescript-eslint/no-require-imports
const pathModule = require('path');
// Match the directory used in the test helpers
const uploadDir = path.join(process.cwd(), 'flyer-images');
const uploadDir = pathModule.join(process.cwd(), 'flyer-images');
return {
storageService: {
upload: vi.fn().mockImplementation(async (fileData, fileName) => {
const name = fileName || (fileData && fileData.name) || (typeof fileData === 'string' ? path.basename(fileData) : `upload-${Date.now()}.jpg`);
await fs.mkdir(uploadDir, { recursive: true });
const destPath = path.join(uploadDir, name);
upload: vi
.fn()
.mockImplementation(
async (
fileData: Buffer | string | { name?: string; path?: string },
fileName?: string,
) => {
const name =
fileName ||
(fileData && typeof fileData === 'object' && 'name' in fileData && fileData.name) ||
(typeof fileData === 'string'
? pathModule.basename(fileData)
: `upload-${Date.now()}.jpg`);
let content = Buffer.from('');
if (Buffer.isBuffer(fileData)) {
content = fileData as any;
} else if (typeof fileData === 'string') {
try { content = await fs.readFile(fileData); } catch (e) {}
} else if (fileData && fileData.path) {
try { content = await fs.readFile(fileData.path); } catch (e) {}
}
await fs.writeFile(destPath, content);
await fsModule.mkdir(uploadDir, { recursive: true });
const destPath = pathModule.join(uploadDir, name);
// Return a valid URL to satisfy the 'url_check' DB constraint
return `https://example.com/uploads/${name}`;
}),
let content: Buffer = Buffer.from('');
if (Buffer.isBuffer(fileData)) {
content = Buffer.from(fileData);
} else if (typeof fileData === 'string') {
try {
content = await fsModule.readFile(fileData);
} catch {
/* ignore */
}
} else if (
fileData &&
typeof fileData === 'object' &&
'path' in fileData &&
fileData.path
) {
try {
content = await fsModule.readFile(fileData.path);
} catch {
/* ignore */
}
}
await fsModule.writeFile(destPath, content);
// Return a valid URL to satisfy the 'url_check' DB constraint
return `https://example.com/uploads/${name}`;
},
),
delete: vi.fn().mockResolvedValue(undefined),
}
},
};
});
@@ -63,11 +90,12 @@ vi.mock('../../services/storage/storageService', () => {
* @vitest-environment node
*/
// CRITICAL: This mock function must be declared with vi.hoisted() to ensure it's available
// CRITICAL: These mock functions must be declared with vi.hoisted() to ensure they're available
// at the module level BEFORE any imports are resolved.
const { mockExtractCoreData } = vi.hoisted(() => {
const { mockExtractCoreData, mockWithTransaction } = vi.hoisted(() => {
return {
mockExtractCoreData: vi.fn(),
mockWithTransaction: vi.fn(),
};
});
@@ -97,11 +125,15 @@ vi.mock('../../services/aiService.server', async (importOriginal) => {
// Mock the connection DB service to intercept withTransaction.
// This is crucial because FlyerPersistenceService imports directly from connection.db,
// so mocking index.db is insufficient.
// CRITICAL: We use the hoisted mockWithTransaction function so tests can manipulate the same
// function instance that workers are using.
vi.mock('../../services/db/connection.db', async (importOriginal) => {
const actual = await importOriginal<typeof import('../../services/db/connection.db')>();
// Initialize the hoisted mock to use the real implementation by default
mockWithTransaction.mockImplementation(actual.withTransaction);
return {
...actual,
withTransaction: vi.fn().mockImplementation(actual.withTransaction),
withTransaction: mockWithTransaction,
};
});
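The `vi.hoisted()` dance is required because Vitest hoists `vi.mock` factories above all imports; any binding a factory closes over must therefore be created by `vi.hoisted()`, or the factory would run against an uninitialized variable. Sharing the hoisted `mockWithTransaction` also guarantees that the test file and the worker code manipulate the same mock instance.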
@@ -110,6 +142,7 @@ describe('Flyer Processing Background Job Integration Test', () => {
const createdUserIds: string[] = [];
const createdFlyerIds: number[] = [];
const createdFilePaths: string[] = [];
const createdStoreIds: number[] = [];
let workersModule: typeof import('../../services/workers.server');
const originalFrontendUrl = process.env.FRONTEND_URL;
@@ -159,38 +192,50 @@ describe('Flyer Processing Background Job Integration Test', () => {
// 2. Restore DB Service Mock to real implementation
// This ensures that unless a test specifically mocks a failure, the DB logic works as expected.
const { withTransaction } = await import('../../services/db/connection.db');
// We need to get the actual implementation again to restore it
const actualDb = await vi.importActual<typeof import('../../services/db/connection.db')>('../../services/db/connection.db');
vi.mocked(withTransaction).mockReset();
vi.mocked(withTransaction).mockImplementation(actualDb.withTransaction);
// CRITICAL: Use the hoisted mockWithTransaction directly so we're manipulating the same instance
// that the workers are using.
const actualDb = await vi.importActual<typeof import('../../services/db/connection.db')>(
'../../services/db/connection.db',
);
mockWithTransaction.mockReset();
mockWithTransaction.mockImplementation(actualDb.withTransaction);
});
afterAll(async () => {
// Restore original value
// Restore original value
process.env.FRONTEND_URL = originalFrontendUrl;
vi.unstubAllEnvs(); // Clean up env stubs
vi.restoreAllMocks(); // Restore the AI spy
// Use the centralized cleanup utility.
await cleanupDb({
userIds: createdUserIds,
flyerIds: createdFlyerIds,
});
// Use the centralized file cleanup utility.
await cleanupFiles(createdFilePaths);
// NEW: Clean up workers and Redis connection to prevent tests from hanging.
// CRITICAL: Close workers FIRST before any cleanup to ensure no pending jobs
// are trying to access files or databases during cleanup.
// This prevents the Node.js async hooks crash that occurs when fs operations
// are rejected during process shutdown.
if (workersModule) {
console.error('[TEST TEARDOWN] Closing in-process workers...');
await workersModule.closeWorkers();
// Give workers a moment to fully release resources
await new Promise((resolve) => setTimeout(resolve, 100));
}
// Close the shared redis connection used by the workers/queues
const { connection } = await import('../../services/redis.server');
await connection.quit();
// Use the centralized cleanup utility.
await cleanupDb({
userIds: createdUserIds,
flyerIds: createdFlyerIds,
storeIds: createdStoreIds,
});
// Use the centralized file cleanup utility.
await cleanupFiles(createdFilePaths);
// Final delay to let any remaining async operations settle
// This helps prevent the Node.js async context assertion failure
await new Promise((resolve) => setTimeout(resolve, 50));
});
/**
@@ -198,9 +243,11 @@ describe('Flyer Processing Background Job Integration Test', () => {
* It uploads a file, polls for completion, and verifies the result in the database.
*/
const runBackgroundProcessingTest = async (user?: UserProfile, token?: string) => {
console.error(`[TEST START] runBackgroundProcessingTest. User: ${user?.user.email ?? 'ANONYMOUS'}`);
console.error(
`[TEST START] runBackgroundProcessingTest. User: ${user?.user.email ?? 'ANONYMOUS'}`,
);
// Arrange: Load a mock flyer PDF.
console.error('[TEST] about to read test-flyer-image.jpg')
console.error('[TEST] about to read test-flyer-image.jpg');
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
@@ -208,15 +255,17 @@ describe('Flyer Processing Background Job Integration Test', () => {
// This prevents a 409 Conflict error when the second test runs.
const uniqueContent = Buffer.concat([imageBuffer, Buffer.from(Date.now().toString())]);
const uniqueFileName = `test-flyer-image-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(uniqueContent)], uniqueFileName, { type: 'image/jpeg' });
const mockImageFile = new File([new Uint8Array(uniqueContent)], uniqueFileName, {
type: 'image/jpeg',
});
const checksum = await generateFileChecksum(mockImageFile);
console.error('[TEST] mockImageFile created with uniqueFileName: ', uniqueFileName)
console.error('[TEST] mockImageFile created with uniqueFileName: ', uniqueFileName);
console.error('[TEST DATA] Generated checksum for test:', checksum);
// Track created files for cleanup
const uploadDir = path.resolve(__dirname, '../../../flyer-images');
createdFilePaths.push(path.join(uploadDir, uniqueFileName));
console.error('[TEST] createdFilesPaths after 1st push: ', createdFilePaths)
console.error('[TEST] createdFilesPaths after 1st push: ', createdFilePaths);
// The icon name is derived from the original filename.
const iconFileName = `icon-${path.parse(uniqueFileName).name}.webp`;
createdFilePaths.push(path.join(uploadDir, 'icons', iconFileName));
@@ -274,6 +323,9 @@ describe('Flyer Processing Background Job Integration Test', () => {
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
expect(savedFlyer?.flyer_id).toBe(flyerId);
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
expect(savedFlyer?.file_name).toBe(uniqueFileName);
// Also add the final processed image path to the cleanup list.
// This is important because JPEGs are re-processed to strip EXIF data, creating a new file.
@@ -293,9 +345,7 @@ describe('Flyer Processing Background Job Integration Test', () => {
}
};
it('should successfully process a flyer for an AUTHENTICATED user via the background queue', async ({
onTestFinished,
}) => {
it('should successfully process a flyer for an AUTHENTICATED user via the background queue', async () => {
// Arrange: Create a new user specifically for this test.
const email = `auth-flyer-user-${Date.now()}@example.com`;
const { user: authUser, token } = await createAndLoginUser({
@@ -314,345 +364,336 @@ describe('Flyer Processing Background Job Integration Test', () => {
await runBackgroundProcessingTest();
}, 240000); // Increase timeout to 240 seconds for this long-running test
it(
'should strip EXIF data from uploaded JPEG images during processing',
async () => {
// Arrange: Create a user for this test
const { user: authUser, token } = await createAndLoginUser({
email: `exif-user-${Date.now()}@example.com`,
fullName: 'EXIF Tester',
request,
});
createdUserIds.push(authUser.user.user_id);
it('should strip EXIF data from uploaded JPEG images during processing', async () => {
// Arrange: Create a user for this test
const { user: authUser, token } = await createAndLoginUser({
email: `exif-user-${Date.now()}@example.com`,
fullName: 'EXIF Tester',
request,
});
createdUserIds.push(authUser.user.user_id);
// 1. Create an image buffer with EXIF data
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const jpegDataAsString = imageBuffer.toString('binary');
// 1. Create an image buffer with EXIF data
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const jpegDataAsString = imageBuffer.toString('binary');
const exifObj = {
'0th': { [piexif.ImageIFD.Software]: 'Gemini Code Assist Test' },
Exif: { [piexif.ExifIFD.DateTimeOriginal]: '2025:12:25 10:00:00' },
};
const exifBytes = piexif.dump(exifObj);
const jpegWithExif = piexif.insert(exifBytes, jpegDataAsString);
const imageWithExifBuffer = Buffer.from(jpegWithExif, 'binary');
const exifObj = {
'0th': { [piexif.ImageIFD.Software]: 'Gemini Code Assist Test' },
Exif: { [piexif.ExifIFD.DateTimeOriginal]: '2025:12:25 10:00:00' },
};
const exifBytes = piexif.dump(exifObj);
const jpegWithExif = piexif.insert(exifBytes, jpegDataAsString);
const imageWithExifBuffer = Buffer.from(jpegWithExif, 'binary');
const uniqueFileName = `test-flyer-with-exif-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(imageWithExifBuffer)], uniqueFileName, { type: 'image/jpeg' });
const checksum = await generateFileChecksum(mockImageFile);
const uniqueFileName = `test-flyer-with-exif-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(imageWithExifBuffer)], uniqueFileName, {
type: 'image/jpeg',
});
const checksum = await generateFileChecksum(mockImageFile);
// Track original and derived files for cleanup
const uploadDir = path.resolve(__dirname, '../../../flyer-images');
createdFilePaths.push(path.join(uploadDir, uniqueFileName));
const iconFileName = `icon-${path.parse(uniqueFileName).name}.webp`;
createdFilePaths.push(path.join(uploadDir, 'icons', iconFileName));
// 2. Act: Upload the file and wait for processing
const uploadResponse = await request
.post('/api/ai/upload-and-process')
.set('Authorization', `Bearer ${token}`)
.field('baseUrl', 'https://example.com')
.field('checksum', checksum)
.attach('flyerFile', imageWithExifBuffer, uniqueFileName);
const { jobId } = uploadResponse.body;
expect(jobId).toBeTypeOf('string');
// Poll for job completion using the new utility.
const jobStatus = await poll(
async () => {
const statusResponse = await request
.get(`/api/ai/jobs/${jobId}/status`)
.set('Authorization', `Bearer ${token}`);
return statusResponse.body;
},
(status) => status.state === 'completed' || status.state === 'failed',
{ timeout: 180000, interval: 3000, description: 'EXIF stripping job' },
);
// 3. Assert
if (jobStatus?.state === 'failed') {
console.error('[DEBUG] EXIF test job failed:', jobStatus.failedReason);
console.error('[DEBUG] Job stack trace:', jobStatus.stacktrace);
console.error('[DEBUG] Job return value:', JSON.stringify(jobStatus.returnValue, null, 2));
}
expect(jobStatus?.state).toBe('completed');
const flyerId = jobStatus?.returnValue?.flyerId;
expect(flyerId).toBeTypeOf('number');
createdFlyerIds.push(flyerId);
// 4. Verify EXIF data is stripped from the saved file
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));
createdFilePaths.push(savedImagePath); // Add final path for cleanup
const savedImageBuffer = await fs.readFile(savedImagePath);
const parser = exifParser.create(savedImageBuffer);
const exifResult = parser.parse();
console.error('[TEST] savedImagePath during EXIF data stripping: ', savedImagePath);
console.error('[TEST] exifResult.tags: ', exifResult.tags);
// The `tags` object will be empty if no EXIF data is found.
expect(exifResult.tags).toEqual({});
expect(exifResult.tags.Software).toBeUndefined();
},
240000,
);
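// For reference, the `poll` utility from '../utils/poll' used throughout these
// tests is assumed to behave roughly like this minimal sketch (the name
// `pollSketch`, the return-undefined-on-timeout behaviour, and the use of
// `description` only for logging are assumptions, not the real implementation):
const pollSketch = async <T>(
fn: () => Promise<T>,
isDone: (value: T) => boolean,
opts: { timeout: number; interval: number; description?: string },
): Promise<T | undefined> => {
const deadline = Date.now() + opts.timeout;
while (Date.now() < deadline) {
const value = await fn(); // fetch the latest job status
if (isDone(value)) return value; // the predicate decides when polling stops
await new Promise((resolve) => setTimeout(resolve, opts.interval));
}
return undefined; // timed out; callers treat a missing status as a timeout
};
void pollSketch; // documentation only; the tests import the real `poll`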
it(
'should strip metadata from uploaded PNG images during processing',
async () => {
// Arrange: Create a user for this test
const { user: authUser, token } = await createAndLoginUser({
email: `png-meta-user-${Date.now()}@example.com`,
fullName: 'PNG Metadata Tester',
request,
});
createdUserIds.push(authUser.user.user_id);
// 1. Create a PNG image buffer with custom metadata using sharp
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageWithMetadataBuffer = await sharp(imagePath)
.png() // Convert to PNG
.withMetadata({
exif: {
IFD0: {
Copyright: 'Gemini Code Assist PNG Test',
},
},
})
.toBuffer();
const uniqueFileName = `test-flyer-with-metadata-${Date.now()}.png`;
const mockImageFile = new File([new Uint8Array(imageWithMetadataBuffer)], uniqueFileName, {
type: 'image/png',
});
const checksum = await generateFileChecksum(mockImageFile);
// Track files for cleanup
const uploadDir = path.resolve(__dirname, '../../../flyer-images');
createdFilePaths.push(path.join(uploadDir, uniqueFileName));
const iconFileName = `icon-${path.parse(uniqueFileName).name}.webp`;
createdFilePaths.push(path.join(uploadDir, 'icons', iconFileName));
// 2. Act: Upload the file and wait for processing
const uploadResponse = await request
.post('/api/ai/upload-and-process')
.set('Authorization', `Bearer ${token}`)
.field('baseUrl', 'https://example.com')
.field('checksum', checksum)
.attach('flyerFile', imageWithMetadataBuffer, uniqueFileName);
const { jobId } = uploadResponse.body;
expect(jobId).toBeTypeOf('string');
// Poll for job completion using the new utility.
const jobStatus = await poll(
async () => {
const statusResponse = await request
.get(`/api/ai/jobs/${jobId}/status`)
.set('Authorization', `Bearer ${token}`);
return statusResponse.body;
},
(status) => status.state === 'completed' || status.state === 'failed',
{ timeout: 180000, interval: 3000, description: 'PNG metadata stripping job' },
);
// 3. Assert job completion
if (jobStatus?.state === 'failed') {
console.error('[DEBUG] PNG metadata test job failed:', jobStatus.failedReason);
console.error('[DEBUG] Job stack trace:', jobStatus.stacktrace);
console.error('[DEBUG] Job return value:', JSON.stringify(jobStatus.returnValue, null, 2));
}
expect(jobStatus?.state).toBe('completed');
const flyerId = jobStatus?.returnValue?.flyerId;
expect(flyerId).toBeTypeOf('number');
createdFlyerIds.push(flyerId);
// 4. Verify metadata is stripped from the saved file
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));
createdFilePaths.push(savedImagePath); // Add final path for cleanup
console.error('[TEST] savedImagePath during PNG metadata stripping: ', savedImagePath);
const savedImageMetadata = await sharp(savedImagePath).metadata();
// The test should fail here initially because PNGs are not processed.
// The `exif` property should be undefined after the fix.
expect(savedImageMetadata.exif).toBeUndefined();
},
240000,
);
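// For context: the worker-side stripping these two tests exercise is assumed to
// amount to a sharp re-encode that simply omits .withMetadata(), as in this
// hypothetical helper (illustrative only; the real logic lives in the
// processing worker):
const stripMetadataSketch = async (inputPath: string, outputPath: string) => {
// sharp drops EXIF/XMP/ICC by default unless .withMetadata() is chained.
await sharp(inputPath).toFile(outputPath);
};
void stripMetadataSketch; // documentation only; not invoked by the tests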
it(
'should handle a failure from the AI service gracefully',
async () => {
// Arrange: Mock the AI service to throw an error for this specific test.
const aiError = new Error('AI model failed to extract data.');
// Update the spy implementation to reject
mockExtractCoreData.mockRejectedValue(aiError);
// Arrange: Prepare a unique flyer file for upload.
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const uniqueContent = Buffer.concat([imageBuffer, Buffer.from(`ai-error-test-${Date.now()}`)]);
const uniqueFileName = `ai-error-test-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(uniqueContent)], uniqueFileName, {
type: 'image/jpeg',
});
const checksum = await generateFileChecksum(mockImageFile);
// Track created files for cleanup
const uploadDir = path.resolve(__dirname, '../../../flyer-images');
createdFilePaths.push(path.join(uploadDir, uniqueFileName));
// Act 1: Upload the file to start the background job.
const uploadResponse = await request
.post('/api/ai/upload-and-process')
.field('baseUrl', 'https://example.com')
.field('checksum', checksum)
.attach('flyerFile', uniqueContent, uniqueFileName);
const { jobId } = uploadResponse.body;
expect(jobId).toBeTypeOf('string');
// Act 2: Poll for job completion using the new utility.
const jobStatus = await poll(
async () => {
const statusResponse = await request.get(`/api/ai/jobs/${jobId}/status`);
return statusResponse.body;
},
(status) => status.state === 'completed' || status.state === 'failed',
{ timeout: 180000, interval: 3000, description: 'AI failure test job' },
);
// Assert 1: Check that the job failed.
if (jobStatus?.state === 'failed') {
console.error('[TEST DEBUG] AI Failure Test - Job Failed Reason:', jobStatus.failedReason);
console.error('[TEST DEBUG] AI Failure Test - Job Stack:', jobStatus.stacktrace);
}
expect(jobStatus?.state).toBe('failed');
expect(jobStatus?.failedReason).toContain('AI model failed to extract data.');
// Assert 2: Verify the flyer was NOT saved in the database.
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeUndefined();
},
240000,
);
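// For reference, `generateFileChecksum` from '../../utils/checksum' is assumed
// to be a SHA-256 hex digest over the file bytes, roughly like this sketch
// (the algorithm and hex encoding are assumptions about that utility):
const checksumSketch = async (file: File): Promise<string> => {
const { webcrypto } = await import('node:crypto');
const digest = await webcrypto.subtle.digest('SHA-256', await file.arrayBuffer());
return Buffer.from(digest).toString('hex'); // hex-encode the raw digest
};
void checksumSketch; // documentation only; the tests use the real utility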
it(
'should handle a database failure during flyer creation',
async () => {
// Arrange: Mock the database transaction function to throw an error.
// This is a more realistic simulation of a DB failure than mocking the inner createFlyerAndItems function.
// CRITICAL: Use the hoisted mockWithTransaction directly - this is the same instance the workers use.
const dbError = new Error('DB transaction failed');
mockWithTransaction.mockRejectedValue(dbError);
// Arrange: Prepare a unique flyer file for upload.
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const uniqueContent = Buffer.concat([imageBuffer, Buffer.from(`db-error-test-${Date.now()}`)]);
const uniqueFileName = `db-error-test-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(uniqueContent)], uniqueFileName, {
type: 'image/jpeg',
});
const checksum = await generateFileChecksum(mockImageFile);
// Track created files for cleanup
const uploadDir = path.resolve(__dirname, '../../../flyer-images');
createdFilePaths.push(path.join(uploadDir, uniqueFileName));
// Act 1: Upload the file to start the background job.
const uploadResponse = await request
.post('/api/ai/upload-and-process')
.field('baseUrl', 'https://example.com')
.field('checksum', checksum)
.attach('flyerFile', uniqueContent, uniqueFileName);
const { jobId } = uploadResponse.body;
expect(jobId).toBeTypeOf('string');
// Act 2: Poll for job completion using the new utility.
const jobStatus = await poll(
async () => {
const statusResponse = await request.get(`/api/ai/jobs/${jobId}/status`);
return statusResponse.body;
},
(status) => status.state === 'completed' || status.state === 'failed',
{ timeout: 180000, interval: 3000, description: 'DB failure test job' },
);
// Assert 1: Check that the job failed.
expect(jobStatus?.state).toBe('failed');
expect(jobStatus?.failedReason).toContain('DB transaction failed');
// Assert 2: Verify the flyer was NOT saved in the database.
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeUndefined();
},
240000,
);
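// For context, the hoisted mock referenced above is assumed to follow the usual
// vitest pattern, sketched here inside a comment because vi.mock may only be
// called at module top level (the module path and factory shape are assumptions,
// not this file's actual setup):
//
//   const { mockWithTransaction } = vi.hoisted(() => ({
//     mockWithTransaction: vi.fn(),
//   }));
//   vi.mock('../../services/db/connection.db', async (importOriginal) => ({
//     ...(await importOriginal<object>()),
//     withTransaction: mockWithTransaction,
//   }));
//
// Hoisting matters: vi.mock factories run before imports are evaluated, so they
// can only safely reference values created via vi.hoisted.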
it(
'should NOT clean up temporary files when a job fails, to allow for manual inspection',
async () => {
// Arrange: Mock the AI service to throw an error, causing the job to fail.
const aiError = new Error('Simulated AI failure for cleanup test.');
mockExtractCoreData.mockRejectedValue(aiError);
// Arrange: Prepare a unique flyer file for upload.
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const uniqueContent = Buffer.concat([imageBuffer, Buffer.from(`cleanup-test-${Date.now()}`)]);
const uniqueFileName = `cleanup-test-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(uniqueContent)], uniqueFileName, {
type: 'image/jpeg',
});
const checksum = await generateFileChecksum(mockImageFile);
// Track the path of the file that will be created in the uploads directory.
const uploadDir = path.resolve(__dirname, '../../../flyer-images');
const tempFilePath = path.join(uploadDir, uniqueFileName);
createdFilePaths.push(tempFilePath);
// Act 1: Upload the file to start the background job.
const uploadResponse = await request
.post('/api/ai/upload-and-process')
.field('baseUrl', 'https://example.com')
.field('checksum', checksum)
.attach('flyerFile', uniqueContent, uniqueFileName);
const { jobId } = uploadResponse.body;
expect(jobId).toBeTypeOf('string');
// Act 2: Poll for job completion using the new utility.
const jobStatus = await poll(
async () => {
const statusResponse = await request.get(`/api/ai/jobs/${jobId}/status`);
return statusResponse.body;
},
(status) => status.state === 'failed', // We expect this one to fail
{ timeout: 180000, interval: 3000, description: 'file cleanup failure test job' },
);
// Assert 1: Check that the job actually failed.
expect(jobStatus?.state).toBe('failed');
expect(jobStatus?.failedReason).toContain('Simulated AI failure for cleanup test.');
// Assert 2: Verify the temporary file was NOT deleted.
// fs.access resolves if the file exists and rejects otherwise, so the
// assertion must chain `.resolves` to actually await and check it.
await expect(
fs.access(tempFilePath),
'Expected temporary file to exist after job failure, but it was deleted.',
).resolves.toBeUndefined();
},
240000,
);
});

View File

@@ -1,23 +1,17 @@
// src/tests/integration/gamification.integration.test.ts
import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import { describe, it, expect, beforeAll, afterAll, vi, beforeEach } from 'vitest';
import supertest from 'supertest';
import path from 'path';
import fs from 'node:fs/promises';
import { getPool } from '../../services/db/connection.db';
import { createAndLoginUser, getTestBaseUrl } from '../utils/testHelpers';
import { createAndLoginUser } from '../utils/testHelpers';
import { generateFileChecksum } from '../../utils/checksum';
import * as db from '../../services/db/index.db';
import { cleanupDb } from '../utils/cleanup';
import { logger } from '../../services/logger.server';
import * as imageProcessor from '../../utils/imageProcessor';
import { poll } from '../utils/poll';
import type {
UserProfile,
UserAchievement,
LeaderboardUser,
Achievement,
ExtractedFlyerItem,
} from '../../types';
import type { UserProfile, LeaderboardUser } from '../../types';
import type { Flyer } from '../../types';
import { cleanupFiles } from '../utils/cleanupFiles';
import { aiService } from '../../services/aiService.server';
@@ -70,8 +64,13 @@ describe('Gamification Flow Integration Test', () => {
fullName: 'Gamification Tester',
request,
}));
});
// Setup default mock response for the AI service's extractCoreDataFromFlyerImage method.
beforeEach(() => {
vi.clearAllMocks();
// Reset AI Service Mock to default success state
mockExtractCoreData.mockReset();
mockExtractCoreData.mockResolvedValue({
store_name: 'Gamification Test Store',
valid_from: null,
@@ -87,12 +86,29 @@ describe('Gamification Flow Integration Test', () => {
},
],
});
// Reset Image Processor Mock
vi.mocked(imageProcessor.generateFlyerIcon).mockResolvedValue('mock-icon.webp');
});
afterAll(async () => {
vi.unstubAllEnvs();
vi.restoreAllMocks(); // Restore the AI spy
// CRITICAL: Close workers FIRST before any cleanup to ensure no pending jobs
// are trying to access files or databases during cleanup.
// This prevents the Node.js async hooks crash that occurs when fs operations
// are rejected during process shutdown.
if (workersModule) {
await workersModule.closeWorkers();
// Give workers a moment to fully release resources
await new Promise((resolve) => setTimeout(resolve, 100));
}
// Close the shared redis connection used by the workers/queues
const { connection } = await import('../../services/redis.server');
await connection.quit();
await cleanupDb({
userIds: testUser ? [testUser.user.user_id] : [],
flyerIds: createdFlyerIds,
@@ -100,143 +116,161 @@ describe('Gamification Flow Integration Test', () => {
});
await cleanupFiles(createdFilePaths);
// Final delay to let any remaining async operations settle
await new Promise((resolve) => setTimeout(resolve, 50));
});
it(
'should award the "First Upload" achievement after a user successfully uploads and processes their first flyer',
async () => {
// --- Arrange: Prepare a unique flyer file for upload ---
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const uniqueContent = Buffer.concat([imageBuffer, Buffer.from(Date.now().toString())]);
const uniqueFileName = `gamification-test-flyer-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(uniqueContent)], uniqueFileName, {
type: 'image/jpeg',
});
const checksum = await generateFileChecksum(mockImageFile);
// Track created files for cleanup
const uploadDir = path.resolve(__dirname, '../../../flyer-images');
createdFilePaths.push(path.join(uploadDir, uniqueFileName));
const iconFileName = `icon-${path.parse(uniqueFileName).name}.webp`;
createdFilePaths.push(path.join(uploadDir, 'icons', iconFileName));
// --- Act 1: Upload the flyer to trigger the background job ---
const testBaseUrl = 'https://example.com';
console.error(
'--------------------------------------------------------------------------------',
);
console.error('[TEST DEBUG] STARTING UPLOAD STEP');
console.error(`[TEST DEBUG] Env FRONTEND_URL: "${process.env.FRONTEND_URL}"`);
console.error(`[TEST DEBUG] Sending baseUrl field: "${testBaseUrl}"`);
console.error(
'--------------------------------------------------------------------------------',
);
const uploadResponse = await request
.post('/api/ai/upload-and-process')
.set('Authorization', `Bearer ${authToken}`)
.field('checksum', checksum)
.field('baseUrl', testBaseUrl)
.attach('flyerFile', uniqueContent, uniqueFileName);
console.error(
'--------------------------------------------------------------------------------',
);
console.error(`[TEST DEBUG] Upload Response Status: ${uploadResponse.status}`);
console.error(
`[TEST DEBUG] Upload Response Body: ${JSON.stringify(uploadResponse.body, null, 2)}`,
);
console.error(
'--------------------------------------------------------------------------------',
);
const { jobId } = uploadResponse.body;
expect(jobId).toBeTypeOf('string');
console.error(`[TEST DEBUG] Job ID received: ${jobId}`);
// --- Act 2: Poll for job completion using the new utility ---
const jobStatus = await poll(
async () => {
const statusResponse = await request
.get(`/api/ai/jobs/${jobId}/status`)
.set('Authorization', `Bearer ${authToken}`);
console.error(`[TEST DEBUG] Polling status for ${jobId}: ${statusResponse.body?.state}`);
return statusResponse.body;
},
(status) => status.state === 'completed' || status.state === 'failed',
{ timeout: 180000, interval: 3000, description: 'gamification flyer processing' },
);
if (!jobStatus) {
console.error('[DEBUG] Gamification test job timed out: No job status received.');
throw new Error('Gamification test job timed out: No job status received.');
}
console.error(
'--------------------------------------------------------------------------------',
);
console.error('[TEST DEBUG] Final Job Status Object:', JSON.stringify(jobStatus, null, 2));
if (jobStatus.state === 'failed') {
console.error(`[TEST DEBUG] Job Failed Reason: ${jobStatus.failedReason}`);
// If there is a progress object with error details, log it
if (jobStatus.progress) {
console.error(
`[TEST DEBUG] Job Progress/Error Details:`,
JSON.stringify(jobStatus.progress, null, 2),
);
}
}
console.error(
'--------------------------------------------------------------------------------',
);
// --- Assert 1: Verify the job completed successfully ---
if (jobStatus?.state === 'failed') {
console.error('[DEBUG] Gamification test job failed:', jobStatus.failedReason);
console.error('[DEBUG] Job stack trace:', jobStatus.stacktrace);
console.error('[DEBUG] Job return value:', JSON.stringify(jobStatus.returnValue, null, 2));
}
expect(jobStatus?.state).toBe('completed');
const flyerId = jobStatus?.returnValue?.flyerId;
expect(flyerId).toBeTypeOf('number');
createdFlyerIds.push(flyerId); // Track for cleanup
// --- Assert 1.5: Verify the flyer was saved with the correct original filename ---
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
expect(savedFlyer?.file_name).toBe(uniqueFileName);
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
// Also add the final processed image path to the cleanup list.
// This is important because JPEGs are re-processed to strip EXIF data, creating a new file.
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));
createdFilePaths.push(savedImagePath);
// --- Act 3: Fetch the user's achievements (triggers endpoint, response not needed) ---
await request.get('/api/achievements/me').set('Authorization', `Bearer ${authToken}`);
// --- Assert 2: Verify the "First-Upload" achievement was awarded ---
// The 'user_registered' achievement is awarded on creation, so we expect at least two.
// Wait for the asynchronous achievement event to process
await vi.waitUntil(
async () => {
const achievements = await db.gamificationRepo.getUserAchievements(
testUser.user.user_id,
logger,
);
return achievements.length >= 2;
},
{ timeout: 5000, interval: 200 },
);
// Final assertion and retrieval
const userAchievements = await db.gamificationRepo.getUserAchievements(
testUser.user.user_id,
logger,
);
expect(userAchievements.length).toBeGreaterThanOrEqual(2);
const firstUploadAchievement = userAchievements.find((ach) => ach.name === 'First-Upload');
expect(firstUploadAchievement).toBeDefined();
expect(firstUploadAchievement?.points_value).toBeGreaterThan(0);
// --- Act 4: Fetch the leaderboard ---
const leaderboardResponse = await request.get('/api/achievements/leaderboard');
const leaderboard: LeaderboardUser[] = leaderboardResponse.body;
// --- Assert 3: Verify the user is on the leaderboard with points ---
const userOnLeaderboard = leaderboard.find((u) => u.user_id === testUser.user.user_id);
expect(userOnLeaderboard).toBeDefined();
// The user should have points from 'user_registered' and 'First-Upload'.
// We check that the points are greater than or equal to the points from the upload achievement.
expect(Number(userOnLeaderboard?.points)).toBeGreaterThanOrEqual(
firstUploadAchievement!.points_value,
);
},
240000, // Increase timeout to 240s to match other long-running processing tests
);
describe('Legacy Flyer Upload', () => {
it('should process a legacy upload and save fully qualified URLs to the database', async () => {
@@ -248,7 +282,9 @@ describe('Gamification Flow Integration Test', () => {
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const uniqueFileName = `legacy-upload-test-${Date.now()}.jpg`;
const mockImageFile = new File([new Uint8Array(imageBuffer)], uniqueFileName, { type: 'image/jpeg' });
const mockImageFile = new File([new Uint8Array(imageBuffer)], uniqueFileName, {
type: 'image/jpeg',
});
const checksum = await generateFileChecksum(mockImageFile);
// Track created files for cleanup.
@@ -304,4 +340,4 @@ describe('Gamification Flow Integration Test', () => {
expect(newFlyer.image_url).toContain(`${expectedBaseUrl}/flyer-images/`);
});
});
});
});

View File

@@ -88,27 +88,29 @@ describe('Price History API Integration Test (/api/price-history)', () => {
afterAll(async () => {
vi.unstubAllEnvs();
await cleanupDb({ userIds: createdUserIds });
const pool = getPool();
// The CASCADE on the tables should handle flyer_items.
// The delete on flyers cascades to flyer_items, which fires a trigger `recalculate_price_history_on_flyer_item_delete`.
// This trigger has a bug causing the test to fail. As a workaround for the test suite,
// we temporarily disable user-defined triggers on the flyer_items table during cleanup.
const flyerIds = [flyerId1, flyerId2, flyerId3].filter(Boolean);
try {
await pool.query('ALTER TABLE public.flyer_items DISABLE TRIGGER USER;');
if (flyerIds.length > 0) {
await pool.query('DELETE FROM public.flyers WHERE flyer_id = ANY($1::int[])', [flyerIds]);
}
if (storeId) await pool.query('DELETE FROM public.stores WHERE store_id = $1', [storeId]);
if (masterItemId)
await pool.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = $1', [
masterItemId,
]);
} finally {
// Ensure triggers are always re-enabled, even if an error occurs during deletion.
await pool.query('ALTER TABLE public.flyer_items ENABLE TRIGGER USER;');
}
await cleanupDb({
userIds: createdUserIds,
masterItemIds: [masterItemId],
storeIds: [storeId],
});
});
it('should return the correct price history for a given master item ID', async () => {

View File

@@ -26,6 +26,7 @@ describe('Public API Routes Integration Tests', () => {
let testRecipe: Recipe;
let testFlyer: Flyer;
let testStoreId: number;
const createdRecipeCommentIds: number[] = [];
beforeAll(async () => {
vi.stubEnv('FRONTEND_URL', 'https://example.com');
@@ -85,6 +86,7 @@ describe('Public API Routes Integration Tests', () => {
recipeIds: testRecipe ? [testRecipe.recipe_id] : [],
flyerIds: testFlyer ? [testFlyer.flyer_id] : [],
storeIds: testStoreId ? [testStoreId] : [],
recipeCommentIds: createdRecipeCommentIds,
});
});
@@ -186,10 +188,11 @@ describe('Public API Routes Integration Tests', () => {
it('GET /api/recipes/:recipeId/comments should return comments for a recipe', async () => {
// Add a comment to our test recipe first
await getPool().query(
`INSERT INTO public.recipe_comments (recipe_id, user_id, content) VALUES ($1, $2, 'Test comment')`,
const commentRes = await getPool().query(
`INSERT INTO public.recipe_comments (recipe_id, user_id, content) VALUES ($1, $2, 'Test comment') RETURNING recipe_comment_id`,
[testRecipe.recipe_id, testUser.user.user_id],
);
createdRecipeCommentIds.push(commentRes.rows[0].recipe_comment_id);
const response = await request.get(`/api/recipes/${testRecipe.recipe_id}/comments`);
const comments: RecipeComment[] = response.body;
expect(response.status).toBe(200);

View File

@@ -1,5 +1,5 @@
// src/tests/integration/recipe.integration.test.ts
import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import { describe, it, expect, beforeAll, afterAll, vi, afterEach } from 'vitest';
import supertest from 'supertest';
import { createAndLoginUser } from '../utils/testHelpers';
import { cleanupDb } from '../utils/cleanup';
@@ -49,6 +49,12 @@ describe('Recipe API Routes Integration Tests', () => {
createdRecipeIds.push(testRecipe.recipe_id);
});
afterEach(() => {
vi.clearAllMocks();
// Reset the mock to its default state for the next test
vi.mocked(aiService.generateRecipeSuggestion).mockResolvedValue('Default Mock Suggestion');
});
afterAll(async () => {
vi.unstubAllEnvs();
// Clean up all created resources

View File

@@ -19,6 +19,7 @@ describe('User API Routes Integration Tests', () => {
let testUser: UserProfile;
let authToken: string;
const createdUserIds: string[] = [];
const createdMasterItemIds: number[] = [];
// Before any tests run, create a new user and log them in.
// The token will be used for all subsequent API calls in this test suite.
@@ -38,7 +39,10 @@ describe('User API Routes Integration Tests', () => {
// This now cleans up ALL users created by this test suite to prevent pollution.
afterAll(async () => {
vi.unstubAllEnvs();
await cleanupDb({ userIds: createdUserIds });
await cleanupDb({
userIds: createdUserIds,
masterItemIds: createdMasterItemIds
});
// Safeguard to clean up any avatar files created during tests.
const uploadDir = path.resolve(__dirname, '../../../uploads/avatars');
@@ -244,6 +248,7 @@ describe('User API Routes Integration Tests', () => {
.send({ itemName: 'Integration Test Item', category: 'Other/Miscellaneous' });
const newItem = addResponse.body;
if (newItem?.master_grocery_item_id) createdMasterItemIds.push(newItem.master_grocery_item_id);
// Assert 1: Check that the item was created correctly.
expect(addResponse.status).toBe(201);
expect(newItem.name).toBe('Integration Test Item');

View File

@@ -14,22 +14,34 @@ let globalPool: ReturnType<typeof getPool> | null = null;
* This is critical because old jobs with outdated error messages can pollute test results.
*/
async function cleanAllQueues() {
// Use console.error for visibility in CI logs (stderr is often more reliable)
console.error(`[PID:${process.pid}] [QUEUE CLEANUP] Starting BullMQ queue cleanup...`);
try {
const { flyerQueue, cleanupQueue, emailQueue, analyticsQueue, weeklyAnalyticsQueue, tokenCleanupQueue } = await import('../../services/queues.server');
console.error(`[QUEUE CLEANUP] Successfully imported queue modules`);
const queues = [flyerQueue, cleanupQueue, emailQueue, analyticsQueue, weeklyAnalyticsQueue, tokenCleanupQueue];
for (const queue of queues) {
try {
// Log queue state before cleanup
const jobCounts = await queue.getJobCounts();
console.error(`[QUEUE CLEANUP] Queue "${queue.name}" before cleanup: ${JSON.stringify(jobCounts)}`);
// obliterate() removes ALL data associated with the queue from Redis
await queue.obliterate({ force: true });
console.error(` ✅ [QUEUE CLEANUP] Cleaned queue: ${queue.name}`);
} catch (error) {
// Log but don't fail - the queue might not exist yet
console.error(` ⚠️ [QUEUE CLEANUP] Could not clean queue ${queue.name}: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}
console.error(`✅ [PID:${process.pid}] [QUEUE CLEANUP] All queues cleaned successfully.`);
} catch (error) {
console.error(`❌ [PID:${process.pid}] [QUEUE CLEANUP] CRITICAL ERROR during queue cleanup:`, error);
// Don't throw - we want the tests to continue even if cleanup fails
}
}
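// A gentler per-queue reset than obliterate (assuming standard BullMQ v4+ APIs;
// not what this suite uses) would drain waiting jobs and prune finished ones:
const drainQueueSketch = async (queue: import('bullmq').Queue) => {
await queue.drain(); // removes jobs waiting to run (pass true to also drop delayed jobs)
await queue.clean(0, 1000, 'completed'); // prune finished jobs in batches
await queue.clean(0, 1000, 'failed');
};
void drainQueueSketch; // documentation only; setup() relies on obliterate above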
export async function setup() {
@@ -38,11 +50,15 @@ export async function setup() {
// Fix: Set the FRONTEND_URL globally for the test server instance
process.env.FRONTEND_URL = 'https://example.com';
console.error(`\n--- [PID:${process.pid}] Running Integration Test GLOBAL Setup ---`);
console.error(`[SETUP] REDIS_URL: ${process.env.REDIS_URL}`);
console.error(`[SETUP] REDIS_PASSWORD is set: ${!!process.env.REDIS_PASSWORD}`);
// CRITICAL: Clean all queues BEFORE running any tests to remove stale jobs
// from previous test runs that may have outdated error messages.
console.error(`[SETUP] About to call cleanAllQueues()...`);
await cleanAllQueues();
console.error(`[SETUP] cleanAllQueues() completed.`);
// The integration setup is now the single source of truth for preparing the test DB.
// It runs the same seed script that `npm run db:reset:test` used.

View File

@@ -8,6 +8,10 @@ interface CleanupOptions {
storeIds?: (number | null | undefined)[];
recipeIds?: (number | null | undefined)[];
budgetIds?: (number | null | undefined)[];
masterItemIds?: (number | null | undefined)[];
shoppingListIds?: (number | null | undefined)[];
suggestedCorrectionIds?: (number | null | undefined)[];
recipeCommentIds?: (number | null | undefined)[];
}
/**
@@ -25,11 +29,21 @@ export const cleanupDb = async (options: CleanupOptions) => {
// Order of deletion matters to avoid foreign key violations.
// Children entities first, then parents.
if (options.suggestedCorrectionIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.suggested_corrections WHERE suggested_correction_id = ANY($1::int[])', [options.suggestedCorrectionIds]);
logger.debug(`Cleaned up ${options.suggestedCorrectionIds.length} suggested correction(s).`);
}
if (options.budgetIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.budgets WHERE budget_id = ANY($1::int[])', [options.budgetIds]);
logger.debug(`Cleaned up ${options.budgetIds.length} budget(s).`);
}
if (options.recipeCommentIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipe_comments WHERE recipe_comment_id = ANY($1::int[])', [options.recipeCommentIds]);
logger.debug(`Cleaned up ${options.recipeCommentIds.length} recipe comment(s).`);
}
if (options.recipeIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipes WHERE recipe_id = ANY($1::int[])', [options.recipeIds]);
logger.debug(`Cleaned up ${options.recipeIds.length} recipe(s).`);
@@ -45,6 +59,16 @@ export const cleanupDb = async (options: CleanupOptions) => {
logger.debug(`Cleaned up ${options.storeIds.length} store(s).`);
}
if (options.masterItemIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = ANY($1::int[])', [options.masterItemIds]);
logger.debug(`Cleaned up ${options.masterItemIds.length} master grocery item(s).`);
}
if (options.shoppingListIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.shopping_lists WHERE shopping_list_id = ANY($1::int[])', [options.shoppingListIds]);
logger.debug(`Cleaned up ${options.shoppingListIds.length} shopping list(s).`);
}
if (options.userIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.users WHERE user_id = ANY($1::uuid[])', [options.userIds]);
logger.debug(`Cleaned up ${options.userIds.length} user(s).`);
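// Example shape of a caller using the expanded options, sketched in a comment
// since cleanupDb's callers live in the individual suites (the tracked ID
// arrays shown here are illustrative, not from this file):
//
//   afterAll(async () => {
//     await cleanupDb({
//       userIds: createdUserIds,
//       flyerIds: createdFlyerIds,
//       storeIds: createdStoreIds,
//       masterItemIds: createdMasterItemIds,
//       recipeCommentIds: createdRecipeCommentIds,
//     });
//   });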