Compare commits

...

14 Commits

Author SHA1 Message Date
Gitea Actions
1814469eb4 ci: Bump version to 0.9.70 [skip ci] 2026-01-09 18:19:13 +05:00
b777430ff7 integration test fixes - claude for the win? try 4 - i have a good feeling
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Has been cancelled
2026-01-09 05:18:19 -08:00
Gitea Actions
23830c0d4e ci: Bump version to 0.9.69 [skip ci] 2026-01-09 17:24:00 +05:00
ef42fee982 integration test fixes - claude for the win? try 3
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 32m3s
2026-01-09 04:23:23 -08:00
Gitea Actions
65cb54500c ci: Bump version to 0.9.68 [skip ci] 2026-01-09 16:42:51 +05:00
664ad291be integration test fixes - claude for the win? try 3
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 30m3s
2026-01-09 03:41:57 -08:00
Gitea Actions
ff912b9055 ci: Bump version to 0.9.67 [skip ci] 2026-01-09 15:32:50 +05:00
ec32027bd4 integration test fixes - claude for the win?
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 33m43s
2026-01-09 02:32:16 -08:00
Gitea Actions
59f773639b ci: Bump version to 0.9.66 [skip ci] 2026-01-09 15:27:50 +05:00
dd2be5eecf integration test fixes - claude for the win?
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 1m0s
2026-01-09 02:27:14 -08:00
Gitea Actions
a94bfbd3e9 ci: Bump version to 0.9.65 [skip ci] 2026-01-09 14:43:36 +05:00
338bbc9440 integration test fixes - claude for the win?
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 20m4s
2026-01-09 01:42:51 -08:00
Gitea Actions
60aad04642 ci: Bump version to 0.9.64 [skip ci] 2026-01-09 13:57:52 +05:00
7f2aff9a24 unit test fixes
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 23m39s
2026-01-09 00:57:12 -08:00
61 changed files with 3685 additions and 120 deletions

View File

@@ -54,7 +54,15 @@
"mcp__memory__create_entities",
"mcp__memory__search_nodes",
"mcp__memory__delete_entities",
"mcp__sequential-thinking__sequentialthinking"
"mcp__sequential-thinking__sequentialthinking",
"mcp__filesystem__list_directory",
"mcp__filesystem__read_multiple_files",
"mcp__filesystem__directory_tree",
"mcp__filesystem__read_text_file",
"Bash(wc:*)",
"Bash(npm install:*)",
"Bash(git grep:*)",
"Bash(findstr:*)"
]
}
}

View File

@@ -117,7 +117,8 @@ jobs:
DB_USER: ${{ secrets.DB_USER }}
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_NAME: ${{ secrets.DB_DATABASE_PROD }}
REDIS_URL: 'redis://localhost:6379'
# Explicitly use database 0 for production (test uses database 1)
REDIS_URL: 'redis://localhost:6379/0'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
FRONTEND_URL: 'https://flyer-crawler.projectium.com'
JWT_SECRET: ${{ secrets.JWT_SECRET }}

View File

@@ -96,6 +96,24 @@ jobs:
# It prevents the accumulation of duplicate processes from previous test runs.
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.name && p.name.endsWith('-test')) { console.log('Deleting test process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id, e.message); } } }); console.log('✅ Test process cleanup complete.'); } catch (e) { if (e.stdout.toString().includes('No process found')) { console.log('No PM2 processes running, cleanup not needed.'); } else { console.error('Error cleaning up test processes:', e.message); } }" || true
- name: Flush Redis Test Database Before Tests
# CRITICAL: Clear Redis database 1 (test database) to remove stale BullMQ jobs.
# This prevents old jobs with outdated error messages from polluting test results.
# NOTE: We use database 1 for tests to isolate from production (database 0).
env:
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
run: |
echo "--- Flushing Redis database 1 (test database) to remove stale jobs ---"
if [ -z "$REDIS_PASSWORD" ]; then
echo "⚠️ REDIS_PASSWORD_TEST not set, attempting flush without password..."
redis-cli -n 1 FLUSHDB || echo "Redis flush failed (no password)"
else
redis-cli -a "$REDIS_PASSWORD" -n 1 FLUSHDB 2>/dev/null && echo "✅ Redis database 1 (test) flushed successfully." || echo "⚠️ Redis flush failed"
fi
# Verify the flush worked by checking key count on database 1
KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 1 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
echo "Redis database 1 key count after flush: $KEY_COUNT"
- name: Run All Tests and Generate Merged Coverage Report
# This single step runs both unit and integration tests, then merges their
# coverage data into a single report. It combines the environment variables
@@ -109,7 +127,9 @@ jobs:
DB_NAME: 'flyer-crawler-test' # Explicitly set for tests
# --- Redis credentials for the test suite ---
REDIS_URL: 'redis://localhost:6379'
# CRITICAL: Use Redis database 1 to isolate tests from production (which uses db 0).
# This prevents the production worker from picking up test jobs.
REDIS_URL: 'redis://localhost:6379/1'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
# --- Integration test specific variables ---
@@ -384,8 +404,8 @@ jobs:
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_NAME: ${{ secrets.DB_DATABASE_TEST }}
# Redis Credentials
REDIS_URL: 'redis://localhost:6379'
# Redis Credentials (use database 1 to isolate from production)
REDIS_URL: 'redis://localhost:6379/1'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
# Application Secrets

View File

@@ -116,7 +116,8 @@ jobs:
DB_USER: ${{ secrets.DB_USER }}
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_NAME: ${{ secrets.DB_DATABASE_PROD }}
REDIS_URL: 'redis://localhost:6379'
# Explicitly use database 0 for production (test uses database 1)
REDIS_URL: 'redis://localhost:6379/0'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
FRONTEND_URL: 'https://flyer-crawler.projectium.com'
JWT_SECRET: ${{ secrets.JWT_SECRET }}

View File

@@ -0,0 +1,167 @@
# .gitea/workflows/manual-redis-flush-prod.yml
#
# DANGER: This workflow is DESTRUCTIVE and intended for manual execution only.
# It will completely FLUSH the PRODUCTION Redis database (db 0).
# This will clear all BullMQ queues, sessions, caches, and any other Redis data.
#
name: Manual - Flush Production Redis
on:
workflow_dispatch:
inputs:
confirmation:
description: 'DANGER: This will FLUSH production Redis. Type "flush-production-redis" to confirm.'
required: true
default: 'do-not-run'
flush_type:
description: 'What to flush?'
required: true
type: choice
options:
- 'queues-only'
- 'entire-database'
default: 'queues-only'
jobs:
flush-redis:
runs-on: projectium.com # This job runs on your self-hosted Gitea runner.
env:
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
steps:
- name: Checkout Code
uses: actions/checkout@v3
- name: Setup Node.js
uses: actions/setup-node@v3
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: '**/package-lock.json'
- name: Install Dependencies
run: npm ci
- name: Validate Secrets
run: |
if [ -z "$REDIS_PASSWORD" ]; then
echo "ERROR: REDIS_PASSWORD_PROD secret is not set in Gitea repository settings."
exit 1
fi
echo "✅ Redis password secret is present."
- name: Verify Confirmation Phrase
run: |
if [ "${{ gitea.event.inputs.confirmation }}" != "flush-production-redis" ]; then
echo "ERROR: Confirmation phrase did not match. Aborting Redis flush."
exit 1
fi
echo "✅ Confirmation accepted. Proceeding with Redis flush."
- name: Show Current Redis State
run: |
echo "--- Current Redis Database 0 (Production) State ---"
redis-cli -a "$REDIS_PASSWORD" -n 0 INFO keyspace 2>/dev/null || echo "Could not get keyspace info"
echo ""
echo "--- Key Count ---"
KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 0 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
echo "Production Redis (db 0) key count: $KEY_COUNT"
echo ""
echo "--- BullMQ Queue Keys ---"
redis-cli -a "$REDIS_PASSWORD" -n 0 KEYS "bull:*" 2>/dev/null | head -20 || echo "No BullMQ keys found"
- name: 🚨 FINAL WARNING & PAUSE 🚨
run: |
echo "*********************************************************************"
echo "WARNING: YOU ARE ABOUT TO FLUSH PRODUCTION REDIS DATA."
echo "Flush type: ${{ gitea.event.inputs.flush_type }}"
echo ""
if [ "${{ gitea.event.inputs.flush_type }}" = "entire-database" ]; then
echo "This will DELETE ALL Redis data including sessions, caches, and queues!"
else
echo "This will DELETE ALL BullMQ queue data (pending jobs, failed jobs, etc.)"
fi
echo ""
echo "This action is IRREVERSIBLE. Press Ctrl+C in the runner terminal NOW to cancel."
echo "Sleeping for 10 seconds..."
echo "*********************************************************************"
sleep 10
- name: Flush BullMQ Queues Only
if: ${{ gitea.event.inputs.flush_type == 'queues-only' }}
env:
REDIS_URL: 'redis://localhost:6379/0'
run: |
echo "--- Obliterating BullMQ queues using Node.js ---"
node -e "
const { Queue } = require('bullmq');
const IORedis = require('ioredis');
const connection = new IORedis(process.env.REDIS_URL, {
maxRetriesPerRequest: null,
password: process.env.REDIS_PASSWORD,
});
const queueNames = [
'flyer-processing',
'email-sending',
'analytics-reporting',
'weekly-analytics-reporting',
'file-cleanup',
'token-cleanup'
];
(async () => {
for (const name of queueNames) {
try {
const queue = new Queue(name, { connection });
const counts = await queue.getJobCounts();
console.log('Queue \"' + name + '\" before obliterate:', JSON.stringify(counts));
await queue.obliterate({ force: true });
console.log('✅ Obliterated queue: ' + name);
await queue.close();
} catch (err) {
console.error('⚠️ Failed to obliterate queue ' + name + ':', err.message);
}
}
await connection.quit();
console.log('✅ All BullMQ queues obliterated.');
})();
"
- name: Flush Entire Redis Database
if: ${{ gitea.event.inputs.flush_type == 'entire-database' }}
run: |
echo "--- Flushing entire Redis database 0 (production) ---"
redis-cli -a "$REDIS_PASSWORD" -n 0 FLUSHDB 2>/dev/null && echo "✅ Redis database 0 flushed successfully." || echo "❌ Redis flush failed"
- name: Verify Flush Results
run: |
echo "--- Redis Database 0 (Production) State After Flush ---"
KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 0 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
echo "Production Redis (db 0) key count after flush: $KEY_COUNT"
echo ""
echo "--- Remaining BullMQ Queue Keys ---"
BULL_KEYS=$(redis-cli -a "$REDIS_PASSWORD" -n 0 KEYS "bull:*" 2>/dev/null | wc -l || echo "0")
echo "BullMQ key count: $BULL_KEYS"
if [ "${{ gitea.event.inputs.flush_type }}" = "queues-only" ] && [ "$BULL_KEYS" -gt 0 ]; then
echo "⚠️ Warning: Some BullMQ keys may still exist. This can happen if new jobs were added during the flush."
fi
- name: Summary
run: |
echo ""
echo "=========================================="
echo "PRODUCTION REDIS FLUSH COMPLETE"
echo "=========================================="
echo "Flush type: ${{ gitea.event.inputs.flush_type }}"
echo "Timestamp: $(date -u '+%Y-%m-%d %H:%M:%S UTC')"
echo ""
echo "NOTE: If you flushed queues, any pending jobs (flyer processing,"
echo "emails, analytics, etc.) have been permanently deleted."
echo ""
echo "The production workers will automatically start processing"
echo "new jobs as they are added to the queues."
echo "=========================================="

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Accepted
## Context
@@ -16,3 +16,82 @@ We will implement a dedicated background job processing system using a task queu
**Positive**: Decouples the API from heavy processing, allows for retries on failure, and enables scaling the processing workers independently. Increases application reliability and resilience.
**Negative**: Introduces a new dependency (Redis) into the infrastructure. Requires refactoring of the flyer processing logic to work within a job queue structure.
## Implementation Details
### Queue Infrastructure
The implementation uses **BullMQ v5.65.1** with **ioredis v5.8.2** for Redis connectivity. Six distinct queues handle different job types:
| Queue Name | Purpose | Retry Attempts | Backoff Strategy |
| ---------------------------- | --------------------------- | -------------- | ---------------------- |
| `flyer-processing` | OCR/AI processing of flyers | 3 | Exponential (5s base) |
| `email-sending` | Email delivery | 5 | Exponential (10s base) |
| `analytics-reporting` | Daily report generation | 2 | Exponential (60s base) |
| `weekly-analytics-reporting` | Weekly report generation | 2 | Exponential (1h base) |
| `file-cleanup` | Temporary file cleanup | 3 | Exponential (30s base) |
| `token-cleanup` | Expired token removal | 2 | Exponential (1h base) |
### Key Files
- `src/services/queues.server.ts` - Queue definitions and configuration
- `src/services/workers.server.ts` - Worker implementations with configurable concurrency
- `src/services/redis.server.ts` - Redis connection management
- `src/services/queueService.server.ts` - Queue lifecycle and graceful shutdown
- `src/services/flyerProcessingService.server.ts` - 5-stage flyer processing pipeline
- `src/types/job-data.ts` - TypeScript interfaces for all job data types
### API Design
Endpoints for long-running tasks return **202 Accepted** immediately with a job ID:
```text
POST /api/ai/upload-and-process → 202 { jobId: "..." }
GET /api/ai/jobs/:jobId/status → { state: "...", progress: ... }
```
### Worker Configuration
Workers are configured via environment variables:
- `WORKER_CONCURRENCY` - Flyer processing parallelism (default: 1)
- `EMAIL_WORKER_CONCURRENCY` - Email worker parallelism (default: 10)
- `ANALYTICS_WORKER_CONCURRENCY` - Analytics worker parallelism (default: 1)
- `CLEANUP_WORKER_CONCURRENCY` - Cleanup worker parallelism (default: 10)
### Monitoring
- **Bull Board UI** available at `/api/admin/jobs` for admin users
- Worker status endpoint: `GET /api/admin/workers/status`
- Queue status endpoint: `GET /api/admin/queues/status`
### Graceful Shutdown
Both API and worker processes implement graceful shutdown with a 30-second timeout, ensuring in-flight jobs complete before process termination.
## Compliance Notes
### Deprecated Synchronous Endpoints
The following endpoints process flyers synchronously and are **deprecated**:
- `POST /api/ai/upload-legacy` - For integration testing only
- `POST /api/ai/flyers/process` - Legacy workflow, should migrate to queue-based approach
New integrations MUST use `POST /api/ai/upload-and-process` for queue-based processing.
### Email Handling
- **Bulk emails** (deal notifications): Enqueued via `emailQueue`
- **Transactional emails** (password reset): Sent synchronously for immediate user feedback
## Future Enhancements
Potential improvements for consideration:
1. **Dead Letter Queue (DLQ)**: Move permanently failed jobs to a dedicated queue for analysis
2. **Job Priority Levels**: Allow priority-based processing for different job types
3. **Real-time Progress**: WebSocket/SSE for live job progress updates to clients
4. **Per-Queue Rate Limiting**: Throttle job processing based on external API limits
5. **Job Dependencies**: Support for jobs that depend on completion of other jobs
6. **Prometheus Metrics**: Export queue metrics for observability dashboards

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Accepted
## Context
@@ -20,3 +20,107 @@ We will implement a multi-layered caching strategy using an in-memory data store
**Positive**: Directly addresses application performance and scalability. Reduces database load and improves API response times for common requests.
**Negative**: Introduces Redis as a dependency if not already used. Adds complexity to the data-fetching logic and requires careful management of cache invalidation to prevent stale data.
## Implementation Details
### Cache Service
A centralized cache service (`src/services/cacheService.server.ts`) provides reusable caching functionality:
- **`getOrSet<T>(key, fetcher, options)`**: Cache-aside pattern implementation
- **`get<T>(key)`**: Retrieve cached value
- **`set<T>(key, value, ttl)`**: Store value with TTL
- **`del(key)`**: Delete specific key
- **`invalidatePattern(pattern)`**: Delete keys matching a pattern
All cache operations are fail-safe - cache failures do not break the application.
### TTL Configuration
Different data types use different TTL values based on volatility:
| Data Type | TTL | Rationale |
| ------------------- | --------- | -------------------------------------- |
| Brands/Stores | 1 hour | Rarely changes, safe to cache longer |
| Flyer lists | 5 minutes | Changes when new flyers are added |
| Individual flyers | 10 minutes| Stable once created |
| Flyer items | 10 minutes| Stable once created |
| Statistics | 5 minutes | Can be slightly stale |
| Frequent sales | 15 minutes| Aggregated data, updated periodically |
| Categories | 1 hour | Rarely changes |
### Cache Key Strategy
Cache keys follow a consistent prefix pattern for pattern-based invalidation:
- `cache:brands` - All brands list
- `cache:flyers:{limit}:{offset}` - Paginated flyer lists
- `cache:flyer:{id}` - Individual flyer data
- `cache:flyer-items:{flyerId}` - Items for a specific flyer
- `cache:stats:*` - Statistics data
- `geocode:{address}` - Geocoding results (30-day TTL)
### Cached Endpoints
The following repository methods implement server-side caching:
| Method | Cache Key Pattern | TTL |
| ------ | ----------------- | --- |
| `FlyerRepository.getAllBrands()` | `cache:brands` | 1 hour |
| `FlyerRepository.getFlyers()` | `cache:flyers:{limit}:{offset}` | 5 minutes |
| `FlyerRepository.getFlyerItems()` | `cache:flyer-items:{flyerId}` | 10 minutes |
### Cache Invalidation
**Event-based invalidation** is triggered on write operations:
- **Flyer creation** (`FlyerPersistenceService.saveFlyer`): Invalidates all `cache:flyers*` keys
- **Flyer deletion** (`FlyerRepository.deleteFlyer`): Invalidates specific flyer and flyer items cache, plus flyer lists
**Manual invalidation** via admin endpoints:
- `POST /api/admin/system/clear-cache` - Clears all application cache (flyers, brands, stats)
- `POST /api/admin/system/clear-geocode-cache` - Clears geocoding cache
### Client-Side Caching
TanStack React Query provides client-side caching with configurable stale times:
| Query Type | Stale Time |
| ----------------- | ----------- |
| Categories | 1 hour |
| Master Items | 10 minutes |
| Flyer Items | 5 minutes |
| Flyers | 2 minutes |
| Shopping Lists | 1 minute |
| Activity Log | 30 seconds |
### Multi-Layer Cache Architecture
```text
Client Request
[TanStack React Query] ← Client-side cache (staleTime-based)
[Express API]
[CacheService.getOrSet()] ← Server-side Redis cache (TTL-based)
[PostgreSQL Database]
```
## Key Files
- `src/services/cacheService.server.ts` - Centralized cache service
- `src/services/db/flyer.db.ts` - Repository with caching for brands, flyers, flyer items
- `src/services/flyerPersistenceService.server.ts` - Cache invalidation on flyer creation
- `src/routes/admin.routes.ts` - Admin cache management endpoints
- `src/config/queryClient.ts` - Client-side query cache configuration
## Future Enhancements
1. **Recipe caching**: Add caching to expensive recipe queries (by-sale-percentage, etc.)
2. **Cache warming**: Pre-populate cache on startup for frequently accessed static data
3. **Cache metrics**: Add hit/miss rate monitoring for observability
4. **Conditional caching**: Skip cache for authenticated user-specific data
5. **Cache compression**: Compress large cached payloads to reduce Redis memory usage

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Accepted
## Context
@@ -14,9 +14,305 @@ We will formalize the testing pyramid for the project, defining the role of each
1. **Unit Tests (Vitest)**: For isolated functions, components, and repository methods with mocked dependencies. High coverage is expected.
2. **Integration Tests (Supertest)**: For API routes, testing the interaction between controllers, services, and mocked database layers. Focus on contract and middleware correctness.
3. **End-to-End (E2E) Tests (Playwright/Cypress)**: For critical user flows (e.g., login, flyer upload, checkout), running against a real browser and a test database to ensure the entire system works together.
3. **End-to-End (E2E) Tests (Vitest + Supertest)**: For critical user flows (e.g., login, flyer upload, checkout), running against a real test server and database to ensure the entire system works together.
## Consequences
**Positive**: Ensures a consistent and comprehensive approach to quality assurance. Gives developers confidence when refactoring or adding new features. Clearly defines "done" for a new feature.
**Negative**: May require investment in setting up and maintaining the E2E testing environment. Can slightly increase the time required to develop a feature if all test layers are required.
## Implementation Details
### Testing Framework Stack
| Tool | Version | Purpose |
| ---- | ------- | ------- |
| Vitest | 4.0.15 | Test runner for all test types |
| @testing-library/react | 16.3.0 | React component testing |
| @testing-library/jest-dom | 6.9.1 | DOM assertion matchers |
| supertest | 7.1.4 | HTTP assertion library for API testing |
| msw | 2.12.3 | Mock Service Worker for network mocking |
| testcontainers | 11.8.1 | Database containerization (optional) |
| c8 + nyc | 10.1.3 / 17.1.0 | Coverage reporting |
### Test File Organization
```text
src/
├── components/
│ └── *.test.tsx # Component unit tests (colocated)
├── hooks/
│ └── *.test.ts # Hook unit tests (colocated)
├── services/
│ └── *.test.ts # Service unit tests (colocated)
├── routes/
│ └── *.test.ts # Route handler unit tests (colocated)
├── utils/
│ └── *.test.ts # Utility function tests (colocated)
└── tests/
├── setup/ # Test configuration and setup files
├── utils/ # Test utilities, factories, helpers
├── assets/ # Test fixtures (images, files)
├── integration/ # Integration test files (*.test.ts)
└── e2e/ # End-to-end test files (*.e2e.test.ts)
```
**Naming Convention**: `*.test.ts` or `*.test.tsx` for unit/integration, `*.e2e.test.ts` for E2E.
### Configuration Files
| Config | Environment | Purpose |
| ------ | ----------- | ------- |
| `vite.config.ts` | jsdom | Unit tests (React components, hooks) |
| `vitest.config.integration.ts` | node | Integration tests (API routes) |
| `vitest.config.e2e.ts` | node | E2E tests (full user flows) |
| `vitest.workspace.ts` | - | Orchestrates all test projects |
### Test Pyramid
```text
┌─────────────┐
│ E2E │ 5 test files
│ Tests │ Critical user flows
├─────────────┤
│ Integration │ 17 test files
│ Tests │ API contracts + middleware
┌───┴─────────────┴───┐
│ Unit Tests │ 185 test files
│ Components, Hooks, │ Isolated functions
│ Services, Utils │ Mocked dependencies
└─────────────────────┘
```
### Unit Tests
**Purpose**: Test isolated functions, components, and modules with mocked dependencies.
**Environment**: jsdom (browser-like)
**Key Patterns**:
```typescript
// Component testing with providers
import { renderWithProviders, screen } from '@/tests/utils/renderWithProviders';
describe('MyComponent', () => {
it('renders correctly', () => {
renderWithProviders(<MyComponent />);
expect(screen.getByText('Hello')).toBeInTheDocument();
});
});
```
```typescript
// Hook testing
import { renderHook, waitFor } from '@testing-library/react';
import { useMyHook } from './useMyHook';
describe('useMyHook', () => {
it('returns expected value', async () => {
const { result } = renderHook(() => useMyHook());
await waitFor(() => expect(result.current.data).toBeDefined());
});
});
```
**Global Mocks** (automatically applied via `tests-setup-unit.ts`):
- Database connections (`pg.Pool`)
- AI services (`@google/genai`)
- Authentication (`jsonwebtoken`, `bcrypt`)
- Logging (`logger.server`, `logger.client`)
- Notifications (`notificationService`)
### Integration Tests
**Purpose**: Test API routes with real service interactions and database.
**Environment**: node
**Setup**: Real Express server on port 3001, real PostgreSQL database
```typescript
// API route testing pattern
import supertest from 'supertest';
import { createAndLoginUser } from '@/tests/utils/testHelpers';
describe('Auth API', () => {
let request: ReturnType<typeof supertest>;
let authToken: string;
beforeAll(async () => {
const app = (await import('../../../server')).default;
request = supertest(app);
const { token } = await createAndLoginUser(request);
authToken = token;
});
it('GET /api/auth/me returns user profile', async () => {
const response = await request
.get('/api/auth/me')
.set('Authorization', `Bearer ${authToken}`);
expect(response.status).toBe(200);
expect(response.body.user.email).toBeDefined();
});
});
```
**Database Cleanup**:
```typescript
import { cleanupDb } from '@/tests/utils/cleanup';
afterAll(async () => {
await cleanupDb({ users: [testUserId] });
});
```
### E2E Tests
**Purpose**: Test complete user journeys through the application.
**Timeout**: 120 seconds (for long-running flows)
**Current E2E Tests**:
- `auth.e2e.test.ts` - Registration, login, password reset
- `flyer-upload.e2e.test.ts` - Complete flyer upload pipeline
- `user-journey.e2e.test.ts` - Full user workflow
- `admin-authorization.e2e.test.ts` - Admin-specific flows
- `admin-dashboard.e2e.test.ts` - Admin dashboard functionality
### Mock Factories
The project uses comprehensive mock factories (`src/tests/utils/mockFactories.ts`, 1553 lines) for creating test data:
```typescript
import {
createMockUser,
createMockFlyer,
createMockFlyerItem,
createMockRecipe,
resetMockIds,
} from '@/tests/utils/mockFactories';
beforeEach(() => {
resetMockIds(); // Ensure deterministic IDs
});
it('creates flyer with items', () => {
const flyer = createMockFlyer({ store_name: 'TestMart' });
const items = [createMockFlyerItem({ flyer_id: flyer.flyer_id })];
// ...
});
```
**Factory Coverage**: 90+ factory functions for all domain entities including users, flyers, recipes, shopping lists, budgets, achievements, etc.
### Test Utilities
| Utility | Purpose |
| ------- | ------- |
| `renderWithProviders()` | Wrap components with AppProviders + Router |
| `createAndLoginUser()` | Create user and return auth token |
| `cleanupDb()` | Database cleanup respecting FK constraints |
| `createTestApp()` | Create Express app for route testing |
| `poll()` | Polling utility for async operations |
### Coverage Configuration
**Coverage Provider**: v8 (built-in Vitest)
**Report Directories**:
- `.coverage/unit/` - Unit test coverage
- `.coverage/integration/` - Integration test coverage
- `.coverage/e2e/` - E2E test coverage
**Excluded from Coverage**:
- `src/index.tsx`, `src/main.tsx` (entry points)
- `src/tests/**` (test files themselves)
- `src/**/*.d.ts` (type declarations)
- `src/components/icons/**` (icon components)
- `src/db/seed*.ts` (database seeding scripts)
### npm Scripts
```bash
# Run all tests
npm run test
# Run by level
npm run test:unit # Unit tests only (jsdom)
npm run test:integration # Integration tests only (node)
# With coverage
npm run test:coverage # Unit + Integration with reports
# Clean coverage directories
npm run clean
```
### Test Timeouts
| Test Type | Timeout | Rationale |
| --------- | ------- | --------- |
| Unit | 5 seconds | Fast, isolated tests |
| Integration | 60 seconds | AI service calls, DB operations |
| E2E | 120 seconds | Full user flow with multiple API calls |
## Best Practices
### When to Write Each Test Type
1. **Unit Tests** (required):
- Pure functions and utilities
- React components (rendering, user interactions)
- Custom hooks
- Service methods with mocked dependencies
- Repository methods
2. **Integration Tests** (required for API changes):
- New API endpoints
- Authentication/authorization flows
- Middleware behavior
- Database query correctness
3. **E2E Tests** (for critical paths):
- User registration and login
- Core business flows (flyer upload, shopping lists)
- Admin operations
### Test Isolation Guidelines
1. **Reset mock IDs**: Call `resetMockIds()` in `beforeEach()`
2. **Unique test data**: Use timestamps or UUIDs for emails/usernames
3. **Clean up after tests**: Use `cleanupDb()` in `afterAll()`
4. **Don't share state**: Each test should be independent
### Mocking Guidelines
1. **Unit tests**: Mock external dependencies (DB, APIs, services)
2. **Integration tests**: Mock only external APIs (AI services)
3. **E2E tests**: Minimal mocking, use real services where possible
## Key Files
- `vite.config.ts` - Unit test configuration
- `vitest.config.integration.ts` - Integration test configuration
- `vitest.config.e2e.ts` - E2E test configuration
- `vitest.workspace.ts` - Workspace orchestration
- `src/tests/setup/tests-setup-unit.ts` - Global mocks (488 lines)
- `src/tests/setup/integration-global-setup.ts` - Server + DB setup
- `src/tests/utils/mockFactories.ts` - Mock factories (1553 lines)
- `src/tests/utils/testHelpers.ts` - Test utilities
## Future Enhancements
1. **Browser E2E Tests**: Consider adding Playwright for actual browser testing
2. **Visual Regression**: Screenshot comparison for UI components
3. **Performance Testing**: Add benchmarks for critical paths
4. **Mutation Testing**: Verify test quality with mutation testing tools
5. **Coverage Thresholds**: Define minimum coverage requirements per module

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Partially Implemented
## Context
@@ -16,3 +16,255 @@ We will establish a formal Design System and Component Library. This will involv
- **Positive**: Ensures a consistent and high-quality user interface. Accelerates frontend development by providing reusable, well-documented components. Improves maintainability and reduces technical debt.
- **Negative**: Requires an initial investment in setting up Storybook and migrating existing components. Adds a new dependency and a new workflow for frontend development.
## Implementation Status
### What's Implemented
The codebase has a solid foundation for a design system:
- **Tailwind CSS v4.1.17** as the styling solution
- **Dark mode** fully implemented with system preference detection
- **55 custom icon components** for consistent iconography
- **Component organization** with shared vs. feature-specific separation
- **Accessibility patterns** with ARIA attributes and focus management
### What's Not Yet Implemented
- **Storybook** is not yet installed or configured
- **Formal design token documentation** (colors, typography, spacing)
- **Visual regression testing** for component changes
## Implementation Details
### Component Library Structure
```text
src/
├── components/ # 30+ shared UI components
│ ├── icons/ # 55 SVG icon components
│ ├── Header.tsx
│ ├── Footer.tsx
│ ├── LoadingSpinner.tsx
│ ├── ErrorDisplay.tsx
│ ├── ConfirmationModal.tsx
│ ├── DarkModeToggle.tsx
│ ├── StatCard.tsx
│ ├── PasswordInput.tsx
│ └── ...
├── features/ # Feature-specific components
│ ├── charts/ # PriceChart, PriceHistoryChart
│ ├── flyer/ # FlyerDisplay, FlyerList, FlyerUploader
│ ├── shopping/ # ShoppingListComponent, WatchedItemsList
│ └── voice-assistant/ # VoiceAssistant
├── layouts/ # Page layouts
│ └── MainLayout.tsx
├── pages/ # Page components
│ └── admin/components/ # Admin-specific components
└── providers/ # Context providers
```
### Styling Approach
**Tailwind CSS** with utility-first classes:
```typescript
// Component example with consistent styling patterns
<button className="px-4 py-2 bg-brand-primary text-white rounded-lg
hover:bg-brand-dark transition-colors duration-200
focus:outline-none focus:ring-2 focus:ring-brand-primary
focus:ring-offset-2 dark:focus:ring-offset-gray-800">
Click me
</button>
```
**Common Utility Patterns**:
| Pattern | Classes |
| ------- | ------- |
| Card container | `bg-white dark:bg-gray-800 rounded-lg shadow-md p-6` |
| Primary button | `bg-brand-primary hover:bg-brand-dark text-white rounded-lg px-4 py-2` |
| Secondary button | `bg-gray-100 dark:bg-gray-700 text-gray-700 dark:text-gray-200` |
| Input field | `border border-gray-300 dark:border-gray-600 rounded-md px-3 py-2` |
| Focus ring | `focus:outline-none focus:ring-2 focus:ring-brand-primary` |
### Color System
**Brand Colors** (Tailwind theme extensions):
- `brand-primary` - Primary brand color (blue/teal)
- `brand-light` - Lighter variant
- `brand-dark` - Darker variant for hover states
- `brand-secondary` - Secondary accent color
**Semantic Colors**:
- Gray scale: `gray-50` through `gray-950`
- Error: `red-500`, `red-600`
- Success: `green-500`, `green-600`
- Warning: `yellow-500`, `orange-500`
- Info: `blue-500`, `blue-600`
### Dark Mode Implementation
Dark mode is fully implemented using Tailwind's `dark:` variant:
```typescript
// Initialization in useAppInitialization hook
const initializeDarkMode = () => {
// Priority: user profile > localStorage > system preference
const stored = localStorage.getItem('darkMode');
const systemPreference = window.matchMedia('(prefers-color-scheme: dark)').matches;
const isDarkMode = stored ? stored === 'true' : systemPreference;
document.documentElement.classList.toggle('dark', isDarkMode);
return isDarkMode;
};
```
**Usage in components**:
```typescript
<div className="bg-white dark:bg-gray-800 text-gray-900 dark:text-white">
Content adapts to theme
</div>
```
### Icon System
**55 custom SVG icon components** in `src/components/icons/`:
```typescript
// Icon component pattern
interface IconProps extends React.SVGProps<SVGSVGElement> {
title?: string;
}
export const CheckCircleIcon: React.FC<IconProps> = ({ title, ...props }) => (
<svg {...props} fill="currentColor" viewBox="0 0 24 24">
{title && <title>{title}</title>}
<path d="..." />
</svg>
);
```
**Usage**:
```typescript
<CheckCircleIcon className="w-5 h-5 text-green-500" title="Success" />
```
**External icons**: Lucide React (`lucide-react` v0.555.0) used for additional icons.
### Accessibility Patterns
**ARIA Attributes**:
```typescript
// Modal pattern
<div role="dialog" aria-modal="true" aria-labelledby="modal-title">
<h2 id="modal-title">Modal Title</h2>
</div>
// Button with label
<button aria-label="Close modal">
<XMarkIcon aria-hidden="true" />
</button>
// Loading state
<div role="status" aria-live="polite">
<LoadingSpinner />
</div>
```
**Focus Management**:
- Consistent focus rings: `focus:ring-2 focus:ring-brand-primary focus:ring-offset-2`
- Dark mode offset: `dark:focus:ring-offset-gray-800`
- No outline: `focus:outline-none` (using ring instead)
### State Management
**Context Providers** (see ADR-005):
| Provider | Purpose |
| -------- | ------- |
| `AuthProvider` | Authentication state |
| `ModalProvider` | Modal open/close state |
| `FlyersProvider` | Flyer data |
| `MasterItemsProvider` | Grocery items |
| `UserDataProvider` | User-specific data |
**Provider Hierarchy** in `AppProviders.tsx`:
```typescript
<QueryClientProvider>
<ModalProvider>
<AuthProvider>
<FlyersProvider>
<MasterItemsProvider>
<UserDataProvider>
{children}
</UserDataProvider>
</MasterItemsProvider>
</FlyersProvider>
</AuthProvider>
</ModalProvider>
</QueryClientProvider>
```
## Key Files
- `tailwind.config.js` - Tailwind CSS configuration
- `src/index.css` - Tailwind CSS entry point
- `src/components/` - Shared UI components
- `src/components/icons/` - Icon component library (55 icons)
- `src/providers/AppProviders.tsx` - Context provider composition
- `src/hooks/useAppInitialization.ts` - Dark mode initialization
## Component Guidelines
### When to Create Shared Components
Create a shared component in `src/components/` when:
1. Used in 3+ places across the application
2. Represents a reusable UI pattern (buttons, cards, modals)
3. Has consistent styling/behavior requirements
### Naming Conventions
- **Components**: PascalCase (`LoadingSpinner.tsx`)
- **Icons**: PascalCase with `Icon` suffix (`CheckCircleIcon.tsx`)
- **Hooks**: camelCase with `use` prefix (`useModal.ts`)
- **Contexts**: PascalCase with `Context` suffix (`AuthContext.tsx`)
### Styling Guidelines
1. Use Tailwind utility classes exclusively
2. Include dark mode variants for all colors: `bg-white dark:bg-gray-800`
3. Add focus states for interactive elements
4. Use semantic color names from the design system
## Future Enhancements (Storybook Setup)
To complete ADR-012 implementation:
1. **Install Storybook**:
```bash
npx storybook@latest init
```
2. **Create stories for core components**:
- Button variants
- Form inputs (PasswordInput, etc.)
- Modal components
- Loading states
- Icon showcase
3. **Add visual regression testing** with Chromatic or Percy
4. **Document design tokens** formally in Storybook
5. **Create component composition guidelines**

View File

@@ -2,7 +2,7 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Accepted
## Context
@@ -20,3 +20,197 @@ We will implement a multi-layered security approach for the API:
- **Positive**: Significantly improves the application's security posture against common web vulnerabilities like XSS, clickjacking, and brute-force attacks.
- **Negative**: Requires careful configuration of CORS and rate limits to avoid blocking legitimate traffic. Content-Security-Policy can be complex to configure correctly.
## Implementation Status
### What's Implemented
- **Helmet** - Security headers middleware with CSP, HSTS, and more
- **Rate Limiting** - Comprehensive implementation with 17+ specific limiters
- **Input Validation** - Zod-based request validation on all routes
- **File Upload Security** - MIME type validation, size limits, filename sanitization
- **Error Handling** - Production-safe error responses (no sensitive data leakage)
- **Request Timeout** - 5-minute timeout protection
- **Secure Cookies** - httpOnly and secure flags for authentication cookies
### Not Required
- **CORS** - Not needed (API and frontend are same-origin)
## Implementation Details
### Helmet Security Headers
Using **helmet v8.x** configured in `server.ts` as the first middleware after app initialization.
**Security Headers Applied**:
| Header | Configuration | Purpose |
| ------ | ------------- | ------- |
| Content-Security-Policy | Custom directives | Prevents XSS, code injection |
| Strict-Transport-Security | 1 year, includeSubDomains, preload | Forces HTTPS connections |
| X-Content-Type-Options | nosniff | Prevents MIME type sniffing |
| X-Frame-Options | DENY | Prevents clickjacking |
| X-XSS-Protection | 0 (disabled) | Deprecated, CSP preferred |
| Referrer-Policy | strict-origin-when-cross-origin | Controls referrer information |
| Cross-Origin-Resource-Policy | cross-origin | Allows external resource loading |
**Content Security Policy Directives**:
```typescript
contentSecurityPolicy: {
directives: {
defaultSrc: ["'self'"],
scriptSrc: ["'self'", "'unsafe-inline'"], // React inline scripts
styleSrc: ["'self'", "'unsafe-inline'"], // Tailwind inline styles
imgSrc: ["'self'", 'data:', 'blob:', 'https:'], // External images
fontSrc: ["'self'", 'https:', 'data:'],
connectSrc: ["'self'", 'https:', 'wss:'], // API + WebSocket
frameSrc: ["'none'"], // No iframes
objectSrc: ["'none'"], // No plugins
upgradeInsecureRequests: [], // Production only
},
}
```
**HSTS Configuration**:
- Max-age: 1 year (31536000 seconds)
- Includes subdomains
- Preload-ready for browser HSTS lists
### Rate Limiting
Using **express-rate-limit v8.2.1** with a centralized configuration in `src/config/rateLimiters.ts`.
**Standard Configuration**:
```typescript
const standardConfig = {
standardHeaders: true, // Sends RateLimit-* headers
legacyHeaders: false,
skip: shouldSkipRateLimit, // Disabled in test environment
};
```
**Rate Limiters by Category**:
| Category | Limiter | Window | Max Requests |
| -------- | ------- | ------ | ------------ |
| **Authentication** | loginLimiter | 15 min | 5 |
| | registerLimiter | 1 hour | 5 |
| | forgotPasswordLimiter | 15 min | 5 |
| | resetPasswordLimiter | 15 min | 10 |
| | refreshTokenLimiter | 15 min | 20 |
| | logoutLimiter | 15 min | 10 |
| **Public/User Read** | publicReadLimiter | 15 min | 100 |
| | userReadLimiter | 15 min | 100 |
| | userUpdateLimiter | 15 min | 100 |
| **Sensitive Operations** | userSensitiveUpdateLimiter | 1 hour | 5 |
| | adminTriggerLimiter | 15 min | 30 |
| **AI/Costly** | aiGenerationLimiter | 15 min | 20 |
| | geocodeLimiter | 1 hour | 100 |
| | priceHistoryLimiter | 15 min | 50 |
| **Uploads** | adminUploadLimiter | 15 min | 20 |
| | aiUploadLimiter | 15 min | 10 |
| | batchLimiter | 15 min | 50 |
| **Tracking** | trackingLimiter | 15 min | 200 |
| | reactionToggleLimiter | 15 min | 150 |
**Test Environment Handling**:
Rate limiting is automatically disabled in test environment via `shouldSkipRateLimit` utility (`src/utils/rateLimit.ts`). Tests can opt-in to rate limiting by setting the `x-test-rate-limit-enable: true` header.
### Input Validation
**Zod Schema Validation** (`src/middleware/validation.middleware.ts`):
- Type-safe parsing and coercion for params, query, and body
- Applied to all API routes via `validateRequest()` middleware
- Returns structured validation errors with field-level details
**Filename Sanitization** (`src/utils/stringUtils.ts`):
```typescript
// Removes dangerous characters from uploaded filenames
sanitizeFilename(filename: string): string
```
### File Upload Security
**Multer Configuration** (`src/middleware/multer.middleware.ts`):
- MIME type validation via `imageFileFilter` (only image/* allowed)
- File size limits (2MB for logos, configurable per upload type)
- Unique filenames using timestamps + random suffixes
- User-scoped storage paths
### Error Handling
**Production-Safe Responses** (`src/middleware/errorHandler.ts`):
- Production mode: Returns generic error message with tracking ID
- Development mode: Returns detailed error information
- Sensitive error details are logged but never exposed to clients
### Request Security
**Timeout Protection** (`server.ts`):
- 5-minute request timeout via `connect-timeout` middleware
- Prevents resource exhaustion from long-running requests
**Secure Cookies**:
```typescript
// Cookie configuration for auth tokens
{
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'strict',
maxAge: 7 * 24 * 60 * 60 * 1000 // 7 days for refresh token
}
```
### Request Logging
Per-request structured logging (ADR-004):
- Request ID tracking
- User ID and IP address logging
- Failed request details (4xx+) logged with headers and body
- Unhandled errors assigned unique error IDs
## Key Files
- `server.ts` - Helmet middleware configuration (security headers)
- `src/config/rateLimiters.ts` - Rate limiter definitions (17+ limiters)
- `src/utils/rateLimit.ts` - Rate limit skip logic for testing
- `src/middleware/validation.middleware.ts` - Zod-based request validation
- `src/middleware/errorHandler.ts` - Production-safe error handling
- `src/middleware/multer.middleware.ts` - Secure file upload configuration
- `src/utils/stringUtils.ts` - Filename sanitization
## Future Enhancements
1. **Configure CORS** (if needed for cross-origin access):
```bash
npm install cors @types/cors
```
Add to `server.ts`:
```typescript
import cors from 'cors';
app.use(cors({
origin: process.env.ALLOWED_ORIGINS?.split(',') || 'http://localhost:3000',
credentials: true,
}));
```
2. **Redis-backed rate limiting**: For distributed deployments, use `rate-limit-redis` store
3. **CSP Nonce**: Generate per-request nonces for stricter script-src policy
4. **Report-Only CSP**: Add `Content-Security-Policy-Report-Only` header for testing policy changes

14
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "flyer-crawler",
"version": "0.9.63",
"version": "0.9.70",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "flyer-crawler",
"version": "0.9.63",
"version": "0.9.70",
"dependencies": {
"@bull-board/api": "^6.14.2",
"@bull-board/express": "^6.14.2",
@@ -22,6 +22,7 @@
"express": "^5.1.0",
"express-list-endpoints": "^7.1.1",
"express-rate-limit": "^8.2.1",
"helmet": "^8.1.0",
"ioredis": "^5.8.2",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",
@@ -10193,6 +10194,15 @@
"dev": true,
"license": "MIT"
},
"node_modules/helmet": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/helmet/-/helmet-8.1.0.tgz",
"integrity": "sha512-jOiHyAZsmnr8LqoPGmCjYAaiuWwjAPLgY8ZX2XrmHawt99/u1y6RgrZMTeoPfpUbV96HOalYgz1qzkRbw54Pmg==",
"license": "MIT",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/help-me": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/help-me/-/help-me-5.0.0.tgz",

View File

@@ -1,7 +1,7 @@
{
"name": "flyer-crawler",
"private": true,
"version": "0.9.63",
"version": "0.9.70",
"type": "module",
"scripts": {
"dev": "concurrently \"npm:start:dev\" \"vite\"",
@@ -41,6 +41,7 @@
"express": "^5.1.0",
"express-list-endpoints": "^7.1.1",
"express-rate-limit": "^8.2.1",
"helmet": "^8.1.0",
"ioredis": "^5.8.2",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",

View File

@@ -1,6 +1,7 @@
// server.ts
import express, { Request, Response, NextFunction } from 'express';
import { randomUUID } from 'crypto';
import helmet from 'helmet';
import timeout from 'connect-timeout';
import cookieParser from 'cookie-parser';
import listEndpoints from 'express-list-endpoints';
@@ -62,6 +63,38 @@ logger.info('-----------------------------------------------\n');
const app = express();
// --- Security Headers Middleware (ADR-016) ---
// Helmet sets various HTTP headers to help protect the app from common web vulnerabilities.
// Must be applied early in the middleware chain, before any routes.
app.use(
helmet({
// Content Security Policy - configured for API + SPA frontend
contentSecurityPolicy: {
directives: {
defaultSrc: ["'self'"],
scriptSrc: ["'self'", "'unsafe-inline'"], // Allow inline scripts for React
styleSrc: ["'self'", "'unsafe-inline'"], // Allow inline styles for Tailwind
imgSrc: ["'self'", 'data:', 'blob:', 'https:'], // Allow images from various sources
fontSrc: ["'self'", 'https:', 'data:'],
connectSrc: ["'self'", 'https:', 'wss:'], // Allow API and WebSocket connections
frameSrc: ["'none'"], // Disallow iframes
objectSrc: ["'none'"], // Disallow plugins
upgradeInsecureRequests: process.env.NODE_ENV === 'production' ? [] : null,
},
},
// Cross-Origin settings for API
crossOriginEmbedderPolicy: false, // Disabled to allow loading external images
crossOriginResourcePolicy: { policy: 'cross-origin' }, // Allow cross-origin resource loading
// Additional security headers
hsts: {
maxAge: 31536000, // 1 year in seconds
includeSubDomains: true,
preload: true,
},
referrerPolicy: { policy: 'strict-origin-when-cross-origin' },
}),
);
// --- Core Middleware ---
// Increase the limit for JSON and URL-encoded bodies. This is crucial for handling large file uploads
// that are part of multipart/form-data requests, as the overall request size is checked.

View File

@@ -0,0 +1,128 @@
// src/hooks/mutations/useAddShoppingListItemMutation.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useAddShoppingListItemMutation } from './useAddShoppingListItemMutation';
import * as apiClient from '../../services/apiClient';
import * as notificationService from '../../services/notificationService';
vi.mock('../../services/apiClient');
vi.mock('../../services/notificationService');
const mockedApiClient = vi.mocked(apiClient);
const mockedNotifications = vi.mocked(notificationService);
describe('useAddShoppingListItemMutation', () => {
let queryClient: QueryClient;
const wrapper = ({ children }: { children: ReactNode }) => (
<QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
);
beforeEach(() => {
vi.resetAllMocks();
queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
mutations: { retry: false },
},
});
});
it('should add a master item to shopping list successfully', async () => {
const mockResponse = { shopping_list_item_id: 1, master_item_id: 42 };
mockedApiClient.addShoppingListItem.mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockResponse),
} as Response);
const { result } = renderHook(() => useAddShoppingListItemMutation(), { wrapper });
result.current.mutate({ listId: 1, item: { masterItemId: 42 } });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(mockedApiClient.addShoppingListItem).toHaveBeenCalledWith(1, { masterItemId: 42 });
expect(mockedNotifications.notifySuccess).toHaveBeenCalledWith('Item added to shopping list');
});
it('should add a custom item to shopping list successfully', async () => {
const mockResponse = { shopping_list_item_id: 2, custom_item_name: 'Special Milk' };
mockedApiClient.addShoppingListItem.mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockResponse),
} as Response);
const { result } = renderHook(() => useAddShoppingListItemMutation(), { wrapper });
result.current.mutate({ listId: 1, item: { customItemName: 'Special Milk' } });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(mockedApiClient.addShoppingListItem).toHaveBeenCalledWith(1, { customItemName: 'Special Milk' });
});
it('should invalidate shopping-lists query on success', async () => {
mockedApiClient.addShoppingListItem.mockResolvedValue({
ok: true,
json: () => Promise.resolve({}),
} as Response);
const invalidateQueriesSpy = vi.spyOn(queryClient, 'invalidateQueries');
const { result } = renderHook(() => useAddShoppingListItemMutation(), { wrapper });
result.current.mutate({ listId: 1, item: { masterItemId: 42 } });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(invalidateQueriesSpy).toHaveBeenCalledWith({ queryKey: ['shopping-lists'] });
});
it('should handle API error with error message', async () => {
mockedApiClient.addShoppingListItem.mockResolvedValue({
ok: false,
status: 400,
json: () => Promise.resolve({ message: 'Item already exists' }),
} as Response);
const { result } = renderHook(() => useAddShoppingListItemMutation(), { wrapper });
result.current.mutate({ listId: 1, item: { masterItemId: 42 } });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Item already exists');
expect(mockedNotifications.notifyError).toHaveBeenCalledWith('Item already exists');
});
it('should handle API error without message', async () => {
mockedApiClient.addShoppingListItem.mockResolvedValue({
ok: false,
status: 500,
json: () => Promise.reject(new Error('Parse error')),
} as Response);
const { result } = renderHook(() => useAddShoppingListItemMutation(), { wrapper });
result.current.mutate({ listId: 1, item: { masterItemId: 42 } });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Request failed with status 500');
expect(mockedNotifications.notifyError).toHaveBeenCalledWith('Request failed with status 500');
});
it('should handle network error', async () => {
mockedApiClient.addShoppingListItem.mockRejectedValue(new Error('Network error'));
const { result } = renderHook(() => useAddShoppingListItemMutation(), { wrapper });
result.current.mutate({ listId: 1, item: { masterItemId: 42 } });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Network error');
});
});

View File

@@ -0,0 +1,115 @@
// src/hooks/mutations/useAddWatchedItemMutation.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useAddWatchedItemMutation } from './useAddWatchedItemMutation';
import * as apiClient from '../../services/apiClient';
import * as notificationService from '../../services/notificationService';
vi.mock('../../services/apiClient');
vi.mock('../../services/notificationService');
const mockedApiClient = vi.mocked(apiClient);
const mockedNotifications = vi.mocked(notificationService);
describe('useAddWatchedItemMutation', () => {
let queryClient: QueryClient;
const wrapper = ({ children }: { children: ReactNode }) => (
<QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
);
beforeEach(() => {
vi.resetAllMocks();
queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
mutations: { retry: false },
},
});
});
it('should add a watched item successfully with category', async () => {
const mockResponse = { id: 1, item_name: 'Milk', category: 'Dairy' };
mockedApiClient.addWatchedItem.mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockResponse),
} as Response);
const { result } = renderHook(() => useAddWatchedItemMutation(), { wrapper });
result.current.mutate({ itemName: 'Milk', category: 'Dairy' });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(mockedApiClient.addWatchedItem).toHaveBeenCalledWith('Milk', 'Dairy');
expect(mockedNotifications.notifySuccess).toHaveBeenCalledWith('Item added to watched list');
});
it('should add a watched item without category', async () => {
const mockResponse = { id: 1, item_name: 'Bread' };
mockedApiClient.addWatchedItem.mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockResponse),
} as Response);
const { result } = renderHook(() => useAddWatchedItemMutation(), { wrapper });
result.current.mutate({ itemName: 'Bread' });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(mockedApiClient.addWatchedItem).toHaveBeenCalledWith('Bread', '');
});
it('should invalidate watched-items query on success', async () => {
mockedApiClient.addWatchedItem.mockResolvedValue({
ok: true,
json: () => Promise.resolve({ id: 1 }),
} as Response);
const invalidateQueriesSpy = vi.spyOn(queryClient, 'invalidateQueries');
const { result } = renderHook(() => useAddWatchedItemMutation(), { wrapper });
result.current.mutate({ itemName: 'Eggs' });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(invalidateQueriesSpy).toHaveBeenCalledWith({ queryKey: ['watched-items'] });
});
it('should handle API error with error message', async () => {
mockedApiClient.addWatchedItem.mockResolvedValue({
ok: false,
status: 409,
json: () => Promise.resolve({ message: 'Item already watched' }),
} as Response);
const { result } = renderHook(() => useAddWatchedItemMutation(), { wrapper });
result.current.mutate({ itemName: 'Milk' });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Item already watched');
expect(mockedNotifications.notifyError).toHaveBeenCalledWith('Item already watched');
});
it('should handle API error without message', async () => {
mockedApiClient.addWatchedItem.mockResolvedValue({
ok: false,
status: 500,
json: () => Promise.reject(new Error('Parse error')),
} as Response);
const { result } = renderHook(() => useAddWatchedItemMutation(), { wrapper });
result.current.mutate({ itemName: 'Cheese' });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Request failed with status 500');
});
});

View File

@@ -0,0 +1,99 @@
// src/hooks/mutations/useCreateShoppingListMutation.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useCreateShoppingListMutation } from './useCreateShoppingListMutation';
import * as apiClient from '../../services/apiClient';
import * as notificationService from '../../services/notificationService';
vi.mock('../../services/apiClient');
vi.mock('../../services/notificationService');
const mockedApiClient = vi.mocked(apiClient);
const mockedNotifications = vi.mocked(notificationService);
describe('useCreateShoppingListMutation', () => {
let queryClient: QueryClient;
const wrapper = ({ children }: { children: ReactNode }) => (
<QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
);
beforeEach(() => {
vi.resetAllMocks();
queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
mutations: { retry: false },
},
});
});
it('should create a shopping list successfully', async () => {
const mockResponse = { shopping_list_id: 1, name: 'Weekly Groceries' };
mockedApiClient.createShoppingList.mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockResponse),
} as Response);
const { result } = renderHook(() => useCreateShoppingListMutation(), { wrapper });
result.current.mutate({ name: 'Weekly Groceries' });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(mockedApiClient.createShoppingList).toHaveBeenCalledWith('Weekly Groceries');
expect(mockedNotifications.notifySuccess).toHaveBeenCalledWith('Shopping list created');
});
it('should invalidate shopping-lists query on success', async () => {
mockedApiClient.createShoppingList.mockResolvedValue({
ok: true,
json: () => Promise.resolve({ shopping_list_id: 1 }),
} as Response);
const invalidateQueriesSpy = vi.spyOn(queryClient, 'invalidateQueries');
const { result } = renderHook(() => useCreateShoppingListMutation(), { wrapper });
result.current.mutate({ name: 'Test List' });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(invalidateQueriesSpy).toHaveBeenCalledWith({ queryKey: ['shopping-lists'] });
});
it('should handle API error with error message', async () => {
mockedApiClient.createShoppingList.mockResolvedValue({
ok: false,
status: 400,
json: () => Promise.resolve({ message: 'List name already exists' }),
} as Response);
const { result } = renderHook(() => useCreateShoppingListMutation(), { wrapper });
result.current.mutate({ name: 'Duplicate List' });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('List name already exists');
expect(mockedNotifications.notifyError).toHaveBeenCalledWith('List name already exists');
});
it('should handle API error without message', async () => {
mockedApiClient.createShoppingList.mockResolvedValue({
ok: false,
status: 500,
json: () => Promise.reject(new Error('Parse error')),
} as Response);
const { result } = renderHook(() => useCreateShoppingListMutation(), { wrapper });
result.current.mutate({ name: 'Test' });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Request failed with status 500');
});
});

View File

@@ -0,0 +1,99 @@
// src/hooks/mutations/useDeleteShoppingListMutation.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useDeleteShoppingListMutation } from './useDeleteShoppingListMutation';
import * as apiClient from '../../services/apiClient';
import * as notificationService from '../../services/notificationService';
vi.mock('../../services/apiClient');
vi.mock('../../services/notificationService');
const mockedApiClient = vi.mocked(apiClient);
const mockedNotifications = vi.mocked(notificationService);
describe('useDeleteShoppingListMutation', () => {
let queryClient: QueryClient;
const wrapper = ({ children }: { children: ReactNode }) => (
<QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
);
beforeEach(() => {
vi.resetAllMocks();
queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
mutations: { retry: false },
},
});
});
it('should delete a shopping list successfully', async () => {
const mockResponse = { success: true };
mockedApiClient.deleteShoppingList.mockResolvedValue({
ok: true,
json: () => Promise.resolve(mockResponse),
} as Response);
const { result } = renderHook(() => useDeleteShoppingListMutation(), { wrapper });
result.current.mutate({ listId: 123 });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(mockedApiClient.deleteShoppingList).toHaveBeenCalledWith(123);
expect(mockedNotifications.notifySuccess).toHaveBeenCalledWith('Shopping list deleted');
});
it('should invalidate shopping-lists query on success', async () => {
mockedApiClient.deleteShoppingList.mockResolvedValue({
ok: true,
json: () => Promise.resolve({ success: true }),
} as Response);
const invalidateQueriesSpy = vi.spyOn(queryClient, 'invalidateQueries');
const { result } = renderHook(() => useDeleteShoppingListMutation(), { wrapper });
result.current.mutate({ listId: 456 });
await waitFor(() => expect(result.current.isSuccess).toBe(true));
expect(invalidateQueriesSpy).toHaveBeenCalledWith({ queryKey: ['shopping-lists'] });
});
it('should handle API error with error message', async () => {
mockedApiClient.deleteShoppingList.mockResolvedValue({
ok: false,
status: 404,
json: () => Promise.resolve({ message: 'Shopping list not found' }),
} as Response);
const { result } = renderHook(() => useDeleteShoppingListMutation(), { wrapper });
result.current.mutate({ listId: 999 });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Shopping list not found');
expect(mockedNotifications.notifyError).toHaveBeenCalledWith('Shopping list not found');
});
it('should handle API error without message', async () => {
mockedApiClient.deleteShoppingList.mockResolvedValue({
ok: false,
status: 500,
json: () => Promise.reject(new Error('Parse error')),
} as Response);
const { result } = renderHook(() => useDeleteShoppingListMutation(), { wrapper });
result.current.mutate({ listId: 123 });
await waitFor(() => expect(result.current.isError).toBe(true));
expect(result.current.error?.message).toBe('Request failed with status 500');
});
});

View File

@@ -0,0 +1,99 @@
// src/hooks/mutations/useRemoveShoppingListItemMutation.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useRemoveShoppingListItemMutation } from './useRemoveShoppingListItemMutation';
import * as apiClient from '../../services/apiClient';
import * as notificationService from '../../services/notificationService';
vi.mock('../../services/apiClient');
vi.mock('../../services/notificationService');
const api = vi.mocked(apiClient);
const notify = vi.mocked(notificationService);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useRemoveShoppingListItemMutation', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({
      defaultOptions: { queries: { retry: false }, mutations: { retry: false } },
    });
  });
  const renderMutation = () =>
    renderHook(() => useRemoveShoppingListItemMutation(), { wrapper });
  it('should remove an item from shopping list successfully', async () => {
    api.removeShoppingListItem.mockResolvedValue(okJson({ success: true }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42 });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.removeShoppingListItem).toHaveBeenCalledWith(42);
    expect(notify.notifySuccess).toHaveBeenCalledWith('Item removed from shopping list');
  });
  it('should invalidate shopping-lists query on success', async () => {
    api.removeShoppingListItem.mockResolvedValue(okJson({ success: true }));
    const invalidateSpy = vi.spyOn(queryClient, 'invalidateQueries');
    const { result } = renderMutation();
    result.current.mutate({ itemId: 100 });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(invalidateSpy).toHaveBeenCalledWith({ queryKey: ['shopping-lists'] });
  });
  it('should handle API error with error message', async () => {
    api.removeShoppingListItem.mockResolvedValue(errJson(404, { message: 'Item not found' }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 999 });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Item not found');
    expect(notify.notifyError).toHaveBeenCalledWith('Item not found');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.removeShoppingListItem.mockResolvedValue(errJson(500));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42 });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
});

View File

@@ -0,0 +1,99 @@
// src/hooks/mutations/useRemoveWatchedItemMutation.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useRemoveWatchedItemMutation } from './useRemoveWatchedItemMutation';
import * as apiClient from '../../services/apiClient';
import * as notificationService from '../../services/notificationService';
vi.mock('../../services/apiClient');
vi.mock('../../services/notificationService');
const api = vi.mocked(apiClient);
const notify = vi.mocked(notificationService);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useRemoveWatchedItemMutation', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({
      defaultOptions: { queries: { retry: false }, mutations: { retry: false } },
    });
  });
  const renderMutation = () =>
    renderHook(() => useRemoveWatchedItemMutation(), { wrapper });
  it('should remove a watched item successfully', async () => {
    api.removeWatchedItem.mockResolvedValue(okJson({ success: true }));
    const { result } = renderMutation();
    result.current.mutate({ masterItemId: 123 });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.removeWatchedItem).toHaveBeenCalledWith(123);
    expect(notify.notifySuccess).toHaveBeenCalledWith('Item removed from watched list');
  });
  it('should invalidate watched-items query on success', async () => {
    api.removeWatchedItem.mockResolvedValue(okJson({ success: true }));
    const invalidateSpy = vi.spyOn(queryClient, 'invalidateQueries');
    const { result } = renderMutation();
    result.current.mutate({ masterItemId: 456 });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(invalidateSpy).toHaveBeenCalledWith({ queryKey: ['watched-items'] });
  });
  it('should handle API error with error message', async () => {
    api.removeWatchedItem.mockResolvedValue(errJson(404, { message: 'Watched item not found' }));
    const { result } = renderMutation();
    result.current.mutate({ masterItemId: 999 });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Watched item not found');
    expect(notify.notifyError).toHaveBeenCalledWith('Watched item not found');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.removeWatchedItem.mockResolvedValue(errJson(500));
    const { result } = renderMutation();
    result.current.mutate({ masterItemId: 123 });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
});

View File

@@ -0,0 +1,159 @@
// src/hooks/mutations/useUpdateShoppingListItemMutation.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useUpdateShoppingListItemMutation } from './useUpdateShoppingListItemMutation';
import * as apiClient from '../../services/apiClient';
import * as notificationService from '../../services/notificationService';
vi.mock('../../services/apiClient');
vi.mock('../../services/notificationService');
const api = vi.mocked(apiClient);
const notify = vi.mocked(notificationService);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useUpdateShoppingListItemMutation', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({
      defaultOptions: { queries: { retry: false }, mutations: { retry: false } },
    });
  });
  const renderMutation = () =>
    renderHook(() => useUpdateShoppingListItemMutation(), { wrapper });
  it('should update a shopping list item successfully', async () => {
    api.updateShoppingListItem.mockResolvedValue(okJson({ id: 42, quantity: 3 }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42, updates: { quantity: 3 } });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.updateShoppingListItem).toHaveBeenCalledWith(42, { quantity: 3 });
    expect(notify.notifySuccess).toHaveBeenCalledWith('Shopping list item updated');
  });
  it('should update is_purchased status', async () => {
    api.updateShoppingListItem.mockResolvedValue(okJson({ id: 42, is_purchased: true }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42, updates: { is_purchased: true } });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.updateShoppingListItem).toHaveBeenCalledWith(42, { is_purchased: true });
  });
  it('should update custom_item_name', async () => {
    api.updateShoppingListItem.mockResolvedValue(okJson({ id: 42, custom_item_name: 'Organic Milk' }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42, updates: { custom_item_name: 'Organic Milk' } });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.updateShoppingListItem).toHaveBeenCalledWith(42, { custom_item_name: 'Organic Milk' });
  });
  it('should update notes', async () => {
    api.updateShoppingListItem.mockResolvedValue(okJson({ id: 42, notes: 'Get the 2% variety' }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42, updates: { notes: 'Get the 2% variety' } });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.updateShoppingListItem).toHaveBeenCalledWith(42, { notes: 'Get the 2% variety' });
  });
  it('should update multiple fields at once', async () => {
    api.updateShoppingListItem.mockResolvedValue(okJson({ id: 42, quantity: 2, notes: 'Important' }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42, updates: { quantity: 2, notes: 'Important' } });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.updateShoppingListItem).toHaveBeenCalledWith(42, { quantity: 2, notes: 'Important' });
  });
  it('should invalidate shopping-lists query on success', async () => {
    api.updateShoppingListItem.mockResolvedValue(okJson({ id: 42 }));
    const invalidateSpy = vi.spyOn(queryClient, 'invalidateQueries');
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42, updates: { quantity: 5 } });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(invalidateSpy).toHaveBeenCalledWith({ queryKey: ['shopping-lists'] });
  });
  it('should handle API error with error message', async () => {
    api.updateShoppingListItem.mockResolvedValue(errJson(404, { message: 'Item not found' }));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 999, updates: { quantity: 1 } });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Item not found');
    expect(notify.notifyError).toHaveBeenCalledWith('Item not found');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.updateShoppingListItem.mockResolvedValue(errJson(500));
    const { result } = renderMutation();
    result.current.mutate({ itemId: 42, updates: { quantity: 1 } });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
});

View File

@@ -0,0 +1,102 @@
// src/hooks/queries/useActivityLogQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useActivityLogQuery } from './useActivityLogQuery';
import * as apiClient from '../../services/apiClient';
vi.mock('../../services/apiClient');
const api = vi.mocked(apiClient);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useActivityLogQuery', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
  });
  it('should fetch activity log with default params', async () => {
    const entries = [
      { id: 1, action: 'user_login', timestamp: '2024-01-01T10:00:00Z' },
      { id: 2, action: 'flyer_uploaded', timestamp: '2024-01-01T11:00:00Z' },
    ];
    api.fetchActivityLog.mockResolvedValue(okJson(entries));
    const { result } = renderHook(() => useActivityLogQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchActivityLog).toHaveBeenCalledWith(20, 0);
    expect(result.current.data).toEqual(entries);
  });
  it('should fetch activity log with custom limit and offset', async () => {
    const entries = [{ id: 3, action: 'item_added', timestamp: '2024-01-01T12:00:00Z' }];
    api.fetchActivityLog.mockResolvedValue(okJson(entries));
    const { result } = renderHook(() => useActivityLogQuery(10, 5), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchActivityLog).toHaveBeenCalledWith(10, 5);
    expect(result.current.data).toEqual(entries);
  });
  it('should handle API error with error message', async () => {
    api.fetchActivityLog.mockResolvedValue(errJson(403, { message: 'Admin access required' }));
    const { result } = renderHook(() => useActivityLogQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Admin access required');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.fetchActivityLog.mockResolvedValue(errJson(500));
    const { result } = renderHook(() => useActivityLogQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
  it('should return empty array for no activity log entries', async () => {
    api.fetchActivityLog.mockResolvedValue(okJson([]));
    const { result } = renderHook(() => useActivityLogQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -0,0 +1,78 @@
// src/hooks/queries/useApplicationStatsQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useApplicationStatsQuery } from './useApplicationStatsQuery';
import * as apiClient from '../../services/apiClient';
vi.mock('../../services/apiClient');
const api = vi.mocked(apiClient);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useApplicationStatsQuery', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
  });
  it('should fetch application stats successfully', async () => {
    const stats = {
      flyerCount: 150,
      userCount: 500,
      flyerItemCount: 5000,
      storeCount: 25,
      pendingCorrectionsCount: 10,
      recipeCount: 75,
    };
    api.getApplicationStats.mockResolvedValue(okJson(stats));
    const { result } = renderHook(() => useApplicationStatsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.getApplicationStats).toHaveBeenCalled();
    expect(result.current.data).toEqual(stats);
  });
  it('should handle API error with error message', async () => {
    api.getApplicationStats.mockResolvedValue(errJson(403, { message: 'Admin access required' }));
    const { result } = renderHook(() => useApplicationStatsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Admin access required');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.getApplicationStats.mockResolvedValue(errJson(500));
    const { result } = renderHook(() => useApplicationStatsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
});

View File

@@ -0,0 +1,88 @@
// src/hooks/queries/useCategoriesQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useCategoriesQuery } from './useCategoriesQuery';
import * as apiClient from '../../services/apiClient';
vi.mock('../../services/apiClient');
const api = vi.mocked(apiClient);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useCategoriesQuery', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
  });
  it('should fetch categories successfully', async () => {
    const categories = [
      { category_id: 1, name: 'Dairy' },
      { category_id: 2, name: 'Bakery' },
      { category_id: 3, name: 'Produce' },
    ];
    api.fetchCategories.mockResolvedValue(okJson(categories));
    const { result } = renderHook(() => useCategoriesQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchCategories).toHaveBeenCalled();
    expect(result.current.data).toEqual(categories);
  });
  it('should handle API error with error message', async () => {
    api.fetchCategories.mockResolvedValue(errJson(500, { message: 'Database error' }));
    const { result } = renderHook(() => useCategoriesQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Database error');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.fetchCategories.mockResolvedValue(errJson(500));
    const { result } = renderHook(() => useCategoriesQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
  it('should return empty array for no categories', async () => {
    api.fetchCategories.mockResolvedValue(okJson([]));
    const { result } = renderHook(() => useCategoriesQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -0,0 +1,111 @@
// src/hooks/queries/useFlyerItemsQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useFlyerItemsQuery } from './useFlyerItemsQuery';
import * as apiClient from '../../services/apiClient';
vi.mock('../../services/apiClient');
const api = vi.mocked(apiClient);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useFlyerItemsQuery', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
  });
  it('should fetch flyer items when flyerId is provided', async () => {
    const flyerItems = [
      { item_id: 1, name: 'Milk', price: 3.99, flyer_id: 42 },
      { item_id: 2, name: 'Bread', price: 2.49, flyer_id: 42 },
    ];
    api.fetchFlyerItems.mockResolvedValue(okJson({ items: flyerItems }));
    const { result } = renderHook(() => useFlyerItemsQuery(42), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchFlyerItems).toHaveBeenCalledWith(42);
    expect(result.current.data).toEqual(flyerItems);
  });
  it('should not fetch when flyerId is undefined', async () => {
    const { result } = renderHook(() => useFlyerItemsQuery(undefined), { wrapper });
    // Give the query a moment; it should stay idle with no API call.
    await new Promise((resolve) => setTimeout(resolve, 100));
    expect(api.fetchFlyerItems).not.toHaveBeenCalled();
    expect(result.current.isLoading).toBe(false);
    expect(result.current.isFetching).toBe(false);
  });
  it('should handle API error with error message', async () => {
    api.fetchFlyerItems.mockResolvedValue(errJson(404, { message: 'Flyer not found' }));
    const { result } = renderHook(() => useFlyerItemsQuery(999), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Flyer not found');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.fetchFlyerItems.mockResolvedValue(errJson(500));
    const { result } = renderHook(() => useFlyerItemsQuery(42), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
  it('should return empty array when API returns no items', async () => {
    api.fetchFlyerItems.mockResolvedValue(okJson({ items: [] }));
    const { result } = renderHook(() => useFlyerItemsQuery(42), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
  it('should handle response without items property', async () => {
    api.fetchFlyerItems.mockResolvedValue(okJson({}));
    const { result } = renderHook(() => useFlyerItemsQuery(42), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -0,0 +1,102 @@
// src/hooks/queries/useFlyersQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useFlyersQuery } from './useFlyersQuery';
import * as apiClient from '../../services/apiClient';
vi.mock('../../services/apiClient');
const api = vi.mocked(apiClient);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useFlyersQuery', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
  });
  it('should fetch flyers successfully with default params', async () => {
    const flyers = [
      { flyer_id: 1, store_name: 'Store A', valid_from: '2024-01-01', valid_to: '2024-01-07' },
      { flyer_id: 2, store_name: 'Store B', valid_from: '2024-01-01', valid_to: '2024-01-07' },
    ];
    api.fetchFlyers.mockResolvedValue(okJson(flyers));
    const { result } = renderHook(() => useFlyersQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchFlyers).toHaveBeenCalledWith(20, 0);
    expect(result.current.data).toEqual(flyers);
  });
  it('should fetch flyers with custom limit and offset', async () => {
    const flyers = [{ flyer_id: 3, store_name: 'Store C' }];
    api.fetchFlyers.mockResolvedValue(okJson(flyers));
    const { result } = renderHook(() => useFlyersQuery(10, 5), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchFlyers).toHaveBeenCalledWith(10, 5);
    expect(result.current.data).toEqual(flyers);
  });
  it('should handle API error with error message', async () => {
    api.fetchFlyers.mockResolvedValue(errJson(500, { message: 'Server error' }));
    const { result } = renderHook(() => useFlyersQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Server error');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.fetchFlyers.mockResolvedValue(errJson(500));
    const { result } = renderHook(() => useFlyersQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
  it('should return empty array for no flyers', async () => {
    api.fetchFlyers.mockResolvedValue(okJson([]));
    const { result } = renderHook(() => useFlyersQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -0,0 +1,88 @@
// src/hooks/queries/useMasterItemsQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useMasterItemsQuery } from './useMasterItemsQuery';
import * as apiClient from '../../services/apiClient';
vi.mock('../../services/apiClient');
const api = vi.mocked(apiClient);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useMasterItemsQuery', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
  });
  it('should fetch master items successfully', async () => {
    const masterItems = [
      { master_item_id: 1, name: 'Milk', category: 'Dairy' },
      { master_item_id: 2, name: 'Bread', category: 'Bakery' },
      { master_item_id: 3, name: 'Eggs', category: 'Dairy' },
    ];
    api.fetchMasterItems.mockResolvedValue(okJson(masterItems));
    const { result } = renderHook(() => useMasterItemsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchMasterItems).toHaveBeenCalled();
    expect(result.current.data).toEqual(masterItems);
  });
  it('should handle API error with error message', async () => {
    api.fetchMasterItems.mockResolvedValue(errJson(500, { message: 'Database error' }));
    const { result } = renderHook(() => useMasterItemsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Database error');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.fetchMasterItems.mockResolvedValue(errJson(500));
    const { result } = renderHook(() => useMasterItemsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
  it('should return empty array for no master items', async () => {
    api.fetchMasterItems.mockResolvedValue(okJson([]));
    const { result } = renderHook(() => useMasterItemsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -0,0 +1,98 @@
// src/hooks/queries/useShoppingListsQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useShoppingListsQuery } from './useShoppingListsQuery';
import * as apiClient from '../../services/apiClient';
vi.mock('../../services/apiClient');
const api = vi.mocked(apiClient);
// Minimal fetch-style Response stubs used to drive the mocked API client.
const okJson = (body: unknown) =>
  ({ ok: true, json: () => Promise.resolve(body) } as Response);
const errJson = (status: number, body?: unknown) =>
  ({
    ok: false,
    status,
    json: () =>
      body === undefined ? Promise.reject(new Error('Parse error')) : Promise.resolve(body),
  } as Response);
describe('useShoppingListsQuery', () => {
  let queryClient: QueryClient;
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );
  beforeEach(() => {
    vi.resetAllMocks();
    // Retries are disabled so error states surface immediately in tests.
    queryClient = new QueryClient({ defaultOptions: { queries: { retry: false } } });
  });
  it('should fetch shopping lists when enabled', async () => {
    const shoppingLists = [
      { shopping_list_id: 1, name: 'Weekly Groceries', items: [] },
      { shopping_list_id: 2, name: 'Party Supplies', items: [] },
    ];
    api.fetchShoppingLists.mockResolvedValue(okJson(shoppingLists));
    const { result } = renderHook(() => useShoppingListsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(api.fetchShoppingLists).toHaveBeenCalled();
    expect(result.current.data).toEqual(shoppingLists);
  });
  it('should not fetch shopping lists when disabled', async () => {
    const { result } = renderHook(() => useShoppingListsQuery(false), { wrapper });
    // Give the query a moment; it should stay idle with no API call.
    await new Promise((resolve) => setTimeout(resolve, 100));
    expect(api.fetchShoppingLists).not.toHaveBeenCalled();
    expect(result.current.isLoading).toBe(false);
    expect(result.current.isFetching).toBe(false);
  });
  it('should handle API error with error message', async () => {
    api.fetchShoppingLists.mockResolvedValue(errJson(401, { message: 'Unauthorized' }));
    const { result } = renderHook(() => useShoppingListsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Unauthorized');
  });
  it('should handle API error without message', async () => {
    // json() rejects, so the hook falls back to a status-based message.
    api.fetchShoppingLists.mockResolvedValue(errJson(500));
    const { result } = renderHook(() => useShoppingListsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });
  it('should return empty array for no shopping lists', async () => {
    api.fetchShoppingLists.mockResolvedValue(okJson([]));
    const { result } = renderHook(() => useShoppingListsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -0,0 +1,87 @@
// src/hooks/queries/useSuggestedCorrectionsQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useSuggestedCorrectionsQuery } from './useSuggestedCorrectionsQuery';
import * as apiClient from '../../services/apiClient';

vi.mock('../../services/apiClient');
const mockedApiClient = vi.mocked(apiClient);

/** Builds a minimal fake `Response` whose json() resolves with `body`. */
const jsonResponse = (body: unknown, ok = true, status = 200) =>
  ({ ok, status, json: () => Promise.resolve(body) }) as Response;

describe('useSuggestedCorrectionsQuery', () => {
  let queryClient: QueryClient;

  // Each rendered hook shares a fresh QueryClient with retries disabled so
  // error states surface immediately instead of being retried.
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );

  beforeEach(() => {
    vi.resetAllMocks();
    queryClient = new QueryClient({
      defaultOptions: {
        queries: { retry: false },
      },
    });
  });

  it('should fetch suggested corrections successfully', async () => {
    const pendingCorrections = [
      { correction_id: 1, item_name: 'Milk', suggested_name: 'Whole Milk', status: 'pending' },
      { correction_id: 2, item_name: 'Bread', suggested_name: 'White Bread', status: 'pending' },
    ];
    mockedApiClient.getSuggestedCorrections.mockResolvedValue(jsonResponse(pendingCorrections));
    const { result } = renderHook(() => useSuggestedCorrectionsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(mockedApiClient.getSuggestedCorrections).toHaveBeenCalled();
    expect(result.current.data).toEqual(pendingCorrections);
  });

  it('should handle API error with error message', async () => {
    // A non-OK response with a JSON body should surface the server message.
    mockedApiClient.getSuggestedCorrections.mockResolvedValue(
      jsonResponse({ message: 'Admin access required' }, false, 403),
    );
    const { result } = renderHook(() => useSuggestedCorrectionsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Admin access required');
  });

  it('should handle API error without message', async () => {
    // json() rejecting simulates a non-JSON error body; the hook is expected
    // to fall back to a status-based message.
    mockedApiClient.getSuggestedCorrections.mockResolvedValue({
      ok: false,
      status: 500,
      json: () => Promise.reject(new Error('Parse error')),
    } as Response);
    const { result } = renderHook(() => useSuggestedCorrectionsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });

  it('should return empty array for no corrections', async () => {
    mockedApiClient.getSuggestedCorrections.mockResolvedValue(jsonResponse([]));
    const { result } = renderHook(() => useSuggestedCorrectionsQuery(), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -0,0 +1,98 @@
// src/hooks/queries/useWatchedItemsQuery.test.tsx
import { renderHook, waitFor } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { ReactNode } from 'react';
import { useWatchedItemsQuery } from './useWatchedItemsQuery';
import * as apiClient from '../../services/apiClient';

vi.mock('../../services/apiClient');
const mockedApiClient = vi.mocked(apiClient);

/** Builds a minimal fake `Response` whose json() resolves with `body`. */
const jsonResponse = (body: unknown, ok = true, status = 200) =>
  ({ ok, status, json: () => Promise.resolve(body) }) as Response;

describe('useWatchedItemsQuery', () => {
  let queryClient: QueryClient;

  // Fresh QueryClient per test, retries off, so failures surface immediately.
  const wrapper = ({ children }: { children: ReactNode }) => (
    <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
  );

  beforeEach(() => {
    vi.resetAllMocks();
    queryClient = new QueryClient({
      defaultOptions: {
        queries: { retry: false },
      },
    });
  });

  it('should fetch watched items when enabled', async () => {
    const watched = [
      { master_item_id: 1, name: 'Milk', category: 'Dairy' },
      { master_item_id: 2, name: 'Bread', category: 'Bakery' },
    ];
    mockedApiClient.fetchWatchedItems.mockResolvedValue(jsonResponse(watched));
    const { result } = renderHook(() => useWatchedItemsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(mockedApiClient.fetchWatchedItems).toHaveBeenCalled();
    expect(result.current.data).toEqual(watched);
  });

  it('should not fetch watched items when disabled', async () => {
    const { result } = renderHook(() => useWatchedItemsQuery(false), { wrapper });
    // Give the query a grace period; a disabled query must never fire.
    await new Promise((resolve) => setTimeout(resolve, 100));
    expect(mockedApiClient.fetchWatchedItems).not.toHaveBeenCalled();
    expect(result.current.isLoading).toBe(false);
    expect(result.current.isFetching).toBe(false);
  });

  it('should handle API error with error message', async () => {
    mockedApiClient.fetchWatchedItems.mockResolvedValue(
      jsonResponse({ message: 'Unauthorized' }, false, 401),
    );
    const { result } = renderHook(() => useWatchedItemsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Unauthorized');
  });

  it('should handle API error without message', async () => {
    // json() rejecting simulates an unparseable error body; the hook falls
    // back to a status-based message.
    mockedApiClient.fetchWatchedItems.mockResolvedValue({
      ok: false,
      status: 500,
      json: () => Promise.reject(new Error('Parse error')),
    } as Response);
    const { result } = renderHook(() => useWatchedItemsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isError).toBe(true));
    expect(result.current.error?.message).toBe('Request failed with status 500');
  });

  it('should return empty array for no watched items', async () => {
    mockedApiClient.fetchWatchedItems.mockResolvedValue(jsonResponse([]));
    const { result } = renderHook(() => useWatchedItemsQuery(true), { wrapper });
    await waitFor(() => expect(result.current.isSuccess).toBe(true));
    expect(result.current.data).toEqual([]);
  });
});

View File

@@ -8,6 +8,7 @@ import { z } from 'zod';
import * as db from '../services/db/index.db';
import type { UserProfile } from '../types';
import { geocodingService } from '../services/geocodingService.server';
import { cacheService } from '../services/cacheService.server';
import { requireFileUpload } from '../middleware/fileUpload.middleware'; // This was a duplicate, fixed.
import {
createUploadMiddleware,
@@ -635,6 +636,44 @@ router.post(
},
);
/**
 * POST /api/admin/system/clear-cache - Clears the application data cache.
 * Drops the cached flyer, brand, and stats entries from Redis in parallel
 * and reports how many keys were removed in each group.
 * Requires admin privileges (router-level auth) and is rate limited.
 */
router.post(
  '/system/clear-cache',
  adminTriggerLimiter,
  validateRequest(emptySchema),
  async (req: Request, res: Response, next: NextFunction) => {
    const { user } = req.user as UserProfile;
    req.log.info(
      `[Admin] Manual cache clear received from user: ${user.user_id}`,
    );
    try {
      // The three invalidation groups are independent, so run them concurrently.
      const [flyers, brands, stats] = await Promise.all([
        cacheService.invalidateFlyers(req.log),
        cacheService.invalidateBrands(req.log),
        cacheService.invalidateStats(req.log),
      ]);
      const totalDeleted = flyers + brands + stats;
      res.status(200).json({
        message: `Successfully cleared the application cache. ${totalDeleted} keys were removed.`,
        details: { flyers, brands, stats },
      });
    } catch (error) {
      req.log.error({ error }, '[Admin] Failed to clear application cache.');
      next(error);
    }
  },
);
/* Catches errors from multer (e.g., file size, file filter) */
router.use(handleMulterError);

View File

@@ -234,6 +234,9 @@ router.post(
* POST /api/ai/upload-legacy - Process a flyer upload from a legacy client.
* This is an authenticated route that processes the flyer synchronously.
* This is used for integration testing the legacy upload flow.
*
* @deprecated Use POST /api/ai/upload-and-process instead for async queue-based processing (ADR-0006).
* This synchronous endpoint is retained only for integration testing purposes.
*/
router.post(
'/upload-legacy',
@@ -282,9 +285,12 @@ router.get(
);
/**
* This endpoint saves the processed flyer data to the database. It is the final step
* in the flyer upload workflow after the AI has extracted the data.
* POST /api/ai/flyers/process - Saves the processed flyer data to the database.
* This is the final step in the flyer upload workflow after the AI has extracted the data.
* It uses `optionalAuth` to handle submissions from both anonymous and authenticated users.
*
* @deprecated Use POST /api/ai/upload-and-process instead for async queue-based processing (ADR-0006).
* This synchronous endpoint processes flyers inline and should be migrated to the queue-based approach.
*/
router.post(
'/flyers/process',

View File

@@ -0,0 +1,168 @@
// src/schemas/flyer.schemas.test.ts
import { describe, it, expect } from 'vitest';
import { flyerInsertSchema, flyerDbInsertSchema } from './flyer.schemas';
// Validates the public insert schema. Each test perturbs exactly one field of
// the known-good baseline, so a failure pinpoints the offending constraint.
// NOTE: assertions on error.issues[0].message rely on zod reporting the
// perturbed field's issue first, which holds because only one field is invalid.
describe('flyerInsertSchema', () => {
// Baseline payload that satisfies every constraint of flyerInsertSchema.
const validFlyer = {
file_name: 'flyer.jpg',
image_url: 'https://example.com/flyer.jpg',
icon_url: 'https://example.com/icon.jpg',
checksum: 'a'.repeat(64),
store_name: 'Test Store',
valid_from: '2023-01-01T00:00:00Z',
valid_to: '2023-01-07T00:00:00Z',
store_address: '123 Main St',
status: 'processed',
item_count: 10,
uploaded_by: '123e4567-e89b-12d3-a456-426614174000',
};
it('should validate a correct flyer object', () => {
const result = flyerInsertSchema.safeParse(validFlyer);
expect(result.success).toBe(true);
});
it('should fail if file_name is missing or empty', () => {
const invalid = { ...validFlyer, file_name: '' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe('File name is required');
}
});
// Only http/https URLs are accepted for flyer images; other schemes are rejected.
it('should fail if image_url is invalid', () => {
const invalid = { ...validFlyer, image_url: 'ftp://invalid.com' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe(
'Flyer image URL must be a valid HTTP or HTTPS URL',
);
}
});
it('should fail if icon_url is invalid', () => {
const invalid = { ...validFlyer, icon_url: 'not-a-url' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
});
// Checksums must be exactly 64 hex chars (SHA-256 hex digest length) or null.
it('should fail if checksum length is incorrect', () => {
const invalid = { ...validFlyer, checksum: 'abc' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe('Checksum must be 64 characters');
}
});
it('should fail if checksum is not hex', () => {
const invalid = { ...validFlyer, checksum: 'z'.repeat(64) };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe('Checksum must be a valid hexadecimal string');
}
});
it('should allow null checksum', () => {
const valid = { ...validFlyer, checksum: null };
const result = flyerInsertSchema.safeParse(valid);
expect(result.success).toBe(true);
});
it('should fail if store_name is missing', () => {
const invalid = { ...validFlyer, store_name: '' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
});
it('should validate valid_from and valid_to as datetimes', () => {
const invalid = { ...validFlyer, valid_from: 'not-a-date' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
});
// Validity window and address are optional metadata; explicit null is accepted.
it('should allow null valid_from, valid_to, store_address', () => {
const valid = {
...validFlyer,
valid_from: null,
valid_to: null,
store_address: null,
};
const result = flyerInsertSchema.safeParse(valid);
expect(result.success).toBe(true);
});
it('should validate status enum', () => {
const invalid = { ...validFlyer, status: 'invalid_status' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
});
it('should fail if item_count is negative', () => {
const invalid = { ...validFlyer, item_count: -1 };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe('Item count must be non-negative');
}
});
it('should validate uploaded_by as UUID if present', () => {
const invalid = { ...validFlyer, uploaded_by: 'not-a-uuid' };
const result = flyerInsertSchema.safeParse(invalid);
expect(result.success).toBe(false);
});
// Anonymous uploads: uploaded_by may be null or omitted entirely.
it('should allow null or undefined uploaded_by', () => {
const validNull = { ...validFlyer, uploaded_by: null };
expect(flyerInsertSchema.safeParse(validNull).success).toBe(true);
const validUndefined = { ...validFlyer, uploaded_by: undefined };
expect(flyerInsertSchema.safeParse(validUndefined).success).toBe(true);
});
});
// The DB-level schema differs from the public one by requiring a resolved
// numeric store_id instead of a free-form store_name.
describe('flyerDbInsertSchema', () => {
  // Fully-populated payload satisfying every constraint of the DB schema.
  const baseDbFlyer = {
    file_name: 'flyer.jpg',
    image_url: 'https://example.com/flyer.jpg',
    icon_url: 'https://example.com/icon.jpg',
    checksum: 'a'.repeat(64),
    store_id: 1,
    valid_from: '2023-01-01T00:00:00Z',
    valid_to: '2023-01-07T00:00:00Z',
    store_address: '123 Main St',
    status: 'processed',
    item_count: 10,
    uploaded_by: '123e4567-e89b-12d3-a456-426614174000',
  };

  it('should validate a correct DB flyer object', () => {
    expect(flyerDbInsertSchema.safeParse(baseDbFlyer).success).toBe(true);
  });

  it('should fail if store_id is missing', () => {
    const { store_id: _omitted, ...withoutStoreId } = baseDbFlyer;
    expect(flyerDbInsertSchema.safeParse(withoutStoreId).success).toBe(false);
  });

  it('should fail if store_id is not positive', () => {
    const parsed = flyerDbInsertSchema.safeParse({ ...baseDbFlyer, store_id: 0 });
    expect(parsed.success).toBe(false);
    if (!parsed.success) {
      expect(parsed.error.issues[0].message).toBe('Store ID must be a positive integer');
    }
  });

  it('should fail if store_id is not an integer', () => {
    expect(flyerDbInsertSchema.safeParse({ ...baseDbFlyer, store_id: 1.5 }).success).toBe(false);
  });
});

View File

@@ -0,0 +1,226 @@
// src/services/cacheService.server.ts
/**
 * @file Centralized caching service implementing the Cache-Aside pattern.
 * This service provides a reusable wrapper around Redis for caching read-heavy operations.
 * See ADR-009 for the caching strategy documentation.
 */
import type { Logger } from 'pino';
import { connection as redis } from './redis.server';
import { logger as globalLogger } from './logger.server';
/**
 * TTL values in seconds for different cache types.
 * These can be tuned based on data volatility and freshness requirements.
 */
export const CACHE_TTL = {
/** Brand/store list - rarely changes, safe to cache for 1 hour */
BRANDS: 60 * 60,
/** Flyer list - changes when new flyers are added, cache for 5 minutes */
FLYERS: 5 * 60,
/** Individual flyer data - cache for 10 minutes */
FLYER: 10 * 60,
/** Flyer items - cache for 10 minutes */
FLYER_ITEMS: 10 * 60,
/** Statistics - can be slightly stale, cache for 5 minutes */
STATS: 5 * 60,
/** Most frequent sales - aggregated data, cache for 15 minutes */
FREQUENT_SALES: 15 * 60,
/** Categories - rarely changes, cache for 1 hour */
CATEGORIES: 60 * 60,
} as const;
/**
 * Cache key prefixes for different data types.
 * Using consistent prefixes allows for pattern-based invalidation.
 *
 * NOTE(review): 'cache:flyer' is a string prefix of both 'cache:flyers' and
 * 'cache:flyer-items', so a pattern delete on `cache:flyer*` also matches the
 * other two groups. Current callers appear to rely only on broader-than-needed
 * invalidation being safe, but keep this in mind when adding new prefixes.
 */
export const CACHE_PREFIX = {
BRANDS: 'cache:brands',
FLYERS: 'cache:flyers',
FLYER: 'cache:flyer',
FLYER_ITEMS: 'cache:flyer-items',
STATS: 'cache:stats',
FREQUENT_SALES: 'cache:frequent-sales',
CATEGORIES: 'cache:categories',
} as const;
/** Options accepted by {@link CacheService.getOrSet}. */
export interface CacheOptions {
/** Time-to-live in seconds */
ttl: number;
/** Optional logger for this operation */
logger?: Logger;
}
/**
 * Centralized cache service implementing the Cache-Aside pattern.
 * The read/write primitives (get, set, del, getOrSet) are fail-safe: a Redis
 * failure is logged and the caller proceeds as if the cache missed. The bulk
 * invalidation path (invalidatePattern) deliberately rethrows so callers can
 * observe invalidation failures.
 */
class CacheService {
  /**
   * Retrieves a value from cache.
   * @param key The cache key
   * @param logger Optional logger for this operation
   * @returns The parsed cached value, or null on a miss or Redis error
   */
  async get<T>(key: string, logger: Logger = globalLogger): Promise<T | null> {
    try {
      const raw = await redis.get(key);
      if (!raw) {
        logger.debug({ cacheKey: key }, 'Cache miss');
        return null;
      }
      logger.debug({ cacheKey: key }, 'Cache hit');
      return JSON.parse(raw) as T;
    } catch (error) {
      logger.warn({ err: error, cacheKey: key }, 'Redis GET failed, proceeding without cache');
      return null;
    }
  }

  /**
   * Stores a value in cache with a TTL. Serialization and Redis errors are
   * logged and swallowed; the value is simply not cached.
   * @param key The cache key
   * @param value The value to cache (will be JSON stringified)
   * @param ttl Time-to-live in seconds
   * @param logger Optional logger for this operation
   */
  async set<T>(key: string, value: T, ttl: number, logger: Logger = globalLogger): Promise<void> {
    try {
      await redis.set(key, JSON.stringify(value), 'EX', ttl);
      logger.debug({ cacheKey: key, ttl }, 'Value cached');
    } catch (error) {
      logger.warn({ err: error, cacheKey: key }, 'Redis SET failed, value not cached');
    }
  }

  /**
   * Deletes a single key from cache. Failures are logged and swallowed.
   * @param key The cache key to delete
   * @param logger Optional logger for this operation
   */
  async del(key: string, logger: Logger = globalLogger): Promise<void> {
    try {
      await redis.del(key);
      logger.debug({ cacheKey: key }, 'Cache key deleted');
    } catch (error) {
      logger.warn({ err: error, cacheKey: key }, 'Redis DEL failed');
    }
  }

  /**
   * Invalidates every cache key matching a glob pattern, iterating with SCAN
   * in batches of 100 so large keyspaces are never blocked by a KEYS call.
   * Unlike the other primitives, this rethrows on failure.
   * @param pattern The pattern to match (e.g., 'cache:flyers*')
   * @param logger Optional logger for this operation
   * @returns The number of keys deleted
   */
  async invalidatePattern(pattern: string, logger: Logger = globalLogger): Promise<number> {
    let removed = 0;
    try {
      let cursor = '0';
      do {
        const [nextCursor, matchedKeys] = await redis.scan(
          cursor,
          'MATCH',
          pattern,
          'COUNT',
          100,
        );
        cursor = nextCursor;
        if (matchedKeys.length > 0) {
          removed += await redis.del(...matchedKeys);
        }
      } while (cursor !== '0'); // A returned cursor of '0' marks the end of the scan.
      logger.info({ pattern, totalDeleted: removed }, 'Cache invalidation completed');
      return removed;
    } catch (error) {
      logger.error({ err: error, pattern }, 'Cache invalidation failed');
      throw error;
    }
  }

  /**
   * Cache-Aside read: return the cached value when present; otherwise run
   * `fetcher`, cache its result in the background, and return it.
   * This is the primary entry point for adding caching to repository methods.
   *
   * @param key The cache key
   * @param fetcher Function that retrieves data from the source (e.g., database)
   * @param options Cache options including TTL
   * @returns The data (from cache or fetcher)
   *
   * @example
   * ```typescript
   * const brands = await cacheService.getOrSet(
   *   CACHE_PREFIX.BRANDS,
   *   () => this.db.query('SELECT * FROM stores'),
   *   { ttl: CACHE_TTL.BRANDS, logger }
   * );
   * ```
   */
  async getOrSet<T>(
    key: string,
    fetcher: () => Promise<T>,
    options: CacheOptions,
  ): Promise<T> {
    const logger = options.logger ?? globalLogger;
    const hit = await this.get<T>(key, logger);
    if (hit !== null) {
      return hit;
    }
    // Cache miss - fetch from source.
    const fresh = await fetcher();
    // Fire-and-forget write: the caller's response is never delayed by caching.
    void this.set(key, fresh, options.ttl, logger).catch(() => {
      // Error already logged in set()
    });
    return fresh;
  }

  // --- Convenience methods for specific cache types ---

  /** Invalidates all brand-related cache entries. */
  async invalidateBrands(logger: Logger = globalLogger): Promise<number> {
    return this.invalidatePattern(`${CACHE_PREFIX.BRANDS}*`, logger);
  }

  /** Invalidates every flyer-related cache entry (lists, single flyers, items). */
  async invalidateFlyers(logger: Logger = globalLogger): Promise<number> {
    let total = 0;
    // Sequential on purpose so the aggregate count and scan order stay deterministic.
    for (const pattern of [
      `${CACHE_PREFIX.FLYERS}*`,
      `${CACHE_PREFIX.FLYER}*`,
      `${CACHE_PREFIX.FLYER_ITEMS}*`,
    ]) {
      total += await this.invalidatePattern(pattern, logger);
    }
    return total;
  }

  /** Invalidates cache for a specific flyer, its items, and the flyer list. */
  async invalidateFlyer(flyerId: number, logger: Logger = globalLogger): Promise<void> {
    await Promise.all([
      this.del(`${CACHE_PREFIX.FLYER}:${flyerId}`, logger),
      this.del(`${CACHE_PREFIX.FLYER_ITEMS}:${flyerId}`, logger),
      // The paginated flyers list may contain this flyer, so drop it as well.
      this.invalidatePattern(`${CACHE_PREFIX.FLYERS}*`, logger),
    ]);
  }

  /** Invalidates all statistics cache entries. */
  async invalidateStats(logger: Logger = globalLogger): Promise<number> {
    return this.invalidatePattern(`${CACHE_PREFIX.STATS}*`, logger);
  }
}

export const cacheService = new CacheService();

View File

@@ -18,6 +18,7 @@ describe('Address DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockDb.query.mockReset();
addressRepo = new AddressRepository(mockDb);
});

View File

@@ -40,6 +40,7 @@ describe('Admin DB Service', () => {
beforeEach(() => {
// Reset the global mock's call history before each test.
vi.clearAllMocks();
mockDb.query.mockReset();
// Reset the withTransaction mock before each test
vi.mocked(withTransaction).mockImplementation(async (callback) => {

View File

@@ -47,6 +47,7 @@ describe('Budget DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockDb.query.mockReset();
// Instantiate the repository with the minimal mock db for each test
budgetRepo = new BudgetRepository(mockDb);
});

View File

@@ -28,6 +28,7 @@ import { logger as mockLogger } from '../logger.server';
describe('Conversion DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Make getPool return our mock instance for each test
vi.mocked(getPool).mockReturnValue(mockPoolInstance as any);
});

View File

@@ -34,6 +34,16 @@ vi.mock('../logger.server', () => ({
}));
import { logger as mockLogger } from '../logger.server';
// Mock cacheService to bypass caching logic during tests
vi.mock('../cacheService.server', () => ({
cacheService: {
getOrSet: vi.fn(async (_key, callback) => callback()),
invalidateFlyer: vi.fn(),
},
CACHE_TTL: { BRANDS: 3600, FLYERS: 300, FLYER_ITEMS: 600 },
CACHE_PREFIX: { BRANDS: 'brands', FLYERS: 'flyers', FLYER_ITEMS: 'flyer_items' },
}));
// Mock the withTransaction helper
vi.mock('./connection.db', async (importOriginal) => {
const actual = await importOriginal<typeof import('./connection.db')>();
@@ -46,6 +56,7 @@ describe('Flyer DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
//In a transaction, `pool.connect()` returns a client. That client has a `release` method.
// For these tests, we simulate this by having `connect` resolve to the pool instance itself,
// and we ensure the `release` method is mocked on that instance.
@@ -244,8 +255,9 @@ describe('Flyer DB Service', () => {
await expect(flyerRepo.insertFlyer(flyerData, mockLogger)).rejects.toThrow(
CheckConstraintError,
);
// The implementation now generates a more detailed error message.
await expect(flyerRepo.insertFlyer(flyerData, mockLogger)).rejects.toThrow(
'Invalid URL format provided for image or icon.',
"[URL_CHECK_FAIL] Invalid URL format. Image: 'https://example.com/not-a-url', Icon: 'null'",
);
});
});
@@ -585,18 +597,6 @@ describe('Flyer DB Service', () => {
});
describe('getFlyers', () => {
const expectedQuery = `
SELECT
f.*,
json_build_object(
'store_id', s.store_id,
'name', s.name,
'logo_url', s.logo_url
) as store
FROM public.flyers f
JOIN public.stores s ON f.store_id = s.store_id
ORDER BY f.created_at DESC LIMIT $1 OFFSET $2`;
it('should use default limit and offset when none are provided', async () => {
console.log('[TEST DEBUG] Running test: getFlyers > should use default limit and offset');
const mockFlyers: Flyer[] = [createMockFlyer({ flyer_id: 1 })];
@@ -610,7 +610,7 @@ describe('Flyer DB Service', () => {
);
expect(mockPoolInstance.query).toHaveBeenCalledWith(
expectedQuery,
expect.stringContaining('FROM public.flyers f'),
[20, 0], // Default values
);
});
@@ -628,7 +628,7 @@ describe('Flyer DB Service', () => {
);
expect(mockPoolInstance.query).toHaveBeenCalledWith(
expectedQuery,
expect.stringContaining('FROM public.flyers f'),
[10, 5], // Provided values
);
});

View File

@@ -3,6 +3,7 @@ import type { Pool, PoolClient } from 'pg';
import { getPool, withTransaction } from './connection.db';
import type { Logger } from 'pino';
import { UniqueConstraintError, NotFoundError, handleDbError } from './errors.db';
import { cacheService, CACHE_TTL, CACHE_PREFIX } from '../cacheService.server';
import type {
Flyer,
FlyerItem,
@@ -229,22 +230,31 @@ export class FlyerRepository {
/**
* Retrieves all distinct brands from the stores table.
* Uses cache-aside pattern with 1-hour TTL (brands rarely change).
* @returns A promise that resolves to an array of Brand objects.
*/
async getAllBrands(logger: Logger): Promise<Brand[]> {
try {
const query = `
SELECT s.store_id as brand_id, s.name, s.logo_url, s.created_at, s.updated_at
FROM public.stores s
ORDER BY s.name;
`;
const res = await this.db.query<Brand>(query);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getAllBrands', {}, {
defaultMessage: 'Failed to retrieve brands from database.',
});
}
const cacheKey = CACHE_PREFIX.BRANDS;
return cacheService.getOrSet<Brand[]>(
cacheKey,
async () => {
try {
const query = `
SELECT s.store_id as brand_id, s.name, s.logo_url, s.created_at, s.updated_at
FROM public.stores s
ORDER BY s.name;
`;
const res = await this.db.query<Brand>(query);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getAllBrands', {}, {
defaultMessage: 'Failed to retrieve brands from database.',
});
}
},
{ ttl: CACHE_TTL.BRANDS, logger },
);
}
/**
@@ -262,49 +272,67 @@ export class FlyerRepository {
/**
* Retrieves all flyers from the database, ordered by creation date.
* Uses cache-aside pattern with 5-minute TTL.
* @param limit The maximum number of flyers to return.
* @param offset The number of flyers to skip.
* @returns A promise that resolves to an array of Flyer objects.
*/
async getFlyers(logger: Logger, limit: number = 20, offset: number = 0): Promise<Flyer[]> {
try {
const query = `
SELECT
f.*,
json_build_object(
'store_id', s.store_id,
'name', s.name,
'logo_url', s.logo_url
) as store
FROM public.flyers f
JOIN public.stores s ON f.store_id = s.store_id
ORDER BY f.created_at DESC LIMIT $1 OFFSET $2`;
const res = await this.db.query<Flyer>(query, [limit, offset]);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyers', { limit, offset }, {
defaultMessage: 'Failed to retrieve flyers from database.',
});
}
const cacheKey = `${CACHE_PREFIX.FLYERS}:${limit}:${offset}`;
return cacheService.getOrSet<Flyer[]>(
cacheKey,
async () => {
try {
const query = `
SELECT
f.*,
json_build_object(
'store_id', s.store_id,
'name', s.name,
'logo_url', s.logo_url
) as store
FROM public.flyers f
JOIN public.stores s ON f.store_id = s.store_id
ORDER BY f.created_at DESC LIMIT $1 OFFSET $2`;
const res = await this.db.query<Flyer>(query, [limit, offset]);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyers', { limit, offset }, {
defaultMessage: 'Failed to retrieve flyers from database.',
});
}
},
{ ttl: CACHE_TTL.FLYERS, logger },
);
}
/**
* Retrieves all items for a specific flyer.
* Uses cache-aside pattern with 10-minute TTL.
* @param flyerId The ID of the flyer.
* @returns A promise that resolves to an array of FlyerItem objects.
*/
async getFlyerItems(flyerId: number, logger: Logger): Promise<FlyerItem[]> {
try {
const res = await this.db.query<FlyerItem>(
'SELECT * FROM public.flyer_items WHERE flyer_id = $1 ORDER BY flyer_item_id ASC',
[flyerId],
);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyerItems', { flyerId }, {
defaultMessage: 'Failed to retrieve flyer items from database.',
});
}
const cacheKey = `${CACHE_PREFIX.FLYER_ITEMS}:${flyerId}`;
return cacheService.getOrSet<FlyerItem[]>(
cacheKey,
async () => {
try {
const res = await this.db.query<FlyerItem>(
'SELECT * FROM public.flyer_items WHERE flyer_id = $1 ORDER BY flyer_item_id ASC',
[flyerId],
);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getFlyerItems', { flyerId }, {
defaultMessage: 'Failed to retrieve flyer items from database.',
});
}
},
{ ttl: CACHE_TTL.FLYER_ITEMS, logger },
);
}
/**
@@ -399,6 +427,7 @@ export class FlyerRepository {
/**
* Deletes a flyer and all its associated items in a transaction.
* This should typically be an admin-only action.
* Invalidates related cache entries after successful deletion.
* @param flyerId The ID of the flyer to delete.
*/
async deleteFlyer(flyerId: number, logger: Logger): Promise<void> {
@@ -413,6 +442,9 @@ export class FlyerRepository {
}
logger.info(`Successfully deleted flyer with ID: ${flyerId}`);
});
// Invalidate cache after successful deletion
await cacheService.invalidateFlyer(flyerId, logger);
} catch (error) {
handleDbError(error, logger, 'Database transaction error in deleteFlyer', { flyerId }, {
defaultMessage: 'Failed to delete flyer.',

View File

@@ -29,6 +29,7 @@ describe('Gamification DB Service', () => {
beforeEach(() => {
// Reset the global mock's call history before each test.
vi.clearAllMocks();
mockDb.query.mockReset();
// Instantiate the repository with the mock pool for each test
gamificationRepo = new GamificationRepository(mockDb);

View File

@@ -30,6 +30,7 @@ describe('Notification DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Instantiate the repository with the mock pool for each test
notificationRepo = new NotificationRepository(mockPoolInstance as unknown as Pool);

View File

@@ -35,6 +35,7 @@ describe('Personalization DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockQuery.mockReset();
// Reset the withTransaction mock before each test
vi.mocked(withTransaction).mockImplementation(async (callback) => {
const mockClient = { query: vi.fn() };

View File

@@ -27,6 +27,7 @@ import { logger as mockLogger } from '../logger.server';
describe('Price DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Make getPool return our mock instance for each test
vi.mocked(getPool).mockReturnValue(mockPoolInstance as any);
});

View File

@@ -34,6 +34,7 @@ describe('Reaction DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockDb.query.mockReset();
reactionRepo = new ReactionRepository(mockDb);
});

View File

@@ -28,6 +28,7 @@ describe('Recipe DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockQuery.mockReset();
// Instantiate the repository with the mock pool for each test
recipeRepo = new RecipeRepository(mockPoolInstance as unknown as Pool);
});

View File

@@ -36,6 +36,7 @@ describe('Shopping DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
// Instantiate the repository with the mock pool for each test
shoppingRepo = new ShoppingRepository(mockPoolInstance as unknown as Pool);
});

View File

@@ -62,6 +62,7 @@ describe('User DB Service', () => {
beforeEach(() => {
vi.clearAllMocks();
mockPoolInstance.query.mockReset();
userRepo = new UserRepository(mockPoolInstance as unknown as PoolClient);
// Provide a default mock implementation for withTransaction for all tests.
vi.mocked(withTransaction).mockImplementation(

View File

@@ -4,12 +4,13 @@ import { withTransaction } from './db/connection.db';
import { createFlyerAndItems } from './db/flyer.db';
import { AdminRepository } from './db/admin.db';
import { GamificationRepository } from './db/gamification.db';
import { cacheService } from './cacheService.server';
import type { FlyerInsert, FlyerItemInsert, Flyer } from '../types';
export class FlyerPersistenceService {
/**
* Saves the flyer and its items to the database within a transaction.
* Also logs the activity.
* Also logs the activity and invalidates related cache entries.
*/
async saveFlyer(
flyerData: FlyerInsert,
@@ -17,7 +18,7 @@ export class FlyerPersistenceService {
userId: string | undefined,
logger: Logger,
): Promise<Flyer> {
return withTransaction(async (client) => {
const flyer = await withTransaction(async (client) => {
const { flyer, items } = await createFlyerAndItems(flyerData, itemsForDb, logger, client);
logger.info(
@@ -43,5 +44,12 @@ export class FlyerPersistenceService {
}
return flyer;
});
// Invalidate flyer list cache after successful creation (fire-and-forget)
cacheService.invalidateFlyers(logger).catch(() => {
// Error already logged in invalidateFlyers
});
return flyer;
}
}

View File

@@ -1,6 +1,9 @@
// src/services/logger.server.test.ts
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
// Unmock the module we are testing to override the global mock from setupFiles.
vi.unmock('./logger.server');
// Mock pino before importing the logger
const pinoMock = vi.fn(() => ({
info: vi.fn(),
@@ -25,14 +28,25 @@ describe('Server Logger', () => {
it('should initialize pino with the correct level for production', async () => {
vi.stubEnv('NODE_ENV', 'production');
await import('./logger.server');
expect(pinoMock).toHaveBeenCalledWith(expect.objectContaining({ level: 'info' }));
expect(pinoMock).toHaveBeenCalledWith(
expect.objectContaining({ level: 'info', transport: undefined }),
);
});
it('should initialize pino with pretty-print transport for development', async () => {
vi.stubEnv('NODE_ENV', 'development');
await import('./logger.server');
expect(pinoMock).toHaveBeenCalledWith(
expect.objectContaining({ transport: expect.any(Object) }),
expect.objectContaining({ level: 'debug', transport: expect.any(Object) }),
);
});
it('should initialize pino with debug level and no transport for test', async () => {
// This is the default for vitest, but we stub it for clarity.
vi.stubEnv('NODE_ENV', 'test');
await import('./logger.server');
// In the test environment the logger should be verbose (debug) but must not
// attach a pretty-print transport.
expect(pinoMock).toHaveBeenCalledWith(
expect.objectContaining({ level: 'debug', transport: undefined }),
);
});
});

View File

@@ -2,6 +2,7 @@
import { describe, it, expect, afterAll } from 'vitest';
import crypto from 'crypto';
import * as apiClient from '../../services/apiClient';
import { getPool } from '../../services/db/connection.db';
import path from 'path';
import fs from 'fs';
import { cleanupDb } from '../utils/cleanup';
@@ -19,12 +20,14 @@ describe('E2E Flyer Upload and Processing Workflow', () => {
let authToken: string;
let userId: string | null = null;
let flyerId: number | null = null;
let storeId: number | null = null;
afterAll(async () => {
// Delegate teardown to the shared cleanup helper, which removes the
// created rows in a dependency-safe order.
await cleanupDb({ userIds: [userId], flyerIds: [flyerId], storeIds: [storeId] });
});
@@ -98,5 +101,13 @@ describe('E2E Flyer Upload and Processing Workflow', () => {
expect(jobStatus.state).toBe('completed');
flyerId = jobStatus.returnValue?.flyerId;
expect(flyerId).toBeTypeOf('number');
// Fetch the store_id associated with the created flyer for robust cleanup
if (flyerId) {
const flyerRes = await getPool().query('SELECT store_id FROM public.flyers WHERE flyer_id = $1', [flyerId]);
if (flyerRes.rows.length > 0) {
storeId = flyerRes.rows[0].store_id;
}
}
}, 240000); // Extended timeout for AI processing
});

View File

@@ -18,6 +18,8 @@ describe('Admin API Routes Integration Tests', () => {
let regularUserToken: string;
const createdUserIds: string[] = [];
const createdStoreIds: number[] = [];
const createdCorrectionIds: number[] = [];
const createdFlyerIds: number[] = [];
beforeAll(async () => {
vi.stubEnv('FRONTEND_URL', 'https://example.com');
@@ -47,6 +49,8 @@ describe('Admin API Routes Integration Tests', () => {
await cleanupDb({
userIds: createdUserIds,
storeIds: createdStoreIds,
suggestedCorrectionIds: createdCorrectionIds,
flyerIds: createdFlyerIds,
});
});
@@ -174,6 +178,7 @@ describe('Admin API Routes Integration Tests', () => {
[testStoreId, `checksum-${Date.now()}-${Math.random()}`.padEnd(64, '0')],
);
const flyerId = flyerRes.rows[0].flyer_id;
createdFlyerIds.push(flyerId);
const flyerItemRes = await getPool().query(
`INSERT INTO public.flyer_items (flyer_id, item, price_display, price_in_cents, quantity)
@@ -188,6 +193,7 @@ describe('Admin API Routes Integration Tests', () => {
[testFlyerItemId, adminUser.user.user_id],
);
testCorrectionId = correctionRes.rows[0].suggested_correction_id;
createdCorrectionIds.push(testCorrectionId);
});
it('should allow an admin to approve a correction', async () => {

View File

@@ -59,17 +59,40 @@ vi.mock('../../services/storage/storageService', () => {
};
});
// FIX: Import the singleton instance directly to spy on it
import { aiService } from '../../services/aiService.server';
/**
* @vitest-environment node
*/
const { mockExtractCoreData } = vi.hoisted(() => ({
mockExtractCoreData: vi.fn(),
}));
// CRITICAL: This mock function must be declared with vi.hoisted() to ensure it's available
// at the module level BEFORE any imports are resolved.
const { mockExtractCoreData } = vi.hoisted(() => {
return {
mockExtractCoreData: vi.fn(),
};
});
// CRITICAL: Mock the aiService module BEFORE any other imports that depend on it.
// This ensures workers get the mocked version, not the real one.
// We use a partial mock that only overrides extractCoreDataFromFlyerImage.
vi.mock('../../services/aiService.server', async (importOriginal) => {
const actual = await importOriginal<typeof import('../../services/aiService.server')>();
// Create a proxy around the actual aiService that intercepts extractCoreDataFromFlyerImage
// while transparently delegating every other property access to the real instance.
const proxiedAiService = new Proxy(actual.aiService, {
get(target, prop) {
if (prop === 'extractCoreDataFromFlyerImage') {
// Hand back the hoisted mock so individual tests can control extraction results.
return mockExtractCoreData;
}
// For all other properties/methods, return the original
return target[prop as keyof typeof target];
},
});
return {
...actual,
aiService: proxiedAiService,
};
});
// Mock the connection DB service to intercept withTransaction.
// This is crucial because FlyerPersistenceService imports directly from connection.db,
@@ -87,6 +110,7 @@ describe('Flyer Processing Background Job Integration Test', () => {
const createdUserIds: string[] = [];
const createdFlyerIds: number[] = [];
const createdFilePaths: string[] = [];
const createdStoreIds: number[] = [];
let workersModule: typeof import('../../services/workers.server');
const originalFrontendUrl = process.env.FRONTEND_URL;
@@ -99,9 +123,8 @@ describe('Flyer Processing Background Job Integration Test', () => {
process.env.FRONTEND_URL = 'https://example.com';
console.error('[TEST SETUP] FRONTEND_URL stubbed to:', process.env.FRONTEND_URL);
// FIX: Spy on the actual singleton instance. This ensures that when the worker
// imports 'aiService', it gets the instance we are controlling here.
vi.spyOn(aiService, 'extractCoreDataFromFlyerImage').mockImplementation(mockExtractCoreData);
// NOTE: The aiService mock is now set up via vi.mock() at the module level (above).
// This ensures workers get the mocked version when they import aiService.
// NEW: Import workers to start them IN-PROCESS.
// This ensures they run in the same memory space as our mocks.
@@ -155,6 +178,7 @@ describe('Flyer Processing Background Job Integration Test', () => {
await cleanupDb({
userIds: createdUserIds,
flyerIds: createdFlyerIds,
storeIds: createdStoreIds,
});
// Use the centralized file cleanup utility.
@@ -252,6 +276,9 @@ describe('Flyer Processing Background Job Integration Test', () => {
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
expect(savedFlyer?.flyer_id).toBe(flyerId);
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
expect(savedFlyer?.file_name).toBe(uniqueFileName);
// Also add the final processed image path to the cleanup list.
// This is important because JPEGs are re-processed to strip EXIF data, creating a new file.
@@ -363,6 +390,9 @@ describe('Flyer Processing Background Job Integration Test', () => {
// 4. Verify EXIF data is stripped from the saved file
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));
createdFilePaths.push(savedImagePath); // Add final path for cleanup
@@ -454,6 +484,9 @@ describe('Flyer Processing Background Job Integration Test', () => {
// 4. Verify metadata is stripped from the saved file
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));
createdFilePaths.push(savedImagePath); // Add final path for cleanup

View File

@@ -1,5 +1,5 @@
// src/tests/integration/gamification.integration.test.ts
import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import { describe, it, expect, beforeAll, afterAll, vi, beforeEach } from 'vitest';
import supertest from 'supertest';
import path from 'path';
import fs from 'node:fs/promises';
@@ -70,8 +70,13 @@ describe('Gamification Flow Integration Test', () => {
fullName: 'Gamification Tester',
request,
}));
});
// Setup default mock response for the AI service's extractCoreDataFromFlyerImage method.
beforeEach(() => {
vi.clearAllMocks();
// Reset AI Service Mock to default success state
mockExtractCoreData.mockReset();
mockExtractCoreData.mockResolvedValue({
store_name: 'Gamification Test Store',
valid_from: null,
@@ -87,6 +92,9 @@ describe('Gamification Flow Integration Test', () => {
},
],
});
// Reset Image Processor Mock
vi.mocked(imageProcessor.generateFlyerIcon).mockResolvedValue('mock-icon.webp');
});
afterAll(async () => {
@@ -196,6 +204,9 @@ describe('Gamification Flow Integration Test', () => {
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
expect(savedFlyer?.file_name).toBe(uniqueFileName);
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
// Also add the final processed image path to the cleanup list.
// This is important because JPEGs are re-processed to strip EXIF data, creating a new file.
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));

View File

@@ -88,27 +88,29 @@ describe('Price History API Integration Test (/api/price-history)', () => {
afterAll(async () => {
vi.unstubAllEnvs();
await cleanupDb({ userIds: createdUserIds });
const pool = getPool();
// The CASCADE on the tables should handle flyer_items.
// The delete on flyers cascades to flyer_items, which fires a trigger `recalculate_price_history_on_flyer_item_delete`.
// This trigger has a bug causing the test to fail. As a workaround for the test suite,
// we temporarily disable user-defined triggers on the flyer_items table during cleanup.
const flyerIds = [flyerId1, flyerId2, flyerId3].filter(Boolean);
try {
await pool.query('ALTER TABLE public.flyer_items DISABLE TRIGGER USER;');
if (flyerIds.length > 0) {
await pool.query('DELETE FROM public.flyers WHERE flyer_id = ANY($1::int[])', [flyerIds]);
}
if (storeId) await pool.query('DELETE FROM public.stores WHERE store_id = $1', [storeId]);
if (masterItemId)
await pool.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = $1', [
masterItemId,
]);
} finally {
// Ensure triggers are always re-enabled, even if an error occurs during deletion.
await pool.query('ALTER TABLE public.flyer_items ENABLE TRIGGER USER;');
}
await cleanupDb({
userIds: createdUserIds,
masterItemIds: [masterItemId],
storeIds: [storeId],
});
});
it('should return the correct price history for a given master item ID', async () => {

View File

@@ -26,6 +26,7 @@ describe('Public API Routes Integration Tests', () => {
let testRecipe: Recipe;
let testFlyer: Flyer;
let testStoreId: number;
const createdRecipeCommentIds: number[] = [];
beforeAll(async () => {
vi.stubEnv('FRONTEND_URL', 'https://example.com');
@@ -85,6 +86,7 @@ describe('Public API Routes Integration Tests', () => {
recipeIds: testRecipe ? [testRecipe.recipe_id] : [],
flyerIds: testFlyer ? [testFlyer.flyer_id] : [],
storeIds: testStoreId ? [testStoreId] : [],
recipeCommentIds: createdRecipeCommentIds,
});
});
@@ -186,10 +188,11 @@ describe('Public API Routes Integration Tests', () => {
it('GET /api/recipes/:recipeId/comments should return comments for a recipe', async () => {
// Add a comment to our test recipe first
await getPool().query(
`INSERT INTO public.recipe_comments (recipe_id, user_id, content) VALUES ($1, $2, 'Test comment')`,
const commentRes = await getPool().query(
`INSERT INTO public.recipe_comments (recipe_id, user_id, content) VALUES ($1, $2, 'Test comment') RETURNING recipe_comment_id`,
[testRecipe.recipe_id, testUser.user.user_id],
);
createdRecipeCommentIds.push(commentRes.rows[0].recipe_comment_id);
const response = await request.get(`/api/recipes/${testRecipe.recipe_id}/comments`);
const comments: RecipeComment[] = response.body;
expect(response.status).toBe(200);

View File

@@ -1,5 +1,5 @@
// src/tests/integration/recipe.integration.test.ts
import { describe, it, expect, beforeAll, afterAll, vi } from 'vitest';
import { describe, it, expect, beforeAll, afterAll, vi, afterEach } from 'vitest';
import supertest from 'supertest';
import { createAndLoginUser } from '../utils/testHelpers';
import { cleanupDb } from '../utils/cleanup';
@@ -49,6 +49,12 @@ describe('Recipe API Routes Integration Tests', () => {
createdRecipeIds.push(testRecipe.recipe_id);
});
afterEach(() => {
vi.clearAllMocks();
// Reset the mock to its default state for the next test, so a test that
// overrides the suggestion cannot leak that behavior into its successors.
vi.mocked(aiService.generateRecipeSuggestion).mockResolvedValue('Default Mock Suggestion');
});
afterAll(async () => {
vi.unstubAllEnvs();
// Clean up all created resources

View File

@@ -19,6 +19,7 @@ describe('User API Routes Integration Tests', () => {
let testUser: UserProfile;
let authToken: string;
const createdUserIds: string[] = [];
const createdMasterItemIds: number[] = [];
// Before any tests run, create a new user and log them in.
// The token will be used for all subsequent API calls in this test suite.
@@ -38,7 +39,10 @@ describe('User API Routes Integration Tests', () => {
// This now cleans up ALL users created by this test suite to prevent pollution.
afterAll(async () => {
vi.unstubAllEnvs();
await cleanupDb({ userIds: createdUserIds });
await cleanupDb({
userIds: createdUserIds,
masterItemIds: createdMasterItemIds
});
// Safeguard to clean up any avatar files created during tests.
const uploadDir = path.resolve(__dirname, '../../../uploads/avatars');
@@ -244,6 +248,7 @@ describe('User API Routes Integration Tests', () => {
.send({ itemName: 'Integration Test Item', category: 'Other/Miscellaneous' });
const newItem = addResponse.body;
if (newItem?.master_grocery_item_id) createdMasterItemIds.push(newItem.master_grocery_item_id);
// Assert 1: Check that the item was created correctly.
expect(addResponse.status).toBe(201);
expect(newItem.name).toBe('Integration Test Item');

View File

@@ -9,13 +9,56 @@ let server: Server;
// This will hold the single database pool instance for the entire test run.
let globalPool: ReturnType<typeof getPool> | null = null;
/**
 * Obliterates every BullMQ queue so stale jobs from earlier test runs cannot
 * leak into the current run. Old jobs carrying outdated error messages have
 * polluted test results before, which is why this runs during global setup.
 * Failures are logged and swallowed — cleanup must never abort the test run.
 */
async function cleanAllQueues() {
  // stderr is used throughout because it tends to survive CI log buffering.
  console.error(`[PID:${process.pid}] [QUEUE CLEANUP] Starting BullMQ queue cleanup...`);
  try {
    const queueModule = await import('../../services/queues.server');
    console.error(`[QUEUE CLEANUP] Successfully imported queue modules`);
    const allQueues = [
      queueModule.flyerQueue,
      queueModule.cleanupQueue,
      queueModule.emailQueue,
      queueModule.analyticsQueue,
      queueModule.weeklyAnalyticsQueue,
      queueModule.tokenCleanupQueue,
    ];
    for (const queue of allQueues) {
      try {
        // Snapshot the job counts first so CI logs show what was purged.
        const countsBefore = await queue.getJobCounts();
        console.error(`[QUEUE CLEANUP] Queue "${queue.name}" before cleanup: ${JSON.stringify(countsBefore)}`);
        // obliterate() wipes every trace of the queue from Redis.
        await queue.obliterate({ force: true });
        console.error(` ✅ [QUEUE CLEANUP] Cleaned queue: ${queue.name}`);
      } catch (err) {
        // A queue that does not exist yet is not a failure worth stopping for.
        console.error(` ⚠️ [QUEUE CLEANUP] Could not clean queue ${queue.name}: ${err instanceof Error ? err.message : 'Unknown error'}`);
      }
    }
    console.error(`✅ [PID:${process.pid}] [QUEUE CLEANUP] All queues cleaned successfully.`);
  } catch (error) {
    console.error(`❌ [PID:${process.pid}] [QUEUE CLEANUP] CRITICAL ERROR during queue cleanup:`, error);
    // Deliberately not rethrown — tests should continue even if cleanup fails.
  }
}
export async function setup() {
// Ensure we are in the correct environment for these tests.
process.env.NODE_ENV = 'test';
// Fix: Set the FRONTEND_URL globally for the test server instance
process.env.FRONTEND_URL = 'https://example.com';
console.log(`\n--- [PID:${process.pid}] Running Integration Test GLOBAL Setup ---`);
console.error(`\n--- [PID:${process.pid}] Running Integration Test GLOBAL Setup ---`);
console.error(`[SETUP] REDIS_URL: ${process.env.REDIS_URL}`);
console.error(`[SETUP] REDIS_PASSWORD is set: ${!!process.env.REDIS_PASSWORD}`);
// CRITICAL: Clean all queues BEFORE running any tests to remove stale jobs
// from previous test runs that may have outdated error messages.
console.error(`[SETUP] About to call cleanAllQueues()...`);
await cleanAllQueues();
console.error(`[SETUP] cleanAllQueues() completed.`);
// The integration setup is now the single source of truth for preparing the test DB.
// It runs the same seed script that `npm run db:reset:test` used.

View File

@@ -8,6 +8,10 @@ interface CleanupOptions {
storeIds?: (number | null | undefined)[];
recipeIds?: (number | null | undefined)[];
budgetIds?: (number | null | undefined)[];
masterItemIds?: (number | null | undefined)[];
shoppingListIds?: (number | null | undefined)[];
suggestedCorrectionIds?: (number | null | undefined)[];
recipeCommentIds?: (number | null | undefined)[];
}
/**
@@ -25,11 +29,21 @@ export const cleanupDb = async (options: CleanupOptions) => {
// Order of deletion matters to avoid foreign key violations.
// Children entities first, then parents.
if (options.suggestedCorrectionIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.suggested_corrections WHERE suggested_correction_id = ANY($1::int[])', [options.suggestedCorrectionIds]);
logger.debug(`Cleaned up ${options.suggestedCorrectionIds.length} suggested correction(s).`);
}
if (options.budgetIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.budgets WHERE budget_id = ANY($1::int[])', [options.budgetIds]);
logger.debug(`Cleaned up ${options.budgetIds.length} budget(s).`);
}
if (options.recipeCommentIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipe_comments WHERE recipe_comment_id = ANY($1::int[])', [options.recipeCommentIds]);
logger.debug(`Cleaned up ${options.recipeCommentIds.length} recipe comment(s).`);
}
if (options.recipeIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipes WHERE recipe_id = ANY($1::int[])', [options.recipeIds]);
logger.debug(`Cleaned up ${options.recipeIds.length} recipe(s).`);
@@ -45,6 +59,16 @@ export const cleanupDb = async (options: CleanupOptions) => {
logger.debug(`Cleaned up ${options.storeIds.length} store(s).`);
}
if (options.masterItemIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = ANY($1::int[])', [options.masterItemIds]);
logger.debug(`Cleaned up ${options.masterItemIds.length} master grocery item(s).`);
}
if (options.shoppingListIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.shopping_lists WHERE shopping_list_id = ANY($1::int[])', [options.shoppingListIds]);
logger.debug(`Cleaned up ${options.shoppingListIds.length} shopping list(s).`);
}
if (options.userIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.users WHERE user_id = ANY($1::uuid[])', [options.userIds]);
logger.debug(`Cleaned up ${options.userIds.length} user(s).`);

View File

@@ -1,14 +1,23 @@
// src/utils/imageProcessor.test.ts
import { describe, it, expect, vi, beforeEach } from 'vitest';
import type { Logger } from 'pino';
import path from 'path';
// --- Hoisted Mocks ---
const mocks = vi.hoisted(() => {
// Create a chainable mock for the sharp library
const toFile = vi.fn().mockResolvedValue({ info: 'mocked' });
// Chain for generateFlyerIcon
const webp = vi.fn(() => ({ toFile }));
const resize = vi.fn(() => ({ webp }));
const sharpInstance = { resize };
// Chain for processAndSaveImage
const png = vi.fn(() => ({ toFile }));
const jpeg = vi.fn(() => ({ png }));
const withMetadata = vi.fn(() => ({ jpeg }));
const sharpInstance = { resize, withMetadata };
// Mock the sharp function and attach static properties required by the implementation
const sharp = vi.fn(() => sharpInstance);
@@ -18,6 +27,9 @@ const mocks = vi.hoisted(() => {
sharp: sharp,
resize,
webp,
withMetadata,
jpeg,
png,
toFile,
mkdir: vi.fn().mockResolvedValue(undefined),
};
@@ -54,7 +66,7 @@ const logger = createMockLogger();
vi.mock('../services/logger.server', () => ({ logger }));
// --- Import the function to be tested ---
import { generateFlyerIcon } from './imageProcessor';
import { generateFlyerIcon, processAndSaveImage } from './imageProcessor';
describe('generateFlyerIcon', () => {
beforeEach(() => {
@@ -95,3 +107,48 @@ describe('generateFlyerIcon', () => {
);
});
});
describe('processAndSaveImage', () => {
  beforeEach(() => {
    vi.clearAllMocks();
    // Restore toFile to a resolving state; a prior test may have rejected it.
    mocks.toFile.mockResolvedValue({ info: 'mocked' });
  });

  it('should process the image, strip metadata, and return the new filename', async () => {
    const src = '/tmp/upload/original.jpg';
    const outDir = '/var/www/images';
    const fileName = 'original.jpg';

    const result = await processAndSaveImage(src, outDir, fileName, logger);

    // The destination directory must be created before sharp writes into it.
    expect(mocks.mkdir).toHaveBeenCalledWith(outDir, { recursive: true });
    // sharp is opened against the source with failOn: 'none'.
    expect(mocks.sharp).toHaveBeenCalledWith(src, { failOn: 'none' });
    // Full processing chain: strip metadata, re-encode, write the output file.
    expect(mocks.withMetadata).toHaveBeenCalledWith({});
    expect(mocks.jpeg).toHaveBeenCalledWith({ quality: 85, mozjpeg: true });
    expect(mocks.png).toHaveBeenCalledWith({ compressionLevel: 8, quality: 85 });
    expect(mocks.toFile).toHaveBeenCalledWith(expect.stringContaining(path.join(outDir, 'original-')));
    // The returned name follows the `original-<timestamp>.jpg` pattern.
    expect(result).toMatch(/^original-\d+\.jpg$/);
  });

  it('should throw an error if sharp fails to process the image', async () => {
    const processingError = new Error('Processing failed');
    mocks.toFile.mockRejectedValueOnce(processingError);

    await expect(
      processAndSaveImage('/path/img.jpg', '/dest', 'img.jpg', logger),
    ).rejects.toThrow('Failed to process image img.jpg.');

    // The failure must be logged with the original error and source path attached.
    expect(logger.error).toHaveBeenCalledWith(
      expect.objectContaining({ err: processingError, sourcePath: '/path/img.jpg' }),
      'An error occurred during image processing and saving.',
    );
  });
});

View File

@@ -20,57 +20,100 @@ const createMockLogger = (): Logger =>
describe('serverUtils', () => {
describe('getBaseUrl', () => {
const originalEnv = process.env;
let mockLogger: Logger;
// Store original env values to restore after tests
const originalFrontendUrl = process.env.FRONTEND_URL;
const originalBaseUrl = process.env.BASE_URL;
const originalNodeEnv = process.env.NODE_ENV;
const originalPort = process.env.PORT;
beforeEach(() => {
// Reset mocks and environment variables before each test for isolation
vi.resetModules();
process.env = { ...originalEnv };
vi.unstubAllEnvs();
// CRITICAL: Clear env vars that might be set globally (e.g., from vitest config)
// vi.unstubAllEnvs() only removes vars set via vi.stubEnv(), not direct assignments
delete process.env.FRONTEND_URL;
delete process.env.BASE_URL;
delete process.env.NODE_ENV;
delete process.env.PORT;
mockLogger = createMockLogger();
});
afterEach(() => {
// Restore original environment variables after each test
process.env = originalEnv;
vi.unstubAllEnvs();
// Restore original values
if (originalFrontendUrl !== undefined) process.env.FRONTEND_URL = originalFrontendUrl;
else delete process.env.FRONTEND_URL;
if (originalBaseUrl !== undefined) process.env.BASE_URL = originalBaseUrl;
else delete process.env.BASE_URL;
if (originalNodeEnv !== undefined) process.env.NODE_ENV = originalNodeEnv;
else delete process.env.NODE_ENV;
if (originalPort !== undefined) process.env.PORT = originalPort;
else delete process.env.PORT;
});
it('should use FRONTEND_URL if it is a valid URL', () => {
process.env.FRONTEND_URL = 'https://valid.example.com';
vi.stubEnv('FRONTEND_URL', 'https://valid.example.com');
const baseUrl = getBaseUrl(mockLogger);
expect(baseUrl).toBe('https://valid.example.com');
expect(mockLogger.warn).not.toHaveBeenCalled();
});
it('should trim a trailing slash from FRONTEND_URL', () => {
process.env.FRONTEND_URL = 'https://valid.example.com/';
vi.stubEnv('FRONTEND_URL', 'https://valid.example.com/');
const baseUrl = getBaseUrl(mockLogger);
expect(baseUrl).toBe('https://valid.example.com');
});
it('should use BASE_URL if FRONTEND_URL is not set', () => {
delete process.env.FRONTEND_URL;
process.env.BASE_URL = 'https://base.example.com';
vi.stubEnv('BASE_URL', 'https://base.example.com');
const baseUrl = getBaseUrl(mockLogger);
expect(baseUrl).toBe('https://base.example.com');
expect(mockLogger.warn).not.toHaveBeenCalled();
});
it('should fall back to example.com with default port 3000 if no URL is provided', () => {
delete process.env.FRONTEND_URL;
delete process.env.BASE_URL;
delete process.env.PORT;
it('should fall back to localhost with default port 3000 in test environment', () => {
vi.stubEnv('NODE_ENV', 'test');
const baseUrl = getBaseUrl(mockLogger);
expect(baseUrl).toBe('https://example.com:3000');
expect(baseUrl).toBe('http://localhost:3000');
expect(mockLogger.warn).not.toHaveBeenCalled();
});
it('should log a warning and fall back if FRONTEND_URL is invalid (does not start with http)', () => {
process.env.FRONTEND_URL = 'invalid.url.com';
it('should fall back to example.com in non-test environment', () => {
vi.stubEnv('NODE_ENV', 'development');
vi.stubEnv('PORT', '4000');
const baseUrl = getBaseUrl(mockLogger);
expect(baseUrl).toBe('https://example.com:3000');
expect(baseUrl).toBe('http://example.com:4000');
expect(mockLogger.warn).not.toHaveBeenCalled();
});
it('should log a warning and fall back to localhost if FRONTEND_URL is invalid in test env', () => {
vi.stubEnv('NODE_ENV', 'test');
vi.stubEnv('FRONTEND_URL', 'invalid.url.com');
const baseUrl = getBaseUrl(mockLogger);
expect(baseUrl).toBe('http://localhost:3000');
expect(mockLogger.warn).toHaveBeenCalledWith(
"[getBaseUrl] FRONTEND_URL/BASE_URL is invalid or incomplete ('invalid.url.com'). Falling back to default local URL: https://example.com:3000",
"[getBaseUrl] FRONTEND_URL/BASE_URL is invalid or incomplete ('invalid.url.com'). Falling back to: http://localhost:3000",
);
});
it('should log a warning and fall back to example.com if FRONTEND_URL is invalid in non-test env', () => {
vi.stubEnv('NODE_ENV', 'production');
// A value without an http/https scheme is treated as invalid.
vi.stubEnv('FRONTEND_URL', 'invalid.url.com');
const baseUrl = getBaseUrl(mockLogger);
// Outside the test environment the fallback host is example.com, not localhost.
expect(baseUrl).toBe('http://example.com:3000');
expect(mockLogger.warn).toHaveBeenCalledWith(
"[getBaseUrl] FRONTEND_URL/BASE_URL is invalid or incomplete ('invalid.url.com'). Falling back to: http://example.com:3000",
);
});
it('should throw an error if the final URL is invalid', () => {
// A URL missing the '//' after the scheme must make getBaseUrl throw
// rather than return a malformed base URL.
vi.stubEnv('FRONTEND_URL', 'http:invalid');
expect(() => getBaseUrl(mockLogger)).toThrow(
`[getBaseUrl] Generated URL 'http:invalid' does not match required pattern (must start with http:// or https://)`,
);
});
});