integration test fixes - claude for the win? try 4 - i have a good feeling

2026-01-09 05:18:19 -08:00
parent 23830c0d4e
commit b777430ff7
18 changed files with 512 additions and 24 deletions


@@ -59,7 +59,10 @@
"mcp__filesystem__read_multiple_files",
"mcp__filesystem__directory_tree",
"mcp__filesystem__read_text_file",
"Bash(wc:*)"
"Bash(wc:*)",
"Bash(npm install:*)",
"Bash(git grep:*)",
"Bash(findstr:*)"
]
}
}


@@ -117,7 +117,8 @@ jobs:
DB_USER: ${{ secrets.DB_USER }}
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_NAME: ${{ secrets.DB_DATABASE_PROD }}
REDIS_URL: 'redis://localhost:6379'
# Explicitly use database 0 for production (test uses database 1)
REDIS_URL: 'redis://localhost:6379/0'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
FRONTEND_URL: 'https://flyer-crawler.projectium.com'
JWT_SECRET: ${{ secrets.JWT_SECRET }}


@@ -96,22 +96,23 @@ jobs:
# It prevents the accumulation of duplicate processes from previous test runs.
node -e "const exec = require('child_process').execSync; try { const list = JSON.parse(exec('pm2 jlist').toString()); list.forEach(p => { if (p.name && p.name.endsWith('-test')) { console.log('Deleting test process: ' + p.name + ' (' + p.pm2_env.pm_id + ')'); try { exec('pm2 delete ' + p.pm2_env.pm_id); } catch(e) { console.error('Failed to delete ' + p.pm2_env.pm_id, e.message); } } }); console.log('✅ Test process cleanup complete.'); } catch (e) { if (e.stdout.toString().includes('No process found')) { console.log('No PM2 processes running, cleanup not needed.'); } else { console.error('Error cleaning up test processes:', e.message); } }" || true
- name: Flush Redis Before Tests
# CRITICAL: Clear all Redis data to remove stale BullMQ jobs from previous test runs.
- name: Flush Redis Test Database Before Tests
# CRITICAL: Clear Redis database 1 (test database) to remove stale BullMQ jobs.
# This prevents old jobs with outdated error messages from polluting test results.
# NOTE: We use database 1 for tests to isolate from production (database 0).
env:
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
run: |
echo "--- Flushing Redis database to remove stale jobs ---"
echo "--- Flushing Redis database 1 (test database) to remove stale jobs ---"
if [ -z "$REDIS_PASSWORD" ]; then
echo "⚠️ REDIS_PASSWORD_TEST not set, attempting flush without password..."
redis-cli FLUSHDB || echo "Redis flush failed (no password)"
redis-cli -n 1 FLUSHDB || echo "Redis flush failed (no password)"
else
redis-cli -a "$REDIS_PASSWORD" FLUSHDB 2>/dev/null && echo "✅ Redis database flushed successfully." || echo "⚠️ Redis flush failed"
redis-cli -a "$REDIS_PASSWORD" -n 1 FLUSHDB 2>/dev/null && echo "✅ Redis database 1 (test) flushed successfully." || echo "⚠️ Redis flush failed"
fi
# Verify the flush worked by checking key count
KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
echo "Redis key count after flush: $KEY_COUNT"
# Verify the flush worked by checking key count on database 1
KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 1 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
echo "Redis database 1 key count after flush: $KEY_COUNT"
- name: Run All Tests and Generate Merged Coverage Report
# This single step runs both unit and integration tests, then merges their
@@ -126,7 +127,9 @@ jobs:
DB_NAME: 'flyer-crawler-test' # Explicitly set for tests
# --- Redis credentials for the test suite ---
REDIS_URL: 'redis://localhost:6379'
# CRITICAL: Use Redis database 1 to isolate tests from production (which uses db 0).
# This prevents the production worker from picking up test jobs.
REDIS_URL: 'redis://localhost:6379/1'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
# --- Integration test specific variables ---
@@ -401,8 +404,8 @@ jobs:
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_NAME: ${{ secrets.DB_DATABASE_TEST }}
# Redis Credentials
REDIS_URL: 'redis://localhost:6379'
# Redis Credentials (use database 1 to isolate from production)
REDIS_URL: 'redis://localhost:6379/1'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_TEST }}
# Application Secrets
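The application side is expected to land on the matching logical database simply by parsing `REDIS_URL`: ioredis treats a trailing `/1` as the `db` index. A hypothetical sketch (not part of this commit) of a BullMQ connection built that way:

```typescript
// Hypothetical sketch: ioredis parses the "/1" path segment of REDIS_URL as db 1,
// so a BullMQ queue built from this connection stays isolated from production (db 0).
import IORedis from 'ioredis';
import { Queue } from 'bullmq';

const connection = new IORedis(process.env.REDIS_URL ?? 'redis://localhost:6379/0', {
  password: process.env.REDIS_PASSWORD,
  maxRetriesPerRequest: null, // required by BullMQ
});

export const flyerQueue = new Queue('flyer-processing', { connection });
```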


@@ -116,7 +116,8 @@ jobs:
DB_USER: ${{ secrets.DB_USER }}
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_NAME: ${{ secrets.DB_DATABASE_PROD }}
REDIS_URL: 'redis://localhost:6379'
# Explicitly use database 0 for production (test uses database 1)
REDIS_URL: 'redis://localhost:6379/0'
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
FRONTEND_URL: 'https://flyer-crawler.projectium.com'
JWT_SECRET: ${{ secrets.JWT_SECRET }}


@@ -0,0 +1,167 @@
# .gitea/workflows/manual-redis-flush-prod.yml
#
# DANGER: This workflow is DESTRUCTIVE and intended for manual execution only.
# It will completely FLUSH the PRODUCTION Redis database (db 0).
# This will clear all BullMQ queues, sessions, caches, and any other Redis data.
#
name: Manual - Flush Production Redis
on:
workflow_dispatch:
inputs:
confirmation:
description: 'DANGER: This will FLUSH production Redis. Type "flush-production-redis" to confirm.'
required: true
default: 'do-not-run'
flush_type:
description: 'What to flush?'
required: true
type: choice
options:
- 'queues-only'
- 'entire-database'
default: 'queues-only'
jobs:
flush-redis:
runs-on: projectium.com # This job runs on your self-hosted Gitea runner.
env:
REDIS_PASSWORD: ${{ secrets.REDIS_PASSWORD_PROD }}
steps:
- name: Checkout Code
uses: actions/checkout@v3
- name: Setup Node.js
uses: actions/setup-node@v3
with:
node-version: '20'
cache: 'npm'
cache-dependency-path: '**/package-lock.json'
- name: Install Dependencies
run: npm ci
- name: Validate Secrets
run: |
if [ -z "$REDIS_PASSWORD" ]; then
echo "ERROR: REDIS_PASSWORD_PROD secret is not set in Gitea repository settings."
exit 1
fi
echo "✅ Redis password secret is present."
- name: Verify Confirmation Phrase
run: |
if [ "${{ gitea.event.inputs.confirmation }}" != "flush-production-redis" ]; then
echo "ERROR: Confirmation phrase did not match. Aborting Redis flush."
exit 1
fi
echo "✅ Confirmation accepted. Proceeding with Redis flush."
- name: Show Current Redis State
run: |
echo "--- Current Redis Database 0 (Production) State ---"
redis-cli -a "$REDIS_PASSWORD" -n 0 INFO keyspace 2>/dev/null || echo "Could not get keyspace info"
echo ""
echo "--- Key Count ---"
KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 0 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
echo "Production Redis (db 0) key count: $KEY_COUNT"
echo ""
echo "--- BullMQ Queue Keys ---"
redis-cli -a "$REDIS_PASSWORD" -n 0 KEYS "bull:*" 2>/dev/null | head -20 || echo "No BullMQ keys found"
- name: 🚨 FINAL WARNING & PAUSE 🚨
run: |
echo "*********************************************************************"
echo "WARNING: YOU ARE ABOUT TO FLUSH PRODUCTION REDIS DATA."
echo "Flush type: ${{ gitea.event.inputs.flush_type }}"
echo ""
if [ "${{ gitea.event.inputs.flush_type }}" = "entire-database" ]; then
echo "This will DELETE ALL Redis data including sessions, caches, and queues!"
else
echo "This will DELETE ALL BullMQ queue data (pending jobs, failed jobs, etc.)"
fi
echo ""
echo "This action is IRREVERSIBLE. Press Ctrl+C in the runner terminal NOW to cancel."
echo "Sleeping for 10 seconds..."
echo "*********************************************************************"
sleep 10
- name: Flush BullMQ Queues Only
if: ${{ gitea.event.inputs.flush_type == 'queues-only' }}
env:
REDIS_URL: 'redis://localhost:6379/0'
run: |
echo "--- Obliterating BullMQ queues using Node.js ---"
node -e "
const { Queue } = require('bullmq');
const IORedis = require('ioredis');
const connection = new IORedis(process.env.REDIS_URL, {
maxRetriesPerRequest: null,
password: process.env.REDIS_PASSWORD,
});
const queueNames = [
'flyer-processing',
'email-sending',
'analytics-reporting',
'weekly-analytics-reporting',
'file-cleanup',
'token-cleanup'
];
(async () => {
for (const name of queueNames) {
try {
const queue = new Queue(name, { connection });
const counts = await queue.getJobCounts();
console.log('Queue \"' + name + '\" before obliterate:', JSON.stringify(counts));
await queue.obliterate({ force: true });
console.log('✅ Obliterated queue: ' + name);
await queue.close();
} catch (err) {
console.error('⚠️ Failed to obliterate queue ' + name + ':', err.message);
}
}
await connection.quit();
console.log('✅ All BullMQ queues obliterated.');
})();
"
- name: Flush Entire Redis Database
if: ${{ gitea.event.inputs.flush_type == 'entire-database' }}
run: |
echo "--- Flushing entire Redis database 0 (production) ---"
redis-cli -a "$REDIS_PASSWORD" -n 0 FLUSHDB 2>/dev/null && echo "✅ Redis database 0 flushed successfully." || echo "❌ Redis flush failed"
- name: Verify Flush Results
run: |
echo "--- Redis Database 0 (Production) State After Flush ---"
KEY_COUNT=$(redis-cli -a "$REDIS_PASSWORD" -n 0 DBSIZE 2>/dev/null | grep -oE '[0-9]+' || echo "unknown")
echo "Production Redis (db 0) key count after flush: $KEY_COUNT"
echo ""
echo "--- Remaining BullMQ Queue Keys ---"
BULL_KEYS=$(redis-cli -a "$REDIS_PASSWORD" -n 0 KEYS "bull:*" 2>/dev/null | wc -l || echo "0")
echo "BullMQ key count: $BULL_KEYS"
if [ "${{ gitea.event.inputs.flush_type }}" = "queues-only" ] && [ "$BULL_KEYS" -gt 0 ]; then
echo "⚠️ Warning: Some BullMQ keys may still exist. This can happen if new jobs were added during the flush."
fi
- name: Summary
run: |
echo ""
echo "=========================================="
echo "PRODUCTION REDIS FLUSH COMPLETE"
echo "=========================================="
echo "Flush type: ${{ gitea.event.inputs.flush_type }}"
echo "Timestamp: $(date -u '+%Y-%m-%d %H:%M:%S UTC')"
echo ""
echo "NOTE: If you flushed queues, any pending jobs (flyer processing,"
echo "emails, analytics, etc.) have been permanently deleted."
echo ""
echo "The production workers will automatically start processing"
echo "new jobs as they are added to the queues."
echo "=========================================="


@@ -2,7 +2,7 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Accepted
## Context
@@ -20,3 +20,197 @@ We will implement a multi-layered security approach for the API:
- **Positive**: Significantly improves the application's security posture against common web vulnerabilities like XSS, clickjacking, and brute-force attacks.
- **Negative**: Requires careful configuration of CORS and rate limits to avoid blocking legitimate traffic. Content-Security-Policy can be complex to configure correctly.
## Implementation Status
### What's Implemented
- **Helmet** - Security headers middleware with CSP, HSTS, and more
- **Rate Limiting** - Comprehensive implementation with 17+ specific limiters
- **Input Validation** - Zod-based request validation on all routes
- **File Upload Security** - MIME type validation, size limits, filename sanitization
- **Error Handling** - Production-safe error responses (no sensitive data leakage)
- **Request Timeout** - 5-minute timeout protection
- **Secure Cookies** - httpOnly and secure flags for authentication cookies
### Not Required
- **CORS** - Not needed (API and frontend are same-origin)
## Implementation Details
### Helmet Security Headers
Using **helmet v8.x** configured in `server.ts` as the first middleware after app initialization.
**Security Headers Applied**:
| Header | Configuration | Purpose |
| ------ | ------------- | ------- |
| Content-Security-Policy | Custom directives | Prevents XSS, code injection |
| Strict-Transport-Security | 1 year, includeSubDomains, preload | Forces HTTPS connections |
| X-Content-Type-Options | nosniff | Prevents MIME type sniffing |
| X-Frame-Options | DENY | Prevents clickjacking |
| X-XSS-Protection | 0 (disabled) | Deprecated, CSP preferred |
| Referrer-Policy | strict-origin-when-cross-origin | Controls referrer information |
| Cross-Origin-Resource-Policy | cross-origin | Allows external resource loading |
**Content Security Policy Directives**:
```typescript
contentSecurityPolicy: {
directives: {
defaultSrc: ["'self'"],
scriptSrc: ["'self'", "'unsafe-inline'"], // React inline scripts
styleSrc: ["'self'", "'unsafe-inline'"], // Tailwind inline styles
imgSrc: ["'self'", 'data:', 'blob:', 'https:'], // External images
fontSrc: ["'self'", 'https:', 'data:'],
connectSrc: ["'self'", 'https:', 'wss:'], // API + WebSocket
frameSrc: ["'none'"], // No iframes
objectSrc: ["'none'"], // No plugins
upgradeInsecureRequests: [], // Production only
},
}
```
**HSTS Configuration**:
- Max-age: 1 year (31536000 seconds)
- Includes subdomains
- Preload-ready for browser HSTS lists
### Rate Limiting
Using **express-rate-limit v8.2.1** with a centralized configuration in `src/config/rateLimiters.ts`.
**Standard Configuration**:
```typescript
const standardConfig = {
standardHeaders: true, // Sends RateLimit-* headers
legacyHeaders: false,
skip: shouldSkipRateLimit, // Disabled in test environment
};
```
**Rate Limiters by Category**:
| Category | Limiter | Window | Max Requests |
| -------- | ------- | ------ | ------------ |
| **Authentication** | loginLimiter | 15 min | 5 |
| | registerLimiter | 1 hour | 5 |
| | forgotPasswordLimiter | 15 min | 5 |
| | resetPasswordLimiter | 15 min | 10 |
| | refreshTokenLimiter | 15 min | 20 |
| | logoutLimiter | 15 min | 10 |
| **Public/User Read** | publicReadLimiter | 15 min | 100 |
| | userReadLimiter | 15 min | 100 |
| | userUpdateLimiter | 15 min | 100 |
| **Sensitive Operations** | userSensitiveUpdateLimiter | 1 hour | 5 |
| | adminTriggerLimiter | 15 min | 30 |
| **AI/Costly** | aiGenerationLimiter | 15 min | 20 |
| | geocodeLimiter | 1 hour | 100 |
| | priceHistoryLimiter | 15 min | 50 |
| **Uploads** | adminUploadLimiter | 15 min | 20 |
| | aiUploadLimiter | 15 min | 10 |
| | batchLimiter | 15 min | 50 |
| **Tracking** | trackingLimiter | 15 min | 200 |
| | reactionToggleLimiter | 15 min | 150 |
**Test Environment Handling**:
Rate limiting is automatically disabled in the test environment via the `shouldSkipRateLimit` utility (`src/utils/rateLimit.ts`). Tests can opt in to rate limiting by setting the `x-test-rate-limit-enable: true` header.
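A minimal sketch of what that skip utility could look like (the real `src/utils/rateLimit.ts` may differ):

```typescript
// Illustrative sketch of src/utils/rateLimit.ts; the real implementation may differ.
import type { Request } from 'express';

/**
 * Bypass rate limiting in the test environment unless the test opts back in
 * via the `x-test-rate-limit-enable: true` header.
 */
export const shouldSkipRateLimit = (req: Request): boolean => {
  if (process.env.NODE_ENV !== 'test') return false;
  return req.headers['x-test-rate-limit-enable'] !== 'true';
};
```

Each limiter then passes this function as its `skip` option through the standard config shown above, so the check runs once per request.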
### Input Validation
**Zod Schema Validation** (`src/middleware/validation.middleware.ts`):
- Type-safe parsing and coercion for params, query, and body
- Applied to all API routes via `validateRequest()` middleware
- Returns structured validation errors with field-level details
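A stripped-down sketch of such a middleware is shown below; the actual `validation.middleware.ts` likely differs (for example, it presumably also exposes the coerced values to downstream handlers):

```typescript
// Illustrative sketch only; not the project's exact implementation.
import type { RequestHandler } from 'express';
import { ZodError, type ZodTypeAny } from 'zod';

interface RequestSchemas {
  params?: ZodTypeAny;
  query?: ZodTypeAny;
  body?: ZodTypeAny;
}

export const validateRequest =
  (schemas: RequestSchemas): RequestHandler =>
  (req, res, next) => {
    try {
      schemas.params?.parse(req.params);
      schemas.query?.parse(req.query);
      if (schemas.body) req.body = schemas.body.parse(req.body); // body stays writable in Express 5
      next();
    } catch (err) {
      if (err instanceof ZodError) {
        // Field-level details (path and message per issue), never a stack trace.
        return res.status(400).json({ errors: err.issues });
      }
      next(err);
    }
  };
```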
**Filename Sanitization** (`src/utils/stringUtils.ts`):
```typescript
// Removes dangerous characters from uploaded filenames
sanitizeFilename(filename: string): string
```
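The signature above leaves the rules implicit; a sketch of the kind of sanitization it implies (the real rules may differ):

```typescript
// Sketch of the sanitization described above; the project's actual rules may differ.
import path from 'path';

export const sanitizeFilename = (filename: string): string => {
  const base = path.basename(filename); // drop directory components ("../../etc/passwd" -> "passwd")
  return base
    .replace(/[^a-zA-Z0-9._-]/g, '_') // replace anything outside a safe whitelist
    .replace(/\.{2,}/g, '.')          // collapse ".." sequences
    .slice(0, 255);                   // stay within common filesystem name limits
};
```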
### File Upload Security
**Multer Configuration** (`src/middleware/multer.middleware.ts`):
- MIME type validation via `imageFileFilter` (only image/* allowed)
- File size limits (2MB for logos, configurable per upload type)
- Unique filenames using timestamps + random suffixes
- User-scoped storage paths
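A hedged sketch of a multer setup matching those bullets follows; the destination path, naming scheme, and limit values here are placeholders rather than the project's exact configuration:

```typescript
// Illustrative multer configuration; paths and limits are assumptions.
import multer from 'multer';
import path from 'path';
import crypto from 'crypto';

// Only accept image/* MIME types, mirroring the imageFileFilter described above.
const imageFileFilter: multer.Options['fileFilter'] = (_req, file, cb) => {
  cb(null, file.mimetype.startsWith('image/'));
};

const storage = multer.diskStorage({
  destination: 'uploads/logos',
  filename: (_req, file, cb) => {
    // Timestamp + random suffix keeps filenames unique and unguessable.
    const unique = `${Date.now()}-${crypto.randomBytes(4).toString('hex')}`;
    cb(null, `${unique}${path.extname(file.originalname)}`);
  },
});

export const logoUpload = multer({
  storage,
  fileFilter: imageFileFilter,
  limits: { fileSize: 2 * 1024 * 1024 }, // 2 MB logo limit, as noted above
});
```

A route would then apply it with something like `logoUpload.single('logo')` ahead of its handler.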
### Error Handling
**Production-Safe Responses** (`src/middleware/errorHandler.ts`):
- Production mode: Returns generic error message with tracking ID
- Development mode: Returns detailed error information
- Sensitive error details are logged but never exposed to clients
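Sketched below, assuming a pino-style logger at `../services/logger.server`; the real `errorHandler.ts` may differ:

```typescript
// Sketch of a production-safe error handler; the logger import path is an assumption.
import type { ErrorRequestHandler } from 'express';
import { randomUUID } from 'crypto';
import { logger } from '../services/logger.server';

export const errorHandler: ErrorRequestHandler = (err, req, res, _next) => {
  const errorId = randomUUID();
  // Full details are logged server-side only.
  logger.error({ errorId, err, url: req.originalUrl }, 'Unhandled request error');

  if (process.env.NODE_ENV === 'production') {
    res.status(500).json({ message: 'An unexpected error occurred.', errorId });
  } else {
    res.status(500).json({ message: err.message, stack: err.stack, errorId });
  }
};
```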
### Request Security
**Timeout Protection** (`server.ts`):
- 5-minute request timeout via `connect-timeout` middleware
- Prevents resource exhaustion from long-running requests
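The usual `connect-timeout` wiring for this looks like the following; the exact placement in `server.ts` may differ:

```typescript
// How a 5-minute request timeout is typically wired with connect-timeout.
import express from 'express';
import timeout from 'connect-timeout';

const app = express();
app.use(timeout('5m')); // marks req.timedout after 5 minutes

// connect-timeout only flags the request, so later middleware must bail out explicitly.
const haltOnTimedOut: express.RequestHandler = (req, _res, next) => {
  if (!req.timedout) next();
};
app.use(haltOnTimedOut);
```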
**Secure Cookies**:
```typescript
// Cookie configuration for auth tokens
{
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
sameSite: 'strict',
maxAge: 7 * 24 * 60 * 60 * 1000 // 7 days for refresh token
}
```
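For illustration, a route issuing a refresh token with these options might look like the following; the route path, cookie name, and token generation are placeholders:

```typescript
// Illustrative usage of the cookie options above.
import { Router } from 'express';
import { randomUUID } from 'crypto';

export const authRouter = Router();

authRouter.post('/auth/refresh', (_req, res) => {
  const refreshToken = randomUUID(); // stand-in for the real signed token
  res.cookie('refreshToken', refreshToken, {
    httpOnly: true,                                // not readable from client-side JS
    secure: process.env.NODE_ENV === 'production', // HTTPS only outside development
    sameSite: 'strict',
    maxAge: 7 * 24 * 60 * 60 * 1000,               // 7 days
  });
  res.status(204).end();
});
```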
### Request Logging
Per-request structured logging (ADR-004):
- Request ID tracking
- User ID and IP address logging
- Failed request details (4xx+) logged with headers and body
- Unhandled errors assigned unique error IDs
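A sketch of per-request logging along these lines is below; field names, the logger path, and where the authenticated user is stored are assumptions:

```typescript
// Illustrative request-logging middleware (ADR-004); details are assumptions.
import type { RequestHandler } from 'express';
import { randomUUID } from 'crypto';
import { logger } from '../services/logger.server';

export const requestLogger: RequestHandler = (req, res, next) => {
  const requestId = randomUUID();
  res.locals.requestId = requestId;

  res.on('finish', () => {
    const entry = {
      requestId,
      method: req.method,
      url: req.originalUrl,
      status: res.statusCode,
      ip: req.ip,
      userId: res.locals.user?.user_id, // populated by auth middleware, when present
    };
    if (res.statusCode >= 400) {
      // Failed requests get headers and body for debugging, as described above.
      logger.warn({ ...entry, headers: req.headers, body: req.body }, 'Request failed');
    } else {
      logger.info(entry, 'Request completed');
    }
  });

  next();
};
```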
## Key Files
- `server.ts` - Helmet middleware configuration (security headers)
- `src/config/rateLimiters.ts` - Rate limiter definitions (17+ limiters)
- `src/utils/rateLimit.ts` - Rate limit skip logic for testing
- `src/middleware/validation.middleware.ts` - Zod-based request validation
- `src/middleware/errorHandler.ts` - Production-safe error handling
- `src/middleware/multer.middleware.ts` - Secure file upload configuration
- `src/utils/stringUtils.ts` - Filename sanitization
## Future Enhancements
1. **Configure CORS** (if needed for cross-origin access):
```bash
npm install cors @types/cors
```
Add to `server.ts`:
```typescript
import cors from 'cors';
app.use(cors({
origin: process.env.ALLOWED_ORIGINS?.split(',') || 'http://localhost:3000',
credentials: true,
}));
```
2. **Redis-backed rate limiting**: For distributed deployments, use the `rate-limit-redis` store (see the sketch after this list)
3. **CSP Nonce**: Generate per-request nonces for stricter script-src policy
4. **Report-Only CSP**: Add `Content-Security-Policy-Report-Only` header for testing policy changes
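For enhancement 2, a sketch of sharing limiter counters through Redis, assuming `rate-limit-redis` is added as a dependency (`ioredis` is already installed); the exact `sendCommand` typing may need a cast depending on versions:

```typescript
// Sketch only: a Redis-backed limiter so counters are shared across instances.
import { rateLimit } from 'express-rate-limit';
import { RedisStore } from 'rate-limit-redis';
import IORedis from 'ioredis';

const redis = new IORedis(process.env.REDIS_URL ?? 'redis://localhost:6379/0');

export const sharedLoginLimiter = rateLimit({
  windowMs: 15 * 60 * 1000,
  limit: 5, // same budget as loginLimiter above
  standardHeaders: true,
  legacyHeaders: false,
  // rate-limit-redis only needs a way to send raw commands; ioredis exposes `call`.
  store: new RedisStore({
    sendCommand: (command, ...args) => redis.call(command, ...args),
  }),
});
```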

package-lock.json generated

@@ -22,6 +22,7 @@
"express": "^5.1.0",
"express-list-endpoints": "^7.1.1",
"express-rate-limit": "^8.2.1",
"helmet": "^8.1.0",
"ioredis": "^5.8.2",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",
@@ -10193,6 +10194,15 @@
"dev": true,
"license": "MIT"
},
"node_modules/helmet": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/helmet/-/helmet-8.1.0.tgz",
"integrity": "sha512-jOiHyAZsmnr8LqoPGmCjYAaiuWwjAPLgY8ZX2XrmHawt99/u1y6RgrZMTeoPfpUbV96HOalYgz1qzkRbw54Pmg==",
"license": "MIT",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/help-me": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/help-me/-/help-me-5.0.0.tgz",


@@ -41,6 +41,7 @@
"express": "^5.1.0",
"express-list-endpoints": "^7.1.1",
"express-rate-limit": "^8.2.1",
"helmet": "^8.1.0",
"ioredis": "^5.8.2",
"jsonwebtoken": "^9.0.2",
"lucide-react": "^0.555.0",


@@ -1,6 +1,7 @@
// server.ts
import express, { Request, Response, NextFunction } from 'express';
import { randomUUID } from 'crypto';
import helmet from 'helmet';
import timeout from 'connect-timeout';
import cookieParser from 'cookie-parser';
import listEndpoints from 'express-list-endpoints';
@@ -62,6 +63,38 @@ logger.info('-----------------------------------------------\n');
const app = express();
// --- Security Headers Middleware (ADR-016) ---
// Helmet sets various HTTP headers to help protect the app from common web vulnerabilities.
// Must be applied early in the middleware chain, before any routes.
app.use(
helmet({
// Content Security Policy - configured for API + SPA frontend
contentSecurityPolicy: {
directives: {
defaultSrc: ["'self'"],
scriptSrc: ["'self'", "'unsafe-inline'"], // Allow inline scripts for React
styleSrc: ["'self'", "'unsafe-inline'"], // Allow inline styles for Tailwind
imgSrc: ["'self'", 'data:', 'blob:', 'https:'], // Allow images from various sources
fontSrc: ["'self'", 'https:', 'data:'],
connectSrc: ["'self'", 'https:', 'wss:'], // Allow API and WebSocket connections
frameSrc: ["'none'"], // Disallow iframes
objectSrc: ["'none'"], // Disallow plugins
upgradeInsecureRequests: process.env.NODE_ENV === 'production' ? [] : null,
},
},
// Cross-Origin settings for API
crossOriginEmbedderPolicy: false, // Disabled to allow loading external images
crossOriginResourcePolicy: { policy: 'cross-origin' }, // Allow cross-origin resource loading
// Additional security headers
hsts: {
maxAge: 31536000, // 1 year in seconds
includeSubDomains: true,
preload: true,
},
referrerPolicy: { policy: 'strict-origin-when-cross-origin' },
}),
);
// --- Core Middleware ---
// Increase the limit for JSON and URL-encoded bodies. This is crucial for handling large file uploads
// that are part of multipart/form-data requests, as the overall request size is checked.


@@ -34,6 +34,16 @@ vi.mock('../logger.server', () => ({
}));
import { logger as mockLogger } from '../logger.server';
// Mock cacheService to bypass caching logic during tests
vi.mock('../cacheService.server', () => ({
cacheService: {
getOrSet: vi.fn(async (_key, callback) => callback()),
invalidateFlyer: vi.fn(),
},
CACHE_TTL: { BRANDS: 3600, FLYERS: 300, FLYER_ITEMS: 600 },
CACHE_PREFIX: { BRANDS: 'brands', FLYERS: 'flyers', FLYER_ITEMS: 'flyer_items' },
}));
// Mock the withTransaction helper
vi.mock('./connection.db', async (importOriginal) => {
const actual = await importOriginal<typeof import('./connection.db')>();


@@ -2,6 +2,7 @@
import { describe, it, expect, afterAll } from 'vitest';
import crypto from 'crypto';
import * as apiClient from '../../services/apiClient';
import { getPool } from '../../services/db/connection.db';
import path from 'path';
import fs from 'fs';
import { cleanupDb } from '../utils/cleanup';
@@ -19,12 +20,14 @@ describe('E2E Flyer Upload and Processing Workflow', () => {
let authToken: string;
let userId: string | null = null;
let flyerId: number | null = null;
let storeId: number | null = null;
afterAll(async () => {
// Use the centralized cleanup utility for robustness.
await cleanupDb({
userIds: [userId],
flyerIds: [flyerId],
storeIds: [storeId],
});
});
@@ -98,5 +101,13 @@ describe('E2E Flyer Upload and Processing Workflow', () => {
expect(jobStatus.state).toBe('completed');
flyerId = jobStatus.returnValue?.flyerId;
expect(flyerId).toBeTypeOf('number');
// Fetch the store_id associated with the created flyer for robust cleanup
if (flyerId) {
const flyerRes = await getPool().query('SELECT store_id FROM public.flyers WHERE flyer_id = $1', [flyerId]);
if (flyerRes.rows.length > 0) {
storeId = flyerRes.rows[0].store_id;
}
}
}, 240000); // Extended timeout for AI processing
});


@@ -18,6 +18,8 @@ describe('Admin API Routes Integration Tests', () => {
let regularUserToken: string;
const createdUserIds: string[] = [];
const createdStoreIds: number[] = [];
const createdCorrectionIds: number[] = [];
const createdFlyerIds: number[] = [];
beforeAll(async () => {
vi.stubEnv('FRONTEND_URL', 'https://example.com');
@@ -47,6 +49,8 @@ describe('Admin API Routes Integration Tests', () => {
await cleanupDb({
userIds: createdUserIds,
storeIds: createdStoreIds,
suggestedCorrectionIds: createdCorrectionIds,
flyerIds: createdFlyerIds,
});
});
@@ -174,6 +178,7 @@ describe('Admin API Routes Integration Tests', () => {
[testStoreId, `checksum-${Date.now()}-${Math.random()}`.padEnd(64, '0')],
);
const flyerId = flyerRes.rows[0].flyer_id;
createdFlyerIds.push(flyerId);
const flyerItemRes = await getPool().query(
`INSERT INTO public.flyer_items (flyer_id, item, price_display, price_in_cents, quantity)
@@ -188,6 +193,7 @@ describe('Admin API Routes Integration Tests', () => {
[testFlyerItemId, adminUser.user.user_id],
);
testCorrectionId = correctionRes.rows[0].suggested_correction_id;
createdCorrectionIds.push(testCorrectionId);
});
it('should allow an admin to approve a correction', async () => {


@@ -110,6 +110,7 @@ describe('Flyer Processing Background Job Integration Test', () => {
const createdUserIds: string[] = [];
const createdFlyerIds: number[] = [];
const createdFilePaths: string[] = [];
const createdStoreIds: number[] = [];
let workersModule: typeof import('../../services/workers.server');
const originalFrontendUrl = process.env.FRONTEND_URL;
@@ -177,6 +178,7 @@ describe('Flyer Processing Background Job Integration Test', () => {
await cleanupDb({
userIds: createdUserIds,
flyerIds: createdFlyerIds,
storeIds: createdStoreIds,
});
// Use the centralized file cleanup utility.
@@ -274,6 +276,9 @@ describe('Flyer Processing Background Job Integration Test', () => {
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
expect(savedFlyer?.flyer_id).toBe(flyerId);
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
expect(savedFlyer?.file_name).toBe(uniqueFileName);
// Also add the final processed image path to the cleanup list.
// This is important because JPEGs are re-processed to strip EXIF data, creating a new file.
@@ -385,6 +390,9 @@ describe('Flyer Processing Background Job Integration Test', () => {
// 4. Verify EXIF data is stripped from the saved file
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));
createdFilePaths.push(savedImagePath); // Add final path for cleanup
@@ -476,6 +484,9 @@ describe('Flyer Processing Background Job Integration Test', () => {
// 4. Verify metadata is stripped from the saved file
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));
createdFilePaths.push(savedImagePath); // Add final path for cleanup


@@ -204,6 +204,9 @@ describe('Gamification Flow Integration Test', () => {
const savedFlyer = await db.flyerRepo.findFlyerByChecksum(checksum, logger);
expect(savedFlyer).toBeDefined();
expect(savedFlyer?.file_name).toBe(uniqueFileName);
if (savedFlyer?.store_id) {
createdStoreIds.push(savedFlyer.store_id);
}
// Also add the final processed image path to the cleanup list.
// This is important because JPEGs are re-processed to strip EXIF data, creating a new file.
const savedImagePath = path.join(uploadDir, path.basename(savedFlyer!.image_url));


@@ -88,27 +88,29 @@ describe('Price History API Integration Test (/api/price-history)', () => {
afterAll(async () => {
vi.unstubAllEnvs();
await cleanupDb({ userIds: createdUserIds });
const pool = getPool();
// The CASCADE on the tables should handle flyer_items.
// The delete on flyers cascades to flyer_items, which fires a trigger `recalculate_price_history_on_flyer_item_delete`.
// This trigger has a bug causing the test to fail. As a workaround for the test suite,
// we temporarily disable user-defined triggers on the flyer_items table during cleanup.
const flyerIds = [flyerId1, flyerId2, flyerId3].filter(Boolean);
try {
await pool.query('ALTER TABLE public.flyer_items DISABLE TRIGGER USER;');
if (flyerIds.length > 0) {
await pool.query('DELETE FROM public.flyers WHERE flyer_id = ANY($1::int[])', [flyerIds]);
}
if (storeId) await pool.query('DELETE FROM public.stores WHERE store_id = $1', [storeId]);
if (masterItemId)
await pool.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = $1', [
masterItemId,
]);
} finally {
// Ensure triggers are always re-enabled, even if an error occurs during deletion.
await pool.query('ALTER TABLE public.flyer_items ENABLE TRIGGER USER;');
}
await cleanupDb({
userIds: createdUserIds,
masterItemIds: [masterItemId],
storeIds: [storeId],
});
});
it('should return the correct price history for a given master item ID', async () => {


@@ -26,6 +26,7 @@ describe('Public API Routes Integration Tests', () => {
let testRecipe: Recipe;
let testFlyer: Flyer;
let testStoreId: number;
const createdRecipeCommentIds: number[] = [];
beforeAll(async () => {
vi.stubEnv('FRONTEND_URL', 'https://example.com');
@@ -85,6 +86,7 @@ describe('Public API Routes Integration Tests', () => {
recipeIds: testRecipe ? [testRecipe.recipe_id] : [],
flyerIds: testFlyer ? [testFlyer.flyer_id] : [],
storeIds: testStoreId ? [testStoreId] : [],
recipeCommentIds: createdRecipeCommentIds,
});
});
@@ -186,10 +188,11 @@ describe('Public API Routes Integration Tests', () => {
it('GET /api/recipes/:recipeId/comments should return comments for a recipe', async () => {
// Add a comment to our test recipe first
await getPool().query(
`INSERT INTO public.recipe_comments (recipe_id, user_id, content) VALUES ($1, $2, 'Test comment')`,
const commentRes = await getPool().query(
`INSERT INTO public.recipe_comments (recipe_id, user_id, content) VALUES ($1, $2, 'Test comment') RETURNING recipe_comment_id`,
[testRecipe.recipe_id, testUser.user.user_id],
);
createdRecipeCommentIds.push(commentRes.rows[0].recipe_comment_id);
const response = await request.get(`/api/recipes/${testRecipe.recipe_id}/comments`);
const comments: RecipeComment[] = response.body;
expect(response.status).toBe(200);


@@ -19,6 +19,7 @@ describe('User API Routes Integration Tests', () => {
let testUser: UserProfile;
let authToken: string;
const createdUserIds: string[] = [];
const createdMasterItemIds: number[] = [];
// Before any tests run, create a new user and log them in.
// The token will be used for all subsequent API calls in this test suite.
@@ -38,7 +39,10 @@ describe('User API Routes Integration Tests', () => {
// This now cleans up ALL users created by this test suite to prevent pollution.
afterAll(async () => {
vi.unstubAllEnvs();
await cleanupDb({ userIds: createdUserIds });
await cleanupDb({
userIds: createdUserIds,
masterItemIds: createdMasterItemIds
});
// Safeguard to clean up any avatar files created during tests.
const uploadDir = path.resolve(__dirname, '../../../uploads/avatars');
@@ -244,6 +248,7 @@ describe('User API Routes Integration Tests', () => {
.send({ itemName: 'Integration Test Item', category: 'Other/Miscellaneous' });
const newItem = addResponse.body;
if (newItem?.master_grocery_item_id) createdMasterItemIds.push(newItem.master_grocery_item_id);
// Assert 1: Check that the item was created correctly.
expect(addResponse.status).toBe(201);
expect(newItem.name).toBe('Integration Test Item');


@@ -8,6 +8,10 @@ interface CleanupOptions {
storeIds?: (number | null | undefined)[];
recipeIds?: (number | null | undefined)[];
budgetIds?: (number | null | undefined)[];
masterItemIds?: (number | null | undefined)[];
shoppingListIds?: (number | null | undefined)[];
suggestedCorrectionIds?: (number | null | undefined)[];
recipeCommentIds?: (number | null | undefined)[];
}
/**
@@ -25,11 +29,21 @@ export const cleanupDb = async (options: CleanupOptions) => {
// Order of deletion matters to avoid foreign key violations.
// Children entities first, then parents.
if (options.suggestedCorrectionIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.suggested_corrections WHERE suggested_correction_id = ANY($1::int[])', [options.suggestedCorrectionIds]);
logger.debug(`Cleaned up ${options.suggestedCorrectionIds.length} suggested correction(s).`);
}
if (options.budgetIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.budgets WHERE budget_id = ANY($1::int[])', [options.budgetIds]);
logger.debug(`Cleaned up ${options.budgetIds.length} budget(s).`);
}
if (options.recipeCommentIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipe_comments WHERE recipe_comment_id = ANY($1::int[])', [options.recipeCommentIds]);
logger.debug(`Cleaned up ${options.recipeCommentIds.length} recipe comment(s).`);
}
if (options.recipeIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipes WHERE recipe_id = ANY($1::int[])', [options.recipeIds]);
logger.debug(`Cleaned up ${options.recipeIds.length} recipe(s).`);
@@ -45,6 +59,16 @@ export const cleanupDb = async (options: CleanupOptions) => {
logger.debug(`Cleaned up ${options.storeIds.length} store(s).`);
}
if (options.masterItemIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = ANY($1::int[])', [options.masterItemIds]);
logger.debug(`Cleaned up ${options.masterItemIds.length} master grocery item(s).`);
}
if (options.shoppingListIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.shopping_lists WHERE shopping_list_id = ANY($1::int[])', [options.shoppingListIds]);
logger.debug(`Cleaned up ${options.shoppingListIds.length} shopping list(s).`);
}
if (options.userIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.users WHERE user_id = ANY($1::uuid[])', [options.userIds]);
logger.debug(`Cleaned up ${options.userIds.length} user(s).`);