Compare commits

...

10 Commits

Author SHA1 Message Date
Gitea Actions
cf2cc5b832 ci: Bump version to 0.16.2 [skip ci] 2026-02-18 15:01:02 +05:00
d2db3562bb test deploy
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 24m32s
2026-02-18 01:35:16 -08:00
Gitea Actions
0532b4b22e style: auto-format code via Prettier [skip ci] 2026-02-18 14:06:10 +05:00
Gitea Actions
e767ccbb21 ci: Bump version to 0.16.1 [skip ci] 2026-02-18 14:04:40 +05:00
1ff813f495 job to fix pm2
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 24m45s
2026-02-18 00:54:08 -08:00
204fe4394a oh god maybe pm2 finally workin
Some checks are pending
Deploy to Test Environment / deploy-to-test (push) Has started running
2026-02-17 23:54:27 -08:00
Gitea Actions
029b621632 ci: Bump version to 0.16.0 for production release [skip ci] 2026-02-18 11:21:36 +05:00
Gitea Actions
0656ab3ae7 style: auto-format code via Prettier [skip ci] 2026-02-18 10:48:03 +05:00
Gitea Actions
ae0bb9e04d ci: Bump version to 0.15.2 [skip ci] 2026-02-18 10:46:29 +05:00
b83c37b977 deploy fixes
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 25m45s
2026-02-17 21:44:34 -08:00
89 changed files with 1009 additions and 408 deletions

View File

@@ -59,6 +59,8 @@ GITHUB_CLIENT_SECRET=
# AI/ML Services
# ===================
# REQUIRED: Google Gemini API key for flyer OCR processing
# NOTE: Test/staging environment deliberately OMITS this to preserve free API quota.
# Production has a working key. Deploy warnings in test are expected and safe to ignore.
GEMINI_API_KEY=your-gemini-api-key
# ===================

View File

@@ -121,6 +121,11 @@ jobs:
run: |
echo "Deploying application files to /var/www/flyer-crawler.projectium.com..."
APP_PATH="/var/www/flyer-crawler.projectium.com"
# CRITICAL: Stop PM2 processes BEFORE deploying files to prevent CWD errors
echo "--- Stopping production PM2 processes before file deployment ---"
pm2 stop flyer-crawler-api flyer-crawler-worker flyer-crawler-analytics-worker || echo "No production processes to stop"
mkdir -p "$APP_PATH"
mkdir -p "$APP_PATH/flyer-images/icons" "$APP_PATH/flyer-images/archive"
rsync -avz --delete --exclude 'node_modules' --exclude '.git' --exclude 'dist' --exclude 'flyer-images' ./ "$APP_PATH/"

View File

@@ -81,8 +81,24 @@ jobs:
- name: TypeScript Type-Check
run: npm run type-check
- name: Prettier Check
run: npx prettier --check . || true
- name: Prettier Auto-Fix
run: |
echo "--- Running Prettier auto-fix for test/staging deployment ---"
# Auto-format all files
npx prettier --write .
# Check if any files were changed
if ! git diff --quiet; then
echo "📝 Prettier made formatting changes. Committing..."
git config --global user.name 'Gitea Actions'
git config --global user.email 'actions@gitea.projectium.com'
git add .
git commit -m "style: auto-format code via Prettier [skip ci]"
git push
echo "✅ Formatting changes committed and pushed."
else
echo "✅ No formatting changes needed."
fi
- name: Lint Check
run: npm run lint || true
@@ -490,6 +506,10 @@ jobs:
echo "Deploying application files to /var/www/flyer-crawler-test.projectium.com..."
APP_PATH="/var/www/flyer-crawler-test.projectium.com"
# CRITICAL: Stop PM2 processes BEFORE deploying files to prevent CWD errors
echo "--- Stopping test PM2 processes before file deployment ---"
pm2 stop flyer-crawler-api-test flyer-crawler-worker-test flyer-crawler-analytics-worker-test || echo "No test processes to stop"
# Ensure the destination directory exists
mkdir -p "$APP_PATH"
mkdir -p "$APP_PATH/flyer-images/icons" "$APP_PATH/flyer-images/archive" # Ensure all required subdirectories exist

View File

@@ -0,0 +1,86 @@
# .gitea/workflows/restart-pm2.yml
#
# Manual workflow to restart PM2 processes and verify their status.
# Useful for recovering from PM2 daemon crashes or process issues.
name: Restart PM2 Processes
on:
workflow_dispatch:
inputs:
environment:
description: 'Environment to restart (test, production, or both)'
required: true
default: 'test'
type: choice
options:
- test
- production
- both
jobs:
restart-pm2:
runs-on: projectium.com
steps:
- name: Validate Environment Input
run: |
echo "Restarting PM2 processes for environment: ${{ gitea.event.inputs.environment }}"
- name: Restart Test Environment
if: gitea.event.inputs.environment == 'test' || gitea.event.inputs.environment == 'both'
run: |
echo "=== RESTARTING TEST ENVIRONMENT ==="
cd /var/www/flyer-crawler-test.projectium.com
echo "--- Current PM2 State (Before Restart) ---"
pm2 list
echo "--- Restarting Test Processes ---"
pm2 restart flyer-crawler-api-test flyer-crawler-worker-test flyer-crawler-analytics-worker-test || {
echo "Restart failed, attempting to start processes..."
pm2 start ecosystem-test.config.cjs
}
echo "--- Saving PM2 Process List ---"
pm2 save
echo "--- Waiting 3 seconds for processes to stabilize ---"
sleep 3
echo "=== TEST ENVIRONMENT STATUS ==="
pm2 ps
- name: Restart Production Environment
if: gitea.event.inputs.environment == 'production' || gitea.event.inputs.environment == 'both'
run: |
echo "=== RESTARTING PRODUCTION ENVIRONMENT ==="
cd /var/www/flyer-crawler.projectium.com
echo "--- Current PM2 State (Before Restart) ---"
pm2 list
echo "--- Restarting Production Processes ---"
pm2 restart flyer-crawler-api flyer-crawler-worker flyer-crawler-analytics-worker || {
echo "Restart failed, attempting to start processes..."
pm2 start ecosystem.config.cjs
}
echo "--- Saving PM2 Process List ---"
pm2 save
echo "--- Waiting 3 seconds for processes to stabilize ---"
sleep 3
echo "=== PRODUCTION ENVIRONMENT STATUS ==="
pm2 ps
- name: Final PM2 Status (All Processes)
run: |
echo "========================================="
echo "FINAL PM2 STATUS - ALL PROCESSES"
echo "========================================="
pm2 ps
echo ""
echo "--- PM2 Logs (Last 20 Lines) ---"
pm2 logs --lines 20 --nostream || echo "No logs available"

View File

@@ -139,3 +139,5 @@ See [INSTALL.md](INSTALL.md) for the complete list.
## License
[Add license information here]
annoyed

View File

@@ -56,7 +56,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -90,7 +90,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -114,7 +114,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -138,7 +138,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -161,7 +161,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -189,7 +189,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -211,7 +211,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -234,7 +234,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -259,7 +259,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -284,7 +284,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -307,7 +307,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -330,7 +330,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -355,7 +355,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -379,7 +379,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -425,7 +425,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -448,7 +448,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -476,7 +476,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -502,7 +502,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -529,7 +529,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -555,7 +555,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -579,7 +579,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -612,7 +612,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -637,7 +637,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -656,7 +656,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -681,7 +681,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -705,7 +705,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -757,7 +757,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Measurements**: **********************\_\_\_**********************
**Measurements**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -765,7 +765,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
### Test 8.1: Chrome/Edge
**Browser Version**: ******\_\_\_******
**Browser Version**: **\*\***\_\_\_**\*\***
**Tests to Run**:
@@ -775,13 +775,13 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
### Test 8.2: Firefox
**Browser Version**: ******\_\_\_******
**Browser Version**: **\*\***\_\_\_**\*\***
**Tests to Run**:
@@ -791,13 +791,13 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
### Test 8.3: Safari (macOS/iOS)
**Browser Version**: ******\_\_\_******
**Browser Version**: **\*\***\_\_\_**\*\***
**Tests to Run**:
@@ -807,7 +807,7 @@ podman exec -it flyer-crawler-dev npm run dev:container
**Pass/Fail**: [ ]
**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
---
@@ -849,8 +849,8 @@ podman exec -it flyer-crawler-dev npm run dev:container
## Sign-Off
**Tester Name**: **********************\_\_\_**********************
**Date Completed**: **********************\_\_\_**********************
**Tester Name**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
**Date Completed**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
**Overall Status**: [ ] PASS [ ] PASS WITH ISSUES [ ] FAIL
**Ready for Production**: [ ] YES [ ] NO [ ] WITH FIXES

View File

@@ -208,7 +208,7 @@ Press F12 or Ctrl+Shift+I
**Result**: [ ] PASS [ ] FAIL
**Errors found**: ******************\_\_\_******************
**Errors found**: **\*\*\*\***\*\***\*\*\*\***\_\_\_**\*\*\*\***\*\***\*\*\*\***
---
@@ -224,7 +224,7 @@ Check for:
**Result**: [ ] PASS [ ] FAIL
**Issues found**: ******************\_\_\_******************
**Issues found**: **\*\*\*\***\*\***\*\*\*\***\_\_\_**\*\*\*\***\*\***\*\*\*\***
---
@@ -272,4 +272,4 @@ Check for:
2. ***
3. ***
**Sign-off**: ********\_\_\_******** **Date**: ****\_\_\_****
**Sign-off**: **\*\*\*\***\_\_\_**\*\*\*\*** **Date**: \***\*\_\_\_\*\***

View File

@@ -39,15 +39,15 @@ All cache operations are fail-safe - cache failures do not break the application
Different data types use different TTL values based on volatility:
| Data Type | TTL | Rationale |
| ------------------- | --------- | -------------------------------------- |
| Brands/Stores | 1 hour | Rarely changes, safe to cache longer |
| Flyer lists | 5 minutes | Changes when new flyers are added |
| Individual flyers | 10 minutes| Stable once created |
| Flyer items | 10 minutes| Stable once created |
| Statistics | 5 minutes | Can be slightly stale |
| Frequent sales | 15 minutes| Aggregated data, updated periodically |
| Categories | 1 hour | Rarely changes |
| Data Type | TTL | Rationale |
| ----------------- | ---------- | ------------------------------------- |
| Brands/Stores | 1 hour | Rarely changes, safe to cache longer |
| Flyer lists | 5 minutes | Changes when new flyers are added |
| Individual flyers | 10 minutes | Stable once created |
| Flyer items | 10 minutes | Stable once created |
| Statistics | 5 minutes | Can be slightly stale |
| Frequent sales | 15 minutes | Aggregated data, updated periodically |
| Categories | 1 hour | Rarely changes |
### Cache Key Strategy
@@ -64,11 +64,11 @@ Cache keys follow a consistent prefix pattern for pattern-based invalidation:
The following repository methods implement server-side caching:
| Method | Cache Key Pattern | TTL |
| ------ | ----------------- | --- |
| `FlyerRepository.getAllBrands()` | `cache:brands` | 1 hour |
| `FlyerRepository.getFlyers()` | `cache:flyers:{limit}:{offset}` | 5 minutes |
| `FlyerRepository.getFlyerItems()` | `cache:flyer-items:{flyerId}` | 10 minutes |
| Method | Cache Key Pattern | TTL |
| --------------------------------- | ------------------------------- | ---------- |
| `FlyerRepository.getAllBrands()` | `cache:brands` | 1 hour |
| `FlyerRepository.getFlyers()` | `cache:flyers:{limit}:{offset}` | 5 minutes |
| `FlyerRepository.getFlyerItems()` | `cache:flyer-items:{flyerId}` | 10 minutes |
### Cache Invalidation
@@ -86,14 +86,14 @@ The following repository methods implement server-side caching:
TanStack React Query provides client-side caching with configurable stale times:
| Query Type | Stale Time |
| ----------------- | ----------- |
| Categories | 1 hour |
| Master Items | 10 minutes |
| Flyer Items | 5 minutes |
| Flyers | 2 minutes |
| Shopping Lists | 1 minute |
| Activity Log | 30 seconds |
| Query Type | Stale Time |
| -------------- | ---------- |
| Categories | 1 hour |
| Master Items | 10 minutes |
| Flyer Items | 5 minutes |
| Flyers | 2 minutes |
| Shopping Lists | 1 minute |
| Activity Log | 30 seconds |
### Multi-Layer Cache Architecture

View File

@@ -80,13 +80,13 @@ src/
**Common Utility Patterns**:
| Pattern | Classes |
| ------- | ------- |
| Card container | `bg-white dark:bg-gray-800 rounded-lg shadow-md p-6` |
| Primary button | `bg-brand-primary hover:bg-brand-dark text-white rounded-lg px-4 py-2` |
| Secondary button | `bg-gray-100 dark:bg-gray-700 text-gray-700 dark:text-gray-200` |
| Input field | `border border-gray-300 dark:border-gray-600 rounded-md px-3 py-2` |
| Focus ring | `focus:outline-none focus:ring-2 focus:ring-brand-primary` |
| Pattern | Classes |
| ---------------- | ---------------------------------------------------------------------- |
| Card container | `bg-white dark:bg-gray-800 rounded-lg shadow-md p-6` |
| Primary button | `bg-brand-primary hover:bg-brand-dark text-white rounded-lg px-4 py-2` |
| Secondary button | `bg-gray-100 dark:bg-gray-700 text-gray-700 dark:text-gray-200` |
| Input field | `border border-gray-300 dark:border-gray-600 rounded-md px-3 py-2` |
| Focus ring | `focus:outline-none focus:ring-2 focus:ring-brand-primary` |
### Color System
@@ -187,13 +187,13 @@ export const CheckCircleIcon: React.FC<IconProps> = ({ title, ...props }) => (
**Context Providers** (see ADR-005):
| Provider | Purpose |
| -------- | ------- |
| `AuthProvider` | Authentication state |
| `ModalProvider` | Modal open/close state |
| `FlyersProvider` | Flyer data |
| `MasterItemsProvider` | Grocery items |
| `UserDataProvider` | User-specific data |
| Provider | Purpose |
| --------------------- | ---------------------- |
| `AuthProvider` | Authentication state |
| `ModalProvider` | Modal open/close state |
| `FlyersProvider` | Flyer data |
| `MasterItemsProvider` | Grocery items |
| `UserDataProvider` | User-specific data |
**Provider Hierarchy** in `AppProviders.tsx`:

View File

@@ -45,15 +45,15 @@ Using **helmet v8.x** configured in `server.ts` as the first middleware after ap
**Security Headers Applied**:
| Header | Configuration | Purpose |
| ------ | ------------- | ------- |
| Content-Security-Policy | Custom directives | Prevents XSS, code injection |
| Strict-Transport-Security | 1 year, includeSubDomains, preload | Forces HTTPS connections |
| X-Content-Type-Options | nosniff | Prevents MIME type sniffing |
| X-Frame-Options | DENY | Prevents clickjacking |
| X-XSS-Protection | 0 (disabled) | Deprecated, CSP preferred |
| Referrer-Policy | strict-origin-when-cross-origin | Controls referrer information |
| Cross-Origin-Resource-Policy | cross-origin | Allows external resource loading |
| Header | Configuration | Purpose |
| ---------------------------- | ---------------------------------- | -------------------------------- |
| Content-Security-Policy | Custom directives | Prevents XSS, code injection |
| Strict-Transport-Security | 1 year, includeSubDomains, preload | Forces HTTPS connections |
| X-Content-Type-Options | nosniff | Prevents MIME type sniffing |
| X-Frame-Options | DENY | Prevents clickjacking |
| X-XSS-Protection | 0 (disabled) | Deprecated, CSP preferred |
| Referrer-Policy | strict-origin-when-cross-origin | Controls referrer information |
| Cross-Origin-Resource-Policy | cross-origin | Allows external resource loading |
**Content Security Policy Directives**:
@@ -87,35 +87,35 @@ Using **express-rate-limit v8.2.1** with a centralized configuration in `src/con
```typescript
const standardConfig = {
standardHeaders: true, // Sends RateLimit-* headers
standardHeaders: true, // Sends RateLimit-* headers
legacyHeaders: false,
skip: shouldSkipRateLimit, // Disabled in test environment
skip: shouldSkipRateLimit, // Disabled in test environment
};
```
**Rate Limiters by Category**:
| Category | Limiter | Window | Max Requests |
| -------- | ------- | ------ | ------------ |
| **Authentication** | loginLimiter | 15 min | 5 |
| | registerLimiter | 1 hour | 5 |
| | forgotPasswordLimiter | 15 min | 5 |
| | resetPasswordLimiter | 15 min | 10 |
| | refreshTokenLimiter | 15 min | 20 |
| | logoutLimiter | 15 min | 10 |
| **Public/User Read** | publicReadLimiter | 15 min | 100 |
| | userReadLimiter | 15 min | 100 |
| | userUpdateLimiter | 15 min | 100 |
| **Sensitive Operations** | userSensitiveUpdateLimiter | 1 hour | 5 |
| | adminTriggerLimiter | 15 min | 30 |
| **AI/Costly** | aiGenerationLimiter | 15 min | 20 |
| | geocodeLimiter | 1 hour | 100 |
| | priceHistoryLimiter | 15 min | 50 |
| **Uploads** | adminUploadLimiter | 15 min | 20 |
| | aiUploadLimiter | 15 min | 10 |
| | batchLimiter | 15 min | 50 |
| **Tracking** | trackingLimiter | 15 min | 200 |
| | reactionToggleLimiter | 15 min | 150 |
| Category | Limiter | Window | Max Requests |
| ------------------------ | -------------------------- | ------ | ------------ |
| **Authentication** | loginLimiter | 15 min | 5 |
| | registerLimiter | 1 hour | 5 |
| | forgotPasswordLimiter | 15 min | 5 |
| | resetPasswordLimiter | 15 min | 10 |
| | refreshTokenLimiter | 15 min | 20 |
| | logoutLimiter | 15 min | 10 |
| **Public/User Read** | publicReadLimiter | 15 min | 100 |
| | userReadLimiter | 15 min | 100 |
| | userUpdateLimiter | 15 min | 100 |
| **Sensitive Operations** | userSensitiveUpdateLimiter | 1 hour | 5 |
| | adminTriggerLimiter | 15 min | 30 |
| **AI/Costly** | aiGenerationLimiter | 15 min | 20 |
| | geocodeLimiter | 1 hour | 100 |
| | priceHistoryLimiter | 15 min | 50 |
| **Uploads** | adminUploadLimiter | 15 min | 20 |
| | aiUploadLimiter | 15 min | 10 |
| | batchLimiter | 15 min | 50 |
| **Tracking** | trackingLimiter | 15 min | 200 |
| | reactionToggleLimiter | 15 min | 150 |
**Test Environment Handling**:
@@ -140,7 +140,7 @@ sanitizeFilename(filename: string): string
**Multer Configuration** (`src/middleware/multer.middleware.ts`):
- MIME type validation via `imageFileFilter` (only image/* allowed)
- MIME type validation via `imageFileFilter` (only image/\* allowed)
- File size limits (2MB for logos, configurable per upload type)
- Unique filenames using timestamps + random suffixes
- User-scoped storage paths
@@ -203,10 +203,12 @@ Per-request structured logging (ADR-004):
```typescript
import cors from 'cors';
app.use(cors({
origin: process.env.ALLOWED_ORIGINS?.split(',') || 'http://localhost:3000',
credentials: true,
}));
app.use(
cors({
origin: process.env.ALLOWED_ORIGINS?.split(',') || 'http://localhost:3000',
credentials: true,
}),
);
```
2. **Redis-backed rate limiting**: For distributed deployments, use `rate-limit-redis` store

View File

@@ -16,12 +16,12 @@ We will adopt a hybrid naming convention strategy to explicitly distinguish betw
1. **Database and AI Types (`snake_case`)**:
Interfaces, Type definitions, and Zod schemas that represent raw database rows or direct AI responses **MUST** use `snake_case`.
- *Examples*: `AiFlyerDataSchema`, `ExtractedFlyerItemSchema`, `FlyerInsert`.
- *Reasoning*: This avoids unnecessary mapping layers when inserting data into the database or parsing AI output. It serves as a visual cue that the data is "raw", "external", or destined for persistence.
- _Examples_: `AiFlyerDataSchema`, `ExtractedFlyerItemSchema`, `FlyerInsert`.
- _Reasoning_: This avoids unnecessary mapping layers when inserting data into the database or parsing AI output. It serves as a visual cue that the data is "raw", "external", or destined for persistence.
2. **Internal Application Logic (`camelCase`)**:
Variables, function arguments, and processed data structures used within the application logic (Service layer, UI components, utility functions) **MUST** use `camelCase`.
- *Reasoning*: This adheres to standard JavaScript/TypeScript practices and maintains consistency with the rest of the ecosystem (React, etc.).
- _Reasoning_: This adheres to standard JavaScript/TypeScript practices and maintains consistency with the rest of the ecosystem (React, etc.).
3. **Boundary Handling**:
- For background jobs that primarily move data from AI to DB, preserving `snake_case` is preferred to minimize transformation logic.

View File

@@ -486,9 +486,9 @@ Attach screenshots for:
## 🔐 Sign-Off
**Tester Name**: ******\*\*\*\*******\_\_\_******\*\*\*\*******
**Tester Name**: **\*\***\*\*\*\***\*\***\_\_\_**\*\***\*\*\*\***\*\***
**Date/Time Completed**: ****\*\*\*\*****\_\_\_****\*\*\*\*****
**Date/Time Completed**: \***\*\*\*\*\*\*\***\_\_\_\***\*\*\*\*\*\*\***
**Total Testing Time**: **\_\_** minutes

View File

@@ -123,6 +123,8 @@ node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
**Get API Key**: [Google AI Studio](https://aistudio.google.com/app/apikey)
**Test Environment Note**: The test/staging environment **deliberately omits** `GEMINI_API_KEY` to preserve free API quota. This is intentional - the API has strict daily limits on the free tier, and we want to reserve tokens for production use. AI features will be non-functional in test, but all other features can be tested normally. Deploy warnings about missing `GEMINI_API_KEY` in test logs are expected and safe to ignore.
### Google Services
| Variable | Required | Description |

View File

@@ -50,7 +50,7 @@ if (fs.existsSync(envPath)) {
} else {
console.warn('[ecosystem-test.config.cjs] No .env file found at:', envPath);
console.warn(
'[ecosystem-test.config.cjs] Environment variables must be provided by the shell or CI/CD.'
'[ecosystem-test.config.cjs] Environment variables must be provided by the shell or CI/CD.',
);
}
@@ -60,12 +60,16 @@ if (fs.existsSync(envPath)) {
// The actual application will fail to start if secrets are missing,
// which PM2 will handle with its restart logic.
const requiredSecrets = ['DB_HOST', 'JWT_SECRET', 'GEMINI_API_KEY'];
const missingSecrets = requiredSecrets.filter(key => !process.env[key]);
const missingSecrets = requiredSecrets.filter((key) => !process.env[key]);
if (missingSecrets.length > 0) {
console.warn('\n[ecosystem.config.test.cjs] WARNING: The following environment variables are MISSING:');
missingSecrets.forEach(key => console.warn(` - ${key}`));
console.warn('[ecosystem.config.test.cjs] The application may fail to start if these are required.\n');
console.warn(
'\n[ecosystem.config.test.cjs] WARNING: The following environment variables are MISSING:',
);
missingSecrets.forEach((key) => console.warn(` - ${key}`));
console.warn(
'[ecosystem.config.test.cjs] The application may fail to start if these are required.\n',
);
} else {
console.log('[ecosystem.config.test.cjs] Critical environment variables are present.');
}

View File

@@ -16,11 +16,13 @@
// The actual application will fail to start if secrets are missing,
// which PM2 will handle with its restart logic.
const requiredSecrets = ['DB_HOST', 'JWT_SECRET', 'GEMINI_API_KEY'];
const missingSecrets = requiredSecrets.filter(key => !process.env[key]);
const missingSecrets = requiredSecrets.filter((key) => !process.env[key]);
if (missingSecrets.length > 0) {
console.warn('\n[ecosystem.config.cjs] WARNING: The following environment variables are MISSING:');
missingSecrets.forEach(key => console.warn(` - ${key}`));
console.warn(
'\n[ecosystem.config.cjs] WARNING: The following environment variables are MISSING:',
);
missingSecrets.forEach((key) => console.warn(` - ${key}`));
console.warn('[ecosystem.config.cjs] The application may fail to start if these are required.\n');
} else {
console.log('[ecosystem.config.cjs] Critical environment variables are present.');

View File

@@ -34,9 +34,7 @@ if (missingVars.length > 0) {
'\n[ecosystem.dev.config.cjs] WARNING: The following environment variables are MISSING:',
);
missingVars.forEach((key) => console.warn(` - ${key}`));
console.warn(
'[ecosystem.dev.config.cjs] These should be set in compose.dev.yml or .env.local\n',
);
console.warn('[ecosystem.dev.config.cjs] These should be set in compose.dev.yml or .env.local\n');
} else {
console.log('[ecosystem.dev.config.cjs] Required environment variables are present.');
}

4
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "flyer-crawler",
"version": "0.15.1",
"version": "0.16.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "flyer-crawler",
"version": "0.15.1",
"version": "0.16.2",
"dependencies": {
"@bull-board/api": "^6.14.2",
"@bull-board/express": "^6.14.2",

View File

@@ -1,7 +1,7 @@
{
"name": "flyer-crawler",
"private": true,
"version": "0.15.1",
"version": "0.16.2",
"type": "module",
"engines": {
"node": ">=18.0.0"

View File

@@ -7,6 +7,7 @@
## Current State Analysis
### What We Have
1. **TanStack Query v5.90.12 already installed** in package.json
2. **Not being used** - Custom hooks reimplementing its functionality
3. **Custom `useInfiniteQuery` hook** ([src/hooks/useInfiniteQuery.ts](../src/hooks/useInfiniteQuery.ts)) using `useState`/`useEffect`
@@ -16,10 +17,12 @@
### Current Data Fetching Patterns
#### Pattern 1: Custom useInfiniteQuery Hook
**Location**: [src/hooks/useInfiniteQuery.ts](../src/hooks/useInfiniteQuery.ts)
**Used By**: [src/providers/FlyersProvider.tsx](../src/providers/FlyersProvider.tsx)
**Problems**:
- Reimplements pagination logic that TanStack Query provides
- Manual loading state management
- Manual error handling
@@ -28,10 +31,12 @@
- No request deduplication
#### Pattern 2: useApiOnMount Hook
**Location**: Unknown (needs investigation)
**Used By**: [src/providers/UserDataProvider.tsx](../src/providers/UserDataProvider.tsx)
**Problems**:
- Fetches data on mount only
- Manual loading/error state management
- No caching between unmount/remount
@@ -42,6 +47,7 @@
### Phase 1: Setup TanStack Query Infrastructure (Day 1)
#### 1.1 Create QueryClient Configuration
**File**: `src/config/queryClient.ts`
```typescript
@@ -51,7 +57,7 @@ export const queryClient = new QueryClient({
defaultOptions: {
queries: {
staleTime: 1000 * 60 * 5, // 5 minutes
gcTime: 1000 * 60 * 30, // 30 minutes (formerly cacheTime)
gcTime: 1000 * 60 * 30, // 30 minutes (formerly cacheTime)
retry: 1,
refetchOnWindowFocus: false,
refetchOnMount: true,
@@ -64,9 +70,11 @@ export const queryClient = new QueryClient({
```
#### 1.2 Wrap App with QueryClientProvider
**File**: `src/providers/AppProviders.tsx`
Add TanStack Query provider at the top level:
```typescript
import { QueryClientProvider } from '@tanstack/react-query';
import { ReactQueryDevtools } from '@tanstack/react-query-devtools';
@@ -158,6 +166,7 @@ export const FlyersProvider: React.FC<{ children: ReactNode }> = ({ children })
```
**Benefits**:
- ~100 lines of code removed
- Automatic caching
- Background refetching
@@ -170,6 +179,7 @@ export const FlyersProvider: React.FC<{ children: ReactNode }> = ({ children })
**Action**: Use TanStack Query's `useQuery` for watched items and shopping lists
**New Files**:
- `src/hooks/queries/useWatchedItemsQuery.ts`
- `src/hooks/queries/useShoppingListsQuery.ts`
@@ -208,6 +218,7 @@ export const useShoppingListsQuery = (enabled: boolean) => {
```
**Updated Provider**:
```typescript
import React, { ReactNode, useMemo } from 'react';
import { UserDataContext } from '../contexts/UserDataContext';
@@ -240,6 +251,7 @@ export const UserDataProvider: React.FC<{ children: ReactNode }> = ({ children }
```
**Benefits**:
- ~40 lines of code removed
- No manual state synchronization
- Automatic cache invalidation on user logout
@@ -292,7 +304,7 @@ export const useUpdateShoppingListMutation = () => {
// Optimistically update
queryClient.setQueryData(['shopping-lists'], (old) =>
old.map((list) => (list.id === newList.id ? newList : list))
old.map((list) => (list.id === newList.id ? newList : list)),
);
return { previousLists };
@@ -313,20 +325,24 @@ export const useUpdateShoppingListMutation = () => {
### Phase 4: Remove Old Custom Hooks (Day 9)
#### Files to Remove:
- `src/hooks/useInfiniteQuery.ts` (if not used elsewhere)
- `src/hooks/useApiOnMount.ts` (needs investigation)
#### Files to Update:
- Update any remaining usages in other components
### Phase 5: Testing & Documentation (Day 10)
#### 5.1 Update Tests
- Update provider tests to work with QueryClient
- Add tests for new query hooks
- Add tests for mutation hooks
#### 5.2 Update Documentation
- Mark ADR-0005 as **Accepted** and **Implemented**
- Add usage examples to documentation
- Update developer onboarding guide
@@ -334,11 +350,13 @@ export const useUpdateShoppingListMutation = () => {
## Migration Checklist
### Prerequisites
- [x] TanStack Query installed
- [ ] QueryClient configuration created
- [ ] App wrapped with QueryClientProvider
### Queries
- [ ] Flyers infinite query migrated
- [ ] Watched items query migrated
- [ ] Shopping lists query migrated
@@ -346,6 +364,7 @@ export const useUpdateShoppingListMutation = () => {
- [ ] Active deals query migrated (if applicable)
### Mutations
- [ ] Add watched item mutation
- [ ] Remove watched item mutation
- [ ] Update shopping list mutation
@@ -353,12 +372,14 @@ export const useUpdateShoppingListMutation = () => {
- [ ] Remove shopping list item mutation
### Cleanup
- [ ] Remove custom useInfiniteQuery hook
- [ ] Remove custom useApiOnMount hook
- [ ] Update all tests
- [ ] Remove redundant state management code
### Documentation
- [ ] Update ADR-0005 status to "Accepted"
- [ ] Add usage guidelines to README
- [ ] Document query key conventions
@@ -367,10 +388,12 @@ export const useUpdateShoppingListMutation = () => {
## Benefits Summary
### Code Reduction
- **Estimated**: ~300-500 lines of custom hook code removed
- **Result**: Simpler, more maintainable codebase
### Performance Improvements
- ✅ Automatic request deduplication
- ✅ Background data synchronization
- ✅ Smart cache invalidation
@@ -378,12 +401,14 @@ export const useUpdateShoppingListMutation = () => {
- ✅ Automatic retry logic
### Developer Experience
- ✅ React Query Devtools for debugging
- ✅ Type-safe query hooks
- ✅ Standardized patterns across the app
- ✅ Less boilerplate code
### User Experience
- ✅ Faster perceived performance (cached data)
- ✅ Better offline experience
- ✅ Smoother UI interactions (optimistic updates)
@@ -392,11 +417,13 @@ export const useUpdateShoppingListMutation = () => {
## Risk Assessment
### Low Risk
- TanStack Query is industry-standard
- Already installed in project
- Incremental migration possible
### Mitigation Strategies
1. **Test thoroughly** - Maintain existing test coverage
2. **Migrate incrementally** - One provider at a time
3. **Monitor performance** - Use React Query Devtools

View File

@@ -45,6 +45,7 @@ Successfully completed Phase 2 of ADR-0005 enforcement by migrating all remainin
## Code Reduction Summary
### Phase 1 + Phase 2 Combined
- **Total custom state management code removed**: ~200 lines
- **New query hooks created**: 5 files (~200 lines of standardized code)
- **Providers simplified**: 4 files
@@ -53,34 +54,38 @@ Successfully completed Phase 2 of ADR-0005 enforcement by migrating all remainin
## Technical Improvements
### 1. Intelligent Caching Strategy
```typescript
// Master items (rarely change) - 10 min stale time
useMasterItemsQuery() // staleTime: 10 minutes
useMasterItemsQuery(); // staleTime: 10 minutes
// Flyers (moderate changes) - 2 min stale time
useFlyersQuery() // staleTime: 2 minutes
useFlyersQuery(); // staleTime: 2 minutes
// User data (frequent changes) - 1 min stale time
useWatchedItemsQuery() // staleTime: 1 minute
useShoppingListsQuery() // staleTime: 1 minute
useWatchedItemsQuery(); // staleTime: 1 minute
useShoppingListsQuery(); // staleTime: 1 minute
// Flyer items (static) - 5 min stale time
useFlyerItemsQuery() // staleTime: 5 minutes
useFlyerItemsQuery(); // staleTime: 5 minutes
```
### 2. Per-Resource Caching
Each flyer's items are cached separately:
```typescript
// Flyer 1 items cached with key: ['flyer-items', 1]
useFlyerItemsQuery(1)
useFlyerItemsQuery(1);
// Flyer 2 items cached with key: ['flyer-items', 2]
useFlyerItemsQuery(2)
useFlyerItemsQuery(2);
// Both caches persist independently
```
### 3. Automatic Query Disabling
```typescript
// Query automatically disabled when flyerId is undefined
const { data } = useFlyerItemsQuery(selectedFlyer?.flyer_id);
@@ -90,24 +95,28 @@ const { data } = useFlyerItemsQuery(selectedFlyer?.flyer_id);
## Benefits Achieved
### Performance
- **Reduced API calls** - Data cached between component unmounts
- **Background refetching** - Stale data updates in background
- **Request deduplication** - Multiple components can use same query
- **Optimized cache times** - Different strategies for different data types
### Code Quality
- **Removed ~50 more lines** of custom state management
- **Eliminated useApiOnMount** from all providers
- **Standardized patterns** - All queries follow same structure
- **Better type safety** - TypeScript types flow through queries
### Developer Experience
- **React Query Devtools** - Inspect all queries and cache
- **Easier debugging** - Clear query states and transitions
- **Less boilerplate** - No manual loading/error state management
- **Automatic retries** - Failed queries retry automatically
### User Experience
- **Faster perceived performance** - Cached data shows instantly
- **Fresh data** - Background refetching keeps data current
- **Better offline handling** - Cached data available offline
@@ -116,12 +125,14 @@ const { data } = useFlyerItemsQuery(selectedFlyer?.flyer_id);
## Remaining Work
### Phase 3: Mutations (Next)
- [ ] Create mutation hooks for data modifications
- [ ] Add/remove watched items with optimistic updates
- [ ] Shopping list CRUD operations
- [ ] Proper cache invalidation strategies
### Phase 4: Cleanup (Final)
- [ ] Remove `useApiOnMount` hook entirely
- [ ] Remove `useApi` hook if no longer used
- [ ] Remove stub implementations in providers
@@ -159,10 +170,13 @@ Before merging, test the following:
## Migration Notes
### Breaking Changes
None! All providers maintain the same interface.
### Deprecation Warnings
The following will log warnings if used:
- `setWatchedItems()` in UserDataProvider
- `setShoppingLists()` in UserDataProvider

View File

@@ -12,6 +12,7 @@ Successfully completed Phase 3 of ADR-0005 enforcement by creating all mutation
### Mutation Hooks
All mutation hooks follow a consistent pattern:
- Automatic cache invalidation via `queryClient.invalidateQueries()`
- Success/error notifications via notification service
- Proper TypeScript types for parameters
@@ -113,15 +114,12 @@ function WatchedItemsManager() {
{
onSuccess: () => console.log('Added to watched list!'),
onError: (error) => console.error('Failed:', error),
}
},
);
};
return (
<button
onClick={handleAdd}
disabled={addWatchedItem.isPending}
>
<button onClick={handleAdd} disabled={addWatchedItem.isPending}>
{addWatchedItem.isPending ? 'Adding...' : 'Add to Watched List'}
</button>
);
@@ -134,7 +132,7 @@ function WatchedItemsManager() {
import {
useCreateShoppingListMutation,
useAddShoppingListItemMutation,
useUpdateShoppingListItemMutation
useUpdateShoppingListItemMutation,
} from '../hooks/mutations';
function ShoppingListManager() {
@@ -149,14 +147,14 @@ function ShoppingListManager() {
const handleAddItem = (listId: number, masterItemId: number) => {
addItem.mutate({
listId,
item: { masterItemId }
item: { masterItemId },
});
};
const handleMarkPurchased = (itemId: number) => {
updateItem.mutate({
itemId,
updates: { is_purchased: true }
updates: { is_purchased: true },
});
};
@@ -172,23 +170,27 @@ function ShoppingListManager() {
## Benefits Achieved
### Performance
- **Automatic cache updates** - Queries automatically refetch after mutations
- **Request deduplication** - Multiple mutation calls are properly queued
- **Optimistic updates ready** - Infrastructure in place for Phase 4
### Code Quality
- **Standardized pattern** - All mutations follow the same structure
- **Comprehensive documentation** - JSDoc with examples for every hook
- **Type safety** - Full TypeScript types for all parameters
- **Error handling** - Consistent error handling and user notifications
### Developer Experience
- **React Query Devtools** - Inspect mutation states in real-time
- **Easy imports** - Barrel export for clean imports
- **Consistent API** - Same pattern across all mutations
- **Built-in loading states** - `isPending`, `isError`, `isSuccess` states
### User Experience
- **Automatic notifications** - Success/error toasts on all mutations
- **Fresh data** - Queries automatically update after mutations
- **Loading states** - UI can show loading indicators during mutations
@@ -197,6 +199,7 @@ function ShoppingListManager() {
## Current State
### Completed
- ✅ All 7 mutation hooks created
- ✅ Barrel export created for easy imports
- ✅ Comprehensive documentation with examples
@@ -225,12 +228,14 @@ These hooks are actively used throughout the application and will need careful r
### Phase 4: Hook Refactoring & Cleanup
#### Step 1: Refactor useWatchedItems
- [ ] Replace `useApi` calls with mutation hooks
- [ ] Remove manual state management logic
- [ ] Simplify to just wrap mutation hooks with custom logic
- [ ] Update all tests
#### Step 2: Refactor useShoppingLists
- [ ] Replace `useApi` calls with mutation hooks
- [ ] Remove manual state management logic
- [ ] Remove complex state synchronization
@@ -238,17 +243,20 @@ These hooks are actively used throughout the application and will need careful r
- [ ] Update all tests
#### Step 3: Remove Deprecated Code
- [ ] Remove `setWatchedItems` from UserDataContext
- [ ] Remove `setShoppingLists` from UserDataContext
- [ ] Remove `useApi` hook (if no longer used)
- [ ] Remove `useApiOnMount` hook (already deprecated)
#### Step 4: Add Optimistic Updates (Optional)
- [ ] Implement optimistic updates for better UX
- [ ] Use `onMutate` to update cache before server response
- [ ] Implement rollback on error
#### Step 5: Documentation & Testing
- [ ] Update all component documentation
- [ ] Update developer onboarding guide
- [ ] Add integration tests for mutation flows

View File

@@ -41,13 +41,13 @@ Successfully completed Phase 4 of ADR-0005 enforcement by refactoring the remain
### Phase 1-4 Combined
| Metric | Before | After | Reduction |
|--------|--------|-------|-----------|
| **useWatchedItems** | 77 lines | 71 lines | -6 lines (cleaner) |
| **useShoppingLists** | 222 lines | 176 lines | -46 lines (-21%) |
| **Manual state management** | ~150 lines | 0 lines | -150 lines (100%) |
| **useApi dependencies** | 7 hooks | 0 hooks | -7 dependencies |
| **Total for Phase 4** | 299 lines | 247 lines | **-52 lines (-17%)** |
| Metric | Before | After | Reduction |
| --------------------------- | ---------- | --------- | -------------------- |
| **useWatchedItems** | 77 lines | 71 lines | -6 lines (cleaner) |
| **useShoppingLists** | 222 lines | 176 lines | -46 lines (-21%) |
| **Manual state management** | ~150 lines | 0 lines | -150 lines (100%) |
| **useApi dependencies** | 7 hooks | 0 hooks | -7 dependencies |
| **Total for Phase 4** | 299 lines | 247 lines | **-52 lines (-17%)** |
### Overall ADR-0005 Impact (Phases 1-4)
@@ -61,45 +61,54 @@ Successfully completed Phase 4 of ADR-0005 enforcement by refactoring the remain
### 1. Simplified useWatchedItems
**Before (useApi pattern):**
```typescript
const { execute: addWatchedItemApi, error: addError } = useApi<MasterGroceryItem, [string, string]>(
(itemName, category) => apiClient.addWatchedItem(itemName, category)
(itemName, category) => apiClient.addWatchedItem(itemName, category),
);
const addWatchedItem = useCallback(async (itemName: string, category: string) => {
if (!userProfile) return;
const updatedOrNewItem = await addWatchedItemApi(itemName, category);
const addWatchedItem = useCallback(
async (itemName: string, category: string) => {
if (!userProfile) return;
const updatedOrNewItem = await addWatchedItemApi(itemName, category);
if (updatedOrNewItem) {
setWatchedItems((currentItems) => {
const itemExists = currentItems.some(
(item) => item.master_grocery_item_id === updatedOrNewItem.master_grocery_item_id
);
if (!itemExists) {
return [...currentItems, updatedOrNewItem].sort((a, b) => a.name.localeCompare(b.name));
}
return currentItems;
});
}
}, [userProfile, setWatchedItems, addWatchedItemApi]);
if (updatedOrNewItem) {
setWatchedItems((currentItems) => {
const itemExists = currentItems.some(
(item) => item.master_grocery_item_id === updatedOrNewItem.master_grocery_item_id,
);
if (!itemExists) {
return [...currentItems, updatedOrNewItem].sort((a, b) => a.name.localeCompare(b.name));
}
return currentItems;
});
}
},
[userProfile, setWatchedItems, addWatchedItemApi],
);
```
**After (TanStack Query):**
```typescript
const addWatchedItemMutation = useAddWatchedItemMutation();
const addWatchedItem = useCallback(async (itemName: string, category: string) => {
if (!userProfile) return;
const addWatchedItem = useCallback(
async (itemName: string, category: string) => {
if (!userProfile) return;
try {
await addWatchedItemMutation.mutateAsync({ itemName, category });
} catch (error) {
console.error('useWatchedItems: Failed to add item', error);
}
}, [userProfile, addWatchedItemMutation]);
try {
await addWatchedItemMutation.mutateAsync({ itemName, category });
} catch (error) {
console.error('useWatchedItems: Failed to add item', error);
}
},
[userProfile, addWatchedItemMutation],
);
```
**Benefits:**
- No manual state updates
- Cache automatically invalidated
- Success/error notifications handled
@@ -108,6 +117,7 @@ const addWatchedItem = useCallback(async (itemName: string, category: string) =>
### 2. Dramatically Simplified useShoppingLists
**Before:** 222 lines with:
- 5 separate `useApi` hooks
- Complex manual state synchronization
- Client-side duplicate checking
@@ -115,6 +125,7 @@ const addWatchedItem = useCallback(async (itemName: string, category: string) =>
- Try-catch blocks for each operation
**After:** 176 lines with:
- 5 TanStack Query mutation hooks
- Zero manual state management
- Server-side validation
@@ -122,6 +133,7 @@ const addWatchedItem = useCallback(async (itemName: string, category: string) =>
- Consistent error handling
**Removed Complexity:**
```typescript
// OLD: Manual state update with complex logic
const addItemToList = useCallback(async (listId: number, item: {...}) => {
@@ -158,6 +170,7 @@ const addItemToList = useCallback(async (listId: number, item: {...}) => {
```
**NEW: Simple mutation call:**
```typescript
const addItemToList = useCallback(async (listId: number, item: {...}) => {
if (!userProfile) return;
@@ -173,18 +186,20 @@ const addItemToList = useCallback(async (listId: number, item: {...}) => {
### 3. Cleaner Context Interface
**Before:**
```typescript
export interface UserDataContextType {
watchedItems: MasterGroceryItem[];
shoppingLists: ShoppingList[];
setWatchedItems: React.Dispatch<React.SetStateAction<MasterGroceryItem[]>>; // ❌ Removed
setShoppingLists: React.Dispatch<React.SetStateAction<ShoppingList[]>>; // ❌ Removed
setWatchedItems: React.Dispatch<React.SetStateAction<MasterGroceryItem[]>>; // ❌ Removed
setShoppingLists: React.Dispatch<React.SetStateAction<ShoppingList[]>>; // ❌ Removed
isLoading: boolean;
error: string | null;
}
```
**After:**
```typescript
export interface UserDataContextType {
watchedItems: MasterGroceryItem[];
@@ -195,6 +210,7 @@ export interface UserDataContextType {
```
**Why this matters:**
- Context now truly represents "server state" (read-only from context perspective)
- Mutations are handled separately via mutation hooks
- Clear separation of concerns: queries for reads, mutations for writes
@@ -202,12 +218,14 @@ export interface UserDataContextType {
## Benefits Achieved
### Performance
- **Eliminated redundant refetches** - No more manual state sync causing stale data
- **Automatic cache updates** - Mutations invalidate queries automatically
- **Optimistic updates ready** - Infrastructure supports adding optimistic updates in future
- **Reduced bundle size** - 52 lines less code in custom hooks
### Code Quality
- **Removed 150+ lines** of manual state management across all hooks
- **Eliminated useApi dependency** from user-facing hooks
- **Consistent error handling** - All mutations use same pattern
@@ -215,12 +233,14 @@ export interface UserDataContextType {
- **Removed complex logic** - No more client-side duplicate checking
### Developer Experience
- **Simpler hook implementations** - 46 lines less in useShoppingLists alone
- **Easier debugging** - React Query Devtools show all mutations
- **Type safety** - Mutation hooks provide full TypeScript types
- **Consistent patterns** - All operations follow same mutation pattern
### User Experience
- **Automatic notifications** - Success/error toasts on all operations
- **Fresh data** - Cache automatically updates after mutations
- **Better error messages** - Server-side validation provides better feedback
@@ -231,6 +251,7 @@ export interface UserDataContextType {
### Breaking Changes
**Direct UserDataContext usage:**
```typescript
// ❌ OLD: This no longer works
const { setWatchedItems } = useUserData();
@@ -245,6 +266,7 @@ addWatchedItem.mutate({ itemName: 'Milk', category: 'Dairy' });
### Non-Breaking Changes
**Custom hooks maintain backward compatibility:**
```typescript
// ✅ STILL WORKS: Custom hooks maintain same interface
const { addWatchedItem, removeWatchedItem } = useWatchedItems();
@@ -273,6 +295,7 @@ addWatchedItem.mutate({ itemName: 'Milk', category: 'Dairy' });
### Testing Approach
**Current tests mock useApi:**
```typescript
vi.mock('./useApi');
const mockedUseApi = vi.mocked(useApi);
@@ -280,6 +303,7 @@ mockedUseApi.mockReturnValue({ execute: mockFn, error: null, loading: false });
```
**New tests should mock mutations:**
```typescript
vi.mock('./mutations', () => ({
useAddWatchedItemMutation: vi.fn(),
@@ -300,17 +324,20 @@ useAddWatchedItemMutation.mockReturnValue({
## Remaining Work
### Immediate Follow-Up (Phase 4.5)
- [ ] Update [src/hooks/useWatchedItems.test.tsx](../src/hooks/useWatchedItems.test.tsx)
- [ ] Update [src/hooks/useShoppingLists.test.tsx](../src/hooks/useShoppingLists.test.tsx)
- [ ] Add integration tests for mutation flows
### Phase 5: Admin Features (Next)
- [ ] Create query hooks for admin features
- [ ] Migrate ActivityLog.tsx
- [ ] Migrate AdminStatsPage.tsx
- [ ] Migrate CorrectionsPage.tsx
### Phase 6: Final Cleanup
- [ ] Remove `useApi` hook (no longer used by core features)
- [ ] Remove `useApiOnMount` hook (deprecated)
- [ ] Remove custom `useInfiniteQuery` hook (deprecated)
@@ -350,12 +377,14 @@ None! Phase 4 implementation is complete and working.
## Performance Metrics
### Before Phase 4
- Multiple redundant state updates per mutation
- Client-side validation adding latency
- Complex nested state updates causing re-renders
- Manual cache synchronization prone to bugs
### After Phase 4
- Single mutation triggers automatic cache update
- Server-side validation (proper place for business logic)
- Simple refetch after mutation (no manual updates)
@@ -372,6 +401,7 @@ None! Phase 4 implementation is complete and working.
Phase 4 successfully refactored the remaining custom hooks (`useWatchedItems` and `useShoppingLists`) to use TanStack Query mutations, eliminating all manual state management for user-facing features. The codebase is now significantly simpler, more maintainable, and follows consistent patterns throughout.
**Key Achievements:**
- Removed 52 lines of code from custom hooks
- Eliminated 7 `useApi` dependencies
- Removed 150+ lines of manual state management
@@ -380,6 +410,7 @@ Phase 4 successfully refactored the remaining custom hooks (`useWatchedItems` an
- Zero regressions in functionality
**Next Steps**:
1. Update tests for refactored hooks (Phase 4.5 - follow-up)
2. Proceed to Phase 5 to migrate admin features
3. Final cleanup in Phase 6

View File

@@ -100,6 +100,7 @@ Successfully completed Phase 5 of ADR-0005 by migrating all admin features from
### Before (Manual State Management)
**ActivityLog.tsx - Before:**
```typescript
const [logs, setLogs] = useState<ActivityLogItem[]>([]);
const [isLoading, setIsLoading] = useState(true);
@@ -116,8 +117,7 @@ useEffect(() => {
setError(null);
try {
const response = await fetchActivityLog(20, 0);
if (!response.ok)
throw new Error((await response.json()).message || 'Failed to fetch logs');
if (!response.ok) throw new Error((await response.json()).message || 'Failed to fetch logs');
setLogs(await response.json());
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to load activity.');
@@ -131,6 +131,7 @@ useEffect(() => {
```
**ActivityLog.tsx - After:**
```typescript
const { data: logs = [], isLoading, error } = useActivityLogQuery(20, 0);
```
@@ -138,6 +139,7 @@ const { data: logs = [], isLoading, error } = useActivityLogQuery(20, 0);
### Before (Manual Parallel Fetching)
**CorrectionsPage.tsx - Before:**
```typescript
const [corrections, setCorrections] = useState<SuggestedCorrection[]>([]);
const [isLoading, setIsLoading] = useState(true);
@@ -172,6 +174,7 @@ useEffect(() => {
```
**CorrectionsPage.tsx - After:**
```typescript
const {
data: corrections = [],
@@ -180,15 +183,9 @@ const {
refetch: refetchCorrections,
} = useSuggestedCorrectionsQuery();
const {
data: masterItems = [],
isLoading: isLoadingMasterItems,
} = useMasterItemsQuery();
const { data: masterItems = [], isLoading: isLoadingMasterItems } = useMasterItemsQuery();
const {
data: categories = [],
isLoading: isLoadingCategories,
} = useCategoriesQuery();
const { data: categories = [], isLoading: isLoadingCategories } = useCategoriesQuery();
const isLoading = isLoadingCorrections || isLoadingMasterItems || isLoadingCategories;
const error = correctionsError?.message || null;
@@ -197,12 +194,14 @@ const error = correctionsError?.message || null;
## Benefits Achieved
### Performance
- **Automatic parallel fetching** - CorrectionsPage fetches 3 queries simultaneously
- **Shared cache** - Multiple components can reuse the same queries
- **Smart refetching** - Queries refetch on window focus automatically
- **Stale-while-revalidate** - Shows cached data while fetching fresh data
### Code Quality
- **~77 lines removed** from admin components (-20% average)
- **Eliminated manual state management** for all admin queries
- **Consistent error handling** across all admin features
@@ -210,6 +209,7 @@ const error = correctionsError?.message || null;
- **Removed complex Promise.all logic** from CorrectionsPage
### Developer Experience
- **Simpler component code** - Focus on UI, not data fetching
- **Easier debugging** - React Query Devtools show all queries
- **Type safety** - Query hooks provide full TypeScript types
@@ -217,6 +217,7 @@ const error = correctionsError?.message || null;
- **Consistent patterns** - All admin features follow same query pattern
### User Experience
- **Faster perceived performance** - Show cached data instantly
- **Background updates** - Data refreshes without loading spinners
- **Network resilience** - Automatic retry on failure
@@ -224,12 +225,12 @@ const error = correctionsError?.message || null;
## Code Reduction Summary
| Component | Before | After | Reduction |
|-----------|--------|-------|-----------|
| **ActivityLog.tsx** | 158 lines | 133 lines | -25 lines (-16%) |
| **AdminStatsPage.tsx** | 104 lines | 78 lines | -26 lines (-25%) |
| Component | Before | After | Reduction |
| ----------------------- | ----------------------- | ----------------- | --------------------------- |
| **ActivityLog.tsx** | 158 lines | 133 lines | -25 lines (-16%) |
| **AdminStatsPage.tsx** | 104 lines | 78 lines | -26 lines (-25%) |
| **CorrectionsPage.tsx** | ~120 lines (state mgmt) | ~50 lines (hooks) | ~70 lines (-58% state code) |
| **Total Reduction** | ~382 lines | ~261 lines | **~121 lines (-32%)** |
| **Total Reduction** | ~382 lines | ~261 lines | **~121 lines (-32%)** |
**Note**: CorrectionsPage reduction is approximate as the full component includes rendering logic that wasn't changed.
@@ -334,6 +335,7 @@ export const AdminComponent: React.FC = () => {
All changes are backward compatible at the component level. Components maintain their existing props and behavior.
**Example: ActivityLog component still accepts same props:**
```typescript
interface ActivityLogProps {
userProfile: UserProfile | null;

View File

@@ -2,7 +2,8 @@
**Date**: 2026-01-08
**Environment**: Windows 10, VSCode with Claude Code integration
**Configuration Files**:
**Configuration Files**:
- [`mcp.json`](c:/Users/games3/AppData/Roaming/Code/User/mcp.json:1)
- [`mcp-servers.json`](c:/Users/games3/AppData/Roaming/Code/User/globalStorage/mcp-servers.json:1)
@@ -13,6 +14,7 @@
You have **8 MCP servers** configured in your environment. These servers extend Claude's capabilities by providing specialized tools for browser automation, file conversion, Git hosting integration, container management, filesystem access, and HTTP requests.
**Key Findings**:
- ✅ 7 servers are properly configured and ready to test
- ⚠️ 1 server requires token update (gitea-lan)
- 📋 Testing guide and automated script provided
@@ -23,11 +25,13 @@ You have **8 MCP servers** configured in your environment. These servers extend
## MCP Server Inventory
### 1. Chrome DevTools MCP Server
**Status**: ✅ Configured
**Type**: Browser Automation
**Command**: `npx -y chrome-devtools-mcp@latest`
**Capabilities**:
- Launch and control Chrome browser
- Navigate to URLs
- Click elements and interact with DOM
@@ -36,6 +40,7 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Execute JavaScript in browser context
**Use Cases**:
- Web scraping
- Automated testing
- UI verification
@@ -43,6 +48,7 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Debugging frontend issues
**Configuration Details**:
- Headless mode: Enabled
- Isolated: False (shares browser state)
- Channel: Stable
@@ -50,11 +56,13 @@ You have **8 MCP servers** configured in your environment. These servers extend
---
### 2. Markitdown MCP Server
**Status**: ✅ Configured
**Type**: File Conversion
**Command**: `C:\Users\games3\.local\bin\uvx.exe markitdown-mcp`
**Capabilities**:
- Convert PDF files to markdown
- Convert DOCX files to markdown
- Convert HTML to markdown
@@ -62,24 +70,28 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Convert PowerPoint presentations
**Use Cases**:
- Document processing
- Content extraction from various formats
- Making documents AI-readable
- Converting legacy documents to markdown
**Notes**:
- Requires Python and `uvx` to be installed
- Uses Microsoft's Markitdown library
---
### 3. Gitea Torbonium
**Status**: ✅ Configured
**Type**: Git Hosting Integration
**Host**: https://gitea.torbonium.com
**Command**: `d:\gitea-mcp\gitea-mcp.exe run -t stdio`
**Capabilities**:
- List and manage repositories
- Create and update issues
- Manage pull requests
@@ -89,6 +101,7 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Manage repository settings
**Use Cases**:
- Automated issue creation
- Repository management
- Code review automation
@@ -96,12 +109,14 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Release management
**Configuration**:
- Token: Configured (ending in ...fcf8)
- Access: Full API access based on token permissions
---
### 4. Gitea LAN (Torbolan)
**Status**: ⚠️ Requires Configuration
**Type**: Git Hosting Integration
**Host**: https://gitea.torbolan.com
@@ -110,6 +125,7 @@ You have **8 MCP servers** configured in your environment. These servers extend
**Issue**: Access token is set to `REPLACE_WITH_NEW_TOKEN`
**Action Required**:
1. Log into https://gitea.torbolan.com
2. Navigate to Settings → Applications
3. Generate a new access token
@@ -120,6 +136,7 @@ You have **8 MCP servers** configured in your environment. These servers extend
---
### 5. Gitea Projectium
**Status**: ✅ Configured
**Type**: Git Hosting Integration
**Host**: https://gitea.projectium.com
@@ -128,6 +145,7 @@ You have **8 MCP servers** configured in your environment. These servers extend
**Capabilities**: Same as Gitea Torbonium
**Configuration**:
- Token: Configured (ending in ...9ef)
- This appears to be the Gitea instance for your current project
@@ -136,11 +154,13 @@ You have **8 MCP servers** configured in your environment. These servers extend
---
### 6. Podman/Docker MCP Server
**Status**: ✅ Configured
**Type**: Container Management
**Command**: `npx -y @modelcontextprotocol/server-docker`
**Capabilities**:
- List running containers
- Start and stop containers
- View container logs
@@ -150,6 +170,7 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Create and manage networks
**Use Cases**:
- Container orchestration
- Development environment management
- Log analysis
@@ -157,22 +178,26 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Image management
**Configuration**:
- Docker Host: `npipe:////./pipe/docker_engine`
- Requires: Docker Desktop or Podman running on Windows
**Prerequisites**:
- Docker Desktop must be running
- Named pipe access configured
---
### 7. Filesystem MCP Server
**Status**: ✅ Configured
**Type**: File System Access
**Path**: `D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com`
**Command**: `npx -y @modelcontextprotocol/server-filesystem`
**Capabilities**:
- List directory contents recursively
- Read file contents
- Write and modify files
@@ -181,27 +206,31 @@ You have **8 MCP servers** configured in your environment. These servers extend
- Create and delete files/directories
**Use Cases**:
- Project file management
- Bulk file operations
- Code generation and modifications
- File content analysis
- Project structure exploration
**Security Note**:
**Security Note**:
This server has full read/write access to your project directory. It operates within the specified directory only.
**Scope**:
**Scope**:
- Limited to: `D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com`
- Cannot access files outside this directory
---
### 8. Fetch MCP Server
**Status**: ✅ Configured
**Type**: HTTP Client
**Command**: `npx -y @modelcontextprotocol/server-fetch`
**Capabilities**:
- Send HTTP GET requests
- Send HTTP POST requests
- Send PUT, DELETE, PATCH requests
@@ -211,6 +240,7 @@ This server has full read/write access to your project directory. It operates wi
- Handle authentication
**Use Cases**:
- API testing
- Web scraping
- Data fetching from external services
@@ -218,6 +248,7 @@ This server has full read/write access to your project directory. It operates wi
- Integration with external APIs
**Examples**:
- Fetch data from REST APIs
- Download web content
- Test API endpoints
@@ -228,11 +259,12 @@ This server has full read/write access to your project directory. It operates wi
## Current Status: MCP Server Tool Availability
**Important Note**: While these MCP servers are configured in your environment, they are **not currently exposed as callable tools** in this Claude Code session.
**Important Note**: While these MCP servers are configured in your environment, they are **not currently exposed as callable tools** in this Claude Code session.
### What This Means:
MCP servers typically work by:
1. Running as separate processes
2. Exposing tools and resources via the Model Context Protocol
3. Being connected to the AI assistant by the client application (VSCode)
@@ -240,12 +272,14 @@ MCP servers typically work by:
### Current Situation:
In the current session, Claude Code has access to:
- ✅ Built-in file operations (read, write, search, list)
- ✅ Browser actions
- ✅ Mode switching
- ✅ Task management tools
But does **NOT** have direct access to:
- ❌ MCP server-specific tools (e.g., Gitea API operations)
- ❌ Chrome DevTools controls
- ❌ Markitdown conversion functions
@@ -255,6 +289,7 @@ But does **NOT** have direct access to:
### Why This Happens:
MCP servers need to be:
1. Actively connected by the client (VSCode)
2. Running in the background
3. Properly registered with the AI assistant
@@ -277,6 +312,7 @@ cd plans
```
This will:
- Test each server's basic functionality
- Check API connectivity for Gitea servers
- Verify Docker daemon access
@@ -297,6 +333,7 @@ mcp-inspector npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-cra
```
The inspector provides a web UI to:
- View available tools
- Test tool invocations
- See real-time logs
@@ -343,14 +380,14 @@ Follow the comprehensive guide in [`mcp-server-testing-guide.md`](plans/mcp-serv
## MCP Server Use Case Matrix
| Server | Code Analysis | Testing | Deployment | Documentation | API Integration |
|--------|--------------|---------|------------|---------------|-----------------|
| Chrome DevTools | ✓ (UI testing) | ✓✓✓ | - | ✓ (screenshots) | ✓ |
| Markitdown | - | - | - | ✓✓✓ | - |
| Gitea (all 3) | ✓✓✓ | ✓ | ✓✓✓ | ✓✓ | ✓✓✓ |
| Docker | ✓ | ✓✓✓ | ✓✓✓ | - | ✓ |
| Filesystem | ✓✓✓ | ✓✓ | ✓ | ✓✓ | ✓ |
| Fetch | ✓ | ✓✓ | ✓ | - | ✓✓✓ |
| Server | Code Analysis | Testing | Deployment | Documentation | API Integration |
| --------------- | -------------- | ------- | ---------- | --------------- | --------------- |
| Chrome DevTools | ✓ (UI testing) | ✓✓✓ | - | ✓ (screenshots) | ✓ |
| Markitdown | - | - | - | ✓✓✓ | - |
| Gitea (all 3) | ✓✓✓ | ✓ | ✓✓✓ | ✓✓ | ✓✓✓ |
| Docker | ✓ | ✓✓✓ | ✓✓✓ | - | ✓ |
| Filesystem | ✓✓✓ | ✓✓ | ✓ | ✓✓ | ✓ |
| Fetch | ✓ | ✓✓ | ✓ | - | ✓✓✓ |
Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable, - = Not applicable
@@ -359,12 +396,14 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable
## Potential Workflows
### Workflow 1: Automated Documentation Updates
1. **Fetch server**: Get latest API documentation from external service
2. **Markitdown**: Convert to markdown format
3. **Filesystem server**: Write to project documentation folder
4. **Gitea server**: Create commit and push changes
### Workflow 2: Container-Based Testing
1. **Docker server**: Start test containers
2. **Fetch server**: Send test API requests
3. **Docker server**: Collect container logs
@@ -372,6 +411,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable
5. **Gitea server**: Update test status in issues
### Workflow 3: Web UI Testing
1. **Chrome DevTools**: Launch browser and navigate to app
2. **Chrome DevTools**: Interact with UI elements
3. **Chrome DevTools**: Capture screenshots
@@ -379,6 +419,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable
5. **Gitea server**: Update test documentation
### Workflow 4: Repository Management
1. **Gitea server**: List all repositories
2. **Gitea server**: Check for outdated dependencies
3. **Gitea server**: Create issues for updates needed
@@ -389,24 +430,28 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable
## Next Steps
### Phase 1: Verification (Immediate)
1. Run the test script: [`test-mcp-servers.ps1`](plans/test-mcp-servers.ps1:1)
2. Review results and identify issues
3. Fix Gitea LAN token configuration
4. Re-test all servers
### Phase 2: Documentation (Short-term)
1. Document successful test results
2. Create usage examples for each server
3. Set up troubleshooting guides
4. Document common error scenarios
### Phase 3: Integration (Medium-term)
1. Verify MCP server connectivity in Claude Code sessions
2. Test tool availability and functionality
3. Create workflow templates
4. Integrate into development processes
### Phase 4: Optimization (Long-term)
1. Monitor MCP server performance
2. Optimize configurations
3. Add additional MCP servers as needed
@@ -419,7 +464,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable
- **MCP Protocol Specification**: https://modelcontextprotocol.io
- **Testing Guide**: [`mcp-server-testing-guide.md`](plans/mcp-server-testing-guide.md:1)
- **Test Script**: [`test-mcp-servers.ps1`](plans/test-mcp-servers.ps1:1)
- **Configuration Files**:
- **Configuration Files**:
- [`mcp.json`](c:/Users/games3/AppData/Roaming/Code/User/mcp.json:1)
- [`mcp-servers.json`](c:/Users/games3/AppData/Roaming/Code/User/globalStorage/mcp-servers.json:1)
@@ -447,6 +492,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable
## Conclusion
You have a comprehensive MCP server setup that provides powerful capabilities for:
- **Browser automation** (Chrome DevTools)
- **Document conversion** (Markitdown)
- **Git hosting integration** (3 Gitea instances)
@@ -454,12 +500,14 @@ You have a comprehensive MCP server setup that provides powerful capabilities fo
- **File system operations** (Filesystem)
- **HTTP requests** (Fetch)
**Immediate Action Required**:
**Immediate Action Required**:
- Fix the Gitea LAN token configuration
- Run the test script to verify all servers are operational
- Review test results and address any failures
**Current Limitation**:
**Current Limitation**:
- MCP server tools are not exposed in the current Claude Code session
- May require VSCode or client-side configuration to enable

View File

@@ -9,9 +9,11 @@ MCP (Model Context Protocol) servers are standalone processes that expose tools
## Testing Prerequisites
1. **MCP Inspector Tool** - Install the official MCP testing tool:
```bash
npm install -g @modelcontextprotocol/inspector
```
```powershell
npm install -g @modelcontextprotocol/inspector
```
@@ -25,20 +27,24 @@ MCP (Model Context Protocol) servers are standalone processes that expose tools
**Purpose**: Browser automation and Chrome DevTools integration
### Test Command:
```bash
npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel stable
```
```powershell
npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel stable
```
### Expected Capabilities:
- Browser launch and control
- DOM inspection
- Network monitoring
- JavaScript execution in browser context
### Manual Test Steps:
1. Run the command above
2. The server should start and output MCP protocol messages
3. Use MCP Inspector to connect:
@@ -50,6 +56,7 @@ npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel sta
```
### Success Indicators:
- Server starts without errors
- Lists available tools (e.g., `navigate`, `click`, `screenshot`)
- Can execute browser actions
@@ -61,20 +68,24 @@ npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel sta
**Purpose**: Convert various file formats to markdown
### Test Command:
```bash
C:\Users\games3\.local\bin\uvx.exe markitdown-mcp
```
```powershell
C:\Users\games3\.local\bin\uvx.exe markitdown-mcp
```
### Expected Capabilities:
- Convert PDF to markdown
- Convert DOCX to markdown
- Convert HTML to markdown
- Convert images (OCR) to markdown
### Manual Test Steps:
1. Ensure `uvx` is installed (Python tool)
2. Run the command above
3. Test with MCP Inspector:
@@ -86,11 +97,13 @@ C:\Users\games3\.local\bin\uvx.exe markitdown-mcp
```
### Success Indicators:
- Server initializes successfully
- Lists conversion tools
- Can convert a test file
### Troubleshooting:
- If `uvx` is not found, install it:
```bash
pip install uvx
@@ -111,6 +124,7 @@ You have three Gitea server configurations. All use the same executable but conn
**Host**: https://gitea.torbonium.com
#### Test Command:
```powershell
$env:GITEA_HOST="https://gitea.torbonium.com"
$env:GITEA_ACCESS_TOKEN="391c9ddbe113378bc87bb8184800ba954648fcf8"
@@ -118,6 +132,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
```
#### Expected Capabilities:
- List repositories
- Create/update issues
- Manage pull requests
@@ -125,6 +140,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
- Manage branches
#### Manual Test Steps:
1. Set environment variables
2. Run gitea-mcp.exe
3. Use MCP Inspector or test direct API access:
@@ -141,6 +157,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
**Status**: ⚠️ Token needs replacement
#### Test Command:
```powershell
$env:GITEA_HOST="https://gitea.torbolan.com"
$env:GITEA_ACCESS_TOKEN="REPLACE_WITH_NEW_TOKEN" # ⚠️ UPDATE THIS
@@ -148,6 +165,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
```
#### Before Testing:
1. Generate a new access token:
- Log into https://gitea.torbolan.com
- Go to Settings → Applications → Generate New Token
@@ -158,6 +176,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
**Host**: https://gitea.projectium.com
#### Test Command:
```powershell
$env:GITEA_HOST="https://gitea.projectium.com"
$env:GITEA_ACCESS_TOKEN="c72bc0f14f623fec233d3c94b3a16397fe3649ef"
@@ -165,12 +184,14 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
```
### Success Indicators for All Gitea Servers:
- Server connects to Gitea instance
- Lists available repositories
- Can read repository metadata
- Authentication succeeds
### Troubleshooting:
- **401 Unauthorized**: Token is invalid or expired
- **Connection refused**: Check if Gitea instance is accessible
- **SSL errors**: Verify HTTPS certificate validity
@@ -182,12 +203,14 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
**Purpose**: Container management and Docker operations
### Test Command:
```powershell
$env:DOCKER_HOST="npipe:////./pipe/docker_engine"
npx -y @modelcontextprotocol/server-docker
```
### Expected Capabilities:
- List containers
- Start/stop containers
- View container logs
@@ -195,6 +218,7 @@ npx -y @modelcontextprotocol/server-docker
- Manage images
### Manual Test Steps:
1. Ensure Docker Desktop or Podman is running
2. Verify named pipe exists: `npipe:////./pipe/docker_engine`
3. Run the server command
@@ -207,17 +231,20 @@ npx -y @modelcontextprotocol/server-docker
```
### Verify Docker Access Directly:
```powershell
docker ps
docker images
```
### Success Indicators:
- Server connects to Docker daemon
- Can list containers and images
- Can execute container operations
### Troubleshooting:
- **Cannot connect to Docker daemon**: Ensure Docker Desktop is running
- **Named pipe error**: Check DOCKER_HOST configuration
- **Permission denied**: Run as administrator
@@ -229,14 +256,17 @@ docker images
**Purpose**: Access and manipulate files in specified directory
### Test Command:
```bash
npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com"
```
```powershell
npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com"
```
### Expected Capabilities:
- List directory contents
- Read files
- Write files
@@ -244,6 +274,7 @@ npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectiu
- Get file metadata
### Manual Test Steps:
1. Run the command above
2. Use MCP Inspector:
```bash
@@ -255,18 +286,21 @@ npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectiu
3. Test listing directory contents
### Verify Directory Access:
```powershell
Test-Path "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com"
Get-ChildItem "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com" | Select-Object -First 5
```
### Success Indicators:
- Server starts successfully
- Can list directory contents
- Can read file contents
- Write operations work (if permissions allow)
### Security Note:
This server has access to your entire project directory. Ensure it's only used in trusted contexts.
---
@@ -276,14 +310,17 @@ This server has access to your entire project directory. Ensure it's only used i
**Purpose**: Make HTTP requests to external APIs and websites
### Test Command:
```bash
npx -y @modelcontextprotocol/server-fetch
```
```powershell
npx -y @modelcontextprotocol/server-fetch
```
### Expected Capabilities:
- HTTP GET requests
- HTTP POST requests
- Handle JSON/text responses
@@ -291,6 +328,7 @@ npx -y @modelcontextprotocol/server-fetch
- Follow redirects
### Manual Test Steps:
1. Run the server command
2. Use MCP Inspector:
```bash
@@ -302,9 +340,11 @@ npx -y @modelcontextprotocol/server-fetch
3. Test fetching a URL through the inspector
### Test Fetch Capability Directly:
```bash
curl https://api.github.com/users/github
```
```powershell
# Test if curl/web requests work
curl https://api.github.com/users/github
@@ -313,6 +353,7 @@ Invoke-RestMethod -Uri "https://api.github.com/users/github"
```
### Success Indicators:
- Server initializes
- Can fetch URLs
- Returns proper HTTP responses
@@ -414,6 +455,7 @@ npm install -g @modelcontextprotocol/inspector
# Test any server
mcp-inspector <command> <args>
```
```powershell
# Install globally
npm install -g @modelcontextprotocol/inspector
@@ -434,6 +476,7 @@ mcp-inspector npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-cra
# Test Docker server
mcp-inspector npx -y @modelcontextprotocol/server-docker
```
```powershell
# Test fetch server
mcp-inspector npx -y @modelcontextprotocol/server-fetch
@@ -450,19 +493,25 @@ mcp-inspector npx -y @modelcontextprotocol/server-docker
## Common Issues and Solutions
### Issue: "Cannot find module" or "Command not found"
**Solution**: Ensure Node.js and npm are installed and in PATH
### Issue: MCP server starts but doesn't respond
**Solution**: Check server logs, verify stdio communication, ensure no JSON parsing errors
### Issue: Authentication failures with Gitea
**Solution**:
**Solution**:
1. Verify tokens haven't expired
2. Check token permissions in Gitea settings
3. Ensure network access to Gitea instances
### Issue: Docker server cannot connect
**Solution**:
1. Start Docker Desktop
2. Verify DOCKER_HOST environment variable
3. Check Windows named pipe permissions
@@ -472,6 +521,7 @@ mcp-inspector npx -y @modelcontextprotocol/server-docker
## Next Steps
After testing:
1. Document which servers are working
2. Fix any configuration issues
3. Update tokens as needed

View File

@@ -6,6 +6,7 @@
## Configuration Summary
### MCP Configuration File
**Location**: `c:/Users/games3/AppData/Roaming/Code/User/mcp.json`
```json
@@ -19,6 +20,7 @@
```
### Key Configuration Details
- **Package**: `docker-mcp` (community MCP server with SSH support)
- **Connection Method**: SSH to Podman machine
- **SSH Endpoint**: `root@127.0.0.1:2972`
@@ -27,12 +29,14 @@
## Podman System Status
### Podman Machine
```
NAME VM TYPE CREATED CPUS MEMORY DISK SIZE
podman-machine-default wsl 4 weeks ago 4 2GiB 100GiB
```
### Connection Information
```
Name: podman-machine-default-root
URI: ssh://root@127.0.0.1:2972/run/podman/podman.sock
@@ -40,7 +44,9 @@ Default: true
```
### Container Status
Podman is operational with 3 containers:
- `flyer-dev` (Ubuntu) - Exited
- `flyer-crawler-redis` (Redis) - Exited
- `flyer-crawler-postgres` (PostGIS) - Exited
@@ -48,11 +54,13 @@ Podman is operational with 3 containers:
## Test Results
### Command Line Tests
**Podman CLI**: Working - `podman ps` returns successfully
**Container Management**: Working - Can list and manage containers
**Socket Connection**: Working - SSH connection to Podman machine functional
### MCP Server Integration Tests
**Configuration File**: Updated and valid JSON
**VSCode Restart**: Completed to load new MCP configuration
**Package Selection**: Using `docker-mcp` (supports SSH connections)
@@ -85,16 +93,19 @@ Once the MCP server is fully loaded, the following tools should be available:
### If MCP Server Doesn't Connect
1. **Verify Podman is running**:
```bash
podman ps
```
2. **Check SSH connection**:
```bash
podman system connection list
```
3. **Test docker-mcp package manually**:
```powershell
$env:DOCKER_HOST="ssh://root@127.0.0.1:2972/run/podman/podman.sock"
npx -y docker-mcp

View File

@@ -50,12 +50,12 @@ async function main() {
DIRECTORIES_TO_CLEAN.map((dir) => {
const absolutePath = resolve(projectRoot, dir);
return removeDirectory(absolutePath);
})
}),
);
const successCount = results.filter(Boolean).length;
console.log(
`Clean complete: ${successCount}/${DIRECTORIES_TO_CLEAN.length} directories processed.`
`Clean complete: ${successCount}/${DIRECTORIES_TO_CLEAN.length} directories processed.`,
);
// Always exit successfully (matches rimraf behavior)

View File

@@ -9,11 +9,7 @@ import '@testing-library/jest-dom';
describe('StatCard', () => {
it('renders title and value correctly', () => {
renderWithProviders(
<StatCard
title="Total Users"
value="1,234"
icon={<div data-testid="mock-icon">Icon</div>}
/>,
<StatCard title="Total Users" value="1,234" icon={<div data-testid="mock-icon">Icon</div>} />,
);
expect(screen.getByText('Total Users')).toBeInTheDocument();
@@ -22,13 +18,9 @@ describe('StatCard', () => {
it('renders the icon', () => {
renderWithProviders(
<StatCard
title="Total Users"
value="1,234"
icon={<div data-testid="mock-icon">Icon</div>}
/>,
<StatCard title="Total Users" value="1,234" icon={<div data-testid="mock-icon">Icon</div>} />,
);
expect(screen.getByTestId('mock-icon')).toBeInTheDocument();
});
});
});

View File

@@ -144,4 +144,4 @@ export const batchLimiter = rateLimit({
message: 'Too many batch requests from this IP, please try again later.',
});
export const budgetUpdateLimiter = batchLimiter; // Alias
export const budgetUpdateLimiter = batchLimiter; // Alias

View File

@@ -73,9 +73,15 @@ export const FlyerReviewPage: React.FC = () => {
flyers.map((flyer) => (
<li key={flyer.flyer_id} className="p-4 hover:bg-gray-50 dark:hover:bg-gray-700/50">
<Link to={`/flyers/${flyer.flyer_id}`} className="flex items-center space-x-4">
<img src={flyer.icon_url || undefined} alt={flyer.store?.name || 'Unknown Store'} className="w-12 h-12 rounded-md object-cover" />
<img
src={flyer.icon_url || undefined}
alt={flyer.store?.name || 'Unknown Store'}
className="w-12 h-12 rounded-md object-cover"
/>
<div className="flex-1">
<p className="font-semibold text-gray-800 dark:text-white">{flyer.store?.name || 'Unknown Store'}</p>
<p className="font-semibold text-gray-800 dark:text-white">
{flyer.store?.name || 'Unknown Store'}
</p>
<p className="text-sm text-gray-500 dark:text-gray-400">{flyer.file_name}</p>
</div>
<div className="text-right text-sm text-gray-500 dark:text-gray-400">
@@ -90,4 +96,4 @@ export const FlyerReviewPage: React.FC = () => {
)}
</div>
);
};
};

View File

@@ -6,7 +6,9 @@ import { renderWithProviders } from '../../../tests/utils/renderWithProviders';
describe('StatCard', () => {
it('should render the title and value correctly', () => {
renderWithProviders(<StatCard title="Test Stat" value="1,234" icon={<div data-testid="icon" />} />);
renderWithProviders(
<StatCard title="Test Stat" value="1,234" icon={<div data-testid="icon" />} />,
);
expect(screen.getByText('Test Stat')).toBeInTheDocument();
expect(screen.getByText('1,234')).toBeInTheDocument();

View File

@@ -69,4 +69,4 @@ describe('AppProviders', () => {
expect(masterItemsProvider).toContainElement(userDataProvider);
expect(userDataProvider).toContainElement(child);
});
});
});

View File

@@ -35,7 +35,7 @@ export const FlyersProvider: React.FC<{ children: ReactNode }> = ({ children })
isRefetchingFlyers,
refetchFlyers,
}),
[flyers, isLoadingFlyers, error, isRefetchingFlyers, refetchFlyers]
[flyers, isLoadingFlyers, error, isRefetchingFlyers, refetchFlyers],
);
return <FlyersContext.Provider value={value}>{children}</FlyersContext.Provider>;

View File

@@ -12,11 +12,7 @@ import { useMasterItemsQuery } from '../hooks/queries/useMasterItemsQuery';
* Master items are cached longer (10 minutes) since they change infrequently.
*/
export const MasterItemsProvider: React.FC<{ children: ReactNode }> = ({ children }) => {
const {
data: masterItems = [],
isLoading,
error,
} = useMasterItemsQuery();
const { data: masterItems = [], isLoading, error } = useMasterItemsQuery();
const value = useMemo(
() => ({
@@ -24,7 +20,7 @@ export const MasterItemsProvider: React.FC<{ children: ReactNode }> = ({ childre
isLoading,
error: error?.message || null,
}),
[masterItems, isLoading, error]
[masterItems, isLoading, error],
);
return <MasterItemsContext.Provider value={value}>{children}</MasterItemsContext.Provider>;

View File

@@ -38,7 +38,15 @@ export const UserDataProvider: React.FC<{ children: ReactNode }> = ({ children }
isLoading: isEnabled && (isLoadingWatched || isLoadingLists),
error: watchedError?.message || listsError?.message || null,
}),
[watchedItems, shoppingLists, isEnabled, isLoadingWatched, isLoadingLists, watchedError, listsError]
[
watchedItems,
shoppingLists,
isEnabled,
isLoadingWatched,
isLoadingLists,
watchedError,
listsError,
],
);
return <UserDataContext.Provider value={value}>{children}</UserDataContext.Provider>;

View File

@@ -705,7 +705,9 @@ describe('AI Routes (/api/v1/ai)', () => {
});
it('should return 200 with a stubbed response on success', async () => {
const response = await supertest(app).post('/api/v1/ai/check-flyer').attach('image', imagePath);
const response = await supertest(app)
.post('/api/v1/ai/check-flyer')
.attach('image', imagePath);
expect(response.status).toBe(200);
expect(response.body.data.is_flyer).toBe(true);
});
@@ -717,7 +719,9 @@ describe('AI Routes (/api/v1/ai)', () => {
throw new Error('Logging failed');
});
// Attach a valid file to get past the `if (!req.file)` check.
const response = await supertest(app).post('/api/v1/ai/check-flyer').attach('image', imagePath);
const response = await supertest(app)
.post('/api/v1/ai/check-flyer')
.attach('image', imagePath);
expect(response.status).toBe(500);
});
});
@@ -900,14 +904,18 @@ describe('AI Routes (/api/v1/ai)', () => {
});
it('POST /generate-image should return 501 Not Implemented', async () => {
const response = await supertest(app).post('/api/v1/ai/generate-image').send({ prompt: 'test' });
const response = await supertest(app)
.post('/api/v1/ai/generate-image')
.send({ prompt: 'test' });
expect(response.status).toBe(501);
expect(response.body.error.message).toBe('Image generation is not yet implemented.');
});
it('POST /generate-speech should return 501 Not Implemented', async () => {
const response = await supertest(app).post('/api/v1/ai/generate-speech').send({ text: 'test' });
const response = await supertest(app)
.post('/api/v1/ai/generate-speech')
.send({ text: 'test' });
expect(response.status).toBe(501);
expect(response.body.error.message).toBe('Speech generation is not yet implemented.');
});

View File

@@ -204,7 +204,9 @@ describe('Gamification Routes (/api/v1/achievements)', () => {
mockedIsAdmin.mockImplementation((req: Request, res: Response, next: NextFunction) => next()); // Grant admin access
vi.mocked(db.gamificationRepo.awardAchievement).mockResolvedValue(undefined);
const response = await supertest(adminApp).post('/api/v1/achievements/award').send(awardPayload);
const response = await supertest(adminApp)
.post('/api/v1/achievements/award')
.send(awardPayload);
expect(response.status).toBe(200);
expect(response.body.data.message).toContain('Successfully awarded');
@@ -224,7 +226,9 @@ describe('Gamification Routes (/api/v1/achievements)', () => {
mockedIsAdmin.mockImplementation((req: Request, res: Response, next: NextFunction) => next());
vi.mocked(db.gamificationRepo.awardAchievement).mockRejectedValue(new Error('DB Error'));
const response = await supertest(adminApp).post('/api/v1/achievements/award').send(awardPayload);
const response = await supertest(adminApp)
.post('/api/v1/achievements/award')
.send(awardPayload);
expect(response.status).toBe(500);
expect(response.body.error.message).toBe('DB Error');
});

View File

@@ -99,7 +99,9 @@ describe('Price Routes (/api/v1/price-history)', () => {
});
it('should return 400 if masterItemIds is an empty array', async () => {
const response = await supertest(app).post('/api/v1/price-history').send({ masterItemIds: [] });
const response = await supertest(app)
.post('/api/v1/price-history')
.send({ masterItemIds: [] });
expect(response.status).toBe(400);
expect(response.body.error.details[0].message).toBe(

View File

@@ -60,7 +60,9 @@ describe('Stats Routes (/api/v1/stats)', () => {
});
it('should return 400 for invalid query parameters', async () => {
const response = await supertest(app).get('/api/v1/stats/most-frequent-sales?days=0&limit=abc');
const response = await supertest(app).get(
'/api/v1/stats/most-frequent-sales?days=0&limit=abc',
);
expect(response.status).toBe(400);
expect(response.body.error.details).toBeDefined();
expect(response.body.error.details.length).toBe(2);

View File

@@ -388,7 +388,9 @@ describe('User Routes (/api/v1/users)', () => {
describe('Shopping List Item Routes', () => {
describe('POST /shopping-lists/:listId/items (Validation)', () => {
it('should return 400 if neither masterItemId nor customItemName are provided', async () => {
const response = await supertest(app).post('/api/v1/users/shopping-lists/1/items').send({});
const response = await supertest(app)
.post('/api/v1/users/shopping-lists/1/items')
.send({});
expect(response.status).toBe(400);
expect(response.body.error.details[0].message).toBe(
'Either masterItemId or customItemName must be provided.',
@@ -512,7 +514,9 @@ describe('User Routes (/api/v1/users)', () => {
});
it('should return 400 if no update fields are provided for an item', async () => {
const response = await supertest(app).put(`/api/v1/users/shopping-lists/items/101`).send({});
const response = await supertest(app)
.put(`/api/v1/users/shopping-lists/items/101`)
.send({});
expect(response.status).toBe(400);
expect(response.body.error.details[0].message).toContain(
'At least one field (quantity, is_purchased) must be provided.',
@@ -1011,7 +1015,9 @@ describe('User Routes (/api/v1/users)', () => {
const addressData = { address_line_1: '123 New St' };
vi.mocked(userService.upsertUserAddress).mockResolvedValue(5);
const response = await supertest(app).put('/api/v1/users/profile/address').send(addressData);
const response = await supertest(app)
.put('/api/v1/users/profile/address')
.send(addressData);
expect(response.status).toBe(200);
expect(userService.upsertUserAddress).toHaveBeenCalledWith(

View File

@@ -51,7 +51,9 @@ export class AiAnalysisService {
// Normalize sources to a consistent format.
const mappedSources = (response.sources || []).map(
(s: RawSource) =>
(s.web ? { uri: s.web.uri || '', title: s.web.title || 'Untitled' } : { uri: '', title: 'Untitled' }) as Source,
(s.web
? { uri: s.web.uri || '', title: s.web.title || 'Untitled' }
: { uri: '', title: 'Untitled' }) as Source,
);
return { ...response, sources: mappedSources };
}
@@ -82,7 +84,9 @@ export class AiAnalysisService {
// Normalize sources to a consistent format.
const mappedSources = (response.sources || []).map(
(s: RawSource) =>
(s.web ? { uri: s.web.uri || '', title: s.web.title || 'Untitled' } : { uri: '', title: 'Untitled' }) as Source,
(s.web
? { uri: s.web.uri || '', title: s.web.title || 'Untitled' }
: { uri: '', title: 'Untitled' }) as Source,
);
return { ...response, sources: mappedSources };
}

View File

@@ -45,7 +45,7 @@ describe('AnalyticsService', () => {
data,
attemptsMade: 1,
updateProgress: vi.fn(),
} as unknown as Job<T>);
}) as unknown as Job<T>;
describe('processDailyReportJob', () => {
it('should process successfully', async () => {
@@ -207,4 +207,4 @@ describe('AnalyticsService', () => {
);
});
});
});
});

View File

@@ -76,4 +76,4 @@ export class AnalyticsService {
}
}
export const analyticsService = new AnalyticsService();
export const analyticsService = new AnalyticsService();

View File

@@ -45,7 +45,9 @@ describe('BrandService', () => {
vi.mocked(db.adminRepo.updateBrandLogo).mockRejectedValue(dbError);
await expect(brandService.updateBrandLogo(brandId, mockFile, mockLogger)).rejects.toThrow('DB Error');
await expect(brandService.updateBrandLogo(brandId, mockFile, mockLogger)).rejects.toThrow(
'DB Error',
);
});
});
});
});

View File

@@ -3,11 +3,15 @@ import * as db from './db/index.db';
import type { Logger } from 'pino';
class BrandService {
async updateBrandLogo(brandId: number, file: Express.Multer.File, logger: Logger): Promise<string> {
async updateBrandLogo(
brandId: number,
file: Express.Multer.File,
logger: Logger,
): Promise<string> {
const logoUrl = `/flyer-images/${file.filename}`;
await db.adminRepo.updateBrandLogo(brandId, logoUrl, logger);
return logoUrl;
}
}
export const brandService = new BrandService();
export const brandService = new BrandService();

View File

@@ -28,9 +28,15 @@ export class BudgetRepository {
);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getBudgetsForUser', { userId }, {
defaultMessage: 'Failed to retrieve budgets.',
});
handleDbError(
error,
logger,
'Database error in getBudgetsForUser',
{ userId },
{
defaultMessage: 'Failed to retrieve budgets.',
},
);
}
}
@@ -60,12 +66,18 @@ export class BudgetRepository {
return res.rows[0];
});
} catch (error) {
handleDbError(error, logger, 'Database error in createBudget', { budgetData, userId }, {
fkMessage: 'The specified user does not exist.',
notNullMessage: 'One or more required budget fields are missing.',
checkMessage: 'Invalid value provided for budget period.',
defaultMessage: 'Failed to create budget.',
});
handleDbError(
error,
logger,
'Database error in createBudget',
{ budgetData, userId },
{
fkMessage: 'The specified user does not exist.',
notNullMessage: 'One or more required budget fields are missing.',
checkMessage: 'Invalid value provided for budget period.',
defaultMessage: 'Failed to create budget.',
},
);
}
}
@@ -98,9 +110,15 @@ export class BudgetRepository {
return res.rows[0];
} catch (error) {
if (error instanceof NotFoundError) throw error;
handleDbError(error, logger, 'Database error in updateBudget', { budgetId, userId }, {
defaultMessage: 'Failed to update budget.',
});
handleDbError(
error,
logger,
'Database error in updateBudget',
{ budgetId, userId },
{
defaultMessage: 'Failed to update budget.',
},
);
}
}
@@ -120,9 +138,15 @@ export class BudgetRepository {
}
} catch (error) {
if (error instanceof NotFoundError) throw error;
handleDbError(error, logger, 'Database error in deleteBudget', { budgetId, userId }, {
defaultMessage: 'Failed to delete budget.',
});
handleDbError(
error,
logger,
'Database error in deleteBudget',
{ budgetId, userId },
{
defaultMessage: 'Failed to delete budget.',
},
);
}
}

View File

@@ -158,4 +158,4 @@ describe('Conversion DB Service', () => {
);
});
});
});
});

View File

@@ -194,6 +194,7 @@ export function handleDbError(
// Fallback generic error
// Use the consistent DatabaseError from the processing errors module for the fallback.
const errorMessage = options.defaultMessage || `Failed to perform operation on ${options.entityName || 'database'}.`;
const errorMessage =
options.defaultMessage || `Failed to perform operation on ${options.entityName || 'database'}.`;
throw new ProcessingDatabaseError(errorMessage);
}

View File

@@ -94,4 +94,4 @@ describe('Price DB Service', () => {
);
});
});
});
});

View File

@@ -61,4 +61,4 @@ export const priceRepo = {
);
}
},
};
};

View File

@@ -25,9 +25,15 @@ export class RecipeRepository {
);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getRecipesBySalePercentage', { minPercentage }, {
defaultMessage: 'Failed to get recipes by sale percentage.',
});
handleDbError(
error,
logger,
'Database error in getRecipesBySalePercentage',
{ minPercentage },
{
defaultMessage: 'Failed to get recipes by sale percentage.',
},
);
}
}
@@ -95,9 +101,15 @@ export class RecipeRepository {
);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getUserFavoriteRecipes', { userId }, {
defaultMessage: 'Failed to get favorite recipes.',
});
handleDbError(
error,
logger,
'Database error in getUserFavoriteRecipes',
{ userId },
{
defaultMessage: 'Failed to get favorite recipes.',
},
);
}
}
@@ -124,10 +136,16 @@ export class RecipeRepository {
}
return res.rows[0];
} catch (error) {
handleDbError(error, logger, 'Database error in addFavoriteRecipe', { userId, recipeId }, {
fkMessage: 'The specified user or recipe does not exist.',
defaultMessage: 'Failed to add favorite recipe.',
});
handleDbError(
error,
logger,
'Database error in addFavoriteRecipe',
{ userId, recipeId },
{
fkMessage: 'The specified user or recipe does not exist.',
defaultMessage: 'Failed to add favorite recipe.',
},
);
}
}
@@ -146,9 +164,15 @@ export class RecipeRepository {
throw new NotFoundError('Favorite recipe not found for this user.');
}
} catch (error) {
handleDbError(error, logger, 'Database error in removeFavoriteRecipe', { userId, recipeId }, {
defaultMessage: 'Failed to remove favorite recipe.',
});
handleDbError(
error,
logger,
'Database error in removeFavoriteRecipe',
{ userId, recipeId },
{
defaultMessage: 'Failed to remove favorite recipe.',
},
);
}
}
@@ -160,23 +184,55 @@ export class RecipeRepository {
*/
async createRecipe(
userId: string,
recipeData: Pick<Recipe, 'name' | 'instructions' | 'description' | 'prep_time_minutes' | 'cook_time_minutes' | 'servings' | 'photo_url'>,
logger: Logger
recipeData: Pick<
Recipe,
| 'name'
| 'instructions'
| 'description'
| 'prep_time_minutes'
| 'cook_time_minutes'
| 'servings'
| 'photo_url'
>,
logger: Logger,
): Promise<Recipe> {
try {
const { name, instructions, description, prep_time_minutes, cook_time_minutes, servings, photo_url } = recipeData;
const {
name,
instructions,
description,
prep_time_minutes,
cook_time_minutes,
servings,
photo_url,
} = recipeData;
const res = await this.db.query<Recipe>(
`INSERT INTO public.recipes
(user_id, name, instructions, description, prep_time_minutes, cook_time_minutes, servings, photo_url, status)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'public')
RETURNING *`,
[userId, name, instructions, description, prep_time_minutes, cook_time_minutes, servings, photo_url]
[
userId,
name,
instructions,
description,
prep_time_minutes,
cook_time_minutes,
servings,
photo_url,
],
);
return res.rows[0];
} catch (error) {
handleDbError(error, logger, 'Database error in createRecipe', { userId, recipeData }, {
defaultMessage: 'Failed to create recipe.',
});
handleDbError(
error,
logger,
'Database error in createRecipe',
{ userId, recipeData },
{
defaultMessage: 'Failed to create recipe.',
},
);
}
}
@@ -206,9 +262,15 @@ export class RecipeRepository {
throw new NotFoundError('Recipe not found or user does not have permission to delete.');
}
} catch (error) {
handleDbError(error, logger, 'Database error in deleteRecipe', { recipeId, userId, isAdmin }, {
defaultMessage: 'Failed to delete recipe.',
});
handleDbError(
error,
logger,
'Database error in deleteRecipe',
{ recipeId, userId, isAdmin },
{
defaultMessage: 'Failed to delete recipe.',
},
);
}
}
@@ -271,9 +333,15 @@ export class RecipeRepository {
if (error instanceof Error && error.message === 'No fields provided to update.') {
throw error;
}
handleDbError(error, logger, 'Database error in updateRecipe', { recipeId, userId, updates }, {
defaultMessage: 'Failed to update recipe.',
});
handleDbError(
error,
logger,
'Database error in updateRecipe',
{ recipeId, userId, updates },
{
defaultMessage: 'Failed to update recipe.',
},
);
}
}
@@ -315,9 +383,15 @@ export class RecipeRepository {
}
return res.rows[0];
} catch (error) {
handleDbError(error, logger, 'Database error in getRecipeById', { recipeId }, {
defaultMessage: 'Failed to retrieve recipe.',
});
handleDbError(
error,
logger,
'Database error in getRecipeById',
{ recipeId },
{
defaultMessage: 'Failed to retrieve recipe.',
},
);
}
}
@@ -341,9 +415,15 @@ export class RecipeRepository {
const res = await this.db.query<RecipeComment>(query, [recipeId]);
return res.rows;
} catch (error) {
handleDbError(error, logger, 'Database error in getRecipeComments', { recipeId }, {
defaultMessage: 'Failed to get recipe comments.',
});
handleDbError(
error,
logger,
'Database error in getRecipeComments',
{ recipeId },
{
defaultMessage: 'Failed to get recipe comments.',
},
);
}
}
@@ -374,7 +454,10 @@ export class RecipeRepository {
logger,
'Database error in addRecipeComment',
{ recipeId, userId, parentCommentId },
{ fkMessage: 'The specified recipe, user, or parent comment does not exist.', defaultMessage: 'Failed to add recipe comment.' },
{
fkMessage: 'The specified recipe, user, or parent comment does not exist.',
defaultMessage: 'Failed to add recipe comment.',
},
);
}
}
@@ -398,10 +481,16 @@ export class RecipeRepository {
// raise_exception
throw new Error(error.message); // Re-throw the user-friendly message from the DB function.
}
handleDbError(error, logger, 'Database error in forkRecipe', { userId, originalRecipeId }, {
fkMessage: 'The specified user or original recipe does not exist.',
defaultMessage: 'Failed to fork recipe.',
});
handleDbError(
error,
logger,
'Database error in forkRecipe',
{ userId, originalRecipeId },
{
fkMessage: 'The specified user or original recipe does not exist.',
defaultMessage: 'Failed to fork recipe.',
},
);
}
}
}

View File

@@ -81,4 +81,4 @@ describe('EventBus', () => {
// callback2 should be called again
expect(callback2).toHaveBeenCalledTimes(2);
});
});
});

View File

@@ -4,7 +4,11 @@ import sharp from 'sharp';
import type { Dirent } from 'node:fs';
import type { Job } from 'bullmq';
import type { Logger } from 'pino';
import { ImageConversionError, PdfConversionError, UnsupportedFileTypeError } from './processingErrors';
import {
ImageConversionError,
PdfConversionError,
UnsupportedFileTypeError,
} from './processingErrors';
import type { FlyerJobData } from '../types/job-data';
// Define the image formats supported by the AI model
const SUPPORTED_IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.webp', '.heic', '.heif'];
@@ -169,7 +173,9 @@ export class FlyerFileHandler {
return outputPath;
} catch (error) {
logger.error({ err: error, filePath }, 'Failed to convert image to PNG using sharp.');
throw new ImageConversionError(`Image conversion to PNG failed for ${path.basename(filePath)}.`);
throw new ImageConversionError(
`Image conversion to PNG failed for ${path.basename(filePath)}.`,
);
}
}
@@ -217,7 +223,10 @@ export class FlyerFileHandler {
// For other supported types like WEBP, etc., which are less likely to have problematic EXIF,
// we can process them directly without modification for now.
logger.info(`Processing as a single image file (non-JPEG/PNG): ${filePath}`);
return { imagePaths: [{ path: filePath, mimetype: `image/${fileExt.slice(1)}` }], createdImagePaths: [] };
return {
imagePaths: [{ path: filePath, mimetype: `image/${fileExt.slice(1)}` }],
createdImagePaths: [],
};
}
/**
@@ -294,9 +303,11 @@ export class FlyerFileHandler {
await this.fs.rename(tempPath, image.path);
} catch (error) {
logger.error({ err: error, path: image.path }, 'Failed to optimize image.');
throw new ImageConversionError(`Image optimization failed for ${path.basename(image.path)}.`);
throw new ImageConversionError(
`Image optimization failed for ${path.basename(image.path)}.`,
);
}
}
logger.info('Image optimization complete.');
}
}
}

View File

@@ -102,10 +102,10 @@ describe('FlyerPersistenceService', () => {
mockFlyerData,
mockItemsForDb,
mockLogger,
mockClient
mockClient,
);
expect(mockLogger.info).toHaveBeenCalledWith(
expect.stringContaining('Successfully processed flyer')
expect.stringContaining('Successfully processed flyer'),
);
// Verify AdminRepository usage
@@ -117,7 +117,7 @@ describe('FlyerPersistenceService', () => {
displayText: `Processed a new flyer for ${mockFlyerData.store_name}.`,
details: { flyerId: mockCreatedFlyer.flyer_id, storeName: mockFlyerData.store_name },
}),
mockLogger
mockLogger,
);
// Verify GamificationRepository usage
@@ -153,8 +153,8 @@ describe('FlyerPersistenceService', () => {
vi.mocked(createFlyerAndItems).mockRejectedValue(error);
await expect(
service.saveFlyer(mockFlyerData, mockItemsForDb, 'user-1', mockLogger)
service.saveFlyer(mockFlyerData, mockItemsForDb, 'user-1', mockLogger),
).rejects.toThrow(error);
});
});
});
});

View File

@@ -52,7 +52,11 @@ describe('GamificationService', () => {
await gamificationService.awardAchievement(userId, achievementName, mockLogger);
expect(gamificationRepo.awardAchievement).toHaveBeenCalledWith(userId, achievementName, mockLogger);
expect(gamificationRepo.awardAchievement).toHaveBeenCalledWith(
userId,
achievementName,
mockLogger,
);
});
it('should re-throw ForeignKeyConstraintError without logging it as a service error', async () => {
@@ -163,4 +167,4 @@ describe('GamificationService', () => {
);
});
});
});
});

View File

@@ -72,4 +72,4 @@ class GamificationService {
}
}
export const gamificationService = new GamificationService();
export const gamificationService = new GamificationService();

View File

@@ -25,7 +25,10 @@ export class GeocodingService {
return JSON.parse(cached);
}
} catch (error) {
logger.error({ err: error, cacheKey }, 'Redis GET or JSON.parse command failed. Proceeding without cache.');
logger.error(
{ err: error, cacheKey },
'Redis GET or JSON.parse command failed. Proceeding without cache.',
);
}
if (process.env.GOOGLE_MAPS_API_KEY) {
@@ -42,7 +45,7 @@ export class GeocodingService {
} catch (error) {
logger.error(
{ err: error },
'An error occurred while calling the Google Maps Geocoding API. Falling back to Nominatim.'
'An error occurred while calling the Google Maps Geocoding API. Falling back to Nominatim.',
);
}
} else {
@@ -69,7 +72,10 @@ export class GeocodingService {
try {
await redis.set(cacheKey, JSON.stringify(result), 'EX', 60 * 60 * 24 * 30); // Cache for 30 days
} catch (error) {
logger.error({ err: error, cacheKey }, 'Redis SET command failed. Result will not be cached.');
logger.error(
{ err: error, cacheKey },
'Redis SET command failed. Result will not be cached.',
);
}
}

View File

@@ -121,7 +121,9 @@ describe('Processing Errors', () => {
expect(error).toBeInstanceOf(TransformationError);
expect(error.message).toBe(message);
expect(error.errorCode).toBe('TRANSFORMATION_FAILED');
expect(error.userMessage).toBe('There was a problem transforming the flyer data. Please check the input.');
expect(error.userMessage).toBe(
'There was a problem transforming the flyer data. Please check the input.',
);
});
});
@@ -147,7 +149,9 @@ describe('Processing Errors', () => {
expect(error).toBeInstanceOf(ImageConversionError);
expect(error.message).toBe(message);
expect(error.errorCode).toBe('IMAGE_CONVERSION_FAILED');
expect(error.userMessage).toBe('The uploaded image could not be processed. It might be corrupt or in an unsupported format.');
expect(error.userMessage).toBe(
'The uploaded image could not be processed. It might be corrupt or in an unsupported format.',
);
});
});

View File

@@ -66,7 +66,10 @@ describe('SystemService', () => {
});
it('should return success: false when process does not exist', async () => {
const error = new Error('Command failed') as ExecException & { stdout?: string; stderr?: string };
const error = new Error('Command failed') as ExecException & {
stdout?: string;
stderr?: string;
};
error.code = 1;
error.stderr = "[PM2][ERROR] Process or Namespace flyer-crawler-api doesn't exist";
@@ -83,4 +86,4 @@ describe('SystemService', () => {
);
});
});
});
});

View File

@@ -121,4 +121,4 @@ describe('tokenStorage', () => {
);
});
});
});
});

View File

@@ -43,4 +43,4 @@ export const removeToken = (): void => {
} catch (error) {
console.error('SecurityError: Failed to access localStorage to remove token.', error);
}
};
};

View File

@@ -193,7 +193,9 @@ describe('UserService', () => {
// Act & Assert
// The service should wrap the generic error in a `DatabaseError`.
await expect(userService.upsertUserAddress(user, addressData, logger)).rejects.toBeInstanceOf(DatabaseError);
await expect(userService.upsertUserAddress(user, addressData, logger)).rejects.toBeInstanceOf(
DatabaseError,
);
// Assert that the error was logged correctly
expect(logger.error).toHaveBeenCalledWith(
@@ -285,7 +287,10 @@ describe('UserService', () => {
await expect(userService.updateUserAvatar(userId, file, logger)).rejects.toThrow(
DatabaseError,
);
expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Failed to update user avatar: ${genericError.message}`);
expect(logger.error).toHaveBeenCalledWith(
expect.any(Object),
`Failed to update user avatar: ${genericError.message}`,
);
});
});
@@ -313,8 +318,13 @@ describe('UserService', () => {
vi.mocked(bcrypt.hash).mockResolvedValue();
mocks.mockUpdateUserPassword.mockRejectedValue(genericError);
await expect(userService.updateUserPassword(userId, newPassword, logger)).rejects.toThrow(DatabaseError);
expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Failed to update user password: ${genericError.message}`);
await expect(userService.updateUserPassword(userId, newPassword, logger)).rejects.toThrow(
DatabaseError,
);
expect(logger.error).toHaveBeenCalledWith(
expect.any(Object),
`Failed to update user password: ${genericError.message}`,
);
});
});
@@ -340,9 +350,9 @@ describe('UserService', () => {
const { logger } = await import('./logger.server');
mocks.mockFindUserWithPasswordHashById.mockResolvedValue(null);
await expect(
userService.deleteUserAccount('user-123', 'password', logger),
).rejects.toThrow(NotFoundError);
await expect(userService.deleteUserAccount('user-123', 'password', logger)).rejects.toThrow(
NotFoundError,
);
});
it('should throw ValidationError if password does not match', async () => {
@@ -371,8 +381,13 @@ describe('UserService', () => {
});
vi.mocked(bcrypt.compare).mockRejectedValue(genericError);
await expect(userService.deleteUserAccount(userId, password, logger)).rejects.toThrow(DatabaseError);
expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Failed to delete user account: ${genericError.message}`);
await expect(userService.deleteUserAccount(userId, password, logger)).rejects.toThrow(
DatabaseError,
);
expect(logger.error).toHaveBeenCalledWith(
expect.any(Object),
`Failed to delete user account: ${genericError.message}`,
);
});
});
@@ -430,8 +445,13 @@ describe('UserService', () => {
mocks.mockDeleteUserById.mockRejectedValue(genericError);
await expect(userService.deleteUserAsAdmin(deleterId, targetId, logger)).rejects.toThrow(DatabaseError);
expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Admin failed to delete user account: ${genericError.message}`);
await expect(userService.deleteUserAsAdmin(deleterId, targetId, logger)).rejects.toThrow(
DatabaseError,
);
expect(logger.error).toHaveBeenCalledWith(
expect.any(Object),
`Admin failed to delete user account: ${genericError.message}`,
);
});
});
});

View File

@@ -38,13 +38,20 @@ class UserService {
logger,
);
if (!userprofile.address_id) {
await userRepo.updateUserProfile(userprofile.user.user_id, { address_id: addressId }, logger);
await userRepo.updateUserProfile(
userprofile.user.user_id,
{ address_id: addressId },
logger,
);
}
return addressId;
})
.catch((error) => {
const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
logger.error({ err: error, userId: userprofile.user.user_id }, `Transaction to upsert user address failed: ${errorMessage}`);
logger.error(
{ err: error, userId: userprofile.user.user_id },
`Transaction to upsert user address failed: ${errorMessage}`,
);
// Wrap the original error in a service-level DatabaseError to standardize the error contract,
// as this is an unexpected failure within the transaction boundary.
throw new DatabaseError(errorMessage);
@@ -68,7 +75,10 @@ class UserService {
return { deletedCount };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
logger.error({ err: error, attemptsMade: job.attemptsMade }, `Expired token cleanup job failed: ${errorMessage}`);
logger.error(
{ err: error, attemptsMade: job.attemptsMade },
`Expired token cleanup job failed: ${errorMessage}`,
);
// This is a background job, but wrapping in a standard error type is good practice.
throw new DatabaseError(errorMessage);
}
@@ -81,7 +91,11 @@ class UserService {
* @param logger The logger instance.
* @returns The updated user profile.
*/
async updateUserAvatar(userId: string, file: Express.Multer.File, logger: Logger): Promise<Profile> {
async updateUserAvatar(
userId: string,
file: Express.Multer.File,
logger: Logger,
): Promise<Profile> {
try {
const baseUrl = getBaseUrl(logger);
const avatarUrl = `${baseUrl}/uploads/avatars/${file.filename}`;
@@ -151,7 +165,11 @@ class UserService {
* @param logger The logger instance.
* @returns The address object.
*/
async getUserAddress(userProfile: UserProfile, addressId: number, logger: Logger): Promise<Address> {
async getUserAddress(
userProfile: UserProfile,
addressId: number,
logger: Logger,
): Promise<Address> {
if (userProfile.address_id !== addressId) {
throw new ValidationError([], 'Forbidden: You can only access your own address.');
}
@@ -162,7 +180,10 @@ class UserService {
throw error;
}
const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
logger.error({ err: error, userId: userProfile.user.user_id, addressId }, `Failed to get user address: ${errorMessage}`);
logger.error(
{ err: error, userId: userProfile.user.user_id, addressId },
`Failed to get user address: ${errorMessage}`,
);
// Wrap unexpected errors.
throw new DatabaseError(errorMessage);
}
@@ -187,7 +208,10 @@ class UserService {
throw error;
}
const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
log.error({ err: error, deleterId, userToDeleteId }, `Admin failed to delete user account: ${errorMessage}`);
log.error(
{ err: error, deleterId, userToDeleteId },
`Admin failed to delete user account: ${errorMessage}`,
);
// Wrap unexpected errors.
throw new DatabaseError(errorMessage);
}

View File

@@ -173,4 +173,4 @@ describe('Worker Entry Point', () => {
);
});
});
});
});

View File

@@ -28,4 +28,4 @@ process.on('unhandledRejection', (reason, promise) => {
logger.error({ reason, promise }, '[Worker] Unhandled Rejection');
});
logger.info('[Worker] Worker process is running and listening for jobs.');
logger.info('[Worker] Worker process is running and listening for jobs.');

View File

@@ -209,7 +209,9 @@ describe('Authentication E2E Flow', () => {
expect(loginResponse?.status).toBe(200);
// Request password reset (do not poll, as this endpoint is rate-limited)
const forgotResponse = await getRequest().post('/api/v1/auth/forgot-password').send({ email });
const forgotResponse = await getRequest()
.post('/api/v1/auth/forgot-password')
.send({ email });
expect(forgotResponse.status).toBe(200);
const resetToken = forgotResponse.body.data.token;

View File

@@ -112,7 +112,9 @@ describe('Reactions API Routes Integration Tests', () => {
});
it('should return 400 when entityId is missing', async () => {
const response = await request.get('/api/v1/reactions/summary').query({ entityType: 'recipe' });
const response = await request
.get('/api/v1/reactions/summary')
.query({ entityType: 'recipe' });
expect(response.status).toBe(400);
expect(response.body.success).toBe(false);

View File

@@ -72,4 +72,4 @@ vi.mock('../../components/WhatsNewModal', async () => {
vi.mock('../../layouts/MainLayout', async () => {
const { MockMainLayout } = await import('../utils/componentMocks');
return { MainLayout: MockMainLayout };
});
});

View File

@@ -30,47 +30,69 @@ export const cleanupDb = async (options: CleanupOptions) => {
// Children entities first, then parents.
if (options.suggestedCorrectionIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.suggested_corrections WHERE suggested_correction_id = ANY($1::int[])', [options.suggestedCorrectionIds]);
await client.query(
'DELETE FROM public.suggested_corrections WHERE suggested_correction_id = ANY($1::int[])',
[options.suggestedCorrectionIds],
);
logger.debug(`Cleaned up ${options.suggestedCorrectionIds.length} suggested correction(s).`);
}
if (options.budgetIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.budgets WHERE budget_id = ANY($1::int[])', [options.budgetIds]);
await client.query('DELETE FROM public.budgets WHERE budget_id = ANY($1::int[])', [
options.budgetIds,
]);
logger.debug(`Cleaned up ${options.budgetIds.length} budget(s).`);
}
if (options.recipeCommentIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipe_comments WHERE recipe_comment_id = ANY($1::int[])', [options.recipeCommentIds]);
await client.query(
'DELETE FROM public.recipe_comments WHERE recipe_comment_id = ANY($1::int[])',
[options.recipeCommentIds],
);
logger.debug(`Cleaned up ${options.recipeCommentIds.length} recipe comment(s).`);
}
if (options.recipeIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.recipes WHERE recipe_id = ANY($1::int[])', [options.recipeIds]);
await client.query('DELETE FROM public.recipes WHERE recipe_id = ANY($1::int[])', [
options.recipeIds,
]);
logger.debug(`Cleaned up ${options.recipeIds.length} recipe(s).`);
}
if (options.flyerIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.flyers WHERE flyer_id = ANY($1::int[])', [options.flyerIds]);
await client.query('DELETE FROM public.flyers WHERE flyer_id = ANY($1::int[])', [
options.flyerIds,
]);
logger.debug(`Cleaned up ${options.flyerIds.length} flyer(s).`);
}
if (options.storeIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.stores WHERE store_id = ANY($1::int[])', [options.storeIds]);
await client.query('DELETE FROM public.stores WHERE store_id = ANY($1::int[])', [
options.storeIds,
]);
logger.debug(`Cleaned up ${options.storeIds.length} store(s).`);
}
if (options.masterItemIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = ANY($1::int[])', [options.masterItemIds]);
await client.query(
'DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = ANY($1::int[])',
[options.masterItemIds],
);
logger.debug(`Cleaned up ${options.masterItemIds.length} master grocery item(s).`);
}
if (options.shoppingListIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.shopping_lists WHERE shopping_list_id = ANY($1::int[])', [options.shoppingListIds]);
await client.query(
'DELETE FROM public.shopping_lists WHERE shopping_list_id = ANY($1::int[])',
[options.shoppingListIds],
);
logger.debug(`Cleaned up ${options.shoppingListIds.length} shopping list(s).`);
}
if (options.userIds?.filter(Boolean).length) {
await client.query('DELETE FROM public.users WHERE user_id = ANY($1::uuid[])', [options.userIds]);
await client.query('DELETE FROM public.users WHERE user_id = ANY($1::uuid[])', [
options.userIds,
]);
logger.debug(`Cleaned up ${options.userIds.length} user(s).`);
}
} catch (error) {
@@ -78,4 +100,4 @@ export const cleanupDb = async (options: CleanupOptions) => {
} finally {
client.release();
}
};
};

View File

@@ -27,4 +27,4 @@ export const cleanupFiles = async (filePaths: (string | undefined | null)[]) =>
});
await Promise.allSettled(cleanupPromises);
};
};

View File

@@ -6,4 +6,4 @@ export const createMockRequest = (overrides: Partial<Request> = {}): Request =>
log: mockLogger,
...overrides,
} as unknown as Request;
};
};

View File

@@ -33,4 +33,4 @@ export async function poll<T>(
}
throw new Error(`Polling timed out for ${description} after ${timeout}ms.`);
}
}

View File

@@ -28,4 +28,4 @@ export const AiFlyerDataSchema = z.object({
valid_to: z.string().nullable(),
store_address: z.string().nullable(),
items: z.array(ExtractedFlyerItemSchema),
});
});

View File

@@ -105,4 +105,4 @@ declare module 'exif-parser' {
}
export default ExifParser;
}
}

View File

@@ -105,7 +105,10 @@ declare module 'pdf-poppler' {
export class Poppler {
constructor(binPath?: string);
pdfToCairo(file: string, outputFilePrefix?: string, options?: PopplerOptions): Promise<string>;
pdfInfo(file: string, options?: { ownerPassword?: string; userPassword?: string }): Promise<PdfInfo>;
pdfInfo(
file: string,
options?: { ownerPassword?: string; userPassword?: string },
): Promise<PdfInfo>;
pdfToPs(file: string, outputFile: string, options?: any): Promise<string>;
pdfToText(file: string, outputFile: string, options?: any): Promise<string>;
}

View File

@@ -64,9 +64,7 @@ describe('validatePasswordStrength', () => {
it('should return invalid for a medium password (score 2)', () => {
// Arrange: Mock zxcvbn to return a score of 2
vi.mocked(zxcvbn).mockReturnValue(
createMockZxcvbnResult(2, ['Add another symbol or number']),
);
vi.mocked(zxcvbn).mockReturnValue(createMockZxcvbnResult(2, ['Add another symbol or number']));
// Act
const result = validatePasswordStrength('Password123');
@@ -99,4 +97,4 @@ describe('validatePasswordStrength', () => {
expect(result.isValid).toBe(true);
expect(result.feedback).toBe('');
});
});
});

View File

@@ -17,4 +17,4 @@ export function validatePasswordStrength(password: string): {
return { isValid: false, feedback: `Password is too weak. ${suggestions}` };
}
return { isValid: true, feedback: '' };
}
}

View File

@@ -175,9 +175,7 @@ describe('dateUtils', () => {
it('should handle dates with time components correctly', () => {
// parseISO should handle the time component and formatShortDate should strip it
expect(formatDateRange('2023-01-01T10:00:00', '2023-01-05T15:30:00')).toBe(
'Jan 1 - Jan 5',
);
expect(formatDateRange('2023-01-01T10:00:00', '2023-01-05T15:30:00')).toBe('Jan 1 - Jan 5');
});
describe('verbose mode', () => {

View File

@@ -11,7 +11,10 @@ import { parseISO, format, isValid, differenceInDays } from 'date-fns';
* @param date The date to calculate the simple week for. Defaults to the current date.
* @returns An object containing the year and week number.
*/
export function calculateSimpleWeekAndYear(date: Date = new Date()): { year: number; week: number } {
export function calculateSimpleWeekAndYear(date: Date = new Date()): {
year: number;
week: number;
} {
const year = date.getFullYear();
// Use UTC dates to calculate the difference in days.
// This avoids issues with Daylight Saving Time (DST) where a day might have 23 or 25 hours,

View File

@@ -94,4 +94,4 @@ describe('fileUtils', () => {
expect(mockedFs.unlink).not.toHaveBeenCalled();
});
});
});
});

View File

@@ -23,4 +23,4 @@ export const cleanupUploadedFiles = async (files?: Express.Multer.File[]) => {
if (!files || !Array.isArray(files)) return;
// Use Promise.all to run cleanups in parallel for efficiency.
await Promise.all(files.map((file) => cleanupUploadedFile(file)));
};
};

View File

@@ -30,4 +30,4 @@ describe('formatCurrency', () => {
it('should handle negative cents correctly', () => {
expect(formatCurrency(-500)).toBe('-$5.00');
});
});
});

View File

@@ -10,5 +10,7 @@
export const formatCurrency = (amountInCents: number | null | undefined): string => {
if (amountInCents === null || amountInCents === undefined) return 'N/A';
return new Intl.NumberFormat('en-US', { style: 'currency', currency: 'USD' }).format(amountInCents / 100);
};
return new Intl.NumberFormat('en-US', { style: 'currency', currency: 'USD' }).format(
amountInCents / 100,
);
};

View File

@@ -102,7 +102,11 @@ describe('generateFlyerIcon', () => {
generateFlyerIcon('/path/to/bad-image.jpg', '/path/to/icons', logger), // This was a duplicate, fixed.
).rejects.toThrow('Failed to generate icon for /path/to/bad-image.jpg.');
expect(logger.error).toHaveBeenCalledWith(
{ err: sharpError, sourcePath: '/path/to/bad-image.jpg', outputPath: '/path/to/icons/icon-bad-image.webp' },
{
err: sharpError,
sourcePath: '/path/to/bad-image.jpg',
outputPath: '/path/to/icons/icon-bad-image.webp',
},
'An error occurred during icon generation.',
);
});
@@ -132,7 +136,9 @@ describe('processAndSaveImage', () => {
expect(mocks.withMetadata).toHaveBeenCalledWith({});
expect(mocks.jpeg).toHaveBeenCalledWith({ quality: 85, mozjpeg: true });
expect(mocks.png).toHaveBeenCalledWith({ compressionLevel: 8, quality: 85 });
expect(mocks.toFile).toHaveBeenCalledWith(expect.stringContaining(path.join(destinationDir, 'original-')));
expect(mocks.toFile).toHaveBeenCalledWith(
expect.stringContaining(path.join(destinationDir, 'original-')),
);
// Check the returned filename format (original-timestamp.jpg)
expect(result).toMatch(/^original-\d+\.jpg$/);
@@ -142,9 +148,9 @@ describe('processAndSaveImage', () => {
const sharpError = new Error('Processing failed');
mocks.toFile.mockRejectedValueOnce(sharpError);
await expect(
processAndSaveImage('/path/img.jpg', '/dest', 'img.jpg', logger),
).rejects.toThrow('Failed to process image img.jpg.');
await expect(processAndSaveImage('/path/img.jpg', '/dest', 'img.jpg', logger)).rejects.toThrow(
'Failed to process image img.jpg.',
);
expect(logger.error).toHaveBeenCalledWith(
expect.objectContaining({ err: sharpError, sourcePath: '/path/img.jpg' }),

View File

@@ -31,7 +31,10 @@ export async function processAndSaveImage(
// Ensure the destination directory exists.
await fs.mkdir(destinationDir, { recursive: true });
logger.debug({ sourcePath, outputPath }, 'Starting image processing: stripping metadata and optimizing.');
logger.debug(
{ sourcePath, outputPath },
'Starting image processing: stripping metadata and optimizing.',
);
// Use sharp to process the image.
// .withMetadata({}) strips all EXIF and other metadata.
@@ -95,4 +98,4 @@ export async function generateFlyerIcon(
// Re-throw the error to be handled by the calling service.
throw new Error(`Failed to generate icon for ${sourcePath}.`);
}
}
}

View File

@@ -24,7 +24,9 @@ const mockPdfDocument = {
numPages: 3,
// Explicitly type the mock function to accept a number and return the correct promise type.
// This resolves the TypeScript error when using mockImplementation with arguments later.
getPage: vi.fn<(pageNumber: number) => Promise<typeof mockPdfPage>>(() => Promise.resolve(mockPdfPage)),
getPage: vi.fn<(pageNumber: number) => Promise<typeof mockPdfPage>>(() =>
Promise.resolve(mockPdfPage),
),
};
vi.mock('pdfjs-dist', () => ({

View File

@@ -117,4 +117,4 @@ describe('serverUtils', () => {
);
});
});
});
});

View File

@@ -117,7 +117,9 @@ describe('Zod Utilities', () => {
const result = schema.safeParse({ params: { id: -1 } });
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe("Invalid ID for parameter 'id'. Must be a number.");
expect(result.error.issues[0].message).toBe(
"Invalid ID for parameter 'id'. Must be a number.",
);
}
});
@@ -125,7 +127,9 @@ describe('Zod Utilities', () => {
const result = schema.safeParse({ params: { id: 1.5 } });
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe("Invalid ID for parameter 'id'. Must be a number.");
expect(result.error.issues[0].message).toBe(
"Invalid ID for parameter 'id'. Must be a number.",
);
}
});
@@ -133,7 +137,9 @@ describe('Zod Utilities', () => {
const result = schema.safeParse({ params: { id: 0 } });
expect(result.success).toBe(false);
if (!result.success) {
expect(result.error.issues[0].message).toBe("Invalid ID for parameter 'id'. Must be a number.");
expect(result.error.issues[0].message).toBe(
"Invalid ID for parameter 'id'. Must be a number.",
);
}
});
@@ -237,7 +243,10 @@ describe('Zod Utilities', () => {
expect(schema.safeParse('123').success).toBe(true);
const floatResult = schema.safeParse('123.45');
expect(floatResult.success).toBe(false);
if (!floatResult.success) expect(floatResult.error.issues[0].message).toBe('Invalid input: expected int, received number');
if (!floatResult.success)
expect(floatResult.error.issues[0].message).toBe(
'Invalid input: expected int, received number',
);
});
it('should enforce positive constraint', () => {
@@ -266,7 +275,9 @@ describe('Zod Utilities', () => {
const tooSmallResult = schema.safeParse('9');
expect(tooSmallResult.success).toBe(false);
if (!tooSmallResult.success) {
expect(tooSmallResult.error.issues[0].message).toBe('Too small: expected number to be >=10');
expect(tooSmallResult.error.issues[0].message).toBe(
'Too small: expected number to be >=10',
);
}
const tooLargeResult = schema.safeParse('21');
expect(tooLargeResult.success).toBe(false);

View File

@@ -39,6 +39,18 @@ const shouldUploadSourceMaps =
process.env.VITE_SENTRY_DSN &&
process.env.SENTRY_AUTH_TOKEN;
/**
* Determines the Sentry project name based on environment.
* Test/staging deployments use 'flyer-crawler-frontend-test'.
* Production uses 'flyer-crawler-frontend'.
*/
const getSentryProject = () => {
const environment = process.env.VITE_SENTRY_ENVIRONMENT || process.env.NODE_ENV;
return environment === 'test' || environment === 'staging'
? 'flyer-crawler-frontend-test'
: 'flyer-crawler-frontend';
};
/**
* This is the main configuration file for Vite and the Vitest 'unit' test project.
* When running `vitest`, it is orchestrated by `vitest.workspace.ts`, which
@@ -61,9 +73,10 @@ export default defineConfig({
// URL of the Bugsink instance (Sentry-compatible)
url: process.env.SENTRY_URL,
// Org and project are required by the API but Bugsink ignores them
// Org and project names for Bugsink
// Project name changes based on environment (test vs production)
org: 'flyer-crawler',
project: 'flyer-crawler-frontend',
project: getSentryProject(),
// Auth token from environment variable
authToken: process.env.SENTRY_AUTH_TOKEN,