Compare commits

12 Commits

| SHA1       |
| ---------- |
| 6af2533e9e |
| f434a5846a |
| aea368677f |
| cd8ee92813 |
| cf2cc5b832 |
| d2db3562bb |
| 0532b4b22e |
| e767ccbb21 |
| 1ff813f495 |
| 204fe4394a |
| 029b621632 |
| 0656ab3ae7 |

@@ -119,13 +119,82 @@ jobs:

      - name: Deploy Application to Production Server
        run: |
          echo "Deploying application files to /var/www/flyer-crawler.projectium.com..."
          echo "========================================="
          echo "DEPLOYING TO PRODUCTION SERVER"
          echo "========================================="
          APP_PATH="/var/www/flyer-crawler.projectium.com"

          # ========================================
          # LAYER 1: PRE-FLIGHT SAFETY CHECKS
          # ========================================
          echo ""
          echo "--- Pre-Flight Safety Checks ---"

          # Check 1: Verify we're in a git repository
          if ! git rev-parse --git-dir > /dev/null 2>&1; then
            echo "❌ FATAL: Not in a git repository! Aborting to prevent data loss."
            exit 1
          fi
          echo "✅ Git repository verified"

          # Check 2: Verify critical files exist before deployment
          if [ ! -f "package.json" ] || [ ! -f "server.ts" ]; then
            echo "❌ FATAL: Critical files missing (package.json or server.ts). Aborting."
            exit 1
          fi
          echo "✅ Critical files verified"

          # Check 3: Verify we have actual content to deploy (prevent empty checkout)
          FILE_COUNT=$(find . -type f | wc -l)
          if [ "$FILE_COUNT" -lt 10 ]; then
            echo "❌ FATAL: Suspiciously few files ($FILE_COUNT). Aborting to prevent catastrophic deletion."
            exit 1
          fi
          echo "✅ File count verified: $FILE_COUNT files ready to deploy"

          # ========================================
          # LAYER 2: STOP PM2 BEFORE FILE OPERATIONS
          # ========================================
          echo ""
          echo "--- Stopping PM2 Processes ---"
          pm2 stop flyer-crawler-api flyer-crawler-worker flyer-crawler-analytics-worker || echo "No production processes to stop"
          pm2 list

          # ========================================
          # LAYER 3: SAFE RSYNC WITH COMPREHENSIVE EXCLUDES
          # ========================================
          echo ""
          echo "--- Deploying Application Files ---"
          mkdir -p "$APP_PATH"
          mkdir -p "$APP_PATH/flyer-images/icons" "$APP_PATH/flyer-images/archive"
          rsync -avz --delete --exclude 'node_modules' --exclude '.git' --exclude 'dist' --exclude 'flyer-images' ./ "$APP_PATH/"
          rsync -avz dist/ "$APP_PATH"
          echo "Application deployment complete."

          # Deploy backend with critical file exclusions
          rsync -avz --delete \
            --exclude 'node_modules' \
            --exclude '.git' \
            --exclude 'dist' \
            --exclude 'flyer-images' \
            --exclude 'ecosystem.config.cjs' \
            --exclude 'ecosystem-test.config.cjs' \
            --exclude 'ecosystem.dev.config.cjs' \
            --exclude '.env.*' \
            --exclude 'coverage' \
            --exclude '.coverage' \
            --exclude 'test-results' \
            --exclude 'playwright-report' \
            --exclude 'playwright-report-visual' \
            ./ "$APP_PATH/" 2>&1 | tail -20

          echo "✅ Backend files deployed ($(find "$APP_PATH" -type f | wc -l) files)"

          # Deploy frontend assets
          rsync -avz dist/ "$APP_PATH" 2>&1 | tail -10
          echo "✅ Frontend assets deployed"

          echo ""
          echo "========================================="
          echo "DEPLOYMENT COMPLETE"
          echo "========================================="

      - name: Log Workflow Metadata
        run: |

@@ -503,21 +503,121 @@ jobs:

      - name: Deploy Application to Test Server
        run: |
          set -x # Enable command tracing for debugging
          echo "========================================="
          echo "DEPLOYING TO TEST SERVER"
          echo "========================================="
          echo "Timestamp: $(date)"
          echo "Deploying application files to /var/www/flyer-crawler-test.projectium.com..."
          APP_PATH="/var/www/flyer-crawler-test.projectium.com"

          # ======================================================================
          # LAYER 1: PRE-FLIGHT SAFETY CHECKS
          # ======================================================================
          # These checks prevent catastrophic deployments (e.g., empty rsync source)
          # that could wipe out the entire application directory.
          echo ""
          echo "--- LAYER 1: Pre-Flight Safety Checks ---"

          # Check 1: Verify we're in a git repository
          if ! git rev-parse --git-dir > /dev/null 2>&1; then
            echo "FATAL: Not in a git repository. Aborting deployment."
            exit 1
          fi
          echo "✅ Git repository verified"

          # Check 2: Verify critical files exist before syncing
          CRITICAL_FILES=("package.json" "server.ts" "ecosystem.config.cjs" "ecosystem-test.config.cjs")
          for file in "${CRITICAL_FILES[@]}"; do
            if [ ! -f "$file" ]; then
              echo "FATAL: Critical file '$file' not found. Aborting deployment."
              exit 1
            fi
          done
          echo "✅ Critical files verified"

          # Check 3: Verify minimum file count (prevent empty directory sync)
          FILE_COUNT=$(find . -type f | wc -l)
          if [ "$FILE_COUNT" -lt 50 ]; then
            echo "FATAL: Suspiciously low file count ($FILE_COUNT). Expected >50 files. Aborting deployment."
            exit 1
          fi
          echo "✅ File count check passed ($FILE_COUNT files)"

          # ======================================================================
          # LAYER 2: STOP PM2 BEFORE FILE OPERATIONS
          # ======================================================================
          # Prevents ENOENT/uv_cwd errors by stopping processes before rsync --delete
          echo ""
          echo "--- LAYER 2: Stopping test PM2 processes ---"
          echo "Current PM2 state:"
          pm2 list || echo "PM2 list failed"

          echo "Stopping flyer-crawler test processes..."
          pm2 stop flyer-crawler-api-test flyer-crawler-worker-test flyer-crawler-analytics-worker-test 2>&1 || echo "No test processes to stop (exit code: $?)"

          echo "PM2 state after stop:"
          pm2 list || echo "PM2 list failed"

          # ======================================================================
          # LAYER 3: SAFE RSYNC WITH COMPREHENSIVE EXCLUDES
          # ======================================================================
          # Protects critical runtime files from deletion
          echo ""
          echo "--- LAYER 3: Safe rsync deployment ---"

          # Ensure the destination directory exists
          mkdir -p "$APP_PATH"
          mkdir -p "$APP_PATH/flyer-images/icons" "$APP_PATH/flyer-images/archive" # Ensure all required subdirectories exist
          mkdir -p "$APP_PATH/flyer-images/icons" "$APP_PATH/flyer-images/archive"
          echo "Directories created/verified"

          # 1. Copy the backend source code and project files first.
          # CRITICAL: We exclude 'node_modules', '.git', and 'dist'.
          rsync -avz --delete --exclude 'node_modules' --exclude '.git' --exclude 'dist' --exclude 'flyer-images' ./ "$APP_PATH/"
          echo ""
          echo "--- Step 3: Deploying backend files ---"
          # CRITICAL: Comprehensive excludes prevent deletion of:
          # - PM2 configuration files (ecosystem*.config.cjs)
          # - Environment files (.env.*)
          # - Test artifacts (coverage, .vitest-results)
          # - Development files (.vscode, .idea)
          # - Generated files (dist, node_modules)
          rsync -avz --delete \
            --exclude 'node_modules' \
            --exclude '.git' \
            --exclude '.gitea' \
            --exclude 'dist' \
            --exclude 'flyer-images' \
            --exclude 'ecosystem.config.cjs' \
            --exclude 'ecosystem-test.config.cjs' \
            --exclude 'ecosystem.dev.config.cjs' \
            --exclude '.env' \
            --exclude '.env.local' \
            --exclude '.env.test' \
            --exclude '.env.production' \
            --exclude '.env.*.local' \
            --exclude 'coverage' \
            --exclude '.coverage' \
            --exclude '.nyc_output' \
            --exclude '.vitest-results' \
            --exclude 'test-results' \
            --exclude '.vscode' \
            --exclude '.idea' \
            --exclude '*.log' \
            --exclude '.DS_Store' \
            --exclude 'Thumbs.db' \
            ./ "$APP_PATH/" 2>&1 | tail -20
          echo "Backend files deployed"

          echo ""
          echo "--- Step 4: Deploying frontend assets ---"
          # 2. Copy the built frontend assets into the same directory.
          # This will correctly place index.html and the assets/ folder in the webroot.
          rsync -avz dist/ "$APP_PATH"
          echo "Application deployment complete."
          rsync -avz dist/ "$APP_PATH" 2>&1 | tail -10
          echo "Frontend assets deployed"

          echo ""
          echo "========================================="
          echo "APPLICATION DEPLOYMENT COMPLETE"
          echo "========================================="
          set +x # Disable command tracing

      - name: Deploy Coverage Report to Public URL
        if: always()

.gitea/workflows/pm2-diagnostics.yml (new file, 188 lines)
@@ -0,0 +1,188 @@
# .gitea/workflows/pm2-diagnostics.yml
#
# Comprehensive PM2 diagnostics to identify crash causes and problematic projects
name: PM2 Diagnostics

on:
  workflow_dispatch:
    inputs:
      capture_interval:
        description: 'Seconds between PM2 state captures (default: 5)'
        required: false
        default: '5'
      duration:
        description: 'Total monitoring duration in seconds (default: 60)'
        required: false
        default: '60'

jobs:
  pm2-diagnostics:
    runs-on: projectium.com

    steps:
      - name: PM2 Current State Snapshot
        run: |
          echo "========================================="
          echo "PM2 CURRENT STATE SNAPSHOT"
          echo "========================================="
          echo ""
          echo "--- PM2 List (Human Readable) ---"
          pm2 list
          echo ""
          echo "--- PM2 List (JSON) ---"
          pm2 jlist > /tmp/pm2-state-initial.json
          cat /tmp/pm2-state-initial.json | jq '.'
          echo ""
          echo "--- PM2 Daemon Info ---"
          pm2 info pm2-logrotate || echo "pm2-logrotate not found"
          echo ""
          echo "--- PM2 Version ---"
          pm2 --version
          echo ""
          echo "--- Node Version ---"
          node --version

      - name: PM2 Process Working Directories
        run: |
          echo "========================================="
          echo "PROCESS WORKING DIRECTORIES"
          echo "========================================="
          pm2 jlist | jq -r '.[] | "Process: \(.name) | CWD: \(.pm2_env.pm_cwd) | Exists: \(if .pm2_env.pm_cwd then "checking..." else "N/A" end)"'
          echo ""
          echo "--- Checking if CWDs still exist ---"
          pm2 jlist | jq -r '.[].pm2_env.pm_cwd' | while read cwd; do
            if [ -d "$cwd" ]; then
              echo "✅ EXISTS: $cwd"
            else
              echo "❌ MISSING: $cwd (THIS WILL CAUSE CRASHES!)"
            fi
          done

      - name: PM2 Log Analysis
        run: |
          echo "========================================="
          echo "PM2 LOG ANALYSIS"
          echo "========================================="
          echo ""
          echo "--- PM2 Daemon Log (Last 100 Lines) ---"
          tail -100 /home/gitea-runner/.pm2/pm2.log
          echo ""
          echo "--- Searching for ENOENT errors ---"
          grep -i "ENOENT\|no such file or directory\|uv_cwd" /home/gitea-runner/.pm2/pm2.log || echo "No ENOENT errors found"
          echo ""
          echo "--- Searching for crash patterns ---"
          grep -i "crash\|error\|exception" /home/gitea-runner/.pm2/pm2.log | tail -50 || echo "No crashes found"

      - name: Identify All PM2-Managed Projects
        run: |
          echo "========================================="
          echo "ALL PM2-MANAGED PROJECTS"
          echo "========================================="
          pm2 jlist | jq -r '.[] | "[\(.pm_id)] \(.name) - v\(.pm2_env.version // "N/A") - \(.pm2_env.status) - CWD: \(.pm2_env.pm_cwd)"'
          echo ""
          echo "--- Projects by CWD ---"
          pm2 jlist | jq -r '.[].pm2_env.pm_cwd' | sort -u
          echo ""
          echo "--- Checking which projects might interfere ---"
          for dir in /var/www/*; do
            if [ -d "$dir" ]; then
              echo ""
              echo "Directory: $dir"
              ls -la "$dir" | grep -E "ecosystem|package.json|node_modules" || echo "  No PM2/Node files"
            fi
          done

      - name: Monitor PM2 State Over Time
        run: |
          echo "========================================="
          echo "PM2 STATE MONITORING"
          echo "========================================="
          echo "Monitoring PM2 for ${{ gitea.event.inputs.duration }} seconds..."
          echo "Capturing state every ${{ gitea.event.inputs.capture_interval }} seconds"
          echo ""

          INTERVAL=${{ gitea.event.inputs.capture_interval }}
          DURATION=${{ gitea.event.inputs.duration }}
          COUNT=$((DURATION / INTERVAL))

          for i in $(seq 1 $COUNT); do
            echo "--- Capture $i at $(date) ---"
            pm2 jlist | jq -r '.[] | "\(.name): \(.pm2_env.status) (restarts: \(.pm2_env.restart_time))"'

            # Check for new crashes
            CRASHED=$(pm2 jlist | jq '[.[] | select(.pm2_env.status == "errored" or .pm2_env.status == "stopped")] | length')
            if [ "$CRASHED" -gt 0 ]; then
              echo "⚠️ WARNING: $CRASHED process(es) in crashed state!"
              pm2 jlist | jq -r '.[] | select(.pm2_env.status == "errored" or .pm2_env.status == "stopped") | " - \(.name): \(.pm2_env.status)"'
            fi

            sleep $INTERVAL
          done

      - name: PM2 Dump File Analysis
        run: |
          echo "========================================="
          echo "PM2 DUMP FILE ANALYSIS"
          echo "========================================="
          echo "--- Dump file location ---"
          ls -lh /home/gitea-runner/.pm2/dump.pm2
          echo ""
          echo "--- Dump file contents ---"
          cat /home/gitea-runner/.pm2/dump.pm2 | jq '.'
          echo ""
          echo "--- Processes in dump ---"
          cat /home/gitea-runner/.pm2/dump.pm2 | jq -r '.apps[] | "\(.name) at \(.pm_cwd)"'

      - name: Check for Rogue Deployment Scripts
        run: |
          echo "========================================="
          echo "DEPLOYMENT SCRIPT ANALYSIS"
          echo "========================================="
          echo "Checking for scripts that might delete directories..."
          echo ""
          for project in flyer-crawler stock-alert; do
            for env in "" "-test"; do
              DIR="/var/www/$project$env.projectium.com"
              if [ -d "$DIR" ]; then
                echo "--- Project: $project$env ---"
                echo "Location: $DIR"
                if [ -f "$DIR/.gitea/workflows/deploy-to-test.yml" ]; then
                  echo "Has deploy-to-test workflow"
                  grep -n "rsync.*--delete\|rm -rf" "$DIR/.gitea/workflows/deploy-to-test.yml" | head -5 || echo "No dangerous commands found"
                fi
                if [ -f "$DIR/.gitea/workflows/deploy-to-prod.yml" ]; then
                  echo "Has deploy-to-prod workflow"
                  grep -n "rsync.*--delete\|rm -rf" "$DIR/.gitea/workflows/deploy-to-prod.yml" | head -5 || echo "No dangerous commands found"
                fi
                echo ""
              fi
            done
          done

      - name: Generate Diagnostic Report
        run: |
          echo "========================================="
          echo "DIAGNOSTIC SUMMARY"
          echo "========================================="
          echo ""
          echo "Total PM2 processes: $(pm2 jlist | jq 'length')"
          echo "Online: $(pm2 jlist | jq '[.[] | select(.pm2_env.status == "online")] | length')"
          echo "Stopped: $(pm2 jlist | jq '[.[] | select(.pm2_env.status == "stopped")] | length')"
          echo "Errored: $(pm2 jlist | jq '[.[] | select(.pm2_env.status == "errored")] | length')"
          echo ""
          echo "Flyer-crawler processes:"
          pm2 jlist | jq -r '.[] | select(.name | contains("flyer-crawler")) | " \(.name): \(.pm2_env.status)"'
          echo ""
          echo "Stock-alert processes:"
          pm2 jlist | jq -r '.[] | select(.name | contains("stock-alert")) | " \(.name): \(.pm2_env.status)"'
          echo ""
          echo "Other processes:"
          pm2 jlist | jq -r '.[] | select(.name | contains("flyer-crawler") | not) | select(.name | contains("stock-alert") | not) | " \(.name): \(.pm2_env.status)"'
          echo ""
          echo "========================================="
          echo "RECOMMENDATIONS"
          echo "========================================="
          echo "1. Check for missing CWDs (marked with ❌ above)"
          echo "2. Review PM2 daemon log for ENOENT errors"
          echo "3. Verify no deployments are running rsync --delete while processes are online"
          echo "4. Consider separating PM2 daemons by user or using PM2 namespaces"

.gitea/workflows/restart-pm2.yml (new file, 86 lines)
@@ -0,0 +1,86 @@
# .gitea/workflows/restart-pm2.yml
#
# Manual workflow to restart PM2 processes and verify their status.
# Useful for recovering from PM2 daemon crashes or process issues.
name: Restart PM2 Processes

on:
  workflow_dispatch:
    inputs:
      environment:
        description: 'Environment to restart (test, production, or both)'
        required: true
        default: 'test'
        type: choice
        options:
          - test
          - production
          - both

jobs:
  restart-pm2:
    runs-on: projectium.com

    steps:
      - name: Validate Environment Input
        run: |
          echo "Restarting PM2 processes for environment: ${{ gitea.event.inputs.environment }}"

      - name: Restart Test Environment
        if: gitea.event.inputs.environment == 'test' || gitea.event.inputs.environment == 'both'
        run: |
          echo "=== RESTARTING TEST ENVIRONMENT ==="
          cd /var/www/flyer-crawler-test.projectium.com

          echo "--- Current PM2 State (Before Restart) ---"
          pm2 list

          echo "--- Restarting Test Processes ---"
          pm2 restart flyer-crawler-api-test flyer-crawler-worker-test flyer-crawler-analytics-worker-test || {
            echo "Restart failed, attempting to start processes..."
            pm2 start ecosystem-test.config.cjs
          }

          echo "--- Saving PM2 Process List ---"
          pm2 save

          echo "--- Waiting 3 seconds for processes to stabilize ---"
          sleep 3

          echo "=== TEST ENVIRONMENT STATUS ==="
          pm2 ps

      - name: Restart Production Environment
        if: gitea.event.inputs.environment == 'production' || gitea.event.inputs.environment == 'both'
        run: |
          echo "=== RESTARTING PRODUCTION ENVIRONMENT ==="
          cd /var/www/flyer-crawler.projectium.com

          echo "--- Current PM2 State (Before Restart) ---"
          pm2 list

          echo "--- Restarting Production Processes ---"
          pm2 restart flyer-crawler-api flyer-crawler-worker flyer-crawler-analytics-worker || {
            echo "Restart failed, attempting to start processes..."
            pm2 start ecosystem.config.cjs
          }

          echo "--- Saving PM2 Process List ---"
          pm2 save

          echo "--- Waiting 3 seconds for processes to stabilize ---"
          sleep 3

          echo "=== PRODUCTION ENVIRONMENT STATUS ==="
          pm2 ps

      - name: Final PM2 Status (All Processes)
        run: |
          echo "========================================="
          echo "FINAL PM2 STATUS - ALL PROCESSES"
          echo "========================================="
          pm2 ps

          echo ""
          echo "--- PM2 Logs (Last 20 Lines) ---"
          pm2 logs --lines 20 --nostream || echo "No logs available"

@@ -139,3 +139,5 @@ See [INSTALL.md](INSTALL.md) for the complete list.

## License

[Add license information here]

annoyed

@@ -56,7 +56,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -90,7 +90,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -114,7 +114,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -138,7 +138,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -161,7 +161,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -189,7 +189,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -211,7 +211,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -234,7 +234,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -259,7 +259,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -284,7 +284,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -307,7 +307,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -330,7 +330,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -355,7 +355,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -379,7 +379,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -425,7 +425,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -448,7 +448,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -476,7 +476,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -502,7 +502,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -529,7 +529,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -555,7 +555,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -579,7 +579,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -612,7 +612,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -637,7 +637,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -656,7 +656,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -681,7 +681,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -705,7 +705,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -757,7 +757,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Measurements**: **********************\_\_\_**********************
**Measurements**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -765,7 +765,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

### Test 8.1: Chrome/Edge

**Browser Version**: ******\_\_\_******
**Browser Version**: **\*\***\_\_\_**\*\***

**Tests to Run**:

@@ -775,13 +775,13 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

### Test 8.2: Firefox

**Browser Version**: ******\_\_\_******
**Browser Version**: **\*\***\_\_\_**\*\***

**Tests to Run**:

@@ -791,13 +791,13 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

### Test 8.3: Safari (macOS/iOS)

**Browser Version**: ******\_\_\_******
**Browser Version**: **\*\***\_\_\_**\*\***

**Tests to Run**:

@@ -807,7 +807,7 @@ podman exec -it flyer-crawler-dev npm run dev:container

**Pass/Fail**: [ ]

**Notes**: **********************\_\_\_**********************
**Notes**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***

---

@@ -849,8 +849,8 @@ podman exec -it flyer-crawler-dev npm run dev:container

## Sign-Off

**Tester Name**: **********************\_\_\_**********************
**Date Completed**: **********************\_\_\_**********************
**Tester Name**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
**Date Completed**: \***\*\*\*\*\***\*\*\***\*\*\*\*\***\_\_\_\***\*\*\*\*\***\*\*\***\*\*\*\*\***
**Overall Status**: [ ] PASS [ ] PASS WITH ISSUES [ ] FAIL

**Ready for Production**: [ ] YES [ ] NO [ ] WITH FIXES

@@ -208,7 +208,7 @@ Press F12 or Ctrl+Shift+I

**Result**: [ ] PASS [ ] FAIL

**Errors found**: ******************\_\_\_******************
**Errors found**: **\*\*\*\***\*\***\*\*\*\***\_\_\_**\*\*\*\***\*\***\*\*\*\***

---

@@ -224,7 +224,7 @@ Check for:

**Result**: [ ] PASS [ ] FAIL

**Issues found**: ******************\_\_\_******************
**Issues found**: **\*\*\*\***\*\***\*\*\*\***\_\_\_**\*\*\*\***\*\***\*\*\*\***

---

@@ -272,4 +272,4 @@ Check for:

2. ***
3. ***

**Sign-off**: ********\_\_\_******** **Date**: ****\_\_\_****
**Sign-off**: **\*\*\*\***\_\_\_**\*\*\*\*** **Date**: \***\*\_\_\_\*\***

@@ -39,15 +39,15 @@ All cache operations are fail-safe - cache failures do not break the application

Different data types use different TTL values based on volatility:

| Data Type         | TTL        | Rationale                              |
| ----------------- | ---------- | -------------------------------------- |
| Brands/Stores     | 1 hour     | Rarely changes, safe to cache longer   |
| Flyer lists       | 5 minutes  | Changes when new flyers are added      |
| Individual flyers | 10 minutes | Stable once created                    |
| Flyer items       | 10 minutes | Stable once created                    |
| Statistics        | 5 minutes  | Can be slightly stale                  |
| Frequent sales    | 15 minutes | Aggregated data, updated periodically  |
| Categories        | 1 hour     | Rarely changes                         |

### Cache Key Strategy

@@ -64,11 +64,11 @@ Cache keys follow a consistent prefix pattern for pattern-based invalidation:

The following repository methods implement server-side caching:

| Method                            | Cache Key Pattern               | TTL        |
| --------------------------------- | ------------------------------- | ---------- |
| `FlyerRepository.getAllBrands()`  | `cache:brands`                  | 1 hour     |
| `FlyerRepository.getFlyers()`     | `cache:flyers:{limit}:{offset}` | 5 minutes  |
| `FlyerRepository.getFlyerItems()` | `cache:flyer-items:{flyerId}`   | 10 minutes |
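
For illustration, a minimal cache-aside sketch of the pattern these methods presumably follow. The `ioredis` client and the `fetchBrandsFromDb` helper are assumptions made for the sketch, not the repository's actual code; the empty `catch` blocks mirror the fail-safe guarantee stated above.

```typescript
import Redis from 'ioredis';

const redis = new Redis(process.env.REDIS_URL ?? 'redis://localhost:6379');

// Hypothetical DB accessor standing in for the real repository query.
async function fetchBrandsFromDb(): Promise<unknown[]> {
  return []; // placeholder
}

// Cache-aside read for cache:brands with the 1-hour TTL from the table above.
export async function getAllBrandsCached(): Promise<unknown[]> {
  const key = 'cache:brands';
  try {
    const hit = await redis.get(key);
    if (hit) return JSON.parse(hit);
  } catch {
    // Fail-safe: a cache read error must not break the request path.
  }
  const brands = await fetchBrandsFromDb();
  try {
    await redis.set(key, JSON.stringify(brands), 'EX', 3600); // 1 hour
  } catch {
    // Likewise, ignore cache write failures.
  }
  return brands;
}
```
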
### Cache Invalidation

@@ -86,14 +86,14 @@ The following repository methods implement server-side caching:

TanStack React Query provides client-side caching with configurable stale times:

| Query Type     | Stale Time |
| -------------- | ---------- |
| Categories     | 1 hour     |
| Master Items   | 10 minutes |
| Flyer Items    | 5 minutes  |
| Flyers         | 2 minutes  |
| Shopping Lists | 1 minute   |
| Activity Log   | 30 seconds |
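
As a sketch of how one of these stale times might be wired up (the fetcher and endpoint are hypothetical, not the app's real API client):

```typescript
import { useQuery } from '@tanstack/react-query';

// Hypothetical fetcher; the real app presumably calls its own API layer here.
async function fetchCategories(): Promise<string[]> {
  const res = await fetch('/api/categories');
  return res.json();
}

// Categories are nearly static, so they get the longest stale time (1 hour),
// matching the table above.
export function useCategoriesQuery() {
  return useQuery({
    queryKey: ['categories'],
    queryFn: fetchCategories,
    staleTime: 1000 * 60 * 60, // 1 hour
  });
}
```
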
### Multi-Layer Cache Architecture

@@ -80,13 +80,13 @@ src/

**Common Utility Patterns**:

| Pattern          | Classes                                                                |
| ---------------- | ---------------------------------------------------------------------- |
| Card container   | `bg-white dark:bg-gray-800 rounded-lg shadow-md p-6`                   |
| Primary button   | `bg-brand-primary hover:bg-brand-dark text-white rounded-lg px-4 py-2` |
| Secondary button | `bg-gray-100 dark:bg-gray-700 text-gray-700 dark:text-gray-200`        |
| Input field      | `border border-gray-300 dark:border-gray-600 rounded-md px-3 py-2`     |
| Focus ring       | `focus:outline-none focus:ring-2 focus:ring-brand-primary`             |
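
A hypothetical component composing the card, primary-button, and focus-ring patterns from the table; the component and prop names are illustrative only:

```tsx
import React from 'react';

// Illustrative composition of the utility patterns above.
export function SampleCard({ onSave }: { onSave: () => void }) {
  return (
    <div className="bg-white dark:bg-gray-800 rounded-lg shadow-md p-6">
      <button
        type="button"
        onClick={onSave}
        className="bg-brand-primary hover:bg-brand-dark text-white rounded-lg px-4 py-2 focus:outline-none focus:ring-2 focus:ring-brand-primary"
      >
        Save
      </button>
    </div>
  );
}
```
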
### Color System

@@ -187,13 +187,13 @@ export const CheckCircleIcon: React.FC<IconProps> = ({ title, ...props }) => (

**Context Providers** (see ADR-005):

| Provider              | Purpose                |
| --------------------- | ---------------------- |
| `AuthProvider`        | Authentication state   |
| `ModalProvider`       | Modal open/close state |
| `FlyersProvider`      | Flyer data             |
| `MasterItemsProvider` | Grocery items          |
| `UserDataProvider`    | User-specific data     |

**Provider Hierarchy** in `AppProviders.tsx`:

@@ -45,15 +45,15 @@ Using **helmet v8.x** configured in `server.ts` as the first middleware after ap

**Security Headers Applied**:

| Header                       | Configuration                      | Purpose                          |
| ---------------------------- | ---------------------------------- | -------------------------------- |
| Content-Security-Policy      | Custom directives                  | Prevents XSS, code injection     |
| Strict-Transport-Security    | 1 year, includeSubDomains, preload | Forces HTTPS connections         |
| X-Content-Type-Options       | nosniff                            | Prevents MIME type sniffing      |
| X-Frame-Options              | DENY                               | Prevents clickjacking            |
| X-XSS-Protection             | 0 (disabled)                       | Deprecated, CSP preferred        |
| Referrer-Policy              | strict-origin-when-cross-origin    | Controls referrer information    |
| Cross-Origin-Resource-Policy | cross-origin                       | Allows external resource loading |
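
A sketch of how helmet could be configured to produce the headers above; the CSP directives shown are placeholders, not the project's actual policy:

```typescript
import express from 'express';
import helmet from 'helmet';

const app = express();

// First middleware: emits the headers listed in the table above.
app.use(
  helmet({
    contentSecurityPolicy: {
      directives: {
        defaultSrc: ["'self'"], // placeholder directives
        imgSrc: ["'self'", 'data:'],
      },
    },
    strictTransportSecurity: { maxAge: 31536000, includeSubDomains: true, preload: true },
    frameguard: { action: 'deny' }, // X-Frame-Options: DENY
    referrerPolicy: { policy: 'strict-origin-when-cross-origin' },
    crossOriginResourcePolicy: { policy: 'cross-origin' },
    // X-Content-Type-Options: nosniff and X-XSS-Protection: 0 are helmet defaults.
  }),
);
```
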
**Content Security Policy Directives**:

@@ -87,35 +87,35 @@ Using **express-rate-limit v8.2.1** with a centralized configuration in `src/con

```typescript
const standardConfig = {
  standardHeaders: true, // Sends RateLimit-* headers
  legacyHeaders: false,
  skip: shouldSkipRateLimit, // Disabled in test environment
};
```

**Rate Limiters by Category**:

| Category                 | Limiter                    | Window | Max Requests |
| ------------------------ | -------------------------- | ------ | ------------ |
| **Authentication**       | loginLimiter               | 15 min | 5            |
|                          | registerLimiter            | 1 hour | 5            |
|                          | forgotPasswordLimiter      | 15 min | 5            |
|                          | resetPasswordLimiter       | 15 min | 10           |
|                          | refreshTokenLimiter        | 15 min | 20           |
|                          | logoutLimiter              | 15 min | 10           |
| **Public/User Read**     | publicReadLimiter          | 15 min | 100          |
|                          | userReadLimiter            | 15 min | 100          |
|                          | userUpdateLimiter          | 15 min | 100          |
| **Sensitive Operations** | userSensitiveUpdateLimiter | 1 hour | 5            |
|                          | adminTriggerLimiter        | 15 min | 30           |
| **AI/Costly**            | aiGenerationLimiter        | 15 min | 20           |
|                          | geocodeLimiter             | 1 hour | 100          |
|                          | priceHistoryLimiter        | 15 min | 50           |
| **Uploads**              | adminUploadLimiter         | 15 min | 20           |
|                          | aiUploadLimiter            | 15 min | 10           |
|                          | batchLimiter               | 15 min | 50           |
| **Tracking**             | trackingLimiter            | 15 min | 200          |
|                          | reactionToggleLimiter      | 15 min | 150          |
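
As a sketch, one limiter from the table built on the shared config; the `shouldSkipRateLimit` stand-in below only approximates the project's helper:

```typescript
import rateLimit from 'express-rate-limit';

// Stand-in for the project's helper (disabled in the test environment).
const shouldSkipRateLimit = () => process.env.NODE_ENV === 'test';

// loginLimiter from the table: 5 requests per 15-minute window.
export const loginLimiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  limit: 5, // called "max" in older express-rate-limit versions
  standardHeaders: true, // Sends RateLimit-* headers
  legacyHeaders: false,
  skip: shouldSkipRateLimit,
});
```
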
**Test Environment Handling**:

@@ -140,7 +140,7 @@ sanitizeFilename(filename: string): string

**Multer Configuration** (`src/middleware/multer.middleware.ts`):

- MIME type validation via `imageFileFilter` (only image/* allowed)
- MIME type validation via `imageFileFilter` (only image/\* allowed)
- File size limits (2MB for logos, configurable per upload type)
- Unique filenames using timestamps + random suffixes
- User-scoped storage paths
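
A sketch of what this configuration plausibly looks like; the real `imageFileFilter` lives in `src/middleware/multer.middleware.ts` and may differ:

```typescript
import multer, { FileFilterCallback } from 'multer';
import type { Request } from 'express';

// Sketch: accept only image/* MIME types, mirroring the bullet above.
const imageFileFilter = (_req: Request, file: Express.Multer.File, cb: FileFilterCallback) => {
  cb(null, file.mimetype.startsWith('image/'));
};

// 2MB limit matches the stated logo upload cap.
export const logoUpload = multer({
  fileFilter: imageFileFilter,
  limits: { fileSize: 2 * 1024 * 1024 },
});
```
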
@@ -203,10 +203,12 @@ Per-request structured logging (ADR-004):

```typescript
import cors from 'cors';
app.use(cors({
  origin: process.env.ALLOWED_ORIGINS?.split(',') || 'http://localhost:3000',
  credentials: true,
}));
app.use(
  cors({
    origin: process.env.ALLOWED_ORIGINS?.split(',') || 'http://localhost:3000',
    credentials: true,
  }),
);
```

2. **Redis-backed rate limiting**: For distributed deployments, use `rate-limit-redis` store

@@ -16,12 +16,12 @@ We will adopt a hybrid naming convention strategy to explicitly distinguish betw

1. **Database and AI Types (`snake_case`)**:
   Interfaces, Type definitions, and Zod schemas that represent raw database rows or direct AI responses **MUST** use `snake_case`.
   - *Examples*: `AiFlyerDataSchema`, `ExtractedFlyerItemSchema`, `FlyerInsert`.
   - *Reasoning*: This avoids unnecessary mapping layers when inserting data into the database or parsing AI output. It serves as a visual cue that the data is "raw", "external", or destined for persistence.
   - _Examples_: `AiFlyerDataSchema`, `ExtractedFlyerItemSchema`, `FlyerInsert`.
   - _Reasoning_: This avoids unnecessary mapping layers when inserting data into the database or parsing AI output. It serves as a visual cue that the data is "raw", "external", or destined for persistence.

2. **Internal Application Logic (`camelCase`)**:
   Variables, function arguments, and processed data structures used within the application logic (Service layer, UI components, utility functions) **MUST** use `camelCase`.
   - *Reasoning*: This adheres to standard JavaScript/TypeScript practices and maintains consistency with the rest of the ecosystem (React, etc.).
   - _Reasoning_: This adheres to standard JavaScript/TypeScript practices and maintains consistency with the rest of the ecosystem (React, etc.).

3. **Boundary Handling**:
   - For background jobs that primarily move data from AI to DB, preserving `snake_case` is preferred to minimize transformation logic (a mapping sketch follows this list).
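
Where data must cross the boundary anyway, the convention implies a small explicit mapper. A sketch with hypothetical field names, not the real schema:

```typescript
// Hypothetical raw row in snake_case, as it would arrive from the DB or AI layer.
interface FlyerRow {
  flyer_id: number;
  store_name: string;
  valid_until: string;
}

// camelCase shape used by the service/UI layer.
interface Flyer {
  flyerId: number;
  storeName: string;
  validUntil: string;
}

// Explicit boundary mapping; done once, at the edge.
function toFlyer(row: FlyerRow): Flyer {
  return {
    flyerId: row.flyer_id,
    storeName: row.store_name,
    validUntil: row.valid_until,
  };
}
```
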
@@ -486,9 +486,9 @@ Attach screenshots for:

## 🔐 Sign-Off

**Tester Name**: ******\*\*\*\*******\_\_\_******\*\*\*\*******
**Tester Name**: **\*\***\*\*\*\***\*\***\_\_\_**\*\***\*\*\*\***\*\***

**Date/Time Completed**: ****\*\*\*\*****\_\_\_****\*\*\*\*****
**Date/Time Completed**: \***\*\*\*\*\*\*\***\_\_\_\***\*\*\*\*\*\*\***

**Total Testing Time**: **\_\_** minutes

docs/operations/PM2-CRASH-DEBUGGING.md (new file, 278 lines)
@@ -0,0 +1,278 @@
# PM2 Crash Debugging Guide

## Overview

This guide helps diagnose PM2 daemon crashes and identify which project is causing the issue.

## Common Symptoms

1. **PM2 processes disappear** between deployments
2. **`ENOENT: no such file or directory, uv_cwd`** errors in PM2 logs
3. **Processes require `pm2 resurrect`** after deployments
4. **PM2 daemon restarts** unexpectedly

## Root Cause

PM2 processes crash when their working directory (CWD) is deleted or modified while they're running (a minimal reproduction follows the list below). This typically happens when:

1. **rsync --delete** removes/recreates directories while processes are active
2. **npm install** modifies node_modules while processes are using them
3. **Deployments** don't stop processes before file operations
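
A minimal reproduction of the failure mode, assuming Linux and a local `node` binary (illustrative only, safe to run in /tmp):

```bash
mkdir -p /tmp/cwd-demo && cd /tmp/cwd-demo
node -e 'setTimeout(() => { try { process.cwd(); } catch (e) { console.error(e.message); } }, 3000)' &
cd / && rm -rf /tmp/cwd-demo   # delete the CWD while the node process is still alive
wait                           # prints "ENOENT: no such file or directory, uv_cwd"
```
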
## Debugging Tools

### 1. PM2 Diagnostics Workflow

Run the comprehensive diagnostics workflow:

```bash
# In Gitea Actions UI:
# 1. Go to Actions → "PM2 Diagnostics"
# 2. Click "Run workflow"
# 3. Choose monitoring duration (default: 60s)
```

This workflow captures:

- Current PM2 state
- Working directory validation
- PM2 daemon logs
- All PM2-managed projects
- Crash patterns
- Deployment script analysis

### 2. PM2 Crash Analysis Script

Run the crash analysis script on the server:

```bash
# SSH to server
ssh gitea-runner@projectium.com

# Run analysis
cd /var/www/flyer-crawler.projectium.com
bash scripts/analyze-pm2-crashes.sh

# Or save to file
bash scripts/analyze-pm2-crashes.sh > pm2-crash-report.txt
```

### 3. Manual PM2 Inspection

Quick manual checks:

```bash
# Current PM2 state
pm2 list

# Detailed JSON state
pm2 jlist | jq '.'

# Check for missing CWDs
pm2 jlist | jq -r '.[] | "\(.name): \(.pm2_env.pm_cwd)"' | while read line; do
  PROC=$(echo "$line" | cut -d: -f1)
  CWD=$(echo "$line" | cut -d: -f2- | xargs)
  [ -d "$CWD" ] && echo "✅ $PROC" || echo "❌ $PROC (CWD missing: $CWD)"
done

# View PM2 daemon log
tail -100 ~/.pm2/pm2.log

# Search for ENOENT errors
grep -i "ENOENT\|uv_cwd" ~/.pm2/pm2.log
```

## Identifying the Problematic Project

### Check Which Projects Share PM2 Daemon

```bash
pm2 list

# Group by project
pm2 jlist | jq -r '.[] | .name' | grep -oE "^[a-z-]+" | sort -u
```

**Projects on projectium.com:**

- `flyer-crawler` (production, test)
- `stock-alert` (production, test)
- Others?

### Check Deployment Timing

1. Review PM2 daemon restart times:

   ```bash
   grep "New PM2 Daemon started" ~/.pm2/pm2.log
   ```

2. Compare with deployment times in Gitea Actions

3. Identify which deployment triggered the crash

### Check Deployment Scripts

For each project, check if deployment stops PM2 before rsync:

```bash
# Flyer-crawler
cat /var/www/flyer-crawler.projectium.com/.gitea/workflows/deploy-to-prod.yml | grep -B5 -A5 "rsync.*--delete"

# Stock-alert
cat /var/www/stock-alert.projectium.com/.gitea/workflows/deploy-to-prod.yml | grep -B5 -A5 "rsync.*--delete"
```

**Look for:**

- ❌ `rsync --delete` **before** `pm2 stop`
- ✅ `pm2 stop` **before** `rsync --delete`

## Common Culprits

### 1. Flyer-Crawler Deployments

**Before Fix:**

```yaml
# ❌ BAD - Deploys files while processes running
- name: Deploy Application
  run: |
    rsync --delete ./ /var/www/...
    pm2 restart ...
```

**After Fix:**

```yaml
# ✅ GOOD - Stops processes first
- name: Deploy Application
  run: |
    pm2 stop flyer-crawler-api flyer-crawler-worker
    rsync --delete ./ /var/www/...
    pm2 startOrReload ...
```

### 2. Stock-Alert Deployments

Check if stock-alert follows the same pattern. If it deploys without stopping PM2, it could crash the shared PM2 daemon.

### 3. Cross-Project Interference

If multiple projects share PM2:

- One project's deployment can crash another project's processes
- The crashed project's processes lose their CWD
- PM2 daemon may restart, clearing all processes

## Solutions

### Immediate Fix (Manual)

```bash
# Restore processes from dump file
pm2 resurrect

# Verify all processes are running
pm2 list
```

### Permanent Fix

1. **Update deployment workflows** to stop PM2 before file operations
2. **Isolate PM2 daemons** by user or namespace (see the sketch after this list)
3. **Monitor deployments** to ensure proper sequencing
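
A sketch of option 2, showing two possible isolation mechanisms; verify the flags and behavior against your installed PM2 version:

```bash
# Option A: fully separate daemons per project via PM2_HOME
PM2_HOME=/home/gitea-runner/.pm2-flyer-crawler pm2 start ecosystem.config.cjs
PM2_HOME=/home/gitea-runner/.pm2-stock-alert pm2 start ecosystem.config.cjs

# Option B: one daemon, logical grouping via namespaces
pm2 start ecosystem.config.cjs --namespace flyer-crawler
pm2 restart flyer-crawler   # targets the namespace rather than a single process
```
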
## Deployment Workflow Template

**Correct sequence:**

```yaml
- name: Deploy Application
  run: |
    # 1. STOP PROCESSES FIRST
    pm2 stop my-api my-worker

    # 2. THEN deploy files
    rsync -avz --delete ./ /var/www/my-app/

    # 3. Install dependencies (safe, no processes running)
    cd /var/www/my-app
    npm install --omit=dev

    # 4. Clean up errored processes
    pm2 delete my-api my-worker || true

    # 5. START processes
    pm2 startOrReload ecosystem.config.cjs
    pm2 save
```

## Monitoring & Prevention

### Enable Verbose Logging

Enhanced deployment logging (already implemented in flyer-crawler):

```yaml
- name: Deploy Application
  run: |
    set -x # Command tracing
    echo "Step 1: Stopping PM2..."
    pm2 stop ...
    pm2 list # Verify stopped

    echo "Step 2: Deploying files..."
    rsync --delete ...

    echo "Step 3: Starting PM2..."
    pm2 start ...
    pm2 list # Verify started
```

### Regular Health Checks

```bash
# Add to cron or monitoring system
*/5 * * * * pm2 jlist | jq -r '.[] | select(.pm2_env.status != "online") | "ALERT: \(.name) is \(.pm2_env.status)"'
```

## Troubleshooting Decision Tree

```
PM2 processes missing?
├─ YES → Run `pm2 resurrect`
│   └─ Check PM2 daemon log for ENOENT errors
│       ├─ ENOENT found → Working directory deleted during deployment
│       │   └─ Fix: Add `pm2 stop` before rsync
│       └─ No ENOENT → Check other error patterns
│
└─ NO → Processes running but unstable?
    └─ Check restart counts: `pm2 jlist | jq '.[].pm2_env.restart_time'`
        └─ High restarts → Application-level issue (not PM2 crash)
```

## Related Documentation

- [PM2 Process Isolation Requirements](../../CLAUDE.md#pm2-process-isolation-productiontest-servers)
- [PM2 Incident Response Runbook](./PM2-INCIDENT-RESPONSE.md)
- [Incident Report 2026-02-17](./INCIDENT-2026-02-17-PM2-PROCESS-KILL.md)

## Quick Reference Commands

```bash
# Diagnose
pm2 list                                # Current state
pm2 jlist | jq '.'                      # Detailed JSON
tail -100 ~/.pm2/pm2.log                # Recent logs
grep ENOENT ~/.pm2/pm2.log              # Find crashes

# Fix
pm2 resurrect                           # Restore from dump
pm2 restart all                         # Restart everything
pm2 save                                # Save current state

# Analyze
bash scripts/analyze-pm2-crashes.sh     # Run analysis script
pm2 jlist | jq -r '.[].pm2_env.pm_cwd'  # Check working dirs
```

@@ -50,7 +50,7 @@ if (fs.existsSync(envPath)) {

} else {
  console.warn('[ecosystem-test.config.cjs] No .env file found at:', envPath);
  console.warn(
    '[ecosystem-test.config.cjs] Environment variables must be provided by the shell or CI/CD.'
    '[ecosystem-test.config.cjs] Environment variables must be provided by the shell or CI/CD.',
  );
}

@@ -60,12 +60,16 @@ if (fs.existsSync(envPath)) {

// The actual application will fail to start if secrets are missing,
// which PM2 will handle with its restart logic.
const requiredSecrets = ['DB_HOST', 'JWT_SECRET', 'GEMINI_API_KEY'];
const missingSecrets = requiredSecrets.filter(key => !process.env[key]);
const missingSecrets = requiredSecrets.filter((key) => !process.env[key]);

if (missingSecrets.length > 0) {
  console.warn('\n[ecosystem.config.test.cjs] WARNING: The following environment variables are MISSING:');
  missingSecrets.forEach(key => console.warn(` - ${key}`));
  console.warn('[ecosystem.config.test.cjs] The application may fail to start if these are required.\n');
  console.warn(
    '\n[ecosystem.config.test.cjs] WARNING: The following environment variables are MISSING:',
  );
  missingSecrets.forEach((key) => console.warn(` - ${key}`));
  console.warn(
    '[ecosystem.config.test.cjs] The application may fail to start if these are required.\n',
  );
} else {
  console.log('[ecosystem.config.test.cjs] Critical environment variables are present.');
}

@@ -16,11 +16,13 @@

// The actual application will fail to start if secrets are missing,
// which PM2 will handle with its restart logic.
const requiredSecrets = ['DB_HOST', 'JWT_SECRET', 'GEMINI_API_KEY'];
const missingSecrets = requiredSecrets.filter(key => !process.env[key]);
const missingSecrets = requiredSecrets.filter((key) => !process.env[key]);

if (missingSecrets.length > 0) {
  console.warn('\n[ecosystem.config.cjs] WARNING: The following environment variables are MISSING:');
  missingSecrets.forEach(key => console.warn(` - ${key}`));
  console.warn(
    '\n[ecosystem.config.cjs] WARNING: The following environment variables are MISSING:',
  );
  missingSecrets.forEach((key) => console.warn(` - ${key}`));
  console.warn('[ecosystem.config.cjs] The application may fail to start if these are required.\n');
} else {
  console.log('[ecosystem.config.cjs] Critical environment variables are present.');

@@ -34,9 +34,7 @@ if (missingVars.length > 0) {

    '\n[ecosystem.dev.config.cjs] WARNING: The following environment variables are MISSING:',
  );
  missingVars.forEach((key) => console.warn(` - ${key}`));
  console.warn(
    '[ecosystem.dev.config.cjs] These should be set in compose.dev.yml or .env.local\n',
  );
  console.warn('[ecosystem.dev.config.cjs] These should be set in compose.dev.yml or .env.local\n');
} else {
  console.log('[ecosystem.dev.config.cjs] Required environment variables are present.');
}

package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
{
  "name": "flyer-crawler",
  "version": "0.15.2",
  "version": "0.16.4",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "flyer-crawler",
      "version": "0.15.2",
      "version": "0.16.4",
      "dependencies": {
        "@bull-board/api": "^6.14.2",
        "@bull-board/express": "^6.14.2",

package.json
@@ -1,7 +1,7 @@
{
  "name": "flyer-crawler",
  "private": true,
  "version": "0.15.2",
  "version": "0.16.4",
  "type": "module",
  "engines": {
    "node": ">=18.0.0"

@@ -7,6 +7,7 @@

## Current State Analysis

### What We Have

1. ✅ **TanStack Query v5.90.12 already installed** in package.json
2. ❌ **Not being used** - Custom hooks reimplementing its functionality
3. ❌ **Custom `useInfiniteQuery` hook** ([src/hooks/useInfiniteQuery.ts](../src/hooks/useInfiniteQuery.ts)) using `useState`/`useEffect`

@@ -16,10 +17,12 @@

### Current Data Fetching Patterns

#### Pattern 1: Custom useInfiniteQuery Hook

**Location**: [src/hooks/useInfiniteQuery.ts](../src/hooks/useInfiniteQuery.ts)
**Used By**: [src/providers/FlyersProvider.tsx](../src/providers/FlyersProvider.tsx)

**Problems**:

- Reimplements pagination logic that TanStack Query provides
- Manual loading state management
- Manual error handling

@@ -28,10 +31,12 @@

- No request deduplication

#### Pattern 2: useApiOnMount Hook

**Location**: Unknown (needs investigation)
**Used By**: [src/providers/UserDataProvider.tsx](../src/providers/UserDataProvider.tsx)

**Problems**:

- Fetches data on mount only
- Manual loading/error state management
- No caching between unmount/remount (a replacement sketch follows this list)
|
||||
@@ -42,6 +47,7 @@
|
||||
### Phase 1: Setup TanStack Query Infrastructure (Day 1)
|
||||
|
||||
#### 1.1 Create QueryClient Configuration
|
||||
|
||||
**File**: `src/config/queryClient.ts`
|
||||
|
||||
```typescript
|
||||
@@ -51,7 +57,7 @@ export const queryClient = new QueryClient({
|
||||
defaultOptions: {
|
||||
queries: {
|
||||
staleTime: 1000 * 60 * 5, // 5 minutes
|
||||
gcTime: 1000 * 60 * 30, // 30 minutes (formerly cacheTime)
|
||||
gcTime: 1000 * 60 * 30, // 30 minutes (formerly cacheTime)
|
||||
retry: 1,
|
||||
refetchOnWindowFocus: false,
|
||||
refetchOnMount: true,
|
||||
@@ -64,9 +70,11 @@ export const queryClient = new QueryClient({
|
||||
```
|
||||
|
||||
#### 1.2 Wrap App with QueryClientProvider
|
||||
|
||||
**File**: `src/providers/AppProviders.tsx`
|
||||
|
||||
Add TanStack Query provider at the top level:
|
||||
|
||||
```typescript
|
||||
import { QueryClientProvider } from '@tanstack/react-query';
|
||||
import { ReactQueryDevtools } from '@tanstack/react-query-devtools';
|
||||
@@ -158,6 +166,7 @@ export const FlyersProvider: React.FC<{ children: ReactNode }> = ({ children })
|
||||
```
|
||||
|
||||
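The wrapping step in §1.2 is elided by the diff above; a minimal sketch of what `AppProviders` ends up looking like, assuming only the imports shown (the exact provider composition in the repo may differ):

```typescript
import React, { ReactNode } from 'react';
import { QueryClientProvider } from '@tanstack/react-query';
import { ReactQueryDevtools } from '@tanstack/react-query-devtools';
import { queryClient } from '../config/queryClient';

// The devtools component is excluded from production builds, so it is safe to include here.
export const AppProviders: React.FC<{ children: ReactNode }> = ({ children }) => (
  <QueryClientProvider client={queryClient}>
    {children}
    <ReactQueryDevtools initialIsOpen={false} />
  </QueryClientProvider>
);
```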
**Benefits**:

- ~100 lines of code removed
- Automatic caching
- Background refetching

@@ -170,6 +179,7 @@ export const FlyersProvider: React.FC<{ children: ReactNode }> = ({ children })

**Action**: Use TanStack Query's `useQuery` for watched items and shopping lists

**New Files**:

- `src/hooks/queries/useWatchedItemsQuery.ts`
- `src/hooks/queries/useShoppingListsQuery.ts`

@@ -208,6 +218,7 @@ export const useShoppingListsQuery = (enabled: boolean) => {
```

**Updated Provider**:

```typescript
import React, { ReactNode, useMemo } from 'react';
import { UserDataContext } from '../contexts/UserDataContext';
@@ -240,6 +251,7 @@ export const UserDataProvider: React.FC<{ children: ReactNode }> = ({ children })
```

**Benefits**:

- ~40 lines of code removed
- No manual state synchronization
- Automatic cache invalidation on user logout

@@ -292,7 +304,7 @@ export const useUpdateShoppingListMutation = () => {

      // Optimistically update
      queryClient.setQueryData(['shopping-lists'], (old) =>
        old.map((list) => (list.id === newList.id ? newList : list))
        old.map((list) => (list.id === newList.id ? newList : list)),
      );

      return { previousLists };
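The diff elides the rollback half of this optimistic update. The usual TanStack Query pattern pairs `onMutate` with an `onError` rollback and an `onSettled` refetch — a sketch of that counterpart, not verbatim repo code:

```typescript
onError: (_err, _newList, context) => {
  // Restore the snapshot captured in onMutate
  if (context?.previousLists) {
    queryClient.setQueryData(['shopping-lists'], context.previousLists);
  }
},
onSettled: () => {
  // Reconcile with the server regardless of success or failure
  queryClient.invalidateQueries({ queryKey: ['shopping-lists'] });
},
```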
@@ -313,20 +325,24 @@ export const useUpdateShoppingListMutation = () => {

### Phase 4: Remove Old Custom Hooks (Day 9)

#### Files to Remove:

- ❌ `src/hooks/useInfiniteQuery.ts` (if not used elsewhere)
- ❌ `src/hooks/useApiOnMount.ts` (needs investigation)

#### Files to Update:

- Update any remaining usages in other components

### Phase 5: Testing & Documentation (Day 10)

#### 5.1 Update Tests

- Update provider tests to work with QueryClient
- Add tests for new query hooks
- Add tests for mutation hooks

#### 5.2 Update Documentation

- Mark ADR-0005 as **Accepted** and **Implemented**
- Add usage examples to documentation
- Update developer onboarding guide

@@ -334,11 +350,13 @@ export const useUpdateShoppingListMutation = () => {

## Migration Checklist

### Prerequisites

- [x] TanStack Query installed
- [ ] QueryClient configuration created
- [ ] App wrapped with QueryClientProvider

### Queries

- [ ] Flyers infinite query migrated
- [ ] Watched items query migrated
- [ ] Shopping lists query migrated

@@ -346,6 +364,7 @@ export const useUpdateShoppingListMutation = () => {

- [ ] Active deals query migrated (if applicable)

### Mutations

- [ ] Add watched item mutation
- [ ] Remove watched item mutation
- [ ] Update shopping list mutation

@@ -353,12 +372,14 @@ export const useUpdateShoppingListMutation = () => {

- [ ] Remove shopping list item mutation

### Cleanup

- [ ] Remove custom useInfiniteQuery hook
- [ ] Remove custom useApiOnMount hook
- [ ] Update all tests
- [ ] Remove redundant state management code

### Documentation

- [ ] Update ADR-0005 status to "Accepted"
- [ ] Add usage guidelines to README
- [ ] Document query key conventions

@@ -367,10 +388,12 @@ export const useUpdateShoppingListMutation = () => {

## Benefits Summary

### Code Reduction

- **Estimated**: ~300-500 lines of custom hook code removed
- **Result**: Simpler, more maintainable codebase

### Performance Improvements

- ✅ Automatic request deduplication
- ✅ Background data synchronization
- ✅ Smart cache invalidation

@@ -378,12 +401,14 @@ export const useUpdateShoppingListMutation = () => {

- ✅ Automatic retry logic

### Developer Experience

- ✅ React Query Devtools for debugging
- ✅ Type-safe query hooks
- ✅ Standardized patterns across the app
- ✅ Less boilerplate code

### User Experience

- ✅ Faster perceived performance (cached data)
- ✅ Better offline experience
- ✅ Smoother UI interactions (optimistic updates)

@@ -392,11 +417,13 @@ export const useUpdateShoppingListMutation = () => {

## Risk Assessment

### Low Risk

- TanStack Query is industry-standard
- Already installed in project
- Incremental migration possible

### Mitigation Strategies

1. **Test thoroughly** - Maintain existing test coverage
2. **Migrate incrementally** - One provider at a time
3. **Monitor performance** - Use React Query Devtools

@@ -45,6 +45,7 @@ Successfully completed Phase 2 of ADR-0005 enforcement by migrating all remainin

## Code Reduction Summary

### Phase 1 + Phase 2 Combined

- **Total custom state management code removed**: ~200 lines
- **New query hooks created**: 5 files (~200 lines of standardized code)
- **Providers simplified**: 4 files

@@ -53,34 +54,38 @@ Successfully completed Phase 2 of ADR-0005 enforcement by migrating all remainin

## Technical Improvements

### 1. Intelligent Caching Strategy

```typescript
// Master items (rarely change) - 10 min stale time
useMasterItemsQuery() // staleTime: 10 minutes
useMasterItemsQuery(); // staleTime: 10 minutes

// Flyers (moderate changes) - 2 min stale time
useFlyersQuery() // staleTime: 2 minutes
useFlyersQuery(); // staleTime: 2 minutes

// User data (frequent changes) - 1 min stale time
useWatchedItemsQuery() // staleTime: 1 minute
useShoppingListsQuery() // staleTime: 1 minute
useWatchedItemsQuery(); // staleTime: 1 minute
useShoppingListsQuery(); // staleTime: 1 minute

// Flyer items (static) - 5 min stale time
useFlyerItemsQuery() // staleTime: 5 minutes
useFlyerItemsQuery(); // staleTime: 5 minutes
```

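As a sketch of how one of these hooks pins its stale time (the `apiClient.getMasterItems` call and module path are assumptions, not confirmed repo code):

```typescript
import { useQuery } from '@tanstack/react-query';
import { apiClient } from '../services/apiClient'; // assumed path

// Master items change rarely, so a long stale time avoids needless refetches.
export const useMasterItemsQuery = () =>
  useQuery({
    queryKey: ['master-items'],
    queryFn: () => apiClient.getMasterItems(), // hypothetical client method
    staleTime: 1000 * 60 * 10, // 10 minutes
  });
```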
### 2. Per-Resource Caching

Each flyer's items are cached separately:

```typescript
// Flyer 1 items cached with key: ['flyer-items', 1]
useFlyerItemsQuery(1)
useFlyerItemsQuery(1);

// Flyer 2 items cached with key: ['flyer-items', 2]
useFlyerItemsQuery(2)
useFlyerItemsQuery(2);

// Both caches persist independently
```

### 3. Automatic Query Disabling

```typescript
// Query automatically disabled when flyerId is undefined
const { data } = useFlyerItemsQuery(selectedFlyer?.flyer_id);
```
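Inside the hook, this auto-disable behavior is normally just the `enabled` option; a sketch under the plan's conventions (the `apiClient.getFlyerItems` call is an assumption, not confirmed repo code):

```typescript
export const useFlyerItemsQuery = (flyerId?: number) =>
  useQuery({
    queryKey: ['flyer-items', flyerId],
    // flyerId! is safe here because `enabled` prevents the fetch when it is undefined
    queryFn: () => apiClient.getFlyerItems(flyerId!), // hypothetical client method
    staleTime: 1000 * 60 * 5, // matches the 5-minute strategy above
    enabled: flyerId !== undefined,
  });
```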
@@ -90,24 +95,28 @@ const { data } = useFlyerItemsQuery(selectedFlyer?.flyer_id);

## Benefits Achieved

### Performance

- ✅ **Reduced API calls** - Data cached between component unmounts
- ✅ **Background refetching** - Stale data updates in background
- ✅ **Request deduplication** - Multiple components can use same query
- ✅ **Optimized cache times** - Different strategies for different data types

### Code Quality

- ✅ **Removed ~50 more lines** of custom state management
- ✅ **Eliminated useApiOnMount** from all providers
- ✅ **Standardized patterns** - All queries follow same structure
- ✅ **Better type safety** - TypeScript types flow through queries

### Developer Experience

- ✅ **React Query Devtools** - Inspect all queries and cache
- ✅ **Easier debugging** - Clear query states and transitions
- ✅ **Less boilerplate** - No manual loading/error state management
- ✅ **Automatic retries** - Failed queries retry automatically

### User Experience

- ✅ **Faster perceived performance** - Cached data shows instantly
- ✅ **Fresh data** - Background refetching keeps data current
- ✅ **Better offline handling** - Cached data available offline

@@ -116,12 +125,14 @@ const { data } = useFlyerItemsQuery(selectedFlyer?.flyer_id);

## Remaining Work

### Phase 3: Mutations (Next)

- [ ] Create mutation hooks for data modifications
- [ ] Add/remove watched items with optimistic updates
- [ ] Shopping list CRUD operations
- [ ] Proper cache invalidation strategies

### Phase 4: Cleanup (Final)

- [ ] Remove `useApiOnMount` hook entirely
- [ ] Remove `useApi` hook if no longer used
- [ ] Remove stub implementations in providers

@@ -159,10 +170,13 @@ Before merging, test the following:

## Migration Notes

### Breaking Changes

None! All providers maintain the same interface.

### Deprecation Warnings

The following will log warnings if used:

- `setWatchedItems()` in UserDataProvider
- `setShoppingLists()` in UserDataProvider


@@ -12,6 +12,7 @@ Successfully completed Phase 3 of ADR-0005 enforcement by creating all mutation

### Mutation Hooks

All mutation hooks follow a consistent pattern (see the sketch after this list):

- Automatic cache invalidation via `queryClient.invalidateQueries()`
- Success/error notifications via notification service
- Proper TypeScript types for parameters
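A representative sketch of that pattern — the endpoint and the notification service API are assumptions based on the descriptions above, not verbatim repo code:

```typescript
import { useMutation, useQueryClient } from '@tanstack/react-query';
import { apiClient } from '../services/apiClient'; // assumed path
import { notify } from '../services/notifications'; // hypothetical notification service

export const useAddWatchedItemMutation = () => {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: (vars: { itemName: string; category: string }) =>
      apiClient.addWatchedItem(vars.itemName, vars.category),
    onSuccess: () => {
      // Invalidate so the watched-items query refetches fresh data
      queryClient.invalidateQueries({ queryKey: ['watched-items'] });
      notify.success('Added to watched list');
    },
    onError: (error: Error) => notify.error(`Failed to add item: ${error.message}`),
  });
};
```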
@@ -113,15 +114,12 @@ function WatchedItemsManager() {

      {
        onSuccess: () => console.log('Added to watched list!'),
        onError: (error) => console.error('Failed:', error),
      }
      },
    );
  };

  return (
    <button
      onClick={handleAdd}
      disabled={addWatchedItem.isPending}
    >
    <button onClick={handleAdd} disabled={addWatchedItem.isPending}>
      {addWatchedItem.isPending ? 'Adding...' : 'Add to Watched List'}
    </button>
  );

@@ -134,7 +132,7 @@ function WatchedItemsManager() {

import {
  useCreateShoppingListMutation,
  useAddShoppingListItemMutation,
  useUpdateShoppingListItemMutation
  useUpdateShoppingListItemMutation,
} from '../hooks/mutations';

function ShoppingListManager() {

@@ -149,14 +147,14 @@ function ShoppingListManager() {

  const handleAddItem = (listId: number, masterItemId: number) => {
    addItem.mutate({
      listId,
      item: { masterItemId }
      item: { masterItemId },
    });
  };

  const handleMarkPurchased = (itemId: number) => {
    updateItem.mutate({
      itemId,
      updates: { is_purchased: true }
      updates: { is_purchased: true },
    });
  };

@@ -172,23 +170,27 @@ function ShoppingListManager() {

## Benefits Achieved

### Performance

- ✅ **Automatic cache updates** - Queries automatically refetch after mutations
- ✅ **Request deduplication** - Multiple mutation calls are properly queued
- ✅ **Optimistic updates ready** - Infrastructure in place for Phase 4

### Code Quality

- ✅ **Standardized pattern** - All mutations follow the same structure
- ✅ **Comprehensive documentation** - JSDoc with examples for every hook
- ✅ **Type safety** - Full TypeScript types for all parameters
- ✅ **Error handling** - Consistent error handling and user notifications

### Developer Experience

- ✅ **React Query Devtools** - Inspect mutation states in real-time
- ✅ **Easy imports** - Barrel export for clean imports
- ✅ **Consistent API** - Same pattern across all mutations
- ✅ **Built-in loading states** - `isPending`, `isError`, `isSuccess` states

### User Experience

- ✅ **Automatic notifications** - Success/error toasts on all mutations
- ✅ **Fresh data** - Queries automatically update after mutations
- ✅ **Loading states** - UI can show loading indicators during mutations

@@ -197,6 +199,7 @@ function ShoppingListManager() {

## Current State

### Completed

- ✅ All 7 mutation hooks created
- ✅ Barrel export created for easy imports
- ✅ Comprehensive documentation with examples

@@ -225,12 +228,14 @@ These hooks are actively used throughout the application and will need careful r

### Phase 4: Hook Refactoring & Cleanup

#### Step 1: Refactor useWatchedItems

- [ ] Replace `useApi` calls with mutation hooks
- [ ] Remove manual state management logic
- [ ] Simplify to just wrap mutation hooks with custom logic
- [ ] Update all tests

#### Step 2: Refactor useShoppingLists

- [ ] Replace `useApi` calls with mutation hooks
- [ ] Remove manual state management logic
- [ ] Remove complex state synchronization

@@ -238,17 +243,20 @@ These hooks are actively used throughout the application and will need careful r

- [ ] Update all tests

#### Step 3: Remove Deprecated Code

- [ ] Remove `setWatchedItems` from UserDataContext
- [ ] Remove `setShoppingLists` from UserDataContext
- [ ] Remove `useApi` hook (if no longer used)
- [ ] Remove `useApiOnMount` hook (already deprecated)

#### Step 4: Add Optimistic Updates (Optional)

- [ ] Implement optimistic updates for better UX
- [ ] Use `onMutate` to update cache before server response
- [ ] Implement rollback on error

#### Step 5: Documentation & Testing

- [ ] Update all component documentation
- [ ] Update developer onboarding guide
- [ ] Add integration tests for mutation flows

@@ -41,13 +41,13 @@ Successfully completed Phase 4 of ADR-0005 enforcement by refactoring the remain

### Phase 1-4 Combined

| Metric                      | Before     | After     | Reduction            |
| --------------------------- | ---------- | --------- | -------------------- |
| **useWatchedItems**         | 77 lines   | 71 lines  | -6 lines (cleaner)   |
| **useShoppingLists**        | 222 lines  | 176 lines | -46 lines (-21%)     |
| **Manual state management** | ~150 lines | 0 lines   | -150 lines (100%)    |
| **useApi dependencies**     | 7 hooks    | 0 hooks   | -7 dependencies      |
| **Total for Phase 4**       | 299 lines  | 247 lines | **-52 lines (-17%)** |

### Overall ADR-0005 Impact (Phases 1-4)

@@ -61,45 +61,54 @@ Successfully completed Phase 4 of ADR-0005 enforcement by refactoring the remain

### 1. Simplified useWatchedItems

**Before (useApi pattern):**

```typescript
const { execute: addWatchedItemApi, error: addError } = useApi<MasterGroceryItem, [string, string]>(
  (itemName, category) => apiClient.addWatchedItem(itemName, category)
  (itemName, category) => apiClient.addWatchedItem(itemName, category),
);

const addWatchedItem = useCallback(async (itemName: string, category: string) => {
  if (!userProfile) return;
  const updatedOrNewItem = await addWatchedItemApi(itemName, category);
const addWatchedItem = useCallback(
  async (itemName: string, category: string) => {
    if (!userProfile) return;
    const updatedOrNewItem = await addWatchedItemApi(itemName, category);

  if (updatedOrNewItem) {
    setWatchedItems((currentItems) => {
      const itemExists = currentItems.some(
        (item) => item.master_grocery_item_id === updatedOrNewItem.master_grocery_item_id
      );
      if (!itemExists) {
        return [...currentItems, updatedOrNewItem].sort((a, b) => a.name.localeCompare(b.name));
      }
      return currentItems;
    });
  }
}, [userProfile, setWatchedItems, addWatchedItemApi]);
    if (updatedOrNewItem) {
      setWatchedItems((currentItems) => {
        const itemExists = currentItems.some(
          (item) => item.master_grocery_item_id === updatedOrNewItem.master_grocery_item_id,
        );
        if (!itemExists) {
          return [...currentItems, updatedOrNewItem].sort((a, b) => a.name.localeCompare(b.name));
        }
        return currentItems;
      });
    }
  },
  [userProfile, setWatchedItems, addWatchedItemApi],
);
```

**After (TanStack Query):**

```typescript
const addWatchedItemMutation = useAddWatchedItemMutation();

const addWatchedItem = useCallback(async (itemName: string, category: string) => {
  if (!userProfile) return;
const addWatchedItem = useCallback(
  async (itemName: string, category: string) => {
    if (!userProfile) return;

  try {
    await addWatchedItemMutation.mutateAsync({ itemName, category });
  } catch (error) {
    console.error('useWatchedItems: Failed to add item', error);
  }
}, [userProfile, addWatchedItemMutation]);
    try {
      await addWatchedItemMutation.mutateAsync({ itemName, category });
    } catch (error) {
      console.error('useWatchedItems: Failed to add item', error);
    }
  },
  [userProfile, addWatchedItemMutation],
);
```

**Benefits:**

- No manual state updates
- Cache automatically invalidated
- Success/error notifications handled

@@ -108,6 +117,7 @@ const addWatchedItem = useCallback(async (itemName: string, category: string) =>

### 2. Dramatically Simplified useShoppingLists

**Before:** 222 lines with:

- 5 separate `useApi` hooks
- Complex manual state synchronization
- Client-side duplicate checking

@@ -115,6 +125,7 @@ const addWatchedItem = useCallback(async (itemName: string, category: string) =>

- Try-catch blocks for each operation

**After:** 176 lines with:

- 5 TanStack Query mutation hooks
- Zero manual state management
- Server-side validation

@@ -122,6 +133,7 @@ const addWatchedItem = useCallback(async (itemName: string, category: string) =>

- Consistent error handling

**Removed Complexity:**

```typescript
// OLD: Manual state update with complex logic
const addItemToList = useCallback(async (listId: number, item: {...}) => {
@@ -158,6 +170,7 @@ const addItemToList = useCallback(async (listId: number, item: {...}) => {
```

**NEW: Simple mutation call:**

```typescript
const addItemToList = useCallback(async (listId: number, item: {...}) => {
  if (!userProfile) return;
@@ -173,18 +186,20 @@ const addItemToList = useCallback(async (listId: number, item: {...}) => {
```

### 3. Cleaner Context Interface

**Before:**

```typescript
export interface UserDataContextType {
  watchedItems: MasterGroceryItem[];
  shoppingLists: ShoppingList[];
  setWatchedItems: React.Dispatch<React.SetStateAction<MasterGroceryItem[]>>; // ❌ Removed
  setShoppingLists: React.Dispatch<React.SetStateAction<ShoppingList[]>>; // ❌ Removed
  isLoading: boolean;
  error: string | null;
}
```

**After:**

```typescript
export interface UserDataContextType {
  watchedItems: MasterGroceryItem[];

@@ -195,6 +210,7 @@ export interface UserDataContextType {
```

**Why this matters:**

- Context now truly represents "server state" (read-only from the context's perspective)
- Mutations are handled separately via mutation hooks
- Clear separation of concerns: queries for reads, mutations for writes (see the sketch below)
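A short sketch of that separation from a component's point of view — the hook names follow the examples above, and the JSX is illustrative:

```typescript
function WatchedItemsPanel() {
  const { watchedItems, isLoading } = useUserData(); // reads: query-backed context
  const addWatchedItem = useAddWatchedItemMutation(); // writes: mutation hook

  if (isLoading) return <p>Loading...</p>;
  return (
    <div>
      <ul>
        {watchedItems.map((item) => (
          <li key={item.master_grocery_item_id}>{item.name}</li>
        ))}
      </ul>
      <button onClick={() => addWatchedItem.mutate({ itemName: 'Milk', category: 'Dairy' })}>
        Add Milk
      </button>
    </div>
  );
}
```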
@@ -202,12 +218,14 @@ export interface UserDataContextType {

## Benefits Achieved

### Performance

- ✅ **Eliminated redundant refetches** - No more manual state sync causing stale data
- ✅ **Automatic cache updates** - Mutations invalidate queries automatically
- ✅ **Optimistic updates ready** - Infrastructure supports adding optimistic updates in future
- ✅ **Reduced bundle size** - 52 lines less code in custom hooks

### Code Quality

- ✅ **Removed 150+ lines** of manual state management across all hooks
- ✅ **Eliminated useApi dependency** from user-facing hooks
- ✅ **Consistent error handling** - All mutations use same pattern

@@ -215,12 +233,14 @@ export interface UserDataContextType {

- ✅ **Removed complex logic** - No more client-side duplicate checking

### Developer Experience

- ✅ **Simpler hook implementations** - 46 lines less in useShoppingLists alone
- ✅ **Easier debugging** - React Query Devtools show all mutations
- ✅ **Type safety** - Mutation hooks provide full TypeScript types
- ✅ **Consistent patterns** - All operations follow same mutation pattern

### User Experience

- ✅ **Automatic notifications** - Success/error toasts on all operations
- ✅ **Fresh data** - Cache automatically updates after mutations
- ✅ **Better error messages** - Server-side validation provides better feedback

@@ -231,6 +251,7 @@ export interface UserDataContextType {

### Breaking Changes

**Direct UserDataContext usage:**

```typescript
// ❌ OLD: This no longer works
const { setWatchedItems } = useUserData();

@@ -245,6 +266,7 @@ addWatchedItem.mutate({ itemName: 'Milk', category: 'Dairy' });
```

### Non-Breaking Changes

**Custom hooks maintain backward compatibility:**

```typescript
// ✅ STILL WORKS: Custom hooks maintain same interface
const { addWatchedItem, removeWatchedItem } = useWatchedItems();

@@ -273,6 +295,7 @@ addWatchedItem.mutate({ itemName: 'Milk', category: 'Dairy' });
```

### Testing Approach

**Current tests mock useApi:**

```typescript
vi.mock('./useApi');
const mockedUseApi = vi.mocked(useApi);

@@ -280,6 +303,7 @@ mockedUseApi.mockReturnValue({ execute: mockFn, error: null, loading: false });
```

**New tests should mock mutations:**

```typescript
vi.mock('./mutations', () => ({
  useAddWatchedItemMutation: vi.fn(),

@@ -300,17 +324,20 @@ useAddWatchedItemMutation.mockReturnValue({
```

## Remaining Work

### Immediate Follow-Up (Phase 4.5)

- [ ] Update [src/hooks/useWatchedItems.test.tsx](../src/hooks/useWatchedItems.test.tsx)
- [ ] Update [src/hooks/useShoppingLists.test.tsx](../src/hooks/useShoppingLists.test.tsx)
- [ ] Add integration tests for mutation flows

### Phase 5: Admin Features (Next)

- [ ] Create query hooks for admin features
- [ ] Migrate ActivityLog.tsx
- [ ] Migrate AdminStatsPage.tsx
- [ ] Migrate CorrectionsPage.tsx

### Phase 6: Final Cleanup

- [ ] Remove `useApi` hook (no longer used by core features)
- [ ] Remove `useApiOnMount` hook (deprecated)
- [ ] Remove custom `useInfiniteQuery` hook (deprecated)

@@ -350,12 +377,14 @@ None! Phase 4 implementation is complete and working.

## Performance Metrics

### Before Phase 4

- Multiple redundant state updates per mutation
- Client-side validation adding latency
- Complex nested state updates causing re-renders
- Manual cache synchronization prone to bugs

### After Phase 4

- Single mutation triggers automatic cache update
- Server-side validation (proper place for business logic)
- Simple refetch after mutation (no manual updates)

@@ -372,6 +401,7 @@ None! Phase 4 implementation is complete and working.

Phase 4 successfully refactored the remaining custom hooks (`useWatchedItems` and `useShoppingLists`) to use TanStack Query mutations, eliminating all manual state management for user-facing features. The codebase is now significantly simpler, more maintainable, and follows consistent patterns throughout.

**Key Achievements:**

- Removed 52 lines of code from custom hooks
- Eliminated 7 `useApi` dependencies
- Removed 150+ lines of manual state management

@@ -380,6 +410,7 @@ Phase 4 successfully refactored the remaining custom hooks (`useWatchedItems` an

- Zero regressions in functionality

**Next Steps**:

1. Update tests for refactored hooks (Phase 4.5 - follow-up)
2. Proceed to Phase 5 to migrate admin features
3. Final cleanup in Phase 6


@@ -100,6 +100,7 @@ Successfully completed Phase 5 of ADR-0005 by migrating all admin features from

### Before (Manual State Management)

**ActivityLog.tsx - Before:**

```typescript
const [logs, setLogs] = useState<ActivityLogItem[]>([]);
const [isLoading, setIsLoading] = useState(true);

@@ -116,8 +117,7 @@ useEffect(() => {

    setError(null);
    try {
      const response = await fetchActivityLog(20, 0);
      if (!response.ok)
        throw new Error((await response.json()).message || 'Failed to fetch logs');
      if (!response.ok) throw new Error((await response.json()).message || 'Failed to fetch logs');
      setLogs(await response.json());
    } catch (err) {
      setError(err instanceof Error ? err.message : 'Failed to load activity.');

@@ -131,6 +131,7 @@ useEffect(() => {
```

**ActivityLog.tsx - After:**

```typescript
const { data: logs = [], isLoading, error } = useActivityLogQuery(20, 0);
```
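The `useActivityLogQuery` hook itself is not shown in the diff; under the plan's conventions it presumably wraps `useQuery` with the limit/offset baked into the key — a sketch that reuses the fetch logic from the Before snippet:

```typescript
import { useQuery } from '@tanstack/react-query';

export const useActivityLogQuery = (limit: number, offset: number) =>
  useQuery({
    queryKey: ['activity-log', limit, offset], // separate cache entry per page
    queryFn: async () => {
      const response = await fetchActivityLog(limit, offset);
      if (!response.ok) throw new Error((await response.json()).message || 'Failed to fetch logs');
      return (await response.json()) as ActivityLogItem[];
    },
  });
```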
@@ -138,6 +139,7 @@ const { data: logs = [], isLoading, error } = useActivityLogQuery(20, 0);

### Before (Manual Parallel Fetching)

**CorrectionsPage.tsx - Before:**

```typescript
const [corrections, setCorrections] = useState<SuggestedCorrection[]>([]);
const [isLoading, setIsLoading] = useState(true);

@@ -172,6 +174,7 @@ useEffect(() => {
```

**CorrectionsPage.tsx - After:**

```typescript
const {
  data: corrections = [],
@@ -180,15 +183,9 @@ const {
  refetch: refetchCorrections,
} = useSuggestedCorrectionsQuery();

const {
  data: masterItems = [],
  isLoading: isLoadingMasterItems,
} = useMasterItemsQuery();
const { data: masterItems = [], isLoading: isLoadingMasterItems } = useMasterItemsQuery();

const {
  data: categories = [],
  isLoading: isLoadingCategories,
} = useCategoriesQuery();
const { data: categories = [], isLoading: isLoadingCategories } = useCategoriesQuery();

const isLoading = isLoadingCorrections || isLoadingMasterItems || isLoadingCategories;
const error = correctionsError?.message || null;
```

@@ -197,12 +194,14 @@ const error = correctionsError?.message || null;

## Benefits Achieved

### Performance

- ✅ **Automatic parallel fetching** - CorrectionsPage fetches 3 queries simultaneously
- ✅ **Shared cache** - Multiple components can reuse the same queries
- ✅ **Smart refetching** - Queries refetch on window focus automatically
- ✅ **Stale-while-revalidate** - Shows cached data while fetching fresh data

### Code Quality

- ✅ **~77 lines removed** from admin components (-20% average)
- ✅ **Eliminated manual state management** for all admin queries
- ✅ **Consistent error handling** across all admin features

@@ -210,6 +209,7 @@ const error = correctionsError?.message || null;

- ✅ **Removed complex Promise.all logic** from CorrectionsPage

### Developer Experience

- ✅ **Simpler component code** - Focus on UI, not data fetching
- ✅ **Easier debugging** - React Query Devtools show all queries
- ✅ **Type safety** - Query hooks provide full TypeScript types

@@ -217,6 +217,7 @@ const error = correctionsError?.message || null;

- ✅ **Consistent patterns** - All admin features follow same query pattern

### User Experience

- ✅ **Faster perceived performance** - Show cached data instantly
- ✅ **Background updates** - Data refreshes without loading spinners
- ✅ **Network resilience** - Automatic retry on failure

@@ -224,12 +225,12 @@ const error = correctionsError?.message || null;

## Code Reduction Summary

| Component               | Before                  | After             | Reduction                   |
| ----------------------- | ----------------------- | ----------------- | --------------------------- |
| **ActivityLog.tsx**     | 158 lines               | 133 lines         | -25 lines (-16%)            |
| **AdminStatsPage.tsx**  | 104 lines               | 78 lines          | -26 lines (-25%)            |
| **CorrectionsPage.tsx** | ~120 lines (state mgmt) | ~50 lines (hooks) | ~70 lines (-58% state code) |
| **Total Reduction**     | ~382 lines              | ~261 lines        | **~121 lines (-32%)**       |

**Note**: CorrectionsPage reduction is approximate as the full component includes rendering logic that wasn't changed.

@@ -334,6 +335,7 @@ export const AdminComponent: React.FC = () => {

All changes are backward compatible at the component level. Components maintain their existing props and behavior.

**Example: ActivityLog component still accepts same props:**

```typescript
interface ActivityLogProps {
  userProfile: UserProfile | null;
```

@@ -2,7 +2,8 @@

**Date**: 2026-01-08
**Environment**: Windows 10, VSCode with Claude Code integration
**Configuration Files**:

- [`mcp.json`](c:/Users/games3/AppData/Roaming/Code/User/mcp.json:1)
- [`mcp-servers.json`](c:/Users/games3/AppData/Roaming/Code/User/globalStorage/mcp-servers.json:1)

@@ -13,6 +14,7 @@

You have **8 MCP servers** configured in your environment. These servers extend Claude's capabilities by providing specialized tools for browser automation, file conversion, Git hosting integration, container management, filesystem access, and HTTP requests.

**Key Findings**:

- ✅ 7 servers are properly configured and ready to test
- ⚠️ 1 server requires a token update (gitea-lan)
- 📋 Testing guide and automated script provided

@@ -23,11 +25,13 @@ You have **8 MCP servers** configured in your environment. These servers extend

## MCP Server Inventory

### 1. Chrome DevTools MCP Server

**Status**: ✅ Configured
**Type**: Browser Automation
**Command**: `npx -y chrome-devtools-mcp@latest`

**Capabilities**:

- Launch and control Chrome browser
- Navigate to URLs
- Click elements and interact with DOM

@@ -36,6 +40,7 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Execute JavaScript in browser context

**Use Cases**:

- Web scraping
- Automated testing
- UI verification

@@ -43,6 +48,7 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Debugging frontend issues

**Configuration Details**:

- Headless mode: Enabled
- Isolated: False (shares browser state)
- Channel: Stable

@@ -50,11 +56,13 @@ You have **8 MCP servers** configured in your environment. These servers extend

---

### 2. Markitdown MCP Server

**Status**: ✅ Configured
**Type**: File Conversion
**Command**: `C:\Users\games3\.local\bin\uvx.exe markitdown-mcp`

**Capabilities**:

- Convert PDF files to markdown
- Convert DOCX files to markdown
- Convert HTML to markdown

@@ -62,24 +70,28 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Convert PowerPoint presentations

**Use Cases**:

- Document processing
- Content extraction from various formats
- Making documents AI-readable
- Converting legacy documents to markdown

**Notes**:

- Requires Python and `uvx` to be installed
- Uses Microsoft's Markitdown library

---

### 3. Gitea Torbonium

**Status**: ✅ Configured
**Type**: Git Hosting Integration
**Host**: https://gitea.torbonium.com
**Command**: `d:\gitea-mcp\gitea-mcp.exe run -t stdio`

**Capabilities**:

- List and manage repositories
- Create and update issues
- Manage pull requests

@@ -89,6 +101,7 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Manage repository settings

**Use Cases**:

- Automated issue creation
- Repository management
- Code review automation

@@ -96,12 +109,14 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Release management

**Configuration**:

- Token: Configured (ending in ...fcf8)
- Access: Full API access based on token permissions

---

### 4. Gitea LAN (Torbolan)

**Status**: ⚠️ Requires Configuration
**Type**: Git Hosting Integration
**Host**: https://gitea.torbolan.com

@@ -110,6 +125,7 @@ You have **8 MCP servers** configured in your environment. These servers extend

**Issue**: Access token is set to `REPLACE_WITH_NEW_TOKEN`

**Action Required**:

1. Log into https://gitea.torbolan.com
2. Navigate to Settings → Applications
3. Generate a new access token

@@ -120,6 +136,7 @@ You have **8 MCP servers** configured in your environment. These servers extend

---

### 5. Gitea Projectium

**Status**: ✅ Configured
**Type**: Git Hosting Integration
**Host**: https://gitea.projectium.com

@@ -128,6 +145,7 @@ You have **8 MCP servers** configured in your environment. These servers extend

**Capabilities**: Same as Gitea Torbonium

**Configuration**:

- Token: Configured (ending in ...9ef)
- This appears to be the Gitea instance for your current project

@@ -136,11 +154,13 @@ You have **8 MCP servers** configured in your environment. These servers extend

---

### 6. Podman/Docker MCP Server

**Status**: ✅ Configured
**Type**: Container Management
**Command**: `npx -y @modelcontextprotocol/server-docker`

**Capabilities**:

- List running containers
- Start and stop containers
- View container logs

@@ -150,6 +170,7 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Create and manage networks

**Use Cases**:

- Container orchestration
- Development environment management
- Log analysis

@@ -157,22 +178,26 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Image management

**Configuration**:

- Docker Host: `npipe:////./pipe/docker_engine`
- Requires: Docker Desktop or Podman running on Windows

**Prerequisites**:

- Docker Desktop must be running
- Named pipe access configured

---

### 7. Filesystem MCP Server

**Status**: ✅ Configured
**Type**: File System Access
**Path**: `D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com`
**Command**: `npx -y @modelcontextprotocol/server-filesystem`

**Capabilities**:

- List directory contents recursively
- Read file contents
- Write and modify files

@@ -181,27 +206,31 @@ You have **8 MCP servers** configured in your environment. These servers extend

- Create and delete files/directories

**Use Cases**:

- Project file management
- Bulk file operations
- Code generation and modifications
- File content analysis
- Project structure exploration

**Security Note**:
This server has full read/write access to your project directory. It operates within the specified directory only.

**Scope**:

- Limited to: `D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com`
- Cannot access files outside this directory

---

### 8. Fetch MCP Server

**Status**: ✅ Configured
**Type**: HTTP Client
**Command**: `npx -y @modelcontextprotocol/server-fetch`

**Capabilities**:

- Send HTTP GET requests
- Send HTTP POST requests
- Send PUT, DELETE, PATCH requests

@@ -211,6 +240,7 @@ This server has full read/write access to your project directory. It operates wi

- Handle authentication

**Use Cases**:

- API testing
- Web scraping
- Data fetching from external services

@@ -218,6 +248,7 @@ This server has full read/write access to your project directory. It operates wi

- Integration with external APIs

**Examples**:

- Fetch data from REST APIs
- Download web content
- Test API endpoints

@@ -228,11 +259,12 @@ This server has full read/write access to your project directory. It operates wi

## Current Status: MCP Server Tool Availability

**Important Note**: While these MCP servers are configured in your environment, they are **not currently exposed as callable tools** in this Claude Code session.

### What This Means:

MCP servers typically work by:

1. Running as separate processes
2. Exposing tools and resources via the Model Context Protocol
3. Being connected to the AI assistant by the client application (VSCode)
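For a concrete sense of steps 1-2, this is roughly what the stdio handshake looks like when a client spawns a server and sends the JSON-RPC `initialize` request — a sketch based on the MCP specification (the protocol version string and the filesystem server are only examples):

```typescript
import { spawn } from 'node:child_process';

// Spawn an MCP server the way the client (VSCode) would.
const server = spawn('npx', [
  '-y',
  '@modelcontextprotocol/server-filesystem',
  'D:\\gitea\\flyer-crawler.projectium.com\\flyer-crawler.projectium.com',
]);

// MCP stdio messages are JSON-RPC 2.0, one JSON object per line.
const initialize = {
  jsonrpc: '2.0',
  id: 1,
  method: 'initialize',
  params: {
    protocolVersion: '2024-11-05', // spec revision; adjust to what the server supports
    capabilities: {},
    clientInfo: { name: 'handshake-probe', version: '0.0.1' },
  },
};

server.stdout.on('data', (chunk) => console.log(chunk.toString()));
server.stdin.write(JSON.stringify(initialize) + '\n');
```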
@@ -240,12 +272,14 @@ MCP servers typically work by:

### Current Situation:

In the current session, Claude Code has access to:

- ✅ Built-in file operations (read, write, search, list)
- ✅ Browser actions
- ✅ Mode switching
- ✅ Task management tools

But does **NOT** have direct access to:

- ❌ MCP server-specific tools (e.g., Gitea API operations)
- ❌ Chrome DevTools controls
- ❌ Markitdown conversion functions

@@ -255,6 +289,7 @@ But does **NOT** have direct access to:

### Why This Happens:

MCP servers need to be:

1. Actively connected by the client (VSCode)
2. Running in the background
3. Properly registered with the AI assistant

@@ -277,6 +312,7 @@ cd plans
```

This will:

- Test each server's basic functionality
- Check API connectivity for Gitea servers
- Verify Docker daemon access

@@ -297,6 +333,7 @@ mcp-inspector npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-cra
```

The inspector provides a web UI to:

- View available tools
- Test tool invocations
- See real-time logs

@@ -343,14 +380,14 @@ Follow the comprehensive guide in [`mcp-server-testing-guide.md`](plans/mcp-serv

## MCP Server Use Case Matrix

| Server          | Code Analysis  | Testing | Deployment | Documentation   | API Integration |
| --------------- | -------------- | ------- | ---------- | --------------- | --------------- |
| Chrome DevTools | ✓ (UI testing) | ✓✓✓     | -          | ✓ (screenshots) | ✓               |
| Markitdown      | -              | -       | -          | ✓✓✓             | -               |
| Gitea (all 3)   | ✓✓✓            | ✓       | ✓✓✓        | ✓✓              | ✓✓✓             |
| Docker          | ✓              | ✓✓✓     | ✓✓✓        | -               | ✓               |
| Filesystem      | ✓✓✓            | ✓✓      | ✓          | ✓✓              | ✓               |
| Fetch           | ✓              | ✓✓      | ✓          | -               | ✓✓✓             |

Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable, - = Not applicable

@@ -359,12 +396,14 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable

## Potential Workflows

### Workflow 1: Automated Documentation Updates

1. **Fetch server**: Get latest API documentation from external service
2. **Markitdown**: Convert to markdown format
3. **Filesystem server**: Write to project documentation folder
4. **Gitea server**: Create commit and push changes

### Workflow 2: Container-Based Testing

1. **Docker server**: Start test containers
2. **Fetch server**: Send test API requests
3. **Docker server**: Collect container logs

@@ -372,6 +411,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable

5. **Gitea server**: Update test status in issues

### Workflow 3: Web UI Testing

1. **Chrome DevTools**: Launch browser and navigate to app
2. **Chrome DevTools**: Interact with UI elements
3. **Chrome DevTools**: Capture screenshots

@@ -379,6 +419,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable

5. **Gitea server**: Update test documentation

### Workflow 4: Repository Management

1. **Gitea server**: List all repositories
2. **Gitea server**: Check for outdated dependencies
3. **Gitea server**: Create issues for updates needed

@@ -389,24 +430,28 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable

## Next Steps

### Phase 1: Verification (Immediate)

1. Run the test script: [`test-mcp-servers.ps1`](plans/test-mcp-servers.ps1:1)
2. Review results and identify issues
3. Fix Gitea LAN token configuration
4. Re-test all servers

### Phase 2: Documentation (Short-term)

1. Document successful test results
2. Create usage examples for each server
3. Set up troubleshooting guides
4. Document common error scenarios

### Phase 3: Integration (Medium-term)

1. Verify MCP server connectivity in Claude Code sessions
2. Test tool availability and functionality
3. Create workflow templates
4. Integrate into development processes

### Phase 4: Optimization (Long-term)

1. Monitor MCP server performance
2. Optimize configurations
3. Add additional MCP servers as needed

@@ -419,7 +464,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable

- **MCP Protocol Specification**: https://modelcontextprotocol.io
- **Testing Guide**: [`mcp-server-testing-guide.md`](plans/mcp-server-testing-guide.md:1)
- **Test Script**: [`test-mcp-servers.ps1`](plans/test-mcp-servers.ps1:1)
- **Configuration Files**:
  - [`mcp.json`](c:/Users/games3/AppData/Roaming/Code/User/mcp.json:1)
  - [`mcp-servers.json`](c:/Users/games3/AppData/Roaming/Code/User/globalStorage/mcp-servers.json:1)

@@ -447,6 +492,7 @@ Legend: ✓✓✓ = Primary use case, ✓✓ = Strong use case, ✓ = Applicable

## Conclusion

You have a comprehensive MCP server setup that provides powerful capabilities for:

- **Browser automation** (Chrome DevTools)
- **Document conversion** (Markitdown)
- **Git hosting integration** (3 Gitea instances)

@@ -454,12 +500,14 @@ You have a comprehensive MCP server setup that provides powerful capabilities fo

- **File system operations** (Filesystem)
- **HTTP requests** (Fetch)

**Immediate Action Required**:

- Fix the Gitea LAN token configuration
- Run the test script to verify all servers are operational
- Review test results and address any failures

**Current Limitation**:

- MCP server tools are not exposed in the current Claude Code session
- May require VSCode or client-side configuration to enable

|
||||
## Testing Prerequisites
|
||||
|
||||
1. **MCP Inspector Tool** - Install the official MCP testing tool:
|
||||
|
||||
```bash
|
||||
npm install -g @modelcontextprotocol/inspector
|
||||
```
|
||||
|
||||
```powershell
|
||||
npm install -g @modelcontextprotocol/inspector
|
||||
```
|
||||
@@ -25,20 +27,24 @@ MCP (Model Context Protocol) servers are standalone processes that expose tools
|
||||
**Purpose**: Browser automation and Chrome DevTools integration
|
||||
|
||||
### Test Command:
|
||||
|
||||
```bash
|
||||
npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel stable
|
||||
```
|
||||
|
||||
```powershell
|
||||
npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel stable
|
||||
```
|
||||
|
||||
### Expected Capabilities:
|
||||
|
||||
- Browser launch and control
|
||||
- DOM inspection
|
||||
- Network monitoring
|
||||
- JavaScript execution in browser context
|
||||
|
||||
### Manual Test Steps:
|
||||
|
||||
1. Run the command above
|
||||
2. The server should start and output MCP protocol messages
|
||||
3. Use MCP Inspector to connect:
|
||||
@@ -50,6 +56,7 @@ npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel sta
|
||||
```
|
||||
|
||||
### Success Indicators:
|
||||
|
||||
- Server starts without errors
|
||||
- Lists available tools (e.g., `navigate`, `click`, `screenshot`)
|
||||
- Can execute browser actions
|
||||
@@ -61,20 +68,24 @@ npx -y chrome-devtools-mcp@latest --headless true --isolated false --channel sta
|
||||
**Purpose**: Convert various file formats to markdown
|
||||
|
||||
### Test Command:
|
||||
|
||||
```bash
|
||||
C:\Users\games3\.local\bin\uvx.exe markitdown-mcp
|
||||
```
|
||||
|
||||
```powershell
|
||||
C:\Users\games3\.local\bin\uvx.exe markitdown-mcp
|
||||
```
|
||||
|
||||
### Expected Capabilities:
|
||||
|
||||
- Convert PDF to markdown
|
||||
- Convert DOCX to markdown
|
||||
- Convert HTML to markdown
|
||||
- Convert images (OCR) to markdown
|
||||
|
||||
### Manual Test Steps:
|
||||
|
||||
1. Ensure `uvx` is installed (Python tool)
|
||||
2. Run the command above
|
||||
3. Test with MCP Inspector:
|
||||
@@ -86,11 +97,13 @@ C:\Users\games3\.local\bin\uvx.exe markitdown-mcp
|
||||
```
|
||||
|
||||
### Success Indicators:
|
||||
|
||||
- Server initializes successfully
|
||||
- Lists conversion tools
|
||||
- Can convert a test file
|
||||
|
||||
### Troubleshooting:
|
||||
|
||||
- If `uvx` is not found, install it:
|
||||
```bash
|
||||
pip install uvx
|
||||
@@ -111,6 +124,7 @@ You have three Gitea server configurations. All use the same executable but conn
|
||||
**Host**: https://gitea.torbonium.com
|
||||
|
||||
#### Test Command:
|
||||
|
||||
```powershell
|
||||
$env:GITEA_HOST="https://gitea.torbonium.com"
|
||||
$env:GITEA_ACCESS_TOKEN="391c9ddbe113378bc87bb8184800ba954648fcf8"
|
||||
@@ -118,6 +132,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
|
||||
```
|
||||
|
||||
#### Expected Capabilities:
|
||||
|
||||
- List repositories
|
||||
- Create/update issues
|
||||
- Manage pull requests
|
||||
@@ -125,6 +140,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
|
||||
- Manage branches
|
||||
|
||||
#### Manual Test Steps:
|
||||
|
||||
1. Set environment variables
|
||||
2. Run gitea-mcp.exe
|
||||
3. Use MCP Inspector or test direct API access:
|
||||
@@ -141,6 +157,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
|
||||
**Status**: ⚠️ Token needs replacement
|
||||
|
||||
#### Test Command:
|
||||
|
||||
```powershell
|
||||
$env:GITEA_HOST="https://gitea.torbolan.com"
|
||||
$env:GITEA_ACCESS_TOKEN="REPLACE_WITH_NEW_TOKEN" # ⚠️ UPDATE THIS
|
||||
@@ -148,6 +165,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
|
||||
```
|
||||
|
||||
#### Before Testing:
|
||||
|
||||
1. Generate a new access token:
|
||||
- Log into https://gitea.torbolan.com
|
||||
- Go to Settings → Applications → Generate New Token
|
||||
@@ -158,6 +176,7 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
|
||||
**Host**: https://gitea.projectium.com
|
||||
|
||||
#### Test Command:
|
||||
|
||||
```powershell
|
||||
$env:GITEA_HOST="https://gitea.projectium.com"
|
||||
$env:GITEA_ACCESS_TOKEN="c72bc0f14f623fec233d3c94b3a16397fe3649ef"
|
||||
@@ -165,12 +184,14 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
|
||||
```
|
||||
|
||||
### Success Indicators for All Gitea Servers:
|
||||
|
||||
- Server connects to Gitea instance
|
||||
- Lists available repositories
|
||||
- Can read repository metadata
|
||||
- Authentication succeeds
|
||||
|
||||
### Troubleshooting:
|
||||
|
||||
- **401 Unauthorized**: Token is invalid or expired
|
||||
- **Connection refused**: Check if Gitea instance is accessible
|
||||
- **SSL errors**: Verify HTTPS certificate validity
|
||||
@@ -182,12 +203,14 @@ d:\gitea-mcp\gitea-mcp.exe run -t stdio
|
||||
**Purpose**: Container management and Docker operations
|
||||
|
||||
### Test Command:
|
||||
|
||||
```powershell
|
||||
$env:DOCKER_HOST="npipe:////./pipe/docker_engine"
|
||||
npx -y @modelcontextprotocol/server-docker
|
||||
```
|
||||
|
||||
### Expected Capabilities:
|
||||
|
||||
- List containers
|
||||
- Start/stop containers
|
||||
- View container logs
|
||||
@@ -195,6 +218,7 @@ npx -y @modelcontextprotocol/server-docker
|
||||
- Manage images
|
||||
|
||||
### Manual Test Steps:
|
||||
|
||||
1. Ensure Docker Desktop or Podman is running
|
||||
2. Verify named pipe exists: `npipe:////./pipe/docker_engine`
|
||||
3. Run the server command
|
||||
@@ -207,17 +231,20 @@ npx -y @modelcontextprotocol/server-docker
|
||||
```
|
||||
|
||||
### Verify Docker Access Directly:
|
||||
|
||||
```powershell
|
||||
docker ps
|
||||
docker images
|
||||
```
|
||||
|
||||
### Success Indicators:
|
||||
|
||||
- Server connects to Docker daemon
|
||||
- Can list containers and images
|
||||
- Can execute container operations
|
||||
|
||||
### Troubleshooting:
|
||||
|
||||
- **Cannot connect to Docker daemon**: Ensure Docker Desktop is running
|
||||
- **Named pipe error**: Check DOCKER_HOST configuration
|
||||
- **Permission denied**: Run as administrator
|
||||
@@ -229,14 +256,17 @@ docker images

**Purpose**: Access and manipulate files in specified directory

### Test Command:

```bash
npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com"
```

```powershell
npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com"
```

### Expected Capabilities:

- List directory contents
- Read files
- Write files
@@ -244,6 +274,7 @@ npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectiu
- Get file metadata

### Manual Test Steps:

1. Run the command above
2. Use MCP Inspector:
```bash
@@ -255,18 +286,21 @@ npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectiu
3. Test listing directory contents

### Verify Directory Access:

```powershell
Test-Path "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com"
Get-ChildItem "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com" | Select-Object -First 5
```

### Success Indicators:

- Server starts successfully
- Can list directory contents
- Can read file contents
- Write operations work (if permissions allow)

### Security Note:

This server has access to your entire project directory. Ensure it's only used in trusted contexts.
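To limit that exposure, point the server at a narrower subtree; the allowed directory is simply the command-line argument. A hedged sketch (the `docs` subfolder is only an illustrative choice):

```bash
# Scope the server to one subdirectory instead of the whole project.
npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-crawler.projectium.com\flyer-crawler.projectium.com\docs"
```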
---

@@ -276,14 +310,17 @@ This server has access to your entire project directory. Ensure it's only used i

**Purpose**: Make HTTP requests to external APIs and websites

### Test Command:

```bash
npx -y @modelcontextprotocol/server-fetch
```

```powershell
npx -y @modelcontextprotocol/server-fetch
```

### Expected Capabilities:

- HTTP GET requests
- HTTP POST requests
- Handle JSON/text responses
@@ -291,6 +328,7 @@ npx -y @modelcontextprotocol/server-fetch
- Follow redirects

### Manual Test Steps:

1. Run the server command
2. Use MCP Inspector:
```bash
@@ -302,9 +340,11 @@ npx -y @modelcontextprotocol/server-fetch
3. Test fetching a URL through the inspector

### Test Fetch Capability Directly:

```bash
curl https://api.github.com/users/github
```

```powershell
# Test if curl/web requests work
curl https://api.github.com/users/github
@@ -313,6 +353,7 @@ Invoke-RestMethod -Uri "https://api.github.com/users/github"
```

### Success Indicators:

- Server initializes
- Can fetch URLs
- Returns proper HTTP responses
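The curl checks above only exercise GET. A quick POST round-trip is worth adding; this sketch assumes the public echo service at httpbin.org is reachable from your network:

```bash
# httpbin echoes the JSON body back, confirming POST + JSON handling work.
curl -s -X POST https://httpbin.org/post \
  -H "Content-Type: application/json" \
  -d '{"probe": "mcp-fetch"}'
```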
@@ -414,6 +455,7 @@ npm install -g @modelcontextprotocol/inspector

# Test any server
mcp-inspector <command> <args>
```

```powershell
# Install globally
npm install -g @modelcontextprotocol/inspector
@@ -434,6 +476,7 @@ mcp-inspector npx -y @modelcontextprotocol/server-filesystem "D:\gitea\flyer-cra

# Test Docker server
mcp-inspector npx -y @modelcontextprotocol/server-docker
```

```powershell
# Test fetch server
mcp-inspector npx -y @modelcontextprotocol/server-fetch
@@ -450,19 +493,25 @@ mcp-inspector npx -y @modelcontextprotocol/server-docker

## Common Issues and Solutions

### Issue: "Cannot find module" or "Command not found"

**Solution**: Ensure Node.js and npm are installed and in PATH

### Issue: MCP server starts but doesn't respond

**Solution**: Check server logs, verify stdio communication, ensure no JSON parsing errors

### Issue: Authentication failures with Gitea

**Solution**:

1. Verify tokens haven't expired
2. Check token permissions in Gitea settings
3. Ensure network access to Gitea instances

### Issue: Docker server cannot connect

**Solution**:

1. Start Docker Desktop
2. Verify DOCKER_HOST environment variable
3. Check Windows named pipe permissions
@@ -472,6 +521,7 @@ mcp-inspector npx -y @modelcontextprotocol/server-docker

## Next Steps

After testing:

1. Document which servers are working
2. Fix any configuration issues
3. Update tokens as needed
@@ -6,6 +6,7 @@

## Configuration Summary

### MCP Configuration File

**Location**: `c:/Users/games3/AppData/Roaming/Code/User/mcp.json`

```json
@@ -19,6 +20,7 @@
```

### Key Configuration Details

- **Package**: `docker-mcp` (community MCP server with SSH support)
- **Connection Method**: SSH to Podman machine
- **SSH Endpoint**: `root@127.0.0.1:2972`
@@ -27,12 +29,14 @@

## Podman System Status

### Podman Machine

```
NAME                    VM TYPE  CREATED      CPUS  MEMORY  DISK SIZE
podman-machine-default  wsl      4 weeks ago  4     2GiB    100GiB
```

### Connection Information

```
Name: podman-machine-default-root
URI:  ssh://root@127.0.0.1:2972/run/podman/podman.sock
@@ -40,7 +44,9 @@ Default: true
```

### Container Status

Podman is operational with 3 containers:

- `flyer-dev` (Ubuntu) - Exited
- `flyer-crawler-redis` (Redis) - Exited
- `flyer-crawler-postgres` (PostGIS) - Exited
@@ -48,11 +54,13 @@ Podman is operational with 3 containers:

## Test Results

### Command Line Tests

✅ **Podman CLI**: Working - `podman ps` returns successfully
✅ **Container Management**: Working - Can list and manage containers
✅ **Socket Connection**: Working - SSH connection to Podman machine functional

### MCP Server Integration Tests

✅ **Configuration File**: Updated and valid JSON
✅ **VSCode Restart**: Completed to load new MCP configuration
✅ **Package Selection**: Using `docker-mcp` (supports SSH connections)
@@ -85,16 +93,19 @@ Once the MCP server is fully loaded, the following tools should be available:

### If MCP Server Doesn't Connect

1. **Verify Podman is running**:

   ```bash
   podman ps
   ```

2. **Check SSH connection**:

   ```bash
   podman system connection list
   ```

3. **Test docker-mcp package manually**:

   ```powershell
   $env:DOCKER_HOST="ssh://root@127.0.0.1:2972/run/podman/podman.sock"
   npx -y docker-mcp
   ```
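If step 3 hangs, confirm the SSH path works at all by driving Podman over the same named connection it reports. A minimal sketch using the connection name shown above:

```bash
# Should list podman-machine-default-root with the ssh:// URI shown above.
podman system connection list

# Runs a command through that exact SSH connection; if this works,
# the docker-mcp server should be able to reach the socket too.
podman --connection podman-machine-default-root ps
```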
scripts/analyze-pm2-crashes.sh (new file, 106 lines)
@@ -0,0 +1,106 @@
#!/bin/bash
# scripts/analyze-pm2-crashes.sh
#
# Analyzes PM2 logs to identify crash patterns and problematic projects

set -e

PM2_LOG="/home/gitea-runner/.pm2/pm2.log"

echo "========================================="
echo "PM2 CRASH ANALYSIS TOOL"
echo "========================================="
echo ""

if [ ! -f "$PM2_LOG" ]; then
  echo "❌ PM2 log file not found at: $PM2_LOG"
  exit 1
fi

echo "Analyzing PM2 log file: $PM2_LOG"
echo "Log file size: $(du -h "$PM2_LOG" | cut -f1)"
echo "Last modified: $(stat -c %y "$PM2_LOG")"
echo ""

echo "========================================="
echo "1. RECENT PM2 DAEMON RESTARTS"
echo "========================================="
grep -i "New PM2 Daemon started" "$PM2_LOG" | tail -5 || echo "No daemon restarts found"
echo ""

echo "========================================="
echo "2. ENOENT / CWD ERRORS"
echo "========================================="
grep -i "ENOENT\|uv_cwd\|no such file or directory" "$PM2_LOG" | tail -20 || echo "No ENOENT errors found"
echo ""

echo "========================================="
echo "3. PROCESS CRASH PATTERNS"
echo "========================================="
echo "Searching for app crash events..."
grep -i "App \[.*\] exited\|App \[.*\] errored\|App \[.*\] crashed" "$PM2_LOG" | tail -20 || echo "No app crashes found"
echo ""

echo "========================================="
echo "4. PROJECTS INVOLVED IN CRASHES"
echo "========================================="
echo "Extracting project names from crash logs..."
grep -i "ENOENT\|crash\|error" "$PM2_LOG" | grep -oE "flyer-crawler[a-z-]*|stock-alert[a-z-]*" | sort | uniq -c | sort -rn || echo "No project names found in crashes"
echo ""

echo "========================================="
echo "5. TIMELINE OF RECENT ERRORS (Last 50)"
echo "========================================="
grep -E "^[0-9]{4}-[0-9]{2}-[0-9]{2}" "$PM2_LOG" | grep -i "error\|crash\|ENOENT" | tail -50 || echo "No timestamped errors found"
echo ""

echo "========================================="
echo "6. CURRENT PM2 STATE"
echo "========================================="
pm2 list
echo ""

echo "========================================="
echo "7. PROCESSES WITH MISSING CWD"
echo "========================================="
pm2 jlist | jq -r '.[] | select(.pm2_env.pm_cwd) | "\(.name): \(.pm2_env.pm_cwd)"' | while read line; do
  PROC_NAME=$(echo "$line" | cut -d: -f1)
  CWD=$(echo "$line" | cut -d: -f2- | xargs)
  if [ ! -d "$CWD" ]; then
    echo "❌ $PROC_NAME - CWD missing: $CWD"
  else
    echo "✅ $PROC_NAME - CWD exists: $CWD"
  fi
done
echo ""

echo "========================================="
echo "8. RECOMMENDATIONS"
echo "========================================="
echo ""

# Count ENOENT errors.
# grep -c already prints "0" when nothing matches (it just exits non-zero),
# so swallow the exit code instead of appending a second "0" line.
ENOENT_COUNT=$(grep -c "ENOENT\|uv_cwd" "$PM2_LOG" 2>/dev/null || true)
if [ "${ENOENT_COUNT:-0}" -gt 0 ]; then
  echo "⚠️ Found $ENOENT_COUNT ENOENT/CWD errors in logs"
  echo "   This indicates processes losing their working directory during deployment"
  echo "   Solution: Ensure PM2 processes are stopped BEFORE rsync --delete operations"
  echo ""
fi

# Check for multiple projects
FLYER_PROCESSES=$(pm2 jlist | jq '[.[] | select(.name | contains("flyer-crawler"))] | length' 2>/dev/null || echo "0")
STOCK_PROCESSES=$(pm2 jlist | jq '[.[] | select(.name | contains("stock-alert"))] | length' 2>/dev/null || echo "0")

if [ "$FLYER_PROCESSES" -gt 0 ] && [ "$STOCK_PROCESSES" -gt 0 ]; then
  echo "ℹ️ Multiple projects detected:"
  echo "   - Flyer-crawler: $FLYER_PROCESSES processes"
  echo "   - Stock-alert: $STOCK_PROCESSES processes"
  echo "   Recommendation: Ensure deployments don't interfere with each other"
  echo ""
fi

echo "✅ Analysis complete"
echo ""
echo "To save this report:"
echo "  bash scripts/analyze-pm2-crashes.sh > pm2-crash-report.txt"
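The report is most useful when captured on the runner host itself. A hedged example of collecting it remotely (the host and user are placeholders for your own server):

```bash
# Streams the script over SSH, so nothing needs to be installed remotely.
ssh gitea-runner@prod.example.com 'bash -s' < scripts/analyze-pm2-crashes.sh > pm2-crash-report.txt
```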
@@ -50,12 +50,12 @@ async function main() {
    DIRECTORIES_TO_CLEAN.map((dir) => {
      const absolutePath = resolve(projectRoot, dir);
      return removeDirectory(absolutePath);
    })
    }),
  );

  const successCount = results.filter(Boolean).length;
  console.log(
    `Clean complete: ${successCount}/${DIRECTORIES_TO_CLEAN.length} directories processed.`
    `Clean complete: ${successCount}/${DIRECTORIES_TO_CLEAN.length} directories processed.`,
  );

  // Always exit successfully (matches rimraf behavior)

@@ -9,11 +9,7 @@ import '@testing-library/jest-dom';
describe('StatCard', () => {
  it('renders title and value correctly', () => {
    renderWithProviders(
      <StatCard
        title="Total Users"
        value="1,234"
        icon={<div data-testid="mock-icon">Icon</div>}
      />,
      <StatCard title="Total Users" value="1,234" icon={<div data-testid="mock-icon">Icon</div>} />,
    );

    expect(screen.getByText('Total Users')).toBeInTheDocument();
@@ -22,13 +18,9 @@ describe('StatCard', () => {

  it('renders the icon', () => {
    renderWithProviders(
      <StatCard
        title="Total Users"
        value="1,234"
        icon={<div data-testid="mock-icon">Icon</div>}
      />,
      <StatCard title="Total Users" value="1,234" icon={<div data-testid="mock-icon">Icon</div>} />,
    );

    expect(screen.getByTestId('mock-icon')).toBeInTheDocument();
  });
});
});

@@ -144,4 +144,4 @@ export const batchLimiter = rateLimit({
  message: 'Too many batch requests from this IP, please try again later.',
});

export const budgetUpdateLimiter = batchLimiter; // Alias
export const budgetUpdateLimiter = batchLimiter; // Alias
@@ -73,9 +73,15 @@ export const FlyerReviewPage: React.FC = () => {
          flyers.map((flyer) => (
            <li key={flyer.flyer_id} className="p-4 hover:bg-gray-50 dark:hover:bg-gray-700/50">
              <Link to={`/flyers/${flyer.flyer_id}`} className="flex items-center space-x-4">
                <img src={flyer.icon_url || undefined} alt={flyer.store?.name || 'Unknown Store'} className="w-12 h-12 rounded-md object-cover" />
                <img
                  src={flyer.icon_url || undefined}
                  alt={flyer.store?.name || 'Unknown Store'}
                  className="w-12 h-12 rounded-md object-cover"
                />
                <div className="flex-1">
                  <p className="font-semibold text-gray-800 dark:text-white">{flyer.store?.name || 'Unknown Store'}</p>
                  <p className="font-semibold text-gray-800 dark:text-white">
                    {flyer.store?.name || 'Unknown Store'}
                  </p>
                  <p className="text-sm text-gray-500 dark:text-gray-400">{flyer.file_name}</p>
                </div>
                <div className="text-right text-sm text-gray-500 dark:text-gray-400">
@@ -90,4 +96,4 @@ export const FlyerReviewPage: React.FC = () => {
      )}
    </div>
  );
};
};
@@ -6,7 +6,9 @@ import { renderWithProviders } from '../../../tests/utils/renderWithProviders';

describe('StatCard', () => {
  it('should render the title and value correctly', () => {
    renderWithProviders(<StatCard title="Test Stat" value="1,234" icon={<div data-testid="icon" />} />);
    renderWithProviders(
      <StatCard title="Test Stat" value="1,234" icon={<div data-testid="icon" />} />,
    );

    expect(screen.getByText('Test Stat')).toBeInTheDocument();
    expect(screen.getByText('1,234')).toBeInTheDocument();

@@ -69,4 +69,4 @@ describe('AppProviders', () => {
    expect(masterItemsProvider).toContainElement(userDataProvider);
    expect(userDataProvider).toContainElement(child);
  });
});
});

@@ -35,7 +35,7 @@ export const FlyersProvider: React.FC<{ children: ReactNode }> = ({ children })
      isRefetchingFlyers,
      refetchFlyers,
    }),
    [flyers, isLoadingFlyers, error, isRefetchingFlyers, refetchFlyers]
    [flyers, isLoadingFlyers, error, isRefetchingFlyers, refetchFlyers],
  );

  return <FlyersContext.Provider value={value}>{children}</FlyersContext.Provider>;
@@ -12,11 +12,7 @@ import { useMasterItemsQuery } from '../hooks/queries/useMasterItemsQuery';
 * Master items are cached longer (10 minutes) since they change infrequently.
 */
export const MasterItemsProvider: React.FC<{ children: ReactNode }> = ({ children }) => {
  const {
    data: masterItems = [],
    isLoading,
    error,
  } = useMasterItemsQuery();
  const { data: masterItems = [], isLoading, error } = useMasterItemsQuery();

  const value = useMemo(
    () => ({
@@ -24,7 +20,7 @@ export const MasterItemsProvider: React.FC<{ children: ReactNode }> = ({ childre
      isLoading,
      error: error?.message || null,
    }),
    [masterItems, isLoading, error]
    [masterItems, isLoading, error],
  );

  return <MasterItemsContext.Provider value={value}>{children}</MasterItemsContext.Provider>;

@@ -38,7 +38,15 @@ export const UserDataProvider: React.FC<{ children: ReactNode }> = ({ children }
      isLoading: isEnabled && (isLoadingWatched || isLoadingLists),
      error: watchedError?.message || listsError?.message || null,
    }),
    [watchedItems, shoppingLists, isEnabled, isLoadingWatched, isLoadingLists, watchedError, listsError]
    [
      watchedItems,
      shoppingLists,
      isEnabled,
      isLoadingWatched,
      isLoadingLists,
      watchedError,
      listsError,
    ],
  );

  return <UserDataContext.Provider value={value}>{children}</UserDataContext.Provider>;
@@ -705,7 +705,9 @@ describe('AI Routes (/api/v1/ai)', () => {
    });

    it('should return 200 with a stubbed response on success', async () => {
      const response = await supertest(app).post('/api/v1/ai/check-flyer').attach('image', imagePath);
      const response = await supertest(app)
        .post('/api/v1/ai/check-flyer')
        .attach('image', imagePath);
      expect(response.status).toBe(200);
      expect(response.body.data.is_flyer).toBe(true);
    });
@@ -717,7 +719,9 @@ describe('AI Routes (/api/v1/ai)', () => {
        throw new Error('Logging failed');
      });
      // Attach a valid file to get past the `if (!req.file)` check.
      const response = await supertest(app).post('/api/v1/ai/check-flyer').attach('image', imagePath);
      const response = await supertest(app)
        .post('/api/v1/ai/check-flyer')
        .attach('image', imagePath);
      expect(response.status).toBe(500);
    });
  });
@@ -900,14 +904,18 @@ describe('AI Routes (/api/v1/ai)', () => {
    });

    it('POST /generate-image should return 501 Not Implemented', async () => {
      const response = await supertest(app).post('/api/v1/ai/generate-image').send({ prompt: 'test' });
      const response = await supertest(app)
        .post('/api/v1/ai/generate-image')
        .send({ prompt: 'test' });

      expect(response.status).toBe(501);
      expect(response.body.error.message).toBe('Image generation is not yet implemented.');
    });

    it('POST /generate-speech should return 501 Not Implemented', async () => {
      const response = await supertest(app).post('/api/v1/ai/generate-speech').send({ text: 'test' });
      const response = await supertest(app)
        .post('/api/v1/ai/generate-speech')
        .send({ text: 'test' });
      expect(response.status).toBe(501);
      expect(response.body.error.message).toBe('Speech generation is not yet implemented.');
    });

@@ -204,7 +204,9 @@ describe('Gamification Routes (/api/v1/achievements)', () => {
      mockedIsAdmin.mockImplementation((req: Request, res: Response, next: NextFunction) => next()); // Grant admin access
      vi.mocked(db.gamificationRepo.awardAchievement).mockResolvedValue(undefined);

      const response = await supertest(adminApp).post('/api/v1/achievements/award').send(awardPayload);
      const response = await supertest(adminApp)
        .post('/api/v1/achievements/award')
        .send(awardPayload);

      expect(response.status).toBe(200);
      expect(response.body.data.message).toContain('Successfully awarded');
@@ -224,7 +226,9 @@ describe('Gamification Routes (/api/v1/achievements)', () => {
      mockedIsAdmin.mockImplementation((req: Request, res: Response, next: NextFunction) => next());
      vi.mocked(db.gamificationRepo.awardAchievement).mockRejectedValue(new Error('DB Error'));

      const response = await supertest(adminApp).post('/api/v1/achievements/award').send(awardPayload);
      const response = await supertest(adminApp)
        .post('/api/v1/achievements/award')
        .send(awardPayload);
      expect(response.status).toBe(500);
      expect(response.body.error.message).toBe('DB Error');
    });

@@ -99,7 +99,9 @@ describe('Price Routes (/api/v1/price-history)', () => {
    });

    it('should return 400 if masterItemIds is an empty array', async () => {
      const response = await supertest(app).post('/api/v1/price-history').send({ masterItemIds: [] });
      const response = await supertest(app)
        .post('/api/v1/price-history')
        .send({ masterItemIds: [] });

      expect(response.status).toBe(400);
      expect(response.body.error.details[0].message).toBe(

@@ -60,7 +60,9 @@ describe('Stats Routes (/api/v1/stats)', () => {
    });

    it('should return 400 for invalid query parameters', async () => {
      const response = await supertest(app).get('/api/v1/stats/most-frequent-sales?days=0&limit=abc');
      const response = await supertest(app).get(
        '/api/v1/stats/most-frequent-sales?days=0&limit=abc',
      );
      expect(response.status).toBe(400);
      expect(response.body.error.details).toBeDefined();
      expect(response.body.error.details.length).toBe(2);

@@ -388,7 +388,9 @@ describe('User Routes (/api/v1/users)', () => {
  describe('Shopping List Item Routes', () => {
    describe('POST /shopping-lists/:listId/items (Validation)', () => {
      it('should return 400 if neither masterItemId nor customItemName are provided', async () => {
        const response = await supertest(app).post('/api/v1/users/shopping-lists/1/items').send({});
        const response = await supertest(app)
          .post('/api/v1/users/shopping-lists/1/items')
          .send({});
        expect(response.status).toBe(400);
        expect(response.body.error.details[0].message).toBe(
          'Either masterItemId or customItemName must be provided.',
@@ -512,7 +514,9 @@ describe('User Routes (/api/v1/users)', () => {
      });

      it('should return 400 if no update fields are provided for an item', async () => {
        const response = await supertest(app).put(`/api/v1/users/shopping-lists/items/101`).send({});
        const response = await supertest(app)
          .put(`/api/v1/users/shopping-lists/items/101`)
          .send({});
        expect(response.status).toBe(400);
        expect(response.body.error.details[0].message).toContain(
          'At least one field (quantity, is_purchased) must be provided.',
@@ -1011,7 +1015,9 @@ describe('User Routes (/api/v1/users)', () => {
      const addressData = { address_line_1: '123 New St' };
      vi.mocked(userService.upsertUserAddress).mockResolvedValue(5);

      const response = await supertest(app).put('/api/v1/users/profile/address').send(addressData);
      const response = await supertest(app)
        .put('/api/v1/users/profile/address')
        .send(addressData);

      expect(response.status).toBe(200);
      expect(userService.upsertUserAddress).toHaveBeenCalledWith(
@@ -51,7 +51,9 @@ export class AiAnalysisService {
    // Normalize sources to a consistent format.
    const mappedSources = (response.sources || []).map(
      (s: RawSource) =>
        (s.web ? { uri: s.web.uri || '', title: s.web.title || 'Untitled' } : { uri: '', title: 'Untitled' }) as Source,
        (s.web
          ? { uri: s.web.uri || '', title: s.web.title || 'Untitled' }
          : { uri: '', title: 'Untitled' }) as Source,
    );
    return { ...response, sources: mappedSources };
  }
@@ -82,7 +84,9 @@ export class AiAnalysisService {
    // Normalize sources to a consistent format.
    const mappedSources = (response.sources || []).map(
      (s: RawSource) =>
        (s.web ? { uri: s.web.uri || '', title: s.web.title || 'Untitled' } : { uri: '', title: 'Untitled' }) as Source,
        (s.web
          ? { uri: s.web.uri || '', title: s.web.title || 'Untitled' }
          : { uri: '', title: 'Untitled' }) as Source,
    );
    return { ...response, sources: mappedSources };
  }

@@ -45,7 +45,7 @@ describe('AnalyticsService', () => {
      data,
      attemptsMade: 1,
      updateProgress: vi.fn(),
    } as unknown as Job<T>);
    }) as unknown as Job<T>;

  describe('processDailyReportJob', () => {
    it('should process successfully', async () => {
@@ -207,4 +207,4 @@ describe('AnalyticsService', () => {
      );
    });
  });
});
});

@@ -76,4 +76,4 @@ export class AnalyticsService {
  }
}

export const analyticsService = new AnalyticsService();
export const analyticsService = new AnalyticsService();

@@ -45,7 +45,9 @@ describe('BrandService', () => {

      vi.mocked(db.adminRepo.updateBrandLogo).mockRejectedValue(dbError);

      await expect(brandService.updateBrandLogo(brandId, mockFile, mockLogger)).rejects.toThrow('DB Error');
      await expect(brandService.updateBrandLogo(brandId, mockFile, mockLogger)).rejects.toThrow(
        'DB Error',
      );
    });
  });
});
});

@@ -3,11 +3,15 @@ import * as db from './db/index.db';
import type { Logger } from 'pino';

class BrandService {
  async updateBrandLogo(brandId: number, file: Express.Multer.File, logger: Logger): Promise<string> {
  async updateBrandLogo(
    brandId: number,
    file: Express.Multer.File,
    logger: Logger,
  ): Promise<string> {
    const logoUrl = `/flyer-images/${file.filename}`;
    await db.adminRepo.updateBrandLogo(brandId, logoUrl, logger);
    return logoUrl;
  }
}

export const brandService = new BrandService();
export const brandService = new BrandService();
@@ -28,9 +28,15 @@ export class BudgetRepository {
      );
      return res.rows;
    } catch (error) {
      handleDbError(error, logger, 'Database error in getBudgetsForUser', { userId }, {
        defaultMessage: 'Failed to retrieve budgets.',
      });
      handleDbError(
        error,
        logger,
        'Database error in getBudgetsForUser',
        { userId },
        {
          defaultMessage: 'Failed to retrieve budgets.',
        },
      );
    }
  }

@@ -60,12 +66,18 @@ export class BudgetRepository {
        return res.rows[0];
      });
    } catch (error) {
      handleDbError(error, logger, 'Database error in createBudget', { budgetData, userId }, {
        fkMessage: 'The specified user does not exist.',
        notNullMessage: 'One or more required budget fields are missing.',
        checkMessage: 'Invalid value provided for budget period.',
        defaultMessage: 'Failed to create budget.',
      });
      handleDbError(
        error,
        logger,
        'Database error in createBudget',
        { budgetData, userId },
        {
          fkMessage: 'The specified user does not exist.',
          notNullMessage: 'One or more required budget fields are missing.',
          checkMessage: 'Invalid value provided for budget period.',
          defaultMessage: 'Failed to create budget.',
        },
      );
    }
  }

@@ -98,9 +110,15 @@ export class BudgetRepository {
      return res.rows[0];
    } catch (error) {
      if (error instanceof NotFoundError) throw error;
      handleDbError(error, logger, 'Database error in updateBudget', { budgetId, userId }, {
        defaultMessage: 'Failed to update budget.',
      });
      handleDbError(
        error,
        logger,
        'Database error in updateBudget',
        { budgetId, userId },
        {
          defaultMessage: 'Failed to update budget.',
        },
      );
    }
  }

@@ -120,9 +138,15 @@ export class BudgetRepository {
      }
    } catch (error) {
      if (error instanceof NotFoundError) throw error;
      handleDbError(error, logger, 'Database error in deleteBudget', { budgetId, userId }, {
        defaultMessage: 'Failed to delete budget.',
      });
      handleDbError(
        error,
        logger,
        'Database error in deleteBudget',
        { budgetId, userId },
        {
          defaultMessage: 'Failed to delete budget.',
        },
      );
    }
  }

@@ -158,4 +158,4 @@ describe('Conversion DB Service', () => {
      );
    });
  });
});
});

@@ -194,6 +194,7 @@ export function handleDbError(

  // Fallback generic error
  // Use the consistent DatabaseError from the processing errors module for the fallback.
  const errorMessage = options.defaultMessage || `Failed to perform operation on ${options.entityName || 'database'}.`;
  const errorMessage =
    options.defaultMessage || `Failed to perform operation on ${options.entityName || 'database'}.`;
  throw new ProcessingDatabaseError(errorMessage);
}

@@ -94,4 +94,4 @@ describe('Price DB Service', () => {
      );
    });
  });
});
});

@@ -61,4 +61,4 @@ export const priceRepo = {
      );
    }
  },
};
};
@@ -25,9 +25,15 @@ export class RecipeRepository {
      );
      return res.rows;
    } catch (error) {
      handleDbError(error, logger, 'Database error in getRecipesBySalePercentage', { minPercentage }, {
        defaultMessage: 'Failed to get recipes by sale percentage.',
      });
      handleDbError(
        error,
        logger,
        'Database error in getRecipesBySalePercentage',
        { minPercentage },
        {
          defaultMessage: 'Failed to get recipes by sale percentage.',
        },
      );
    }
  }

@@ -95,9 +101,15 @@ export class RecipeRepository {
      );
      return res.rows;
    } catch (error) {
      handleDbError(error, logger, 'Database error in getUserFavoriteRecipes', { userId }, {
        defaultMessage: 'Failed to get favorite recipes.',
      });
      handleDbError(
        error,
        logger,
        'Database error in getUserFavoriteRecipes',
        { userId },
        {
          defaultMessage: 'Failed to get favorite recipes.',
        },
      );
    }
  }

@@ -124,10 +136,16 @@ export class RecipeRepository {
      }
      return res.rows[0];
    } catch (error) {
      handleDbError(error, logger, 'Database error in addFavoriteRecipe', { userId, recipeId }, {
        fkMessage: 'The specified user or recipe does not exist.',
        defaultMessage: 'Failed to add favorite recipe.',
      });
      handleDbError(
        error,
        logger,
        'Database error in addFavoriteRecipe',
        { userId, recipeId },
        {
          fkMessage: 'The specified user or recipe does not exist.',
          defaultMessage: 'Failed to add favorite recipe.',
        },
      );
    }
  }

@@ -146,9 +164,15 @@ export class RecipeRepository {
        throw new NotFoundError('Favorite recipe not found for this user.');
      }
    } catch (error) {
      handleDbError(error, logger, 'Database error in removeFavoriteRecipe', { userId, recipeId }, {
        defaultMessage: 'Failed to remove favorite recipe.',
      });
      handleDbError(
        error,
        logger,
        'Database error in removeFavoriteRecipe',
        { userId, recipeId },
        {
          defaultMessage: 'Failed to remove favorite recipe.',
        },
      );
    }
  }

@@ -160,23 +184,55 @@ export class RecipeRepository {
   */
  async createRecipe(
    userId: string,
    recipeData: Pick<Recipe, 'name' | 'instructions' | 'description' | 'prep_time_minutes' | 'cook_time_minutes' | 'servings' | 'photo_url'>,
    logger: Logger
    recipeData: Pick<
      Recipe,
      | 'name'
      | 'instructions'
      | 'description'
      | 'prep_time_minutes'
      | 'cook_time_minutes'
      | 'servings'
      | 'photo_url'
    >,
    logger: Logger,
  ): Promise<Recipe> {
    try {
      const { name, instructions, description, prep_time_minutes, cook_time_minutes, servings, photo_url } = recipeData;
      const {
        name,
        instructions,
        description,
        prep_time_minutes,
        cook_time_minutes,
        servings,
        photo_url,
      } = recipeData;
      const res = await this.db.query<Recipe>(
        `INSERT INTO public.recipes
        (user_id, name, instructions, description, prep_time_minutes, cook_time_minutes, servings, photo_url, status)
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'public')
        RETURNING *`,
        [userId, name, instructions, description, prep_time_minutes, cook_time_minutes, servings, photo_url]
        [
          userId,
          name,
          instructions,
          description,
          prep_time_minutes,
          cook_time_minutes,
          servings,
          photo_url,
        ],
      );
      return res.rows[0];
    } catch (error) {
      handleDbError(error, logger, 'Database error in createRecipe', { userId, recipeData }, {
        defaultMessage: 'Failed to create recipe.',
      });
      handleDbError(
        error,
        logger,
        'Database error in createRecipe',
        { userId, recipeData },
        {
          defaultMessage: 'Failed to create recipe.',
        },
      );
    }
  }

@@ -206,9 +262,15 @@ export class RecipeRepository {
        throw new NotFoundError('Recipe not found or user does not have permission to delete.');
      }
    } catch (error) {
      handleDbError(error, logger, 'Database error in deleteRecipe', { recipeId, userId, isAdmin }, {
        defaultMessage: 'Failed to delete recipe.',
      });
      handleDbError(
        error,
        logger,
        'Database error in deleteRecipe',
        { recipeId, userId, isAdmin },
        {
          defaultMessage: 'Failed to delete recipe.',
        },
      );
    }
  }

@@ -271,9 +333,15 @@ export class RecipeRepository {
      if (error instanceof Error && error.message === 'No fields provided to update.') {
        throw error;
      }
      handleDbError(error, logger, 'Database error in updateRecipe', { recipeId, userId, updates }, {
        defaultMessage: 'Failed to update recipe.',
      });
      handleDbError(
        error,
        logger,
        'Database error in updateRecipe',
        { recipeId, userId, updates },
        {
          defaultMessage: 'Failed to update recipe.',
        },
      );
    }
  }

@@ -315,9 +383,15 @@ export class RecipeRepository {
      }
      return res.rows[0];
    } catch (error) {
      handleDbError(error, logger, 'Database error in getRecipeById', { recipeId }, {
        defaultMessage: 'Failed to retrieve recipe.',
      });
      handleDbError(
        error,
        logger,
        'Database error in getRecipeById',
        { recipeId },
        {
          defaultMessage: 'Failed to retrieve recipe.',
        },
      );
    }
  }

@@ -341,9 +415,15 @@ export class RecipeRepository {
      const res = await this.db.query<RecipeComment>(query, [recipeId]);
      return res.rows;
    } catch (error) {
      handleDbError(error, logger, 'Database error in getRecipeComments', { recipeId }, {
        defaultMessage: 'Failed to get recipe comments.',
      });
      handleDbError(
        error,
        logger,
        'Database error in getRecipeComments',
        { recipeId },
        {
          defaultMessage: 'Failed to get recipe comments.',
        },
      );
    }
  }

@@ -374,7 +454,10 @@ export class RecipeRepository {
        logger,
        'Database error in addRecipeComment',
        { recipeId, userId, parentCommentId },
        { fkMessage: 'The specified recipe, user, or parent comment does not exist.', defaultMessage: 'Failed to add recipe comment.' },
        {
          fkMessage: 'The specified recipe, user, or parent comment does not exist.',
          defaultMessage: 'Failed to add recipe comment.',
        },
      );
    }
  }
@@ -398,10 +481,16 @@ export class RecipeRepository {
        // raise_exception
        throw new Error(error.message); // Re-throw the user-friendly message from the DB function.
      }
      handleDbError(error, logger, 'Database error in forkRecipe', { userId, originalRecipeId }, {
        fkMessage: 'The specified user or original recipe does not exist.',
        defaultMessage: 'Failed to fork recipe.',
      });
      handleDbError(
        error,
        logger,
        'Database error in forkRecipe',
        { userId, originalRecipeId },
        {
          fkMessage: 'The specified user or original recipe does not exist.',
          defaultMessage: 'Failed to fork recipe.',
        },
      );
    }
  }
}
@@ -81,4 +81,4 @@ describe('EventBus', () => {
    // callback2 should be called again
    expect(callback2).toHaveBeenCalledTimes(2);
  });
});
});

@@ -4,7 +4,11 @@ import sharp from 'sharp';
import type { Dirent } from 'node:fs';
import type { Job } from 'bullmq';
import type { Logger } from 'pino';
import { ImageConversionError, PdfConversionError, UnsupportedFileTypeError } from './processingErrors';
import {
  ImageConversionError,
  PdfConversionError,
  UnsupportedFileTypeError,
} from './processingErrors';
import type { FlyerJobData } from '../types/job-data';
// Define the image formats supported by the AI model
const SUPPORTED_IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.webp', '.heic', '.heif'];
@@ -169,7 +173,9 @@ export class FlyerFileHandler {
      return outputPath;
    } catch (error) {
      logger.error({ err: error, filePath }, 'Failed to convert image to PNG using sharp.');
      throw new ImageConversionError(`Image conversion to PNG failed for ${path.basename(filePath)}.`);
      throw new ImageConversionError(
        `Image conversion to PNG failed for ${path.basename(filePath)}.`,
      );
    }
  }

@@ -217,7 +223,10 @@ export class FlyerFileHandler {
    // For other supported types like WEBP, etc., which are less likely to have problematic EXIF,
    // we can process them directly without modification for now.
    logger.info(`Processing as a single image file (non-JPEG/PNG): ${filePath}`);
    return { imagePaths: [{ path: filePath, mimetype: `image/${fileExt.slice(1)}` }], createdImagePaths: [] };
    return {
      imagePaths: [{ path: filePath, mimetype: `image/${fileExt.slice(1)}` }],
      createdImagePaths: [],
    };
  }

  /**
@@ -294,9 +303,11 @@ export class FlyerFileHandler {
        await this.fs.rename(tempPath, image.path);
      } catch (error) {
        logger.error({ err: error, path: image.path }, 'Failed to optimize image.');
        throw new ImageConversionError(`Image optimization failed for ${path.basename(image.path)}.`);
        throw new ImageConversionError(
          `Image optimization failed for ${path.basename(image.path)}.`,
        );
      }
    }
    logger.info('Image optimization complete.');
  }
}
}

@@ -102,10 +102,10 @@ describe('FlyerPersistenceService', () => {
        mockFlyerData,
        mockItemsForDb,
        mockLogger,
        mockClient
        mockClient,
      );
      expect(mockLogger.info).toHaveBeenCalledWith(
        expect.stringContaining('Successfully processed flyer')
        expect.stringContaining('Successfully processed flyer'),
      );

      // Verify AdminRepository usage
@@ -117,7 +117,7 @@ describe('FlyerPersistenceService', () => {
          displayText: `Processed a new flyer for ${mockFlyerData.store_name}.`,
          details: { flyerId: mockCreatedFlyer.flyer_id, storeName: mockFlyerData.store_name },
        }),
        mockLogger
        mockLogger,
      );

      // Verify GamificationRepository usage
@@ -153,8 +153,8 @@ describe('FlyerPersistenceService', () => {
      vi.mocked(createFlyerAndItems).mockRejectedValue(error);

      await expect(
        service.saveFlyer(mockFlyerData, mockItemsForDb, 'user-1', mockLogger)
        service.saveFlyer(mockFlyerData, mockItemsForDb, 'user-1', mockLogger),
      ).rejects.toThrow(error);
    });
  });
});
});

@@ -52,7 +52,11 @@ describe('GamificationService', () => {

    await gamificationService.awardAchievement(userId, achievementName, mockLogger);

    expect(gamificationRepo.awardAchievement).toHaveBeenCalledWith(userId, achievementName, mockLogger);
    expect(gamificationRepo.awardAchievement).toHaveBeenCalledWith(
      userId,
      achievementName,
      mockLogger,
    );
  });

  it('should re-throw ForeignKeyConstraintError without logging it as a service error', async () => {
@@ -163,4 +167,4 @@ describe('GamificationService', () => {
      );
    });
  });
});
});

@@ -72,4 +72,4 @@ class GamificationService {
  }
}

export const gamificationService = new GamificationService();
export const gamificationService = new GamificationService();
@@ -25,7 +25,10 @@ export class GeocodingService {
        return JSON.parse(cached);
      }
    } catch (error) {
      logger.error({ err: error, cacheKey }, 'Redis GET or JSON.parse command failed. Proceeding without cache.');
      logger.error(
        { err: error, cacheKey },
        'Redis GET or JSON.parse command failed. Proceeding without cache.',
      );
    }

    if (process.env.GOOGLE_MAPS_API_KEY) {
@@ -42,7 +45,7 @@ export class GeocodingService {
      } catch (error) {
        logger.error(
          { err: error },
          'An error occurred while calling the Google Maps Geocoding API. Falling back to Nominatim.'
          'An error occurred while calling the Google Maps Geocoding API. Falling back to Nominatim.',
        );
      }
    } else {
@@ -69,7 +72,10 @@ export class GeocodingService {
    try {
      await redis.set(cacheKey, JSON.stringify(result), 'EX', 60 * 60 * 24 * 30); // Cache for 30 days
    } catch (error) {
      logger.error({ err: error, cacheKey }, 'Redis SET command failed. Result will not be cached.');
      logger.error(
        { err: error, cacheKey },
        'Redis SET command failed. Result will not be cached.',
      );
    }
  }

@@ -121,7 +121,9 @@ describe('Processing Errors', () => {
      expect(error).toBeInstanceOf(TransformationError);
      expect(error.message).toBe(message);
      expect(error.errorCode).toBe('TRANSFORMATION_FAILED');
      expect(error.userMessage).toBe('There was a problem transforming the flyer data. Please check the input.');
      expect(error.userMessage).toBe(
        'There was a problem transforming the flyer data. Please check the input.',
      );
    });
  });

@@ -147,7 +149,9 @@ describe('Processing Errors', () => {
      expect(error).toBeInstanceOf(ImageConversionError);
      expect(error.message).toBe(message);
      expect(error.errorCode).toBe('IMAGE_CONVERSION_FAILED');
      expect(error.userMessage).toBe('The uploaded image could not be processed. It might be corrupt or in an unsupported format.');
      expect(error.userMessage).toBe(
        'The uploaded image could not be processed. It might be corrupt or in an unsupported format.',
      );
    });
  });

@@ -66,7 +66,10 @@ describe('SystemService', () => {
    });

    it('should return success: false when process does not exist', async () => {
      const error = new Error('Command failed') as ExecException & { stdout?: string; stderr?: string };
      const error = new Error('Command failed') as ExecException & {
        stdout?: string;
        stderr?: string;
      };
      error.code = 1;
      error.stderr = "[PM2][ERROR] Process or Namespace flyer-crawler-api doesn't exist";

@@ -83,4 +86,4 @@ describe('SystemService', () => {
      );
    });
  });
});
});

@@ -121,4 +121,4 @@ describe('tokenStorage', () => {
      );
    });
  });
});
});

@@ -43,4 +43,4 @@ export const removeToken = (): void => {
  } catch (error) {
    console.error('SecurityError: Failed to access localStorage to remove token.', error);
  }
};
};
@@ -193,7 +193,9 @@ describe('UserService', () => {

    // Act & Assert
    // The service should wrap the generic error in a `DatabaseError`.
    await expect(userService.upsertUserAddress(user, addressData, logger)).rejects.toBeInstanceOf(DatabaseError);
    await expect(userService.upsertUserAddress(user, addressData, logger)).rejects.toBeInstanceOf(
      DatabaseError,
    );

    // Assert that the error was logged correctly
    expect(logger.error).toHaveBeenCalledWith(
@@ -285,7 +287,10 @@ describe('UserService', () => {
      await expect(userService.updateUserAvatar(userId, file, logger)).rejects.toThrow(
        DatabaseError,
      );
      expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Failed to update user avatar: ${genericError.message}`);
      expect(logger.error).toHaveBeenCalledWith(
        expect.any(Object),
        `Failed to update user avatar: ${genericError.message}`,
      );
    });
  });

@@ -313,8 +318,13 @@ describe('UserService', () => {
    vi.mocked(bcrypt.hash).mockResolvedValue();
    mocks.mockUpdateUserPassword.mockRejectedValue(genericError);

    await expect(userService.updateUserPassword(userId, newPassword, logger)).rejects.toThrow(DatabaseError);
    expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Failed to update user password: ${genericError.message}`);
    await expect(userService.updateUserPassword(userId, newPassword, logger)).rejects.toThrow(
      DatabaseError,
    );
    expect(logger.error).toHaveBeenCalledWith(
      expect.any(Object),
      `Failed to update user password: ${genericError.message}`,
    );
  });
});

@@ -340,9 +350,9 @@ describe('UserService', () => {
    const { logger } = await import('./logger.server');
    mocks.mockFindUserWithPasswordHashById.mockResolvedValue(null);

    await expect(
      userService.deleteUserAccount('user-123', 'password', logger),
    ).rejects.toThrow(NotFoundError);
    await expect(userService.deleteUserAccount('user-123', 'password', logger)).rejects.toThrow(
      NotFoundError,
    );
  });

  it('should throw ValidationError if password does not match', async () => {
@@ -371,8 +381,13 @@ describe('UserService', () => {
    });
    vi.mocked(bcrypt.compare).mockRejectedValue(genericError);

    await expect(userService.deleteUserAccount(userId, password, logger)).rejects.toThrow(DatabaseError);
    expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Failed to delete user account: ${genericError.message}`);
    await expect(userService.deleteUserAccount(userId, password, logger)).rejects.toThrow(
      DatabaseError,
    );
    expect(logger.error).toHaveBeenCalledWith(
      expect.any(Object),
      `Failed to delete user account: ${genericError.message}`,
    );
  });
});

@@ -430,8 +445,13 @@ describe('UserService', () => {

    mocks.mockDeleteUserById.mockRejectedValue(genericError);

    await expect(userService.deleteUserAsAdmin(deleterId, targetId, logger)).rejects.toThrow(DatabaseError);
    expect(logger.error).toHaveBeenCalledWith(expect.any(Object), `Admin failed to delete user account: ${genericError.message}`);
    await expect(userService.deleteUserAsAdmin(deleterId, targetId, logger)).rejects.toThrow(
      DatabaseError,
    );
    expect(logger.error).toHaveBeenCalledWith(
      expect.any(Object),
      `Admin failed to delete user account: ${genericError.message}`,
    );
  });
});
});

@@ -38,13 +38,20 @@ class UserService {
        logger,
      );
      if (!userprofile.address_id) {
        await userRepo.updateUserProfile(userprofile.user.user_id, { address_id: addressId }, logger);
        await userRepo.updateUserProfile(
          userprofile.user.user_id,
          { address_id: addressId },
          logger,
        );
      }
      return addressId;
    })
    .catch((error) => {
      const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
      logger.error({ err: error, userId: userprofile.user.user_id }, `Transaction to upsert user address failed: ${errorMessage}`);
      logger.error(
        { err: error, userId: userprofile.user.user_id },
        `Transaction to upsert user address failed: ${errorMessage}`,
      );
      // Wrap the original error in a service-level DatabaseError to standardize the error contract,
      // as this is an unexpected failure within the transaction boundary.
      throw new DatabaseError(errorMessage);
@@ -68,7 +75,10 @@ class UserService {
      return { deletedCount };
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
      logger.error({ err: error, attemptsMade: job.attemptsMade }, `Expired token cleanup job failed: ${errorMessage}`);
      logger.error(
        { err: error, attemptsMade: job.attemptsMade },
        `Expired token cleanup job failed: ${errorMessage}`,
      );
      // This is a background job, but wrapping in a standard error type is good practice.
      throw new DatabaseError(errorMessage);
    }
@@ -81,7 +91,11 @@ class UserService {
   * @param logger The logger instance.
   * @returns The updated user profile.
   */
  async updateUserAvatar(userId: string, file: Express.Multer.File, logger: Logger): Promise<Profile> {
  async updateUserAvatar(
    userId: string,
    file: Express.Multer.File,
    logger: Logger,
  ): Promise<Profile> {
    try {
      const baseUrl = getBaseUrl(logger);
      const avatarUrl = `${baseUrl}/uploads/avatars/${file.filename}`;
@@ -151,7 +165,11 @@ class UserService {
   * @param logger The logger instance.
   * @returns The address object.
   */
  async getUserAddress(userProfile: UserProfile, addressId: number, logger: Logger): Promise<Address> {
  async getUserAddress(
    userProfile: UserProfile,
    addressId: number,
    logger: Logger,
  ): Promise<Address> {
    if (userProfile.address_id !== addressId) {
      throw new ValidationError([], 'Forbidden: You can only access your own address.');
    }
@@ -162,7 +180,10 @@ class UserService {
        throw error;
      }
      const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
      logger.error({ err: error, userId: userProfile.user.user_id, addressId }, `Failed to get user address: ${errorMessage}`);
      logger.error(
        { err: error, userId: userProfile.user.user_id, addressId },
        `Failed to get user address: ${errorMessage}`,
      );
      // Wrap unexpected errors.
      throw new DatabaseError(errorMessage);
    }
@@ -187,7 +208,10 @@ class UserService {
        throw error;
      }
      const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred.';
      log.error({ err: error, deleterId, userToDeleteId }, `Admin failed to delete user account: ${errorMessage}`);
      log.error(
        { err: error, deleterId, userToDeleteId },
        `Admin failed to delete user account: ${errorMessage}`,
      );
      // Wrap unexpected errors.
      throw new DatabaseError(errorMessage);
    }

@@ -173,4 +173,4 @@ describe('Worker Entry Point', () => {
      );
    });
  });
});
});

@@ -28,4 +28,4 @@ process.on('unhandledRejection', (reason, promise) => {
  logger.error({ reason, promise }, '[Worker] Unhandled Rejection');
});

logger.info('[Worker] Worker process is running and listening for jobs.');
logger.info('[Worker] Worker process is running and listening for jobs.');
@@ -209,7 +209,9 @@ describe('Authentication E2E Flow', () => {
|
||||
expect(loginResponse?.status).toBe(200);
|
||||
|
||||
// Request password reset (do not poll, as this endpoint is rate-limited)
|
||||
const forgotResponse = await getRequest().post('/api/v1/auth/forgot-password').send({ email });
|
||||
const forgotResponse = await getRequest()
|
||||
.post('/api/v1/auth/forgot-password')
|
||||
.send({ email });
|
||||
expect(forgotResponse.status).toBe(200);
|
||||
const resetToken = forgotResponse.body.data.token;
|
||||
|
||||
|
||||
@@ -112,7 +112,9 @@ describe('Reactions API Routes Integration Tests', () => {
|
||||
});
|
||||
|
||||
it('should return 400 when entityId is missing', async () => {
|
||||
const response = await request.get('/api/v1/reactions/summary').query({ entityType: 'recipe' });
|
||||
const response = await request
|
||||
.get('/api/v1/reactions/summary')
|
||||
.query({ entityType: 'recipe' });
|
||||
|
||||
expect(response.status).toBe(400);
|
||||
expect(response.body.success).toBe(false);
|
||||
|
||||
@@ -72,4 +72,4 @@ vi.mock('../../components/WhatsNewModal', async () => {
|
||||
vi.mock('../../layouts/MainLayout', async () => {
|
||||
const { MockMainLayout } = await import('../utils/componentMocks');
|
||||
return { MainLayout: MockMainLayout };
|
||||
});
|
||||
});
|
||||
|
||||
@@ -30,47 +30,69 @@ export const cleanupDb = async (options: CleanupOptions) => {
|
||||
// Children entities first, then parents.
|
||||
|
||||
if (options.suggestedCorrectionIds?.filter(Boolean).length) {
|
||||
-      await client.query('DELETE FROM public.suggested_corrections WHERE suggested_correction_id = ANY($1::int[])', [options.suggestedCorrectionIds]);
+      await client.query(
+        'DELETE FROM public.suggested_corrections WHERE suggested_correction_id = ANY($1::int[])',
+        [options.suggestedCorrectionIds],
+      );
       logger.debug(`Cleaned up ${options.suggestedCorrectionIds.length} suggested correction(s).`);
     }

     if (options.budgetIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.budgets WHERE budget_id = ANY($1::int[])', [options.budgetIds]);
+      await client.query('DELETE FROM public.budgets WHERE budget_id = ANY($1::int[])', [
+        options.budgetIds,
+      ]);
       logger.debug(`Cleaned up ${options.budgetIds.length} budget(s).`);
     }

     if (options.recipeCommentIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.recipe_comments WHERE recipe_comment_id = ANY($1::int[])', [options.recipeCommentIds]);
+      await client.query(
+        'DELETE FROM public.recipe_comments WHERE recipe_comment_id = ANY($1::int[])',
+        [options.recipeCommentIds],
+      );
       logger.debug(`Cleaned up ${options.recipeCommentIds.length} recipe comment(s).`);
     }

     if (options.recipeIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.recipes WHERE recipe_id = ANY($1::int[])', [options.recipeIds]);
+      await client.query('DELETE FROM public.recipes WHERE recipe_id = ANY($1::int[])', [
+        options.recipeIds,
+      ]);
       logger.debug(`Cleaned up ${options.recipeIds.length} recipe(s).`);
     }

     if (options.flyerIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.flyers WHERE flyer_id = ANY($1::int[])', [options.flyerIds]);
+      await client.query('DELETE FROM public.flyers WHERE flyer_id = ANY($1::int[])', [
+        options.flyerIds,
+      ]);
       logger.debug(`Cleaned up ${options.flyerIds.length} flyer(s).`);
     }

     if (options.storeIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.stores WHERE store_id = ANY($1::int[])', [options.storeIds]);
+      await client.query('DELETE FROM public.stores WHERE store_id = ANY($1::int[])', [
+        options.storeIds,
+      ]);
       logger.debug(`Cleaned up ${options.storeIds.length} store(s).`);
     }

     if (options.masterItemIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = ANY($1::int[])', [options.masterItemIds]);
+      await client.query(
+        'DELETE FROM public.master_grocery_items WHERE master_grocery_item_id = ANY($1::int[])',
+        [options.masterItemIds],
+      );
       logger.debug(`Cleaned up ${options.masterItemIds.length} master grocery item(s).`);
     }

     if (options.shoppingListIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.shopping_lists WHERE shopping_list_id = ANY($1::int[])', [options.shoppingListIds]);
+      await client.query(
+        'DELETE FROM public.shopping_lists WHERE shopping_list_id = ANY($1::int[])',
+        [options.shoppingListIds],
+      );
       logger.debug(`Cleaned up ${options.shoppingListIds.length} shopping list(s).`);
     }

     if (options.userIds?.filter(Boolean).length) {
-      await client.query('DELETE FROM public.users WHERE user_id = ANY($1::uuid[])', [options.userIds]);
+      await client.query('DELETE FROM public.users WHERE user_id = ANY($1::uuid[])', [
+        options.userIds,
+      ]);
       logger.debug(`Cleaned up ${options.userIds.length} user(s).`);
     }
   } catch (error) {
@@ -78,4 +100,4 @@ export const cleanupDb = async (options: CleanupOptions) => {
   } finally {
     client.release();
   }
 };
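Every branch above follows the same pattern: one parameterized `DELETE ... WHERE id = ANY($1::int[])` round trip per entity, guarded by `?.filter(Boolean).length` so empty or sparse ID lists are skipped. A minimal standalone sketch of that pattern, assuming node-postgres (`pg`); the `test_rows` table name is invented for illustration:

import { Pool } from 'pg';

const pool = new Pool(); // connection settings come from the usual PG* environment variables

// Delete many rows in one query; the ::int[] cast tells Postgres the array
// element type, and parameterization keeps IDs out of the SQL string.
export async function deleteByIds(ids: (number | undefined | null)[]): Promise<number> {
  const validIds = ids.filter(Boolean); // mirror the guard used in cleanupDb
  if (validIds.length === 0) return 0;

  const client = await pool.connect();
  try {
    const result = await client.query('DELETE FROM public.test_rows WHERE id = ANY($1::int[])', [
      validIds,
    ]);
    return result.rowCount ?? 0;
  } finally {
    client.release(); // always release, even if the query throws
  }
}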
@@ -27,4 +27,4 @@ export const cleanupFiles = async (filePaths: (string | undefined | null)[]) =>
   });

   await Promise.allSettled(cleanupPromises);
 };
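`Promise.allSettled` is the deliberate choice here: one failed deletion should not abort the rest of the cleanup. A sketch of the shape such a helper might take (renamed to avoid implying this is the repository's exact implementation), assuming `node:fs/promises`:

import { unlink } from 'node:fs/promises';

// Best-effort removal: attempt every path, report failures, never throw.
export async function removeFilesBestEffort(filePaths: (string | undefined | null)[]): Promise<void> {
  const cleanupPromises = filePaths
    .filter((p): p is string => Boolean(p))
    .map((p) => unlink(p));

  const results = await Promise.allSettled(cleanupPromises);
  for (const r of results) {
    if (r.status === 'rejected') console.warn('File cleanup failed:', r.reason);
  }
}

Had this used `Promise.all`, the first rejected `unlink` would reject the whole batch and skip inspecting the remaining results.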
@@ -6,4 +6,4 @@ export const createMockRequest = (overrides: Partial<Request> = {}): Request =>
     log: mockLogger,
     ...overrides,
   } as unknown as Request;
 };
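The factory above merges per-test overrides into a default mock request, so each test specifies only the fields it cares about. A self-contained sketch of the pattern, assuming Vitest and Express types (the logger shape here is an assumption; the real `mockLogger` may have more methods):

import type { Request } from 'express';
import { vi } from 'vitest';

// Assumed minimal logger stub.
const mockLogger = { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() };

// Defaults first, overrides last: the spread lets a test replace any field.
export const createMockRequest = (overrides: Partial<Request> = {}): Request => {
  return {
    log: mockLogger,
    ...overrides,
  } as unknown as Request;
};

// Usage: a handler test that only needs route params.
const req = createMockRequest({ params: { id: '42' } });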
@@ -33,4 +33,4 @@ export async function poll<T>(
   }

   throw new Error(`Polling timed out for ${description} after ${timeout}ms.`);
 }
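Only the timeout tail of `poll<T>` is visible in this hunk. A generic polling helper with that tail might look like the following sketch; the parameter list, defaults, and the "undefined means not ready" convention are assumptions, while the thrown message matches the line above:

// Repeatedly evaluate `check` until it yields a value or the deadline passes.
export async function poll<T>(
  check: () => Promise<T | undefined>,
  description: string,
  timeout = 10_000,
  interval = 250,
): Promise<T> {
  const deadline = Date.now() + timeout;
  while (Date.now() < deadline) {
    const value = await check();
    if (value !== undefined) return value;
    await new Promise((resolve) => setTimeout(resolve, interval));
  }

  throw new Error(`Polling timed out for ${description} after ${timeout}ms.`);
}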
@@ -28,4 +28,4 @@ export const AiFlyerDataSchema = z.object({
   valid_to: z.string().nullable(),
   store_address: z.string().nullable(),
   items: z.array(ExtractedFlyerItemSchema),
 });
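`AiFlyerDataSchema` validates the AI extraction payload, using `.nullable()` for fields the model may legitimately fail to find. A reduced sketch of how such a schema is consumed; the item shape shown is an abbreviation, not the project's real `ExtractedFlyerItemSchema`:

import { z } from 'zod';

// Abbreviated stand-in for the real item schema.
const ExtractedFlyerItemSchema = z.object({
  name: z.string(),
  price_in_cents: z.number().nullable(),
});

const AiFlyerDataSchema = z.object({
  valid_to: z.string().nullable(),
  store_address: z.string().nullable(),
  items: z.array(ExtractedFlyerItemSchema),
});

// safeParse returns a discriminated union instead of throwing on bad input,
// which suits untrusted model output.
const parsed = AiFlyerDataSchema.safeParse({ valid_to: null, store_address: null, items: [] });
if (parsed.success) {
  console.log(`Extracted ${parsed.data.items.length} item(s)`);
}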
2  src/types/exif-parser.d.ts  vendored
@@ -105,4 +105,4 @@ declare module 'exif-parser' {
   }

   export default ExifParser;
 }
5  src/types/pdf-poppler.d.ts  vendored
@@ -105,7 +105,10 @@ declare module 'pdf-poppler' {
   export class Poppler {
     constructor(binPath?: string);
     pdfToCairo(file: string, outputFilePrefix?: string, options?: PopplerOptions): Promise<string>;
-    pdfInfo(file: string, options?: { ownerPassword?: string; userPassword?: string }): Promise<PdfInfo>;
+    pdfInfo(
+      file: string,
+      options?: { ownerPassword?: string; userPassword?: string },
+    ): Promise<PdfInfo>;
     pdfToPs(file: string, outputFile: string, options?: any): Promise<string>;
     pdfToText(file: string, outputFile: string, options?: any): Promise<string>;
   }
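The reflowed `pdfInfo` signature above is cosmetic; the vendored declaration works the same either way. For context, an ambient `declare module` block is how these `.d.ts` files give an untyped npm package a typed surface. A minimal, hypothetical example (`some-untyped-lib` is invented for illustration):

// src/types/some-untyped-lib.d.ts
// TypeScript consults this ambient declaration when the package ships no types.
declare module 'some-untyped-lib' {
  export interface RunOptions {
    verbose?: boolean;
  }
  export function run(input: string, options?: RunOptions): Promise<string>;
}

As long as the file is matched by the project's `include` globs, `import { run } from 'some-untyped-lib'` type-checks with no suppression comments.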
@@ -64,9 +64,7 @@ describe('validatePasswordStrength', () => {

   it('should return invalid for a medium password (score 2)', () => {
     // Arrange: Mock zxcvbn to return a score of 2
-    vi.mocked(zxcvbn).mockReturnValue(
-      createMockZxcvbnResult(2, ['Add another symbol or number']),
-    );
+    vi.mocked(zxcvbn).mockReturnValue(createMockZxcvbnResult(2, ['Add another symbol or number']));

     // Act
     const result = validatePasswordStrength('Password123');
@@ -99,4 +97,4 @@ describe('validatePasswordStrength', () => {
     expect(result.isValid).toBe(true);
     expect(result.feedback).toBe('');
   });
 });
@@ -17,4 +17,4 @@ export function validatePasswordStrength(password: string): {
     return { isValid: false, feedback: `Password is too weak. ${suggestions}` };
   }
   return { isValid: true, feedback: '' };
 }
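The validator wraps zxcvbn and flattens its score and suggestions into the `{ isValid, feedback }` shape the tests assert on. A minimal sketch, assuming the `zxcvbn` package and a passing threshold of 3 (the tests above only establish that a score of 2 fails, so the exact cutoff is an assumption):

import zxcvbn from 'zxcvbn';

export function validatePasswordStrength(password: string): {
  isValid: boolean;
  feedback: string;
} {
  // zxcvbn scores passwords 0-4 and returns human-readable suggestions.
  const result = zxcvbn(password);
  if (result.score < 3) {
    const suggestions = result.feedback.suggestions.join(' ');
    return { isValid: false, feedback: `Password is too weak. ${suggestions}` };
  }
  return { isValid: true, feedback: '' };
}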
@@ -175,9 +175,7 @@ describe('dateUtils', () => {

   it('should handle dates with time components correctly', () => {
     // parseISO should handle the time component and formatShortDate should strip it
-    expect(formatDateRange('2023-01-01T10:00:00', '2023-01-05T15:30:00')).toBe(
-      'Jan 1 - Jan 5',
-    );
+    expect(formatDateRange('2023-01-01T10:00:00', '2023-01-05T15:30:00')).toBe('Jan 1 - Jan 5');
   });

   describe('verbose mode', () => {
@@ -11,7 +11,10 @@ import { parseISO, format, isValid, differenceInDays } from 'date-fns';
  * @param date The date to calculate the simple week for. Defaults to the current date.
  * @returns An object containing the year and week number.
  */
-export function calculateSimpleWeekAndYear(date: Date = new Date()): { year: number; week: number } {
+export function calculateSimpleWeekAndYear(date: Date = new Date()): {
+  year: number;
+  week: number;
+} {
   const year = date.getFullYear();
   // Use UTC dates to calculate the difference in days.
   // This avoids issues with Daylight Saving Time (DST) where a day might have 23 or 25 hours,
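The UTC comment is the load-bearing detail: normalizing both endpoints to UTC midnight makes every day exactly 24 hours, so the day difference (and therefore the week number) cannot drift across DST transitions. A sketch of the full calculation under that approach; only the UTC normalization is taken from the code above, and the 7-day bucketing is an assumption:

import { differenceInDays } from 'date-fns';

export function calculateSimpleWeekAndYear(date: Date = new Date()): {
  year: number;
  week: number;
} {
  const year = date.getFullYear();
  // UTC midnight on both sides: no 23- or 25-hour days to skew the count.
  const startOfYearUtc = new Date(Date.UTC(year, 0, 1));
  const dateUtc = new Date(Date.UTC(year, date.getMonth(), date.getDate()));
  const dayOfYear = differenceInDays(dateUtc, startOfYearUtc); // 0 on Jan 1
  const week = Math.floor(dayOfYear / 7) + 1; // "simple" week: 1-based, fixed 7-day buckets
  return { year, week };
}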
@@ -94,4 +94,4 @@ describe('fileUtils', () => {
       expect(mockedFs.unlink).not.toHaveBeenCalled();
     });
   });
 });
@@ -23,4 +23,4 @@ export const cleanupUploadedFiles = async (files?: Express.Multer.File[]) => {
   if (!files || !Array.isArray(files)) return;
   // Use Promise.all to run cleanups in parallel for efficiency.
   await Promise.all(files.map((file) => cleanupUploadedFile(file)));
 };
@@ -30,4 +30,4 @@ describe('formatCurrency', () => {
   it('should handle negative cents correctly', () => {
     expect(formatCurrency(-500)).toBe('-$5.00');
   });
 });
@@ -10,5 +10,7 @@
 export const formatCurrency = (amountInCents: number | null | undefined): string => {
   if (amountInCents === null || amountInCents === undefined) return 'N/A';

-  return new Intl.NumberFormat('en-US', { style: 'currency', currency: 'USD' }).format(amountInCents / 100);
+  return new Intl.NumberFormat('en-US', { style: 'currency', currency: 'USD' }).format(
+    amountInCents / 100,
+  );
 };
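Storing money as integer cents and dividing by 100 only at the formatting boundary sidesteps floating-point accumulation; `Intl.NumberFormat` then handles the symbol, grouping, and sign, which is exactly what the `-$5.00` assertion above relies on. A small usage sketch:

const usd = new Intl.NumberFormat('en-US', { style: 'currency', currency: 'USD' });

console.log(usd.format(123456 / 100)); // "$1,234.56"
console.log(usd.format(-500 / 100));   // "-$5.00" -- matches the negative-cents test
console.log(usd.format(0));            // "$0.00"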
@@ -102,7 +102,11 @@ describe('generateFlyerIcon', () => {
       generateFlyerIcon('/path/to/bad-image.jpg', '/path/to/icons', logger), // This was a duplicate, fixed.
     ).rejects.toThrow('Failed to generate icon for /path/to/bad-image.jpg.');
     expect(logger.error).toHaveBeenCalledWith(
-      { err: sharpError, sourcePath: '/path/to/bad-image.jpg', outputPath: '/path/to/icons/icon-bad-image.webp' },
+      {
+        err: sharpError,
+        sourcePath: '/path/to/bad-image.jpg',
+        outputPath: '/path/to/icons/icon-bad-image.webp',
+      },
       'An error occurred during icon generation.',
     );
   });
@@ -132,7 +136,9 @@ describe('processAndSaveImage', () => {
     expect(mocks.withMetadata).toHaveBeenCalledWith({});
     expect(mocks.jpeg).toHaveBeenCalledWith({ quality: 85, mozjpeg: true });
     expect(mocks.png).toHaveBeenCalledWith({ compressionLevel: 8, quality: 85 });
-    expect(mocks.toFile).toHaveBeenCalledWith(expect.stringContaining(path.join(destinationDir, 'original-')));
+    expect(mocks.toFile).toHaveBeenCalledWith(
+      expect.stringContaining(path.join(destinationDir, 'original-')),
+    );

     // Check the returned filename format (original-timestamp.jpg)
     expect(result).toMatch(/^original-\d+\.jpg$/);
@@ -142,9 +148,9 @@ describe('processAndSaveImage', () => {
     const sharpError = new Error('Processing failed');
     mocks.toFile.mockRejectedValueOnce(sharpError);

-    await expect(
-      processAndSaveImage('/path/img.jpg', '/dest', 'img.jpg', logger),
-    ).rejects.toThrow('Failed to process image img.jpg.');
+    await expect(processAndSaveImage('/path/img.jpg', '/dest', 'img.jpg', logger)).rejects.toThrow(
+      'Failed to process image img.jpg.',
+    );

     expect(logger.error).toHaveBeenCalledWith(
       expect.objectContaining({ err: sharpError, sourcePath: '/path/img.jpg' }),
@@ -31,7 +31,10 @@ export async function processAndSaveImage(
   // Ensure the destination directory exists.
   await fs.mkdir(destinationDir, { recursive: true });

-  logger.debug({ sourcePath, outputPath }, 'Starting image processing: stripping metadata and optimizing.');
+  logger.debug(
+    { sourcePath, outputPath },
+    'Starting image processing: stripping metadata and optimizing.',
+  );

   // Use sharp to process the image.
   // .withMetadata({}) strips all EXIF and other metadata.
@@ -95,4 +98,4 @@ export async function generateFlyerIcon(
     // Re-throw the error to be handled by the calling service.
     throw new Error(`Failed to generate icon for ${sourcePath}.`);
   }
 }
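`generateFlyerIcon` logs the sharp failure with full path context, then re-throws a stable domain-level message, which is what the test above asserts. A minimal sketch of an icon pipeline in that style, assuming sharp; the 64px size and quality are assumptions, while the `icon-<name>.webp` naming and log/error messages follow the test:

import path from 'node:path';
import sharp from 'sharp';

// Produce a small webp icon; hide sharp behind a domain-level error.
export async function generateIconSketch(sourcePath: string, outputDir: string): Promise<string> {
  const base = path.parse(sourcePath).name;
  const outputPath = path.join(outputDir, `icon-${base}.webp`);
  try {
    await sharp(sourcePath)
      .resize(64, 64, { fit: 'cover' }) // assumed icon dimensions
      .webp({ quality: 80 })
      .toFile(outputPath);
    return outputPath;
  } catch (err) {
    // Log with context first, then re-throw a stable message for callers/tests.
    console.error({ err, sourcePath, outputPath }, 'An error occurred during icon generation.');
    throw new Error(`Failed to generate icon for ${sourcePath}.`);
  }
}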
@@ -24,7 +24,9 @@ const mockPdfDocument = {
   numPages: 3,
   // Explicitly type the mock function to accept a number and return the correct promise type.
   // This resolves the TypeScript error when using mockImplementation with arguments later.
-  getPage: vi.fn<(pageNumber: number) => Promise<typeof mockPdfPage>>(() => Promise.resolve(mockPdfPage)),
+  getPage: vi.fn<(pageNumber: number) => Promise<typeof mockPdfPage>>(() =>
+    Promise.resolve(mockPdfPage),
+  ),
 };

 vi.mock('pdfjs-dist', () => ({
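Passing an explicit function type to `vi.fn` is what lets a later `mockImplementation` accept a `(pageNumber: number)` argument without a TypeScript error. A reduced sketch of the same pattern, assuming Vitest; the `PdfPage` type is invented for illustration:

import { vi } from 'vitest';

type PdfPage = { pageNumber: number };

// The generic pins the mock's call signature, so mockImplementation calls
// are type-checked against (pageNumber: number) => Promise<PdfPage>.
const getPage = vi.fn<(pageNumber: number) => Promise<PdfPage>>(() =>
  Promise.resolve({ pageNumber: 1 }),
);

// This compiles because the argument type is known to the mock.
getPage.mockImplementation((pageNumber) => Promise.resolve({ pageNumber }));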
@@ -117,4 +117,4 @@ describe('serverUtils', () => {
       );
     });
   });
 });
@@ -117,7 +117,9 @@ describe('Zod Utilities', () => {
     const result = schema.safeParse({ params: { id: -1 } });
     expect(result.success).toBe(false);
     if (!result.success) {
-      expect(result.error.issues[0].message).toBe("Invalid ID for parameter 'id'. Must be a number.");
+      expect(result.error.issues[0].message).toBe(
+        "Invalid ID for parameter 'id'. Must be a number.",
+      );
     }
   });

@@ -125,7 +127,9 @@ describe('Zod Utilities', () => {
     const result = schema.safeParse({ params: { id: 1.5 } });
     expect(result.success).toBe(false);
     if (!result.success) {
-      expect(result.error.issues[0].message).toBe("Invalid ID for parameter 'id'. Must be a number.");
+      expect(result.error.issues[0].message).toBe(
+        "Invalid ID for parameter 'id'. Must be a number.",
+      );
     }
   });

@@ -133,7 +137,9 @@ describe('Zod Utilities', () => {
     const result = schema.safeParse({ params: { id: 0 } });
     expect(result.success).toBe(false);
     if (!result.success) {
-      expect(result.error.issues[0].message).toBe("Invalid ID for parameter 'id'. Must be a number.");
+      expect(result.error.issues[0].message).toBe(
+        "Invalid ID for parameter 'id'. Must be a number.",
+      );
     }
   });

@@ -237,7 +243,10 @@ describe('Zod Utilities', () => {
     expect(schema.safeParse('123').success).toBe(true);
     const floatResult = schema.safeParse('123.45');
     expect(floatResult.success).toBe(false);
-    if (!floatResult.success) expect(floatResult.error.issues[0].message).toBe('Invalid input: expected int, received number');
+    if (!floatResult.success)
+      expect(floatResult.error.issues[0].message).toBe(
+        'Invalid input: expected int, received number',
+      );
   });

   it('should enforce positive constraint', () => {
@@ -266,7 +275,9 @@ describe('Zod Utilities', () => {
     const tooSmallResult = schema.safeParse('9');
     expect(tooSmallResult.success).toBe(false);
     if (!tooSmallResult.success) {
-      expect(tooSmallResult.error.issues[0].message).toBe('Too small: expected number to be >=10');
+      expect(tooSmallResult.error.issues[0].message).toBe(
+        'Too small: expected number to be >=10',
+      );
     }
     const tooLargeResult = schema.safeParse('21');
     expect(tooLargeResult.success).toBe(false);
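The messages asserted in the later tests ("Invalid input: expected int, received number", "Too small: expected number to be >=10") are Zod's defaults for a coerced, constrained number schema. A sketch of schemas that would produce them; the message wording assumes a recent Zod version, as the tests do:

import { z } from 'zod';

// Route params arrive as strings; z.coerce turns '123' into 123 before validating.
const positiveIntParam = z.coerce.number().int().positive();

console.log(positiveIntParam.safeParse('123').success); // true
const float = positiveIntParam.safeParse('123.45');
if (!float.success) {
  // Default message: "Invalid input: expected int, received number"
  console.log(float.error.issues[0].message);
}

// Range constraints compose the same way.
const bounded = z.coerce.number().min(10).max(20);
console.log(bounded.safeParse('9').success); // false -- "Too small: expected number to be >=10"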