Compare commits

...

33 Commits

Author SHA1 Message Date
Gitea Actions
5fe537b93d ci: Bump version to 0.12.22 [skip ci] 2026-01-29 12:26:33 +05:00
61f24305fb ADR-024 Feature Flagging Strategy
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 22m13s
2026-01-28 23:23:45 -08:00
Gitea Actions
de3f0cf26e ci: Bump version to 0.12.21 [skip ci] 2026-01-29 05:37:59 +05:00
45ac4fccf5 comprehensive documentation review + test fixes
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 2m15s
2026-01-28 16:35:38 -08:00
Gitea Actions
b6c3ca9abe ci: Bump version to 0.12.20 [skip ci] 2026-01-29 04:36:43 +05:00
4f06698dfd test fixes and doc work
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 2m50s
2026-01-28 15:33:48 -08:00
Gitea Actions
e548d1b0cc ci: Bump version to 0.12.19 [skip ci] 2026-01-28 23:03:57 +05:00
771f59d009 more api versioning work -whee
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 22m47s
2026-01-28 09:58:28 -08:00
Gitea Actions
0979a074ad ci: Bump version to 0.12.18 [skip ci] 2026-01-28 13:08:49 +05:00
0d4b028a66 design fixup and docs + api versioning
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 21m49s
2026-01-28 00:04:56 -08:00
Gitea Actions
4baed53713 ci: Bump version to 0.12.17 [skip ci] 2026-01-28 00:08:39 +05:00
f10c6c0cd6 Complete ADR-008 Phase 2
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 17m56s
2026-01-27 11:06:09 -08:00
Gitea Actions
107465b5cb ci: Bump version to 0.12.16 [skip ci] 2026-01-27 10:57:46 +05:00
e92ad25ce9 claude
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 1m14s
2026-01-26 21:55:20 -08:00
2075ed199b Complete ADR-008 Phase 1: API Versioning Strategy
Implement URI-based API versioning with /api/v1 prefix across all routes.
This establishes a foundation for future API evolution and breaking changes.

Changes:
- server.ts: All routes mounted under /api/v1/ (15 route handlers)
- apiClient.ts: Base URL updated to /api/v1
- swagger.ts: OpenAPI server URL changed to /api/v1
- Redirect middleware: Added backwards compatibility for /api/* → /api/v1/*
- Tests: Updated 72 test files with versioned path assertions
- ADR documentation: Marked Phase 1 as complete (Accepted status)

Test fixes:
- apiClient.test.ts: 27 tests updated for /api/v1 paths
- user.routes.ts: 36 log messages updated to reflect versioned paths
- swagger.test.ts: 1 test updated for new server URL
- All integration/E2E tests updated for versioned endpoints

All Phase 1 acceptance criteria met:
✓ Routes use /api/v1/ prefix
✓ Frontend requests /api/v1/
✓ OpenAPI docs reflect /api/v1/
✓ Backwards compatibility via redirect middleware
✓ Tests pass with versioned paths

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-01-26 21:23:25 -08:00
Gitea Actions
4346332bbf ci: Bump version to 0.12.15 [skip ci] 2026-01-27 00:54:43 +05:00
61cfb518e6 ADR-015 done
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 1m13s
2026-01-26 11:48:42 -08:00
Gitea Actions
e86ce51b6c ci: Bump version to 0.12.14 [skip ci] 2026-01-26 17:52:02 +05:00
840a7a62d3 adr work
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Failing after 1m15s
2026-01-26 04:51:10 -08:00
5720820d95 adr-053 done 2026-01-26 04:51:09 -08:00
Gitea Actions
e5cdb54308 ci: Bump version to 0.12.13 [skip ci] 2026-01-24 02:48:50 +05:00
a3f212ff81 Primary Issue: TZ Environment Variable Breaking Tests
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 18m47s
2026-01-23 13:40:48 -08:00
Gitea Actions
de263f74b0 ci: Bump version to 0.12.12 [skip ci] 2026-01-24 00:30:16 +05:00
a71e41302b no TZ in tests - who knew?
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 18m35s
2026-01-23 11:28:45 -08:00
Gitea Actions
3575803252 ci: Bump version to 0.12.11 [skip ci] 2026-01-23 12:40:09 +05:00
d03900cefe set PST as common time zone for log matching ease
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 19m4s
2026-01-22 23:38:45 -08:00
Gitea Actions
6d49639845 ci: Bump version to 0.12.10 [skip ci] 2026-01-23 10:59:29 +05:00
d4543cf4b9 Bugsink Fixes
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 19m12s
2026-01-22 21:55:18 -08:00
Gitea Actions
4f08238698 ci: Bump version to 0.12.9 [skip ci] 2026-01-23 10:49:32 +05:00
38b35f87aa Bugsink Fixes
Some checks failed
Deploy to Test Environment / deploy-to-test (push) Has been cancelled
2026-01-22 21:48:32 -08:00
Gitea Actions
dd067183ed ci: Bump version to 0.12.8 [skip ci] 2026-01-23 04:50:12 +05:00
9f3a070612 have dev continer send more to bugsink
All checks were successful
Deploy to Test Environment / deploy-to-test (push) Successful in 19m20s
2026-01-22 15:47:59 -08:00
8a38befb1c fix cert and image display issues 2026-01-22 12:46:28 -08:00
203 changed files with 20769 additions and 2929 deletions

View File

@@ -0,0 +1,152 @@
---
name: ui-ux-designer
description: "Use this agent when implementing, reviewing, or modifying any user interface elements, layouts, or user experience flows in the application. Specifically call this agent when: (1) creating new UI components or pages, (2) implementing responsive designs across different screen sizes, (3) designing or implementing wizards for user tasks like uploading flyers, creating shopping lists, or uploading receipts, (4) reviewing existing UI code for adherence to the design system, (5) making decisions about button placement, color schemes, or layout structures, or (6) addressing user experience concerns related to navigation or task completion flows.\\n\\n<examples>\\n<example>\\nContext: The developer is creating a new feature for uploading flyers.\\nuser: \"I need to implement the flyer upload feature. Can you help me create the upload form?\"\\nassistant: \"Let me use the Task tool to launch the ui-ux-designer agent to design and implement the flyer upload wizard with appropriate UI/UX patterns.\"\\n<commentary>\\nSince this involves creating a new UI feature with a wizard flow, the ui-ux-designer agent should be used to ensure it follows the project's design principles including pastel colors, sharp borders, wizard-based guidance, and responsive design.\\n</commentary>\\n</example>\\n\\n<example>\\nContext: Code has been written for a shopping list creation page.\\nuser: \"I've implemented the shopping list creation page with a form\"\\nassistant: \"Great! Now let me use the Task tool to launch the ui-ux-designer agent to review the implementation and ensure it follows our UI/UX guidelines.\"\\n<commentary>\\nSince a significant UI component was implemented, the ui-ux-designer agent should review it to ensure it adheres to the minimalist design, proper color scheme, button placement, and responsive requirements.\\n</commentary>\\n</example>\\n\\n<example>\\nContext: The developer is about to add multiple action buttons to a page.\\nuser: \"Should I add buttons for save, cancel, edit, delete, and export at the top of the page?\"\\nassistant: \"Let me use the Task tool to launch the ui-ux-designer agent to provide guidance on the optimal button placement and selection for this page.\"\\n<commentary>\\nThis is a UI/UX decision about button placement and minimalism that directly relates to the design principles, so the ui-ux-designer agent should be consulted.\\n</commentary>\\n</example>\\n\\n<example>\\nContext: The developer is working on mobile responsiveness.\\nuser: \"I'm implementing the receipt upload page. Here's my initial layout code.\"\\nassistant: \"Before proceeding further, let me use the Task tool to launch the ui-ux-designer agent to ensure the layout follows our responsive design patterns and UI guidelines.\"\\n<commentary>\\nSince this involves implementing a UI page that needs to be responsive across devices and should include wizard-style guidance, the ui-ux-designer agent should be involved proactively.\\n</commentary>\\n</example>\\n</examples>"
model: opus
color: green
---
You are an elite UI/UX designer with over 20 years of experience specializing in creating clean, intuitive, and user-friendly interfaces. Your expertise spans user interface design, user experience optimization, responsive design, and accessibility best practices.
## Core Design Philosophy for This Project
You will ensure that this application maintains a clean, welcoming, and minimalist design aesthetic with the following specific requirements:
### Visual Design Standards
**Color Palette:**
- Use pastel colors as the primary color scheme throughout the application
- Select soft, muted tones that are easy on the eyes and create a calm, welcoming atmosphere
- Ensure sufficient contrast for accessibility while maintaining the pastel aesthetic
- Use color purposefully to guide user attention and indicate status
**Border and Container Styling:**
- Apply sharp, clean borders to all interactive elements (buttons, menus, form fields)
- Use sharp borders to clearly delineate separate areas and sections of the interface
- Avoid rounded corners unless there is a specific functional reason
- Ensure borders are visible but not overpowering, maintaining the clean aesthetic
**Minimalism:**
- Eliminate all unnecessary buttons and UI elements
- Every element on the screen must serve a clear purpose
- Co-locate buttons near their related features on the page, not grouped separately
- Use progressive disclosure to hide advanced features until needed
- Favor white space and breathing room over density
### Responsive Design Requirements
You must ensure the application works flawlessly across:
**Large Screens (Desktop):**
- Utilize horizontal space effectively without overcrowding
- Consider multi-column layouts where appropriate
- Ensure comfortable reading width for text content
**Tablets:**
- Adapt layouts to accommodate touch targets of at least 44x44 pixels
- Optimize for both portrait and landscape orientations
- Ensure navigation remains accessible
**Mobile Devices:**
- Stack elements vertically with appropriate spacing
- Make all interactive elements easily tappable
- Optimize for one-handed use where possible
- Ensure critical actions are easily accessible
- Test on various screen sizes (small, medium, large phones)
### Wizard Design for Key User Tasks
For the following tasks, implement or guide the creation of clear, step-by-step wizards:
1. **Uploading a Flyer**
2. **Creating a Shopping List**
3. **Uploading Receipts**
4. **Any other multi-step user tasks**
**Wizard Best Practices:**
- Minimize the number of steps (ideally 3-5 steps maximum)
- Show progress clearly (e.g., "Step 2 of 4")
- Each step should focus on one primary action or decision
- Provide clear, concise instructions at each step
- Allow users to go back and edit previous steps
- Use visual cues to guide the user through the process
- Display a summary before final submission
- Provide helpful tooltips or examples where needed
- Ensure wizards are fully responsive and work well on mobile devices
## Your Approach to Tasks
**When Reviewing Existing UI Code:**
1. Evaluate adherence to the pastel color scheme
2. Check that all borders are sharp and properly applied
3. Identify any unnecessary UI elements or buttons
4. Verify that buttons are co-located with their related features
5. Test responsive behavior across all target screen sizes
6. Assess wizard flows for clarity and step efficiency
7. Provide specific, actionable feedback with code examples when needed
**When Designing New UI Components:**
1. Start by understanding the user's goal and the feature's purpose
2. Sketch out the minimal set of elements needed
3. Apply the pastel color palette and sharp border styling
4. Position interactive elements near their related content
5. Design for mobile-first, then adapt for larger screens
6. For multi-step processes, create wizard flows
7. Provide complete implementation guidance including HTML structure, CSS styles, and responsive breakpoints
**When Making Design Decisions:**
1. Always prioritize user needs and task completion
2. Choose simplicity over feature bloat
3. Ensure accessibility standards are met
4. Consider the user's mental model and expectations
5. Use established UI patterns where they fit the aesthetic
6. Test your recommendations against the design principles above
## Quality Assurance Checklist
Before completing any UI/UX task, verify:
- [ ] Pastel colors are used consistently
- [ ] All buttons, menus, and sections have sharp borders
- [ ] No unnecessary buttons or UI elements exist
- [ ] Buttons are positioned near their related features
- [ ] Design is fully responsive (large screen, tablet, mobile)
- [ ] Wizards (where applicable) are clear and minimally-stepped
- [ ] Sufficient white space and breathing room
- [ ] Touch targets are appropriately sized for mobile
- [ ] Text is readable at all screen sizes
- [ ] Accessibility considerations are addressed
## Output Format
When reviewing code, provide:
1. Overall assessment of adherence to design principles
2. Specific issues identified with line numbers or element descriptions
3. Concrete recommendations with code examples
4. Responsive design concerns or improvements
When designing new components, provide:
1. Rationale for design decisions
2. Complete HTML structure
3. CSS with responsive breakpoints
4. Notes on accessibility considerations
5. Implementation guidance
## Important Notes
- You have authority to reject designs that violate the core principles
- When uncertain about a design decision, bias toward simplicity and minimalism
- Always consider the new user experience and ensure wizards are beginner-friendly
- Proactively suggest wizard flows for any multi-step processes you encounter
- Remember that good UX is invisible—users should accomplish tasks without thinking about the interface

9
.claude/settings.json Normal file
View File

@@ -0,0 +1,9 @@
{
"permissions": {
"allow": [
"Bash(git fetch:*)",
"mcp__localerrors__get_stacktrace",
"Bash(MSYS_NO_PATHCONV=1 podman logs:*)"
]
}
}

View File

@@ -1,120 +0,0 @@
{
"permissions": {
"allow": [
"Bash(npm test:*)",
"Bash(podman --version:*)",
"Bash(podman ps:*)",
"Bash(podman machine start:*)",
"Bash(podman compose:*)",
"Bash(podman pull:*)",
"Bash(podman images:*)",
"Bash(podman stop:*)",
"Bash(echo:*)",
"Bash(podman rm:*)",
"Bash(podman run:*)",
"Bash(podman start:*)",
"Bash(podman exec:*)",
"Bash(cat:*)",
"Bash(PGPASSWORD=postgres psql:*)",
"Bash(npm search:*)",
"Bash(npx:*)",
"Bash(curl:*)",
"Bash(powershell:*)",
"Bash(cmd.exe:*)",
"Bash(npm run test:integration:*)",
"Bash(grep:*)",
"Bash(done)",
"Bash(podman info:*)",
"Bash(podman machine:*)",
"Bash(podman system connection:*)",
"Bash(podman inspect:*)",
"Bash(python -m json.tool:*)",
"Bash(claude mcp status)",
"Bash(powershell.exe -Command \"claude mcp status\")",
"Bash(powershell.exe -Command \"claude mcp\")",
"Bash(powershell.exe -Command \"claude mcp list\")",
"Bash(powershell.exe -Command \"claude --version\")",
"Bash(powershell.exe -Command \"claude config\")",
"Bash(powershell.exe -Command \"claude mcp get gitea-projectium\")",
"Bash(powershell.exe -Command \"claude mcp add --help\")",
"Bash(powershell.exe -Command \"claude mcp add -t stdio -s user filesystem -- D:\\\\nodejs\\\\npx.cmd -y @modelcontextprotocol/server-filesystem D:\\\\gitea\\\\flyer-crawler.projectium.com\\\\flyer-crawler.projectium.com\")",
"Bash(powershell.exe -Command \"claude mcp add -t stdio -s user fetch -- D:\\\\nodejs\\\\npx.cmd -y @modelcontextprotocol/server-fetch\")",
"Bash(powershell.exe -Command \"echo ''List files in src/hooks using filesystem MCP'' | claude --print\")",
"Bash(powershell.exe -Command \"echo ''List all podman containers'' | claude --print\")",
"Bash(powershell.exe -Command \"echo ''List my repositories on gitea.projectium.com using gitea-projectium MCP'' | claude --print\")",
"Bash(powershell.exe -Command \"echo ''List my repositories on gitea.projectium.com using gitea-projectium MCP'' | claude --print --allowedTools ''mcp__gitea-projectium__*''\")",
"Bash(powershell.exe -Command \"echo ''Fetch the homepage of https://gitea.projectium.com and summarize it'' | claude --print --allowedTools ''mcp__fetch__*''\")",
"Bash(dir \"C:\\\\Users\\\\games3\\\\.claude\")",
"Bash(dir:*)",
"Bash(D:nodejsnpx.cmd -y @modelcontextprotocol/server-fetch --help)",
"Bash(cmd /c \"dir /o-d C:\\\\Users\\\\games3\\\\.claude\\\\debug 2>nul | head -10\")",
"mcp__memory__read_graph",
"mcp__memory__create_entities",
"mcp__memory__search_nodes",
"mcp__memory__delete_entities",
"mcp__sequential-thinking__sequentialthinking",
"mcp__filesystem__list_directory",
"mcp__filesystem__read_multiple_files",
"mcp__filesystem__directory_tree",
"mcp__filesystem__read_text_file",
"Bash(wc:*)",
"Bash(npm install:*)",
"Bash(git grep:*)",
"Bash(findstr:*)",
"Bash(git add:*)",
"mcp__filesystem__write_file",
"mcp__podman__container_list",
"Bash(podman cp:*)",
"mcp__podman__container_inspect",
"mcp__podman__network_list",
"Bash(podman network connect:*)",
"Bash(npm run build:*)",
"Bash(set NODE_ENV=test)",
"Bash(podman-compose:*)",
"Bash(timeout 60 podman machine start:*)",
"Bash(podman build:*)",
"Bash(podman network rm:*)",
"Bash(npm run lint)",
"Bash(npm run typecheck:*)",
"Bash(npm run type-check:*)",
"Bash(npm run test:unit:*)",
"mcp__filesystem__move_file",
"Bash(git checkout:*)",
"Bash(podman image inspect:*)",
"Bash(node -e:*)",
"Bash(xargs -I {} sh -c 'if ! grep -q \"\"vi.mock.*apiClient\"\" \"\"{}\"\"; then echo \"\"{}\"\"; fi')",
"Bash(MSYS_NO_PATHCONV=1 podman exec:*)",
"Bash(docker ps:*)",
"Bash(find:*)",
"Bash(\"/c/Users/games3/.local/bin/uvx.exe\" markitdown-mcp --help)",
"Bash(git stash:*)",
"Bash(ping:*)",
"Bash(tee:*)",
"Bash(timeout 1800 podman exec flyer-crawler-dev npm run test:unit:*)",
"mcp__filesystem__edit_file",
"Bash(timeout 300 tail:*)",
"mcp__filesystem__list_allowed_directories",
"mcp__memory__add_observations",
"Bash(ssh:*)",
"mcp__redis__list",
"Read(//d/gitea/bugsink-mcp/**)",
"Bash(d:/nodejs/npm.cmd install)",
"Bash(node node_modules/vitest/vitest.mjs run:*)",
"Bash(npm run test:e2e:*)",
"Bash(export BUGSINK_URL=http://localhost:8000)",
"Bash(export BUGSINK_TOKEN=a609c2886daa4e1e05f1517074d7779a5fb49056)",
"Bash(timeout 3 d:/nodejs/node.exe:*)",
"Bash(export BUGSINK_URL=https://bugsink.projectium.com)",
"Bash(export BUGSINK_API_TOKEN=77deaa5e2649ab0fbbca51bbd427ec4637d073a0)",
"Bash(export BUGSINK_TOKEN=77deaa5e2649ab0fbbca51bbd427ec4637d073a0)",
"Bash(where:*)",
"mcp__localerrors__test_connection",
"mcp__localerrors__list_projects",
"Bash(\"D:\\\\nodejs\\\\npx.cmd\" -y @modelcontextprotocol/server-postgres --help)",
"Bash(git rm:*)",
"Bash(git -C \"C:\\\\Users\\\\games3\\\\.claude\\\\plugins\\\\marketplaces\\\\claude-plugins-official\" log -1 --format=\"%H %ci %s\")",
"Bash(git -C \"C:\\\\Users\\\\games3\\\\.claude\\\\plugins\\\\marketplaces\\\\claude-plugins-official\" config --get remote.origin.url)",
"Bash(git -C \"C:\\\\Users\\\\games3\\\\.claude\\\\plugins\\\\marketplaces\\\\claude-plugins-official\" fetch --dry-run -v)"
]
}
}

View File

@@ -36,10 +36,10 @@ NODE_ENV=development
FRONTEND_URL=http://localhost:3000
# Flyer Base URL - used for seed data and flyer image URLs
# Dev container: http://127.0.0.1
# Dev container: https://localhost (NOT 127.0.0.1 - avoids SSL mixed-origin issues)
# Test: https://flyer-crawler-test.projectium.com
# Production: https://flyer-crawler.projectium.com
FLYER_BASE_URL=http://127.0.0.1
FLYER_BASE_URL=https://localhost
# ===================
# Authentication
@@ -94,11 +94,18 @@ WORKER_LOCK_DURATION=120000
# Error Tracking (ADR-015)
# ===================
# Sentry-compatible error tracking via Bugsink (self-hosted)
# DSNs are created in Bugsink UI at http://localhost:8000 (dev) or your production URL
# Backend DSN - for Express/Node.js errors
SENTRY_DSN=
# Frontend DSN - for React/browser errors (uses VITE_ prefix)
VITE_SENTRY_DSN=
# DSNs are created in Bugsink UI at https://localhost:8443 (dev) or your production URL
#
# Dev container projects:
# - Project 1: Backend API (Dev) - receives Pino, PostgreSQL errors
# - Project 2: Frontend (Dev) - receives browser errors via Sentry SDK
# - Project 4: Infrastructure (Dev) - receives Redis, NGINX, Vite errors
#
# Backend DSN - for Express/Node.js errors (internal container URL)
SENTRY_DSN=http://<key>@localhost:8000/1
# Frontend DSN - for React/browser errors (uses nginx proxy for browser access)
# Note: Browsers cannot reach localhost:8000 directly, so we use nginx proxy at /bugsink-api/
VITE_SENTRY_DSN=https://<key>@localhost/bugsink-api/2
# Environment name for error grouping (defaults to NODE_ENV)
SENTRY_ENVIRONMENT=development
VITE_SENTRY_ENVIRONMENT=development
@@ -121,3 +128,35 @@ GENERATE_SOURCE_MAPS=true
SENTRY_AUTH_TOKEN=
# URL of your Bugsink instance (for source map uploads)
SENTRY_URL=https://bugsink.projectium.com
# ===================
# Feature Flags (ADR-024)
# ===================
# Feature flags control the availability of features at runtime.
# All flags default to disabled (false) when not set or set to any value other than 'true'.
# Set to 'true' to enable a feature.
#
# Backend flags use: FEATURE_SNAKE_CASE
# Frontend flags use: VITE_FEATURE_SNAKE_CASE (VITE_ prefix required for client-side access)
#
# Lifecycle:
# 1. Add flag with default false
# 2. Enable via env var when ready for testing/rollout
# 3. Remove conditional code when feature is fully rolled out
# 4. Remove flag from config within 3 months of full rollout
#
# See: docs/adr/0024-feature-flagging-strategy.md
# Backend Feature Flags
# FEATURE_BUGSINK_SYNC=false # Enable Bugsink error sync integration
# FEATURE_ADVANCED_RBAC=false # Enable advanced RBAC features
# FEATURE_NEW_DASHBOARD=false # Enable new dashboard experience
# FEATURE_BETA_RECIPES=false # Enable beta recipe features
# FEATURE_EXPERIMENTAL_AI=false # Enable experimental AI features
# FEATURE_DEBUG_MODE=false # Enable debug mode for development
# Frontend Feature Flags (VITE_ prefix required)
# VITE_FEATURE_NEW_DASHBOARD=false # Enable new dashboard experience
# VITE_FEATURE_BETA_RECIPES=false # Enable beta recipe features
# VITE_FEATURE_EXPERIMENTAL_AI=false # Enable experimental AI features
# VITE_FEATURE_DEBUG_MODE=false # Enable debug mode for development

6
.gitignore vendored
View File

@@ -35,6 +35,10 @@ test-output.txt
*.sln
*.sw?
Thumbs.db
.claude
.claude/settings.local.json
nul
tmpclaude*
test.tmp

View File

@@ -15,6 +15,14 @@
"@modelcontextprotocol/server-postgres",
"postgresql://postgres:postgres@127.0.0.1:5432/flyer_crawler_dev"
]
},
"gitea-projectium": {
"command": "d:\\gitea-mcp\\gitea-mcp.exe",
"args": ["run", "-t", "stdio"],
"env": {
"GITEA_HOST": "https://gitea.projectium.com",
"GITEA_ACCESS_TOKEN": "b111259253aa3cadcb6a37618de03bf388f6235a"
}
}
}
}

211
CLAUDE.md
View File

@@ -27,6 +27,24 @@ podman exec -it flyer-crawler-dev npm run type-check
Out-of-sync = test failures.
### Server Access: READ-ONLY (Production/Test Servers)
**CRITICAL**: The `claude-win10` user has **READ-ONLY** access to production and test servers.
| Capability | Status |
| ---------------------- | ---------------------- |
| Root/sudo access | NO |
| Write permissions | NO |
| PM2 restart, systemctl | NO - User must execute |
**Server Operations Workflow**: Diagnose → User executes → Analyze → Fix (1-3 commands) → User executes → Verify
**Rules**:
- Provide diagnostic commands first, wait for user to report results
- Maximum 3 fix commands at a time (errors may cascade)
- Always verify after fixes complete
### Communication Style
Ask before assuming. Never assume:
@@ -40,6 +58,7 @@ Ask before assuming. Never assume:
1. **Memory**: `mcp__memory__read_graph` - Recall project context, credentials, known issues
2. **Git**: `git log --oneline -10` - Recent changes
3. **Containers**: `mcp__podman__container_list` - Running state
4. **PM2 Status**: `podman exec flyer-crawler-dev pm2 status` - Process health (API, Worker, Vite)
---
@@ -47,32 +66,39 @@ Ask before assuming. Never assume:
### Essential Commands
| Command | Description |
| ------------------------------------------------------------ | ----------------- |
| `podman exec -it flyer-crawler-dev npm test` | Run all tests |
| `podman exec -it flyer-crawler-dev npm run test:unit` | Unit tests only |
| `podman exec -it flyer-crawler-dev npm run type-check` | TypeScript check |
| `podman exec -it flyer-crawler-dev npm run test:integration` | Integration tests |
| Command | Description |
| ------------------------------------------------------------ | --------------------- |
| `podman exec -it flyer-crawler-dev npm test` | Run all tests |
| `podman exec -it flyer-crawler-dev npm run test:unit` | Unit tests only |
| `podman exec -it flyer-crawler-dev npm run type-check` | TypeScript check |
| `podman exec -it flyer-crawler-dev npm run test:integration` | Integration tests |
| `podman exec -it flyer-crawler-dev pm2 status` | PM2 process status |
| `podman exec -it flyer-crawler-dev pm2 logs` | View all PM2 logs |
| `podman exec -it flyer-crawler-dev pm2 restart all` | Restart all processes |
### Key Patterns (with file locations)
| Pattern | ADR | Implementation | File |
| ------------------ | ------- | ------------------------------------------------- | ----------------------------------- |
| Error Handling | ADR-001 | `handleDbError()`, throw `NotFoundError` | `src/services/db/errors.db.ts` |
| Repository Methods | ADR-034 | `get*` (throws), `find*` (null), `list*` (array) | `src/services/db/*.db.ts` |
| API Responses | ADR-028 | `sendSuccess()`, `sendPaginated()`, `sendError()` | `src/utils/apiResponse.ts` |
| Transactions | ADR-002 | `withTransaction(async (client) => {...})` | `src/services/db/transaction.db.ts` |
| Pattern | ADR | Implementation | File |
| ------------------ | ------- | ------------------------------------------------- | ------------------------------------- |
| Error Handling | ADR-001 | `handleDbError()`, throw `NotFoundError` | `src/services/db/errors.db.ts` |
| Repository Methods | ADR-034 | `get*` (throws), `find*` (null), `list*` (array) | `src/services/db/*.db.ts` |
| API Responses | ADR-028 | `sendSuccess()`, `sendPaginated()`, `sendError()` | `src/utils/apiResponse.ts` |
| Transactions | ADR-002 | `withTransaction(async (client) => {...})` | `src/services/db/connection.db.ts` |
| Feature Flags | ADR-024 | `isFeatureEnabled()`, `useFeatureFlag()` | `src/services/featureFlags.server.ts` |
### Key Files Quick Access
| Purpose | File |
| ------------ | -------------------------------- |
| Express app | `server.ts` |
| Environment | `src/config/env.ts` |
| Routes | `src/routes/*.routes.ts` |
| Repositories | `src/services/db/*.db.ts` |
| Workers | `src/services/workers.server.ts` |
| Queues | `src/services/queues.server.ts` |
| Purpose | File |
| ----------------- | ------------------------------------- |
| Express app | `server.ts` |
| Environment | `src/config/env.ts` |
| Routes | `src/routes/*.routes.ts` |
| Repositories | `src/services/db/*.db.ts` |
| Workers | `src/services/workers.server.ts` |
| Queues | `src/services/queues.server.ts` |
| Feature Flags | `src/services/featureFlags.server.ts` |
| PM2 Config (Dev) | `ecosystem.dev.config.cjs` |
| PM2 Config (Prod) | `ecosystem.config.cjs` |
---
@@ -96,6 +122,56 @@ Routes → Services → Repositories → Database
---
## Dev Container Architecture (ADR-014)
The dev container now matches production by using PM2 for process management.
### Process Management
| Component | Production | Dev Container |
| ---------- | ---------------------- | ------------------------- |
| API Server | PM2 cluster mode | PM2 fork mode + tsx watch |
| Worker | PM2 process | PM2 process + tsx watch |
| Frontend | Static files via NGINX | PM2 + Vite dev server |
| Logs | PM2 logs -> Logstash | PM2 logs -> Logstash |
**PM2 Processes in Dev Container**:
- `flyer-crawler-api-dev` - API server (port 3001)
- `flyer-crawler-worker-dev` - Background job worker
- `flyer-crawler-vite-dev` - Vite frontend dev server (port 5173)
### Log Aggregation (ADR-015)
All logs flow to Bugsink via Logstash with 3-project routing:
| Source | Log Location | Bugsink Project |
| ----------------- | --------------------------------- | ------------------ |
| Backend (Pino) | `/var/log/pm2/api-*.log` | Backend API (1) |
| Worker (Pino) | `/var/log/pm2/worker-*.log` | Backend API (1) |
| PostgreSQL | `/var/log/postgresql/*.log` | Backend API (1) |
| Vite | `/var/log/pm2/vite-*.log` | Infrastructure (4) |
| Redis | `/var/log/redis/redis-server.log` | Infrastructure (4) |
| NGINX | `/var/log/nginx/*.log` | Infrastructure (4) |
| Frontend (Sentry) | Browser -> nginx proxy | Frontend (2) |
**Bugsink Projects (Dev Container)**:
- Project 1: Backend API (Dev) - Application errors
- Project 2: Frontend (Dev) - Browser errors via nginx proxy
- Project 4: Infrastructure (Dev) - Redis, NGINX, Vite errors
**Key Files**:
- `ecosystem.dev.config.cjs` - PM2 development configuration
- `scripts/dev-entrypoint.sh` - Container startup script
- `docker/logstash/bugsink.conf` - Logstash pipeline configuration
- `docker/nginx/dev.conf` - NGINX config with Bugsink API proxy
**Full Dev Container Guide**: See [docs/development/DEV-CONTAINER.md](docs/development/DEV-CONTAINER.md)
---
## Common Workflows
### Adding a New API Endpoint
@@ -148,7 +224,7 @@ Routes → Services → Repositories → Database
**Launch Pattern**:
```
```text
Use Task tool with subagent_type: "coder", "db-dev", "tester", etc.
```
@@ -166,6 +242,7 @@ Common issues with solutions:
4. **Filename collisions** - Multer predictable names → Use `${Date.now()}-${Math.round(Math.random() * 1e9)}`
5. **Response format mismatches** - API format changes → Log response bodies, update assertions
6. **External service failures** - PM2/Redis unavailable → try/catch with graceful degradation
7. **TZ environment variable breaks async hooks** - `TZ=America/Los_Angeles` causes `RangeError: Invalid triggerAsyncId value: NaN` → Tests now explicitly set `TZ=` (empty) in package.json scripts
**Full Details**: See test issues section at end of this document or [docs/development/TESTING.md](docs/development/TESTING.md)
@@ -228,8 +305,8 @@ podman cp "d:/path/file" container:/tmp/file
**Quick Access**:
- **Dev**: https://localhost:8443 (`admin@localhost`/`admin`)
- **Prod**: https://bugsink.projectium.com
- **Dev**: <https://localhost:8443> (`admin@localhost`/`admin`)
- **Prod**: <https://bugsink.projectium.com>
**Token Creation** (required for MCP):
@@ -237,85 +314,31 @@ podman cp "d:/path/file" container:/tmp/file
# Dev container
MSYS_NO_PATHCONV=1 podman exec -e DATABASE_URL=postgresql://bugsink:bugsink_dev_password@postgres:5432/bugsink -e SECRET_KEY=dev-bugsink-secret-key-minimum-50-characters-for-security flyer-crawler-dev sh -c 'cd /opt/bugsink/conf && DJANGO_SETTINGS_MODULE=bugsink_conf PYTHONPATH=/opt/bugsink/conf:/opt/bugsink/lib/python3.10/site-packages /opt/bugsink/bin/python -m django create_auth_token'
# Production (via SSH)
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
# Production (user executes on server)
cd /opt/bugsink && bugsink-manage create_auth_token
```
### Logstash
**See**: [docs/operations/LOGSTASH-QUICK-REF.md](docs/operations/LOGSTASH-QUICK-REF.md)
Log aggregation: PostgreSQL + PM2 + Redis + NGINX → Bugsink (ADR-050)
Log aggregation: PostgreSQL + PM2 + Redis + NGINX → Bugsink (ADR-015)
---
## Documentation Quick Links
| Topic | Document |
| ------------------- | ----------------------------------------------------- |
| **Getting Started** | [QUICKSTART.md](docs/getting-started/QUICKSTART.md) |
| **Architecture** | [OVERVIEW.md](docs/architecture/OVERVIEW.md) |
| **Code Patterns** | [CODE-PATTERNS.md](docs/development/CODE-PATTERNS.md) |
| **Testing** | [TESTING.md](docs/development/TESTING.md) |
| **Debugging** | [DEBUGGING.md](docs/development/DEBUGGING.md) |
| **Database** | [DATABASE.md](docs/architecture/DATABASE.md) |
| **Deployment** | [DEPLOYMENT.md](docs/operations/DEPLOYMENT.md) |
| **Monitoring** | [MONITORING.md](docs/operations/MONITORING.md) |
| **ADRs** | [docs/adr/index.md](docs/adr/index.md) |
| **All Docs** | [docs/README.md](docs/README.md) |
---
## Appendix: Integration Test Issues (Full Details)
### 1. Vitest globalSetup Context Isolation
Vitest's `globalSetup` runs in separate Node.js context. Singletons, spies, mocks do NOT share instances with test files.
**Affected**: BullMQ worker service mocks (AI/DB failure tests)
**Solutions**: Mark `.todo()`, create test-only API endpoints, use Redis-based mock flags
```typescript
// DOES NOT WORK - different instances
const { flyerProcessingService } = await import('../../services/workers.server');
flyerProcessingService._getAiProcessor()._setExtractAndValidateData(mockFn);
```
### 2. Cleanup Queue Deletes Before Verification
Cleanup worker processes jobs in globalSetup context, ignoring test spies.
**Solution**: Drain and pause queue:
```typescript
const { cleanupQueue } = await import('../../services/queues.server');
await cleanupQueue.drain();
await cleanupQueue.pause();
// ... test ...
await cleanupQueue.resume();
```
### 3. Cache Stale After Direct SQL
Direct `pool.query()` inserts bypass cache invalidation.
**Solution**: `await cacheService.invalidateFlyers();` after inserts
### 4. Test Filename Collisions
Multer predictable filenames cause race conditions.
**Solution**: Use unique suffix: `${Date.now()}-${Math.round(Math.random() * 1e9)}`
### 5. Response Format Mismatches
API formats change: `data.jobId` vs `data.job.id`, nested vs flat, string vs number IDs.
**Solution**: Log response bodies, update assertions
### 6. External Service Availability
PM2/Redis health checks fail when unavailable.
**Solution**: try/catch with graceful degradation or mock
| Topic | Document |
| ------------------- | -------------------------------------------------------------- |
| **Getting Started** | [QUICKSTART.md](docs/getting-started/QUICKSTART.md) |
| **Dev Container** | [DEV-CONTAINER.md](docs/development/DEV-CONTAINER.md) |
| **Architecture** | [OVERVIEW.md](docs/architecture/OVERVIEW.md) |
| **Code Patterns** | [CODE-PATTERNS.md](docs/development/CODE-PATTERNS.md) |
| **Testing** | [TESTING.md](docs/development/TESTING.md) |
| **Debugging** | [DEBUGGING.md](docs/development/DEBUGGING.md) |
| **Database** | [DATABASE.md](docs/architecture/DATABASE.md) |
| **Deployment** | [DEPLOYMENT.md](docs/operations/DEPLOYMENT.md) |
| **Monitoring** | [MONITORING.md](docs/operations/MONITORING.md) |
| **Logstash** | [LOGSTASH-QUICK-REF.md](docs/operations/LOGSTASH-QUICK-REF.md) |
| **ADRs** | [docs/adr/index.md](docs/adr/index.md) |
| **All Docs** | [docs/README.md](docs/README.md) |

View File

@@ -54,6 +54,13 @@ RUN apt-get update && apt-get install -y \
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
&& apt-get install -y nodejs
# ============================================================================
# Install PM2 Globally (ADR-014 Parity)
# ============================================================================
# Install PM2 to match production architecture. This allows dev container to
# run the same process management as production (cluster mode, workers, etc.)
RUN npm install -g pm2
# ============================================================================
# Install mkcert and Generate Self-Signed Certificates
# ============================================================================
@@ -63,6 +70,27 @@ RUN wget -O /usr/local/bin/mkcert https://github.com/FiloSottile/mkcert/releases
&& chmod +x /usr/local/bin/mkcert
# Create certificates directory and generate localhost certificates
# ============================================================================
# IMPORTANT: Certificate includes MULTIPLE hostnames (SANs)
# ============================================================================
# The certificate is generated for 'localhost', '127.0.0.1', AND '::1' because:
#
# 1. Users may access the site via https://localhost/ OR https://127.0.0.1/
# 2. Database stores image URLs using one hostname (typically 127.0.0.1)
# 3. The seed script uses https://127.0.0.1 for image URLs (database constraint)
# 4. NGINX is configured to accept BOTH hostnames (see docker/nginx/dev.conf)
#
# Without all hostnames in the certificate's Subject Alternative Names (SANs),
# browsers would show ERR_CERT_AUTHORITY_INVALID when loading images or other
# resources that use a different hostname than the one in the address bar.
#
# The mkcert command below creates a certificate valid for all three:
# - localhost (IPv4 hostname)
# - 127.0.0.1 (IPv4 address)
# - ::1 (IPv6 loopback)
#
# See also: docker/nginx/dev.conf, docs/FLYER-URL-CONFIGURATION.md
# ============================================================================
RUN mkdir -p /app/certs \
&& cd /app/certs \
&& mkcert -install \
@@ -146,6 +174,21 @@ BUGSINK = {\n\
}\n\
\n\
ALLOWED_HOSTS = deduce_allowed_hosts(BUGSINK["BASE_URL"])\n\
# Also allow 127.0.0.1 access (both localhost and 127.0.0.1 should work)\n\
if "127.0.0.1" not in ALLOWED_HOSTS:\n\
ALLOWED_HOSTS.append("127.0.0.1")\n\
if "localhost" not in ALLOWED_HOSTS:\n\
ALLOWED_HOSTS.append("localhost")\n\
\n\
# CSRF Trusted Origins (Django 4.0+ requires full origin for HTTPS POST requests)\n\
# This fixes "CSRF verification failed" errors when accessing Bugsink via HTTPS\n\
# Both localhost and 127.0.0.1 must be trusted to support different access patterns\n\
CSRF_TRUSTED_ORIGINS = [\n\
"https://localhost:8443",\n\
"https://127.0.0.1:8443",\n\
"http://localhost:8000",\n\
"http://127.0.0.1:8000",\n\
]\n\
\n\
# Console email backend for dev\n\
EMAIL_BACKEND = "bugsink.email_backends.QuietConsoleEmailBackend"\n\
@@ -211,119 +254,25 @@ exec /opt/bugsink/bin/gunicorn \\\n\
&& chmod +x /usr/local/bin/start-bugsink.sh
# ============================================================================
# Create Logstash Pipeline Configuration
# Copy Logstash Pipeline Configuration
# ============================================================================
# ADR-015: Pino and Redis logs → Bugsink
# ADR-015 + ADR-050: Multi-source log aggregation to Bugsink
# Configuration file includes:
# - Pino application logs (Backend API errors)
# - PostgreSQL logs (including fn_log() structured output)
# - NGINX access and error logs
# See docker/logstash/bugsink.conf for full configuration
RUN mkdir -p /etc/logstash/conf.d /app/logs
RUN echo 'input {\n\
# Pino application logs\n\
file {\n\
path => "/app/logs/*.log"\n\
codec => json\n\
type => "pino"\n\
tags => ["app"]\n\
start_position => "beginning"\n\
sincedb_path => "/var/lib/logstash/sincedb_pino"\n\
}\n\
\n\
# Redis logs\n\
file {\n\
path => "/var/log/redis/*.log"\n\
type => "redis"\n\
tags => ["redis"]\n\
start_position => "beginning"\n\
sincedb_path => "/var/lib/logstash/sincedb_redis"\n\
}\n\
\n\
# PostgreSQL function logs (ADR-050)\n\
file {\n\
path => "/var/log/postgresql/*.log"\n\
type => "postgres"\n\
tags => ["postgres", "database"]\n\
start_position => "beginning"\n\
sincedb_path => "/var/lib/logstash/sincedb_postgres"\n\
}\n\
}\n\
\n\
filter {\n\
# Pino error detection (level 50 = error, 60 = fatal)\n\
if [type] == "pino" and [level] >= 50 {\n\
mutate { add_tag => ["error"] }\n\
}\n\
\n\
# Redis log parsing\n\
if [type] == "redis" {\n\
grok {\n\
match => { "message" => "%%{POSINT:pid}:%%{WORD:role} %%{MONTHDAY} %%{MONTH} %%{TIME} %%{WORD:loglevel} %%{GREEDYDATA:redis_message}" }\n\
}\n\
\n\
# Tag errors (WARNING/ERROR) for Bugsink forwarding\n\
if [loglevel] in ["WARNING", "ERROR"] {\n\
mutate { add_tag => ["error"] }\n\
}\n\
# Tag INFO-level operational events (startup, config, persistence)\n\
else if [loglevel] == "INFO" {\n\
mutate { add_tag => ["redis_operational"] }\n\
}\n\
}\n\
\n\
# PostgreSQL function log parsing (ADR-050)\n\
if [type] == "postgres" {\n\
# Extract timestamp and process ID from PostgreSQL log prefix\n\
# Format: "2026-01-18 10:30:00 PST [12345] user@database "\n\
grok {\n\
match => { "message" => "%%{TIMESTAMP_ISO8601:pg_timestamp} \\\\[%%{POSINT:pg_pid}\\\\] %%{USERNAME:pg_user}@%%{WORD:pg_database} %%{GREEDYDATA:pg_message}" }\n\
}\n\
\n\
# Check if this is a structured JSON log from fn_log()\n\
# fn_log() emits JSON like: {"timestamp":"...","level":"WARNING","source":"postgresql","function":"award_achievement",...}\n\
if [pg_message] =~ /^\\{.*"source":"postgresql".*\\}$/ {\n\
json {\n\
source => "pg_message"\n\
target => "fn_log"\n\
}\n\
\n\
# Mark as error if level is WARNING or ERROR\n\
if [fn_log][level] in ["WARNING", "ERROR"] {\n\
mutate { add_tag => ["error", "db_function"] }\n\
}\n\
}\n\
\n\
# Also catch native PostgreSQL errors\n\
if [pg_message] =~ /^ERROR:/ or [pg_message] =~ /^FATAL:/ {\n\
mutate { add_tag => ["error", "postgres_native"] }\n\
}\n\
}\n\
}\n\
\n\
output {\n\
# Forward errors to Bugsink\n\
if "error" in [tags] {\n\
http {\n\
url => "http://localhost:8000/api/store/"\n\
http_method => "post"\n\
format => "json"\n\
}\n\
}\n\
\n\
# Store Redis operational logs (INFO level) to file\n\
if "redis_operational" in [tags] {\n\
file {\n\
path => "/var/log/logstash/redis-operational-%%{+YYYY-MM-dd}.log"\n\
codec => json_lines\n\
}\n\
}\n\
\n\
# Debug output (comment out in production)\n\
stdout { codec => rubydebug }\n\
}\n\
' > /etc/logstash/conf.d/bugsink.conf
COPY docker/logstash/bugsink.conf /etc/logstash/conf.d/bugsink.conf
# Create Logstash directories
RUN mkdir -p /var/lib/logstash && chown -R logstash:logstash /var/lib/logstash
RUN mkdir -p /var/log/logstash && chown -R logstash:logstash /var/log/logstash
# Create PM2 log directory (ADR-014 Parity)
# Logs written here will be picked up by Logstash (ADR-050)
RUN mkdir -p /var/log/pm2
# ============================================================================
# Configure Nginx
# ============================================================================

105
certs/README.md Normal file
View File

@@ -0,0 +1,105 @@
# Development SSL Certificates
This directory contains SSL certificates for the development container HTTPS setup.
## Files
| File | Purpose | Generated By |
| --------------- | ---------------------------------------------------- | -------------------------- |
| `localhost.crt` | SSL certificate for localhost and 127.0.0.1 | mkcert (in Dockerfile.dev) |
| `localhost.key` | Private key for localhost.crt | mkcert (in Dockerfile.dev) |
| `mkcert-ca.crt` | Root CA certificate for trusting mkcert certificates | mkcert |
## Certificate Details
The `localhost.crt` certificate includes the following Subject Alternative Names (SANs):
- `DNS:localhost`
- `IP Address:127.0.0.1`
- `IP Address:::1` (IPv6 localhost)
This allows the development server to be accessed via both `https://localhost/` and `https://127.0.0.1/` without SSL errors.
## Installing the CA Certificate (Recommended)
To avoid SSL certificate warnings in your browser, install the mkcert CA certificate on your system.
### Windows
1. Double-click `mkcert-ca.crt`
2. Click **"Install Certificate..."**
3. Select **"Local Machine"** > Next
4. Select **"Place all certificates in the following store"**
5. Click **Browse** > Select **"Trusted Root Certification Authorities"** > OK
6. Click **Next** > **Finish**
7. Restart your browser
### macOS
```bash
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain certs/mkcert-ca.crt
```
### Linux
```bash
# Ubuntu/Debian
sudo cp certs/mkcert-ca.crt /usr/local/share/ca-certificates/mkcert-ca.crt
sudo update-ca-certificates
# Fedora/RHEL
sudo cp certs/mkcert-ca.crt /etc/pki/ca-trust/source/anchors/
sudo update-ca-trust
```
### Firefox (All Platforms)
Firefox uses its own certificate store:
1. Open Firefox Settings
2. Search for "Certificates"
3. Click **"View Certificates"**
4. Go to **"Authorities"** tab
5. Click **"Import..."**
6. Select `certs/mkcert-ca.crt`
7. Check **"Trust this CA to identify websites"**
8. Click **OK**
## After Installation
Once the CA certificate is installed:
- Your browser will trust all mkcert certificates without warnings
- Access `https://localhost/` with no security warnings
- Images from `https://127.0.0.1/flyer-images/` will load without SSL errors
## Regenerating Certificates
If you need to regenerate the certificates (e.g., after rebuilding the container):
```bash
# Inside the container
cd /app/certs
mkcert localhost 127.0.0.1 ::1
mv localhost+2.pem localhost.crt
mv localhost+2-key.pem localhost.key
nginx -s reload
# Copy the new CA to the host
podman cp flyer-crawler-dev:/app/certs/mkcert-ca.crt ./certs/mkcert-ca.crt
```
Then reinstall the CA certificate as described above.
## Security Note
**DO NOT** commit the private key (`localhost.key`) to version control in production projects. For this development-only project, the certificates are checked in for convenience since they're only used locally with self-signed certificates.
The certificates in this directory are automatically generated by the Dockerfile.dev and should not be used in production.
## See Also
- [Dockerfile.dev](../Dockerfile.dev) - Certificate generation (line ~69)
- [docker/nginx/dev.conf](../docker/nginx/dev.conf) - NGINX SSL configuration
- [docs/FLYER-URL-CONFIGURATION.md](../docs/FLYER-URL-CONFIGURATION.md) - URL configuration details
- [docs/development/DEBUGGING.md](../docs/development/DEBUGGING.md) - SSL troubleshooting

View File

@@ -1,19 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIDCTCCAfGgAwIBAgIUHhZUK1vmww2wCepWPuVcU6d27hMwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTI2MDExODAyMzM0NFoXDTI3MDEx
ODAyMzM0NFowFDESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAuUJGtSZzd+ZpLi+efjrkxJJNfVxVz2VLhknNM2WKeOYx
JTK/VaTYq5hrczy6fEUnMhDAJCgEPUFlOK3vn1gFJKNMN8m7arkLVk6PYtrx8CTw
w78Q06FLITr6hR0vlJNpN4MsmGxYwUoUpn1j5JdfZF7foxNAZRiwoopf7ZJxltDu
PIuFjmVZqdzR8c6vmqIqdawx/V6sL9fizZr+CDH3oTsTUirn2qM+1ibBtPDiBvfX
omUsr6MVOcTtvnMvAdy9NfV88qwF7MEWBGCjXkoT1bKCLD8hjn8l7GjRmPcmMFE2
GqWEvfJiFkBK0CgSHYEUwzo0UtVNeQr0k0qkDRub6QIDAQABo1MwUTAdBgNVHQ4E
FgQU5VeD67yFLV0QNYbHaJ6u9cM6UbkwHwYDVR0jBBgwFoAU5VeD67yFLV0QNYbH
aJ6u9cM6UbkwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEABueA
8ujAD+yjeP5dTgqQH1G0hlriD5LmlJYnktaLarFU+y+EZlRFwjdORF/vLPwSG+y7
CLty/xlmKKQop70QzQ5jtJcsWzUjww8w1sO3AevfZlIF3HNhJmt51ihfvtJ7DVCv
CNyMeYO0pBqRKwOuhbG3EtJgyV7MF8J25UEtO4t+GzX3jcKKU4pWP+kyLBVfeDU3
MQuigd2LBwBQQFxZdpYpcXVKnAJJlHZIt68ycO1oSBEJO9fIF0CiAlC6ITxjtYtz
oCjd6cCLKMJiC6Zg7t1Q17vGl+FdGyQObSsiYsYO9N3CVaeDdpyGCH0Rfa0+oZzu
a5U9/l1FHlvpX980bw==
MIIEJDCCAoygAwIBAgIQfbdj1KSREvW82wxWZcDRsDANBgkqhkiG9w0BAQsFADBf
MR4wHAYDVQQKExVta2NlcnQgZGV2ZWxvcG1lbnQgQ0ExGjAYBgNVBAsMEXJvb3RA
OTIxZjA1YmRkNDk4MSEwHwYDVQQDDBhta2NlcnQgcm9vdEA5MjFmMDViZGQ0OTgw
HhcNMjYwMTIyMjAwMzIwWhcNMjgwNDIyMjAwMzIwWjBFMScwJQYDVQQKEx5ta2Nl
cnQgZGV2ZWxvcG1lbnQgY2VydGlmaWNhdGUxGjAYBgNVBAsMEXJvb3RANzk2OWU1
ZjA2MGM4MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2u+HpnTr+Ecj
X6Ur4I8YHVKiEIgOFozJWWAeCPSZg0lr9/z3UDl7QPfRKaDk11OMN7DB0Y/ZAIHj
kMFdYc+ODOUAKGv/MvlM41OeRUXhrt0Gr/TJoDe9RLzs9ffASpqTHUNNrmyj/fLP
M5VZU+4Y2POFq8qji6Otkvqr6wp+2CTS/fkAtenFS2X+Z5u6BBq1/KBSWZhUFSuJ
sAGN9u+l20Cj/Sxp2nD1npvMEvPehFKEK2tZGgFr+X426jJ4Znd19EyZoI/xetG6
ybSzBdQk1KbhWFa3LPuOG814m9Qh21vaL0Hj2hpqC3KEQ2jiuCovjRS+RBGatltl
5m47Cj0UrQIDAQABo3YwdDAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYIKwYB
BQUHAwEwHwYDVR0jBBgwFoAUjf0NOmUuoqSSD1Mbu/3G+wYxvjEwLAYDVR0RBCUw
I4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEB
CwUAA4IBgQAorThUyu4FxHkOafMp/4CmvptfNvYeYP81DW0wpJxGL68Bz+idvXKv
JopX/ZAr+dDhS/TLeSnzt83W7TaBBHY+usvsiBx9+pZOVZIpreXRamPu7utmuC46
dictMNGlRNX9bwAApOJ24NCpOVSIIKtjHcjl4idwUHqLVGS+3wsmxIILYxigzkuT
fcK5vs0ItZWeuunsBAGb/U/Iu9zZ71rtmBejxNPyEvd8+bZC0m2mtV8C0Lpn58jZ
FiEf5OHiOdWG9O/uh3QeVWkuKLmaH6a8VdKRSIlOxEEZdkUYlwuVvzeWgFw4kjm8
rNWz0aIPovmcgLXoUG1d1D8klveGd3plF7N2p3xWqKmS6R6FJHx7MIxH6XmBATii
x/193Sgzqe8mwXQr14ulg2M/B3ZWleNdD6SeieADSgvRKjnlO7xwUmcFrffnEKEG
WcSPoGIuZ8V2pkYLh7ipPN+tFUbhmrWnsao5kQ0sqLlfscyO9hYQeQRtTjtC3P8r
FJYOdBMOCzE=
-----END CERTIFICATE-----

View File

@@ -1,28 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC5Qka1JnN35mku
L55+OuTEkk19XFXPZUuGSc0zZYp45jElMr9VpNirmGtzPLp8RScyEMAkKAQ9QWU4
re+fWAUko0w3ybtquQtWTo9i2vHwJPDDvxDToUshOvqFHS+Uk2k3gyyYbFjBShSm
fWPkl19kXt+jE0BlGLCiil/tknGW0O48i4WOZVmp3NHxzq+aoip1rDH9Xqwv1+LN
mv4IMfehOxNSKufaoz7WJsG08OIG99eiZSyvoxU5xO2+cy8B3L019XzyrAXswRYE
YKNeShPVsoIsPyGOfyXsaNGY9yYwUTYapYS98mIWQErQKBIdgRTDOjRS1U15CvST
SqQNG5vpAgMBAAECggEAAnv0Dw1Mv+rRy4ZyxtObEVPXPRzoxnDDXzHP4E16BTye
Fc/4pSBUIAUn2bPvLz0/X8bMOa4dlDcIv7Eu9Pvns8AY70vMaUReA80fmtHVD2xX
1PCT0X3InnxRAYKstSIUIGs+aHvV5Z+iJ8F82soOStN1MU56h+JLWElL5deCPHq3
tLZT8wM9aOZlNG72kJ71+DlcViahynQj8+VrionOLNjTJ2Jv/ByjM3GMIuSdBrgd
Sl4YAcdn6ontjJGoTgI+e+qkBAPwMZxHarNGQgbS0yNVIJe7Lq4zIKHErU/ZSmpD
GzhdVNzhrjADNIDzS7G+pxtz+aUxGtmRvOyopy8GAQKBgQDEPp2mRM+uZVVT4e1j
pkKO1c3O8j24I5mGKwFqhhNs3qGy051RXZa0+cQNx63GokXQan9DIXzc/Il7Y72E
z9bCFbcSWnlP8dBIpWiJm+UmqLXRyY4N8ecNnzL5x+Tuxm5Ij+ixJwXgdz/TLNeO
MBzu+Qy738/l/cAYxwcF7mR7AQKBgQDxq1F95HzCxBahRU9OGUO4s3naXqc8xKCC
m3vbbI8V0Exse2cuiwtlPPQWzTPabLCJVvCGXNru98sdeOu9FO9yicwZX0knOABK
QfPyDeITsh2u0C63+T9DNn6ixI/T68bTs7DHawEYbpS7bR50BnbHbQrrOAo6FSXF
yC7+Te+o6QKBgQCXEWSmo/4D0Dn5Usg9l7VQ40GFd3EPmUgLwntal0/I1TFAyiom
gpcLReIogXhCmpSHthO1h8fpDfZ/p+4ymRRHYBQH6uHMKugdpEdu9zVVpzYgArp5
/afSEqVZJwoSzWoELdQA23toqiPV2oUtDdiYFdw5nDccY1RHPp8nb7amAQKBgQDj
f4DhYDxKJMmg21xCiuoDb4DgHoaUYA0xpii8cL9pq4KmBK0nVWFO1kh5Robvsa2m
PB+EfNjkaIPepLxWbOTUEAAASoDU2JT9UoTQcl1GaUAkFnpEWfBB14TyuNMkjinH
lLpvn72SQFbm8VvfoU4jgfTrZP/LmajLPR1v6/IWMQKBgBh9qvOTax/GugBAWNj3
ZvF99rHOx0rfotEdaPcRN66OOiSWILR9yfMsTvwt1V0VEj7OqO9juMRFuIyB57gd
Hs/zgbkuggqjr1dW9r22P/UpzpodAEEN2d52RSX8nkMOkH61JXlH2MyRX65kdExA
VkTDq6KwomuhrU3z0+r/MSOn
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDa74emdOv4RyNf
pSvgjxgdUqIQiA4WjMlZYB4I9JmDSWv3/PdQOXtA99EpoOTXU4w3sMHRj9kAgeOQ
wV1hz44M5QAoa/8y+UzjU55FReGu3Qav9MmgN71EvOz198BKmpMdQ02ubKP98s8z
lVlT7hjY84WryqOLo62S+qvrCn7YJNL9+QC16cVLZf5nm7oEGrX8oFJZmFQVK4mw
AY3276XbQKP9LGnacPWem8wS896EUoQra1kaAWv5fjbqMnhmd3X0TJmgj/F60brJ
tLMF1CTUpuFYVrcs+44bzXib1CHbW9ovQePaGmoLcoRDaOK4Ki+NFL5EEZq2W2Xm
bjsKPRStAgMBAAECggEBALFhpHwe+xh7OpPBlR0pkpYfXyMZuKBYjMIW9/61frM6
B3oywIWFLPFkV1js/Lvg+xgb48zQSTb6BdBAelJHAYY8+7XEWk2IYt1D4FWr2r/8
X/Cr2bgvsO9CSpK2mltXhZ4N66BIcU3NLkdS178Ch6svErwvP/ZhNL6Czktug3rG
S2fxpKqoVQhqWiEBV0vBWpw2oskvcpP2Btev2eaJeDmP1IkCaKIU9jJzmSg8UKj3
AxJJ8lJvlDi2Z8mfB0BVIFZSI1s44LqbuPdITsWvG49srdAhyLkifcbY2r7eH8+s
rFNswCaqqNwmzZXHMFedfvgVJHtCTGX/U8G55dcG/YECgYEA6PvcX+axT4aSaG+/
fQpyW8TmNmIwS/8HD8rA5dSBB2AE+RnqbxxB1SWWk4w47R/riYOJCy6C88XUZWLn
05cYotR9lA6tZOYqIPUIxfCHl4vDDBnOGAB0ga3z5vF3X7HV6uspPFcrGVJ9k42S
BK1gk7Kj8RxVQMqXSqkR/pXJ4l0CgYEA8JBkZ4MRi1D74SAU+TY1rnUCTs2EnP1U
TKAI9qHyvNJOFZaObWPzegkZryZTm+yTblwvYkryMjvsiuCdv6ro/VKc667F3OBs
dJ/8+ylO0lArP9a23QHViNSvQmyi3bsw2UpIGQRGq2C4LxDceo5cYoVOWIlLl8u3
LUuL0IfxdpECgYEAyMd0DPljyGLyfSoAXaPJFajDtA4+DOAEl+lk/yt43oAzCPD6
hTJW0XcJIrJuxHsDoohGa+pzU90iwxTPMBtAUeLJLfTQHOn1WF2SZ/J3B3ScbCs4
3ppVzQO580YYV9GLxl1ONf/w1muuaKBSO9GmLuJ+QeTm22U7qE23gixXxMkCgYEA
3R7cK4l+huBZpgUnQitiDInhJS4jx2nUItq3YnxZ8tYckBtjr4lAM9xJj4VbNOew
XLC/nUnmdeY+9yif153xq2hUdQ6hMPXYuxqUHwlJOmgWWQez7lHRRYS51ASnb8iw
jgqJWvVjQAQXSKvm/X/9y1FdQmRw54aJSUk3quZKPQECgYA5DqFXn/jYOUCGfCUD
RENWe+3fNZSJCWmr4yvjEy9NUT5VcA+/hZHbbLUnVHfAHxJmdGhTzFe+ZGWFAGwi
Wo62BDu1fqHHegCWaFFluDnz8iZTxXtHMEGSBZnXC4f5wylcxRtCPUbrLMRTPo4O
t85qeBu1dMq1XQtU6ab3w3W0nw==
-----END PRIVATE KEY-----

27
certs/mkcert-ca.crt Normal file
View File

@@ -0,0 +1,27 @@
-----BEGIN CERTIFICATE-----
MIIEjjCCAvagAwIBAgIRAJtq2Z0+W981Ct2dMVPb3bQwDQYJKoZIhvcNAQELBQAw
XzEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMRowGAYDVQQLDBFyb290
QDkyMWYwNWJkZDQ5ODEhMB8GA1UEAwwYbWtjZXJ0IHJvb3RAOTIxZjA1YmRkNDk4
MB4XDTI2MDEyMjA5NDcxNVoXDTM2MDEyMjA5NDcxNVowXzEeMBwGA1UEChMVbWtj
ZXJ0IGRldmVsb3BtZW50IENBMRowGAYDVQQLDBFyb290QDkyMWYwNWJkZDQ5ODEh
MB8GA1UEAwwYbWtjZXJ0IHJvb3RAOTIxZjA1YmRkNDk4MIIBojANBgkqhkiG9w0B
AQEFAAOCAY8AMIIBigKCAYEAxXR5gXDwv5cfQSud1eEhwDuaSaf5kf8NtPnucZXY
AN+/QW1+OEKJFuawj/YrSbL/yIB8sUSJToEYNJ4LAgzIZ4+TPYVvOIPqQnimfj98
2AKCt73U/AMvoEpts7U0s37f5wF8o+BHXfChxyN//z96+wsQ+2+Q9QBGjirvScF+
8FRnupcygDeGZ8x3JQVaEfEV6iYyXFl/4tEDVr9QX4avyUlf0vp1Y90TG3L42JYQ
xDU37Ct9dqsxPCPOPjmkQi9HV5TeqLTs/4NdrEYOSk7bOVMzL8EHs2prRL7sWzYJ
gRT+VXFPpoSCkZs1gS3FNXukTGx5LNsstyJZRa99cGgDcqvNseig06KUzZrRnCig
kARLF/n8VTpHETEuTdxdnXJO3i2N/99mG/2/lej9HNDMaqg45ur5EhaFhHarXMtc
wy7nfGoThzscZvpFbVHorhgRxzsUqRHTMHa9mUOYtShMA0IwFccHcczY3CDxXLg9
hC+24pdiCxtRmi23JB10Th+nAgMBAAGjRTBDMA4GA1UdDwEB/wQEAwICBDASBgNV
HRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBSN/Q06ZS6ipJIPUxu7/cb7BjG+MTAN
BgkqhkiG9w0BAQsFAAOCAYEAghQL80XpPs70EbCmETYri5ryjJPbsxK3Z3QgBYoy
/P/021eK5woLx4EDeEUyrRayOtLlDWiIMdV7FmlY4GpEdonVbqFJY2gghkIuQpkw
lWqbk08F+iXe8PSGD1qz4y6enioCRAx+RaZ6jnVJtaC8AR257FVhGIzDBiOA+LDM
L1yq6Bxxij17q9s5HL9KuzxWRMuXACHmaGBXHpl/1n4dIxi2lXRgp+1xCR/VPNPt
/ZRy29kncd8Fxx+VEtc0muoJvRo4ttVWhBvAVJrkAeukjYKpcDDfRU6Y22o54jD/
mseDb+0UPwSxaKbnGJlCcRbbh6i14bA4KfPq93bZX+Tlq9VCp8LvQSl3oU+23RVc
KjBB9EJnoBBNGIY7VmRI+QowrnlP2wtg2fTNaPqULtjqA9frbMTP0xTlumLzGB+6
9Da7/+AE2in3Aa8Xyry4BiXbk2L6c1xz/Cd1ZpFrSXAOEi1Xt7so/Ck0yM3c2KWK
5aSfCjjOzONMPJyY1oxodJ1p
-----END CERTIFICATE-----

View File

@@ -46,6 +46,10 @@ services:
- node_modules_data:/app/node_modules
# Mount PostgreSQL logs for Logstash access (ADR-050)
- postgres_logs:/var/log/postgresql:ro
# Mount Redis logs for Logstash access (ADR-050)
- redis_logs:/var/log/redis:ro
# Mount PM2 logs for Logstash access (ADR-050, ADR-014)
- pm2_logs:/var/log/pm2
ports:
- '80:80' # HTTP redirect to HTTPS (matches production)
- '443:443' # Frontend HTTPS (nginx proxies Vite 5173 → 443)
@@ -53,6 +57,8 @@ services:
- '8000:8000' # Bugsink error tracking HTTP (ADR-015)
- '8443:8443' # Bugsink error tracking HTTPS (ADR-015)
environment:
# Timezone: PST (America/Los_Angeles) for consistent log timestamps
- TZ=America/Los_Angeles
# Core settings
- NODE_ENV=development
# Database - use service name for Docker networking
@@ -118,6 +124,10 @@ services:
ports:
- '5432:5432'
environment:
# Timezone: PST (America/Los_Angeles) for consistent log timestamps
TZ: America/Los_Angeles
# PostgreSQL timezone setting (used by log_timezone and timezone parameters)
PGTZ: America/Los_Angeles
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
POSTGRES_DB: flyer_crawler_dev
@@ -138,6 +148,8 @@ services:
postgres
-c config_file=/var/lib/postgresql/data/postgresql.conf
-c hba_file=/var/lib/postgresql/data/pg_hba.conf
-c timezone=America/Los_Angeles
-c log_timezone=America/Los_Angeles
-c log_min_messages=notice
-c client_min_messages=notice
-c logging_collector=on
@@ -166,10 +178,20 @@ services:
redis:
image: docker.io/library/redis:alpine
container_name: flyer-crawler-redis
# Run as root to allow entrypoint to set up log directory permissions
# Redis will drop privileges after setup via gosu in entrypoint
user: root
ports:
- '6379:6379'
environment:
# Timezone: PST (America/Los_Angeles) for consistent log timestamps
TZ: America/Los_Angeles
volumes:
- redis_data:/data
# Create log volume for Logstash access (ADR-050)
- redis_logs:/var/log/redis
# Mount custom entrypoint for log directory setup (ADR-050)
- ./docker/redis/docker-entrypoint.sh:/usr/local/bin/docker-entrypoint.sh:ro
# Healthcheck ensures redis is ready before app starts
healthcheck:
test: ['CMD', 'redis-cli', 'ping']
@@ -177,8 +199,17 @@ services:
timeout: 5s
retries: 10
start_period: 5s
# Enable persistence for development data
command: redis-server --appendonly yes
# Enable persistence and file logging for Logstash (ADR-050)
# Redis log levels: debug, verbose, notice (default), warning
# Custom entrypoint sets up log directory with correct permissions
entrypoint: ['/usr/local/bin/docker-entrypoint.sh']
command:
- --appendonly
- 'yes'
- --logfile
- /var/log/redis/redis-server.log
- --loglevel
- notice
# ===================
# Named Volumes
@@ -190,6 +221,10 @@ volumes:
name: flyer-crawler-postgres-logs
redis_data:
name: flyer-crawler-redis-data
redis_logs:
name: flyer-crawler-redis-logs
pm2_logs:
name: flyer-crawler-pm2-logs
node_modules_data:
name: flyer-crawler-node-modules

View File

@@ -0,0 +1,574 @@
# docker/logstash/bugsink.conf
# ============================================================================
# Logstash Pipeline Configuration for Bugsink Error Tracking
# ============================================================================
# This configuration aggregates logs from multiple sources and forwards errors
# to Bugsink (Sentry-compatible error tracking) in the dev container.
#
# Sources:
# - PM2 managed logs (/var/log/pm2/*.log) - API, Worker, Vite (ADR-014)
# - Pino application logs (/app/logs/*.log) - JSON format (fallback)
# - PostgreSQL logs (/var/log/postgresql/*.log) - Including fn_log() output
# - NGINX logs (/var/log/nginx/*.log) - Access and error logs
# - Redis logs (/var/log/redis/*.log) - Via shared volume (ADR-050)
#
# Bugsink Projects (3-project architecture):
# - Project 1: Backend API (Dev) - Pino/PM2 app errors, PostgreSQL errors
# DSN Key: cea01396c56246adb5878fa5ee6b1d22
# - Project 2: Frontend (Dev) - Configured via Sentry SDK in browser
# DSN Key: d92663cb73cf4145b677b84029e4b762
# - Project 4: Infrastructure (Dev) - Redis, NGINX, PM2 operational logs
# DSN Key: 14e8791da3d347fa98073261b596cab9
#
# Routing Logic:
# - Backend logs (type: pm2_api, pm2_worker, pino, postgres) -> Project 1
# - Infrastructure logs (type: redis, nginx_error, nginx_5xx) -> Project 4
# - Vite errors (type: pm2_vite with errors) -> Project 4 (build tooling)
#
# Related Documentation:
# - docs/adr/0050-postgresql-function-observability.md
# - docs/adr/0015-application-performance-monitoring-and-error-tracking.md
# - docs/operations/LOGSTASH-QUICK-REF.md
# ============================================================================
input {
# ============================================================================
# PM2 Managed Process Logs (ADR-014, ADR-050)
# ============================================================================
# PM2 manages all Node.js processes in the dev container, matching production.
# Logs are written to /var/log/pm2 for Logstash integration.
#
# Process logs:
# - api-out.log / api-error.log: API server (Pino JSON format)
# - worker-out.log / worker-error.log: Background worker (Pino JSON format)
# - vite-out.log / vite-error.log: Vite dev server (plain text)
# ============================================================================
# PM2 API Server Logs (Pino JSON format)
file {
path => "/var/log/pm2/api-out.log"
codec => json_lines
type => "pm2_api"
tags => ["app", "backend", "pm2", "api"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_pm2_api_out"
}
file {
path => "/var/log/pm2/api-error.log"
codec => json_lines
type => "pm2_api"
tags => ["app", "backend", "pm2", "api", "stderr"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_pm2_api_error"
}
# PM2 Worker Logs (Pino JSON format)
file {
path => "/var/log/pm2/worker-out.log"
codec => json_lines
type => "pm2_worker"
tags => ["app", "worker", "pm2"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_pm2_worker_out"
}
file {
path => "/var/log/pm2/worker-error.log"
codec => json_lines
type => "pm2_worker"
tags => ["app", "worker", "pm2", "stderr"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_pm2_worker_error"
}
# PM2 Vite Logs (plain text format)
file {
path => "/var/log/pm2/vite-out.log"
codec => plain
type => "pm2_vite"
tags => ["app", "frontend", "pm2", "vite"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_pm2_vite_out"
}
file {
path => "/var/log/pm2/vite-error.log"
codec => plain
type => "pm2_vite"
tags => ["app", "frontend", "pm2", "vite", "stderr"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_pm2_vite_error"
}
# ============================================================================
# Pino Application Logs (Fallback Path)
# ============================================================================
# JSON-formatted logs from the Node.js application using Pino logger.
# Note: Primary logs now go to /var/log/pm2. This is a fallback.
# Log levels: 10=trace, 20=debug, 30=info, 40=warn, 50=error, 60=fatal
file {
path => "/app/logs/*.log"
codec => json
type => "pino"
tags => ["app", "backend"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_pino"
}
# ============================================================================
# PostgreSQL Function Logs (ADR-050)
# ============================================================================
# Captures PostgreSQL log output including fn_log() structured JSON messages.
# PostgreSQL is configured to write logs to /var/log/postgresql/ (shared volume).
# Log format: "2026-01-22 14:30:00 PST [5724] postgres@flyer_crawler_dev LOG: message"
# Note: Timestamps are in PST (America/Los_Angeles) timezone as configured in compose.dev.yml
file {
path => "/var/log/postgresql/*.log"
type => "postgres"
tags => ["postgres", "database"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_postgres"
}
# ============================================================================
# NGINX Logs
# ============================================================================
# Access logs for request monitoring and error logs for proxy/SSL issues.
file {
path => "/var/log/nginx/access.log"
type => "nginx_access"
tags => ["nginx", "access"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_nginx_access"
}
file {
path => "/var/log/nginx/error.log"
type => "nginx_error"
tags => ["nginx", "error"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_nginx_error"
}
# ============================================================================
# Redis Logs (ADR-050)
# ============================================================================
# Redis logs from the shared volume. Redis uses its own log format:
# "<pid>:<role> <day> <month> <time> <loglevel> <message>"
# Example: "1:M 22 Jan 2026 14:30:00.123 * Ready to accept connections"
# Log levels: . (debug), - (verbose), * (notice), # (warning)
file {
path => "/var/log/redis/redis-server.log"
type => "redis"
tags => ["redis", "infra"]
start_position => "beginning"
sincedb_path => "/var/lib/logstash/sincedb_redis"
}
}
filter {
# ============================================================================
# PM2 API/Worker Log Processing (ADR-014)
# ============================================================================
# PM2 API and Worker logs are in Pino JSON format.
# Process them the same way as direct Pino logs.
if [type] in ["pm2_api", "pm2_worker"] {
# Tag errors (level 50 = error, 60 = fatal)
if [level] >= 50 {
mutate { add_tag => ["error", "pm2_pino_error"] }
# Map Pino level to Sentry level and set error_message field
if [level] == 60 {
mutate { add_field => { "sentry_level" => "fatal" } }
} else {
mutate { add_field => { "sentry_level" => "error" } }
}
# Copy msg to error_message for consistent access in output
mutate { add_field => { "error_message" => "%{msg}" } }
}
}
# ============================================================================
# PM2 Vite Log Processing (ADR-014)
# ============================================================================
# Vite logs are plain text. Look for error patterns.
if [type] == "pm2_vite" {
# Detect Vite build/compilation errors
if [message] =~ /(?i)(error|failed|exception|cannot|enoent|eperm|EACCES|ECONNREFUSED)/ {
mutate { add_tag => ["error", "vite_error"] }
mutate { add_field => { "sentry_level" => "error" } }
mutate { add_field => { "error_message" => "Vite: %{message}" } }
}
}
# ============================================================================
# Pino Log Processing (Fallback)
# ============================================================================
if [type] == "pino" {
# Tag errors (level 50 = error, 60 = fatal)
if [level] >= 50 {
mutate { add_tag => ["error", "pino_error"] }
# Map Pino level to Sentry level and set error_message field
if [level] == 60 {
mutate { add_field => { "sentry_level" => "fatal" } }
} else {
mutate { add_field => { "sentry_level" => "error" } }
}
# Copy msg to error_message for consistent access in output
mutate { add_field => { "error_message" => "%{msg}" } }
}
}
# ============================================================================
# PostgreSQL Log Processing (ADR-050)
# ============================================================================
# PostgreSQL log format in dev container:
# "2026-01-22 14:30:00 PST [5724] postgres@flyer_crawler_dev LOG: message"
# "2026-01-22 15:06:03 PST [19851] postgres@flyer_crawler_dev ERROR: column "id" does not exist"
# Note: Timestamps are in PST (America/Los_Angeles) timezone
if [type] == "postgres" {
# Parse PostgreSQL log prefix with timezone (PST in dev, may vary in prod)
grok {
match => { "message" => "%{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{TIME} %{WORD:pg_timezone} \[%{POSINT:pg_pid}\] %{DATA:pg_user}@%{DATA:pg_database} %{WORD:pg_level}: ?%{GREEDYDATA:pg_message}" }
tag_on_failure => ["_postgres_grok_failure"]
}
# Check if this is a structured JSON log from fn_log()
if [pg_message] =~ /^\{.*"source":"postgresql".*\}$/ {
json {
source => "pg_message"
target => "fn_log"
}
# Mark as error if level is WARNING or ERROR
if [fn_log][level] in ["WARNING", "ERROR"] {
mutate { add_tag => ["error", "db_function"] }
mutate { add_field => { "sentry_level" => "warning" } }
if [fn_log][level] == "ERROR" {
mutate { replace => { "sentry_level" => "error" } }
}
# Use fn_log message for error_message
mutate { add_field => { "error_message" => "%{[fn_log][message]}" } }
}
}
# Catch native PostgreSQL errors (ERROR: or FATAL: prefix in pg_level)
if [pg_level] == "ERROR" or [pg_level] == "FATAL" {
mutate { add_tag => ["error", "postgres_native"] }
if [pg_level] == "FATAL" {
mutate { add_field => { "sentry_level" => "fatal" } }
} else {
mutate { add_field => { "sentry_level" => "error" } }
}
# Use the full pg_message for error_message
mutate { add_field => { "error_message" => "PostgreSQL %{pg_level}: %{pg_message}" } }
}
}
# ============================================================================
# NGINX Access Log Processing
# ============================================================================
if [type] == "nginx_access" {
grok {
match => { "message" => '%{IPORHOST:client_ip} - %{DATA:user} \[%{HTTPDATE:timestamp}\] "%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}" %{NUMBER:status} %{NUMBER:bytes} "%{DATA:referrer}" "%{DATA:user_agent}"' }
tag_on_failure => ["_nginx_access_grok_failure"]
}
# Tag 5xx errors for Bugsink
if [status] =~ /^5/ {
mutate { add_tag => ["error", "nginx_5xx"] }
mutate { add_field => { "sentry_level" => "error" } }
mutate { add_field => { "error_message" => "HTTP %{status} %{method} %{request}" } }
}
}
# ============================================================================
# NGINX Error Log Processing
# ============================================================================
# NGINX error log format: "2026/01/22 17:55:01 [error] 16#16: *3 message..."
if [type] == "nginx_error" {
grok {
match => { "message" => "%{YEAR}/%{MONTHNUM}/%{MONTHDAY} %{TIME} \[%{LOGLEVEL:nginx_level}\] %{POSINT:pid}#%{POSINT}: (\*%{POSINT:connection} )?%{GREEDYDATA:nginx_message}" }
tag_on_failure => ["_nginx_error_grok_failure"]
}
# Only process actual errors, not notices (like "signal process started")
if [nginx_level] in ["error", "crit", "alert", "emerg"] {
mutate { add_tag => ["error", "nginx_error"] }
# Map NGINX log level to Sentry level
if [nginx_level] in ["crit", "alert", "emerg"] {
mutate { add_field => { "sentry_level" => "fatal" } }
} else {
mutate { add_field => { "sentry_level" => "error" } }
}
mutate { add_field => { "error_message" => "NGINX [%{nginx_level}]: %{nginx_message}" } }
}
}
# ============================================================================
# Redis Log Processing (ADR-050)
# ============================================================================
# Redis log format: "<pid>:<role> <day> <month> <time>.<ms> <level> <message>"
# Example: "1:M 22 Jan 14:30:00.123 * Ready to accept connections"
# Roles: M=master, S=slave, C=sentinel, X=cluster
# Levels: . (debug), - (verbose), * (notice), # (warning)
if [type] == "redis" {
# Parse Redis log format
grok {
match => { "message" => "%{POSINT:redis_pid}:%{WORD:redis_role} %{MONTHDAY} %{MONTH} %{YEAR}? ?%{TIME} %{DATA:redis_level_char} %{GREEDYDATA:redis_message}" }
tag_on_failure => ["_redis_grok_failure"]
}
# Map Redis level characters to human-readable levels
# . = debug, - = verbose, * = notice, # = warning
if [redis_level_char] == "#" {
mutate {
add_field => { "redis_level" => "warning" }
add_tag => ["error", "redis_warning"]
}
mutate { add_field => { "sentry_level" => "warning" } }
mutate { add_field => { "error_message" => "Redis WARNING: %{redis_message}" } }
} else if [redis_level_char] == "*" {
mutate { add_field => { "redis_level" => "notice" } }
} else if [redis_level_char] == "-" {
mutate { add_field => { "redis_level" => "verbose" } }
} else if [redis_level_char] == "." {
mutate { add_field => { "redis_level" => "debug" } }
}
# Also detect error keywords in message content (e.g., ECONNREFUSED, OOM, etc.)
if [redis_message] =~ /(?i)(error|failed|refused|denied|timeout|oom|crash|fatal|exception)/ {
if "error" not in [tags] {
mutate { add_tag => ["error", "redis_error"] }
mutate { add_field => { "sentry_level" => "error" } }
mutate { add_field => { "error_message" => "Redis ERROR: %{redis_message}" } }
}
}
}
# ============================================================================
# Generate Sentry Event ID and Ensure Required Fields for all errors
# ============================================================================
# CRITICAL: sentry_level MUST be set for all errors before output.
# Bugsink's PostgreSQL schema limits level to varchar(7), so valid values are:
# fatal, error, warning, info, debug (all <= 7 chars)
# If sentry_level is not set, the literal "%{sentry_level}" (16 chars) is sent,
# causing PostgreSQL insertion failures.
# ============================================================================
if "error" in [tags] {
# Use Ruby for robust field handling - handles all edge cases
ruby {
code => '
require "securerandom"
# Generate unique event ID for Sentry
event.set("sentry_event_id", SecureRandom.hex(16))
# =====================================================================
# CRITICAL: Validate and set sentry_level
# =====================================================================
# Valid Sentry levels (max 7 chars for Bugsink PostgreSQL schema):
# fatal, error, warning, info, debug
# Default to "error" if missing, empty, or invalid.
# =====================================================================
valid_levels = ["fatal", "error", "warning", "info", "debug"]
current_level = event.get("sentry_level")
if current_level.nil? || current_level.to_s.strip.empty? || !valid_levels.include?(current_level.to_s.downcase)
event.set("sentry_level", "error")
else
# Normalize to lowercase
event.set("sentry_level", current_level.to_s.downcase)
end
# =====================================================================
# Ensure error_message has a fallback value
# =====================================================================
error_msg = event.get("error_message")
if error_msg.nil? || error_msg.to_s.strip.empty?
fallback_msg = event.get("message") || event.get("msg") || "Unknown error"
event.set("error_message", fallback_msg.to_s)
end
'
}
}
}
output {
# ============================================================================
# Forward Errors to Bugsink (Project Routing)
# ============================================================================
# Bugsink uses Sentry-compatible API. Events must include:
# - event_id: 32 hex characters (UUID without dashes)
# - message: Human-readable error description
# - level: fatal, error, warning, info, debug
# - timestamp: Unix epoch in seconds
# - platform: "node" for backend, "javascript" for frontend
#
# Authentication via X-Sentry-Auth header with project's public key.
#
# Project Routing:
# - Project 1 (Backend): Pino app logs, PostgreSQL errors
# - Project 4 (Infrastructure): Redis, NGINX, Vite build errors
# ============================================================================
# ============================================================================
# Infrastructure Errors -> Project 4
# ============================================================================
# Redis warnings/errors, NGINX errors, and Vite build errors go to
# the Infrastructure project for separation from application code errors.
if "error" in [tags] and ([type] == "redis" or [type] == "nginx_error" or [type] == "nginx_access" or [type] == "pm2_vite") {
http {
url => "http://localhost:8000/api/4/store/"
http_method => "post"
format => "json"
headers => {
"X-Sentry-Auth" => "Sentry sentry_key=14e8791da3d347fa98073261b596cab9, sentry_version=7"
"Content-Type" => "application/json"
}
mapping => {
"event_id" => "%{sentry_event_id}"
"timestamp" => "%{@timestamp}"
"level" => "%{sentry_level}"
"platform" => "other"
"logger" => "%{type}"
"message" => "%{error_message}"
"extra" => {
"hostname" => "%{[host][name]}"
"source_type" => "%{type}"
"tags" => "%{tags}"
"original_message" => "%{message}"
"project" => "infrastructure"
}
}
}
}
# ============================================================================
# Backend Application Errors -> Project 1
# ============================================================================
# Pino application logs (API, Worker), PostgreSQL function errors, and
# native PostgreSQL errors go to the Backend API project.
else if "error" in [tags] and ([type] in ["pm2_api", "pm2_worker", "pino", "postgres"]) {
http {
url => "http://localhost:8000/api/1/store/"
http_method => "post"
format => "json"
headers => {
"X-Sentry-Auth" => "Sentry sentry_key=cea01396c56246adb5878fa5ee6b1d22, sentry_version=7"
"Content-Type" => "application/json"
}
mapping => {
"event_id" => "%{sentry_event_id}"
"timestamp" => "%{@timestamp}"
"level" => "%{sentry_level}"
"platform" => "node"
"logger" => "%{type}"
"message" => "%{error_message}"
"extra" => {
"hostname" => "%{[host][name]}"
"source_type" => "%{type}"
"tags" => "%{tags}"
"original_message" => "%{message}"
"project" => "backend"
}
}
}
}
# ============================================================================
# Fallback: Any other errors -> Project 1
# ============================================================================
# Catch-all for any errors that don't match specific routing rules.
else if "error" in [tags] {
http {
url => "http://localhost:8000/api/1/store/"
http_method => "post"
format => "json"
headers => {
"X-Sentry-Auth" => "Sentry sentry_key=cea01396c56246adb5878fa5ee6b1d22, sentry_version=7"
"Content-Type" => "application/json"
}
mapping => {
"event_id" => "%{sentry_event_id}"
"timestamp" => "%{@timestamp}"
"level" => "%{sentry_level}"
"platform" => "node"
"logger" => "%{type}"
"message" => "%{error_message}"
"extra" => {
"hostname" => "%{[host][name]}"
"source_type" => "%{type}"
"tags" => "%{tags}"
"original_message" => "%{message}"
"project" => "backend-fallback"
}
}
}
}
# ============================================================================
# Store Operational Logs to Files (for debugging/audit)
# ============================================================================
# NGINX access logs (all requests, not just errors)
if [type] == "nginx_access" and "error" not in [tags] {
file {
path => "/var/log/logstash/nginx-access-%{+YYYY-MM-dd}.log"
codec => json_lines
}
}
# PostgreSQL operational logs (non-error)
if [type] == "postgres" and "error" not in [tags] {
file {
path => "/var/log/logstash/postgres-operational-%{+YYYY-MM-dd}.log"
codec => json_lines
}
}
# Redis operational logs (non-error)
if [type] == "redis" and "error" not in [tags] {
file {
path => "/var/log/logstash/redis-operational-%{+YYYY-MM-dd}.log"
codec => json_lines
}
}
# PM2 API operational logs (non-error) - ADR-014
if [type] == "pm2_api" and "error" not in [tags] {
file {
path => "/var/log/logstash/pm2-api-%{+YYYY-MM-dd}.log"
codec => json_lines
}
}
# PM2 Worker operational logs (non-error) - ADR-014
if [type] == "pm2_worker" and "error" not in [tags] {
file {
path => "/var/log/logstash/pm2-worker-%{+YYYY-MM-dd}.log"
codec => json_lines
}
}
# PM2 Vite operational logs (non-error) - ADR-014
if [type] == "pm2_vite" and "error" not in [tags] {
file {
path => "/var/log/logstash/pm2-vite-%{+YYYY-MM-dd}.log"
codec => json_lines
}
}
# ============================================================================
# Debug Output (for development only)
# ============================================================================
# Uncomment to see all processed events in Logstash stdout
# stdout { codec => rubydebug }
}

View File

@@ -9,13 +9,39 @@
# - Frontend accessible on https://localhost (port 443)
# - Backend API on http://localhost:3001
# - Port 80 redirects to HTTPS
#
# IMPORTANT: Dual Hostname Configuration (localhost AND 127.0.0.1)
# ============================================================================
# The server_name directive includes BOTH 'localhost' and '127.0.0.1' to
# prevent SSL certificate errors when resources use different hostnames.
#
# Problem Scenario:
# 1. User accesses site via https://localhost/
# 2. Database stores image URLs as https://127.0.0.1/flyer-images/...
# 3. Browser treats these as different origins, showing ERR_CERT_AUTHORITY_INVALID
#
# Solution:
# - mkcert generates certificates valid for: localhost, 127.0.0.1, ::1
# - NGINX accepts requests to BOTH hostnames using the same certificate
# - Users can access via either hostname without SSL warnings
#
# The self-signed certificate is generated in Dockerfile.dev with:
# mkcert localhost 127.0.0.1 ::1
#
# This creates a certificate with Subject Alternative Names (SANs) for all
# three hostnames, allowing NGINX to serve valid HTTPS for each.
#
# See also:
# - Dockerfile.dev (certificate generation, ~line 69)
# - docs/FLYER-URL-CONFIGURATION.md (URL configuration details)
# - docs/development/DEBUGGING.md (SSL troubleshooting)
# ============================================================================
# HTTPS Server (main)
server {
listen 443 ssl;
listen [::]:443 ssl;
server_name localhost;
server_name localhost 127.0.0.1;
# SSL Configuration (self-signed certificates from mkcert)
ssl_certificate /app/certs/localhost.crt;
@@ -34,6 +60,37 @@ server {
proxy_set_header X-Forwarded-Proto $scheme;
}
# ============================================================================
# Bugsink Sentry API Proxy (for frontend error reporting)
# ============================================================================
# The frontend Sentry SDK cannot reach localhost:8000 directly from the browser
# because port 8000 is only accessible within the container network.
# This proxy allows the browser to send errors to https://localhost/bugsink-api/
# which NGINX forwards to the Bugsink container on port 8000.
#
# Frontend DSN format: https://localhost/bugsink-api/<project_id>
# Example: https://localhost/bugsink-api/2 for Frontend (Dev) project
#
# The Sentry SDK sends POST requests to /bugsink-api/<project>/store/
# This proxy strips /bugsink-api and forwards to http://localhost:8000/api/
# ============================================================================
location /bugsink-api/ {
proxy_pass http://localhost:8000/api/;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Allow large error payloads with stack traces
client_max_body_size 10M;
# Timeouts for error reporting (should be fast)
proxy_connect_timeout 10s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
}
# Proxy WebSocket connections for real-time notifications
location /ws {
proxy_pass http://localhost:3001;
@@ -80,7 +137,7 @@ server {
server {
listen 80;
listen [::]:80;
server_name localhost;
server_name localhost 127.0.0.1;
return 301 https://$host$request_uri;
}

View File

@@ -2,6 +2,10 @@
# This file is mounted into the PostgreSQL container to enable structured logging
# from database functions via fn_log()
# Timezone: PST (America/Los_Angeles) for consistent log timestamps
timezone = 'America/Los_Angeles'
log_timezone = 'America/Los_Angeles'
# Enable logging to files for Logstash pickup
logging_collector = on
log_destination = 'stderr'

View File

@@ -0,0 +1,24 @@
#!/bin/sh
# docker/redis/docker-entrypoint.sh
# ============================================================================
# Redis Entrypoint Script for Dev Container
# ============================================================================
# This script ensures the Redis log directory exists and is writable before
# starting Redis. This is needed because the redis_logs volume is mounted
# at /var/log/redis but Redis Alpine runs as a non-root user.
#
# Related: ADR-050 (Log aggregation via Logstash)
# ============================================================================
set -e
# Create log directory if it doesn't exist
mkdir -p /var/log/redis
# Ensure redis user can write to the log directory
# Note: In Alpine Redis image, redis runs as uid 999
chown -R redis:redis /var/log/redis
chmod 755 /var/log/redis
# Start Redis with the provided arguments
exec redis-server "$@"

View File

@@ -0,0 +1,393 @@
# AI Documentation Index
Machine-optimized navigation for AI agents. Structured for vector retrieval and semantic search.
---
## Quick Lookup Table
| Task/Question | Primary Doc | Section/ADR |
| ----------------------- | --------------------------------------------------- | --------------------------------------- |
| Add new API endpoint | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | API Response Patterns, Input Validation |
| Add repository method | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Repository Patterns (get*/find*/list\*) |
| Fix failing test | [TESTING.md](development/TESTING.md) | Known Integration Test Issues |
| Run tests correctly | [TESTING.md](development/TESTING.md) | Test Execution Environment |
| Add database column | [DATABASE-GUIDE.md](subagents/DATABASE-GUIDE.md) | Schema sync required |
| Deploy to production | [DEPLOYMENT.md](operations/DEPLOYMENT.md) | Application Deployment |
| Debug container issue | [DEBUGGING.md](development/DEBUGGING.md) | Container Issues |
| Configure environment | [ENVIRONMENT.md](getting-started/ENVIRONMENT.md) | Configuration by Environment |
| Add background job | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Background Jobs |
| Handle errors correctly | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Error Handling |
| Use transactions | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Transaction Management |
| Add authentication | [AUTHENTICATION.md](architecture/AUTHENTICATION.md) | JWT Token Architecture |
| Cache data | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Caching |
| Check PM2 status | [DEV-CONTAINER.md](development/DEV-CONTAINER.md) | PM2 Process Management |
| View logs | [DEBUGGING.md](development/DEBUGGING.md) | PM2 Log Access |
| Understand architecture | [OVERVIEW.md](architecture/OVERVIEW.md) | System Architecture Diagram |
| Check ADR for decision | [adr/index.md](adr/index.md) | ADR by category |
| Use subagent | [subagents/OVERVIEW.md](subagents/OVERVIEW.md) | Available Subagents |
| API versioning | [API-VERSIONING.md](development/API-VERSIONING.md) | Phase 2 infrastructure |
---
## Documentation Tree
```
docs/
+-- AI-DOCUMENTATION-INDEX.md # THIS FILE - AI navigation index
+-- README.md # Human-readable doc hub
|
+-- adr/ # Architecture Decision Records (57 ADRs)
| +-- index.md # ADR index by category
| +-- 0001-*.md # Standardized error handling
| +-- 0002-*.md # Transaction management (withTransaction)
| +-- 0003-*.md # Input validation (Zod middleware)
| +-- 0008-*.md # API versioning (/api/v1/)
| +-- 0014-*.md # Platform: Linux only (CRITICAL)
| +-- 0028-*.md # API response (sendSuccess/sendError)
| +-- 0034-*.md # Repository pattern (get*/find*/list*)
| +-- 0035-*.md # Service layer architecture
| +-- 0050-*.md # PostgreSQL observability + Logstash
| +-- 0057-*.md # Test remediation post-API versioning
| +-- adr-implementation-tracker.md # Implementation status
|
+-- architecture/
| +-- OVERVIEW.md # System architecture, data flows, entities
| +-- DATABASE.md # Schema design, extensions, setup
| +-- AUTHENTICATION.md # OAuth, JWT, security features
| +-- WEBSOCKET_USAGE.md # Real-time communication patterns
| +-- api-versioning-infrastructure.md # Phase 2 versioning details
|
+-- development/
| +-- CODE-PATTERNS.md # Error handling, repos, API responses
| +-- TESTING.md # Unit/integration/E2E, known issues
| +-- DEBUGGING.md # Container, DB, API, PM2 debugging
| +-- DEV-CONTAINER.md # PM2, Logstash, container services
| +-- API-VERSIONING.md # API versioning workflows
| +-- DESIGN_TOKENS.md # Neo-Brutalism design system
| +-- ERROR-LOGGING-PATHS.md # req.originalUrl pattern
| +-- test-path-migration.md # Test file reorganization
|
+-- getting-started/
| +-- QUICKSTART.md # Quick setup instructions
| +-- INSTALL.md # Full installation guide
| +-- ENVIRONMENT.md # Environment variables reference
|
+-- operations/
| +-- DEPLOYMENT.md # Production deployment guide
| +-- BARE-METAL-SETUP.md # Server provisioning
| +-- MONITORING.md # Bugsink, health checks
| +-- LOGSTASH-QUICK-REF.md # Log aggregation reference
| +-- LOGSTASH-TROUBLESHOOTING.md # Logstash debugging
|
+-- subagents/
| +-- OVERVIEW.md # Subagent system introduction
| +-- CODER-GUIDE.md # Code development patterns
| +-- TESTER-GUIDE.md # Testing strategies
| +-- DATABASE-GUIDE.md # Database workflows
| +-- DEVOPS-GUIDE.md # Deployment/infrastructure
| +-- FRONTEND-GUIDE.md # UI/UX development
| +-- AI-USAGE-GUIDE.md # Gemini integration
| +-- DOCUMENTATION-GUIDE.md # Writing docs
| +-- SECURITY-DEBUG-GUIDE.md # Security and debugging
|
+-- tools/
| +-- MCP-CONFIGURATION.md # MCP servers setup
| +-- BUGSINK-SETUP.md # Error tracking setup
| +-- VSCODE-SETUP.md # Editor configuration
|
+-- archive/ # Historical docs, session notes
+-- sessions/ # Development session logs
+-- plans/ # Feature implementation plans
+-- research/ # Investigation notes
```
---
## Problem-to-Document Mapping
### Database Issues
| Problem | Documents |
| -------------------- | ----------------------------------------------------------------------------------------------- |
| Schema out of sync | [DATABASE-GUIDE.md](subagents/DATABASE-GUIDE.md), [CLAUDE.md](../CLAUDE.md) schema sync section |
| Migration needed | [DATABASE.md](architecture/DATABASE.md), ADR-013, ADR-023 |
| Query performance | [DEBUGGING.md](development/DEBUGGING.md) Query Performance Issues |
| Connection errors | [DEBUGGING.md](development/DEBUGGING.md) Database Issues |
| Transaction patterns | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Transaction Management, ADR-002 |
| Repository methods | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Repository Patterns, ADR-034 |
### Test Failures
| Problem | Documents |
| ---------------------------- | --------------------------------------------------------------------- |
| Tests fail in container | [TESTING.md](development/TESTING.md), ADR-014 |
| Vitest globalSetup isolation | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #1 |
| Cache stale after insert | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #3 |
| Queue interference | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #2 |
| API path mismatches | [TESTING.md](development/TESTING.md) API Versioning in Tests, ADR-057 |
| Type check failures | [DEBUGGING.md](development/DEBUGGING.md) Type Check Failures |
| TZ environment breaks async | [CLAUDE.md](../CLAUDE.md) Integration Test Issues #7 |
### Deployment Issues
| Problem | Documents |
| --------------------- | ------------------------------------------------------------------------------------- |
| PM2 not starting | [DEBUGGING.md](development/DEBUGGING.md) PM2 Process Issues |
| NGINX configuration | [DEPLOYMENT.md](operations/DEPLOYMENT.md) NGINX Configuration |
| SSL certificates | [DEBUGGING.md](development/DEBUGGING.md) SSL Certificate Issues |
| CI/CD failures | [DEPLOYMENT.md](operations/DEPLOYMENT.md) CI/CD Pipeline, ADR-017 |
| Container won't start | [DEBUGGING.md](development/DEBUGGING.md) Container Issues |
| Bugsink not receiving | [BUGSINK-SETUP.md](tools/BUGSINK-SETUP.md), [MONITORING.md](operations/MONITORING.md) |
### Frontend/UI Changes
| Problem | Documents |
| ------------------ | --------------------------------------------------------------- |
| Component patterns | [FRONTEND-GUIDE.md](subagents/FRONTEND-GUIDE.md), ADR-044 |
| Design tokens | [DESIGN_TOKENS.md](development/DESIGN_TOKENS.md), ADR-012 |
| State management | ADR-005, [OVERVIEW.md](architecture/OVERVIEW.md) Frontend Stack |
| Hot reload broken | [DEBUGGING.md](development/DEBUGGING.md) Frontend Issues |
| CORS errors | [DEBUGGING.md](development/DEBUGGING.md) API Calls Failing |
### API Development
| Problem | Documents |
| ---------------- | ------------------------------------------------------------------------------- |
| Response format | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) API Response Patterns, ADR-028 |
| Input validation | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Input Validation, ADR-003 |
| Error handling | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Error Handling, ADR-001 |
| Rate limiting | ADR-032, [OVERVIEW.md](architecture/OVERVIEW.md) |
| API versioning | [API-VERSIONING.md](development/API-VERSIONING.md), ADR-008 |
| Authentication | [AUTHENTICATION.md](architecture/AUTHENTICATION.md), ADR-048 |
### Background Jobs
| Problem | Documents |
| ------------------- | ------------------------------------------------------------------------- |
| Jobs not processing | [DEBUGGING.md](development/DEBUGGING.md) Background Job Issues |
| Queue configuration | [CODE-PATTERNS.md](development/CODE-PATTERNS.md) Background Jobs, ADR-006 |
| Worker crashes | [DEBUGGING.md](development/DEBUGGING.md), ADR-053 |
| Scheduled jobs | ADR-037, [OVERVIEW.md](architecture/OVERVIEW.md) Scheduled Jobs |
---
## Document Priority Matrix
### CRITICAL (Read First)
| Document | Purpose | Key Content |
| --------------------------------------------------------------- | ----------------------- | ----------------------------- |
| [CLAUDE.md](../CLAUDE.md) | AI agent instructions | Rules, patterns, known issues |
| [ADR-014](adr/0014-containerization-and-deployment-strategy.md) | Platform requirement | Tests MUST run in container |
| [DEV-CONTAINER.md](development/DEV-CONTAINER.md) | Development environment | PM2, Logstash, services |
### HIGH (Core Development)
| Document | Purpose | Key Content |
| --------------------------------------------------- | ----------------- | ---------------------------- |
| [CODE-PATTERNS.md](development/CODE-PATTERNS.md) | Code templates | Error handling, repos, APIs |
| [TESTING.md](development/TESTING.md) | Test execution | Commands, known issues |
| [DATABASE.md](architecture/DATABASE.md) | Schema reference | Setup, extensions, users |
| [ADR-034](adr/0034-repository-pattern-standards.md) | Repository naming | get*/find*/list\* |
| [ADR-028](adr/0028-api-response-standardization.md) | API responses | sendSuccess/sendError |
| [ADR-001](adr/0001-standardized-error-handling.md) | Error handling | handleDbError, NotFoundError |
### MEDIUM (Specialized Tasks)
| Document | Purpose | Key Content |
| --------------------------------------------------- | --------------------- | ------------------------ |
| [subagents/OVERVIEW.md](subagents/OVERVIEW.md) | Subagent selection | When to delegate |
| [DEPLOYMENT.md](operations/DEPLOYMENT.md) | Production deployment | PM2, NGINX, CI/CD |
| [DEBUGGING.md](development/DEBUGGING.md) | Troubleshooting | Common issues, solutions |
| [ENVIRONMENT.md](getting-started/ENVIRONMENT.md) | Config reference | Variables by environment |
| [AUTHENTICATION.md](architecture/AUTHENTICATION.md) | Auth patterns | OAuth, JWT, security |
| [API-VERSIONING.md](development/API-VERSIONING.md) | Versioning | /api/v1/ prefix |
### LOW (Reference/Historical)
| Document | Purpose | Key Content |
| -------------------- | ------------------ | ------------------------- |
| [archive/](archive/) | Historical docs | Session notes, old plans |
| ADR-013, ADR-023 | Migration strategy | Proposed, not implemented |
| ADR-024 | Feature flags | Proposed |
| ADR-025 | i18n/l10n | Proposed |
---
## Cross-Reference Matrix
| Document | References | Referenced By |
| -------------------- | ------------------------------------------------------------------------------- | ------------------------------------------------------ |
| **CLAUDE.md** | ADR-001, ADR-002, ADR-008, ADR-014, ADR-028, ADR-034, ADR-035, ADR-050, ADR-057 | All development docs |
| **ADR-008** | ADR-028 | API-VERSIONING.md, TESTING.md, ADR-057 |
| **ADR-014** | - | CLAUDE.md, TESTING.md, DEPLOYMENT.md, DEV-CONTAINER.md |
| **ADR-028** | ADR-001 | CODE-PATTERNS.md, OVERVIEW.md |
| **ADR-034** | ADR-001 | CODE-PATTERNS.md, DATABASE-GUIDE.md |
| **ADR-057** | ADR-008, ADR-028 | TESTING.md |
| **CODE-PATTERNS.md** | ADR-001, ADR-002, ADR-003, ADR-028, ADR-034, ADR-036, ADR-048 | CODER-GUIDE.md |
| **TESTING.md** | ADR-014, ADR-057, CLAUDE.md | TESTER-GUIDE.md, DEBUGGING.md |
| **DEBUGGING.md** | DEV-CONTAINER.md, TESTING.md, MONITORING.md | DEVOPS-GUIDE.md |
| **DEV-CONTAINER.md** | ADR-014, ADR-050, ecosystem.dev.config.cjs | DEBUGGING.md, CLAUDE.md |
| **OVERVIEW.md** | ADR-001 through ADR-050+ | All architecture docs |
| **DATABASE.md** | ADR-002, ADR-013, ADR-055 | DATABASE-GUIDE.md |
---
## Navigation Patterns
### Adding a Feature
```
1. CLAUDE.md -> Project rules, patterns
2. CODE-PATTERNS.md -> Implementation templates
3. Relevant subagent guide -> Domain-specific patterns
4. Related ADRs -> Design decisions
5. TESTING.md -> Test requirements
```
### Fixing a Bug
```
1. DEBUGGING.md -> Common issues checklist
2. TESTING.md -> Run tests in container
3. Error logs (pm2/bugsink) -> Identify root cause
4. CODE-PATTERNS.md -> Correct pattern reference
5. Related ADR -> Architectural context
```
### Deploying
```
1. DEPLOYMENT.md -> Deployment procedures
2. ENVIRONMENT.md -> Required variables
3. MONITORING.md -> Health check verification
4. LOGSTASH-QUICK-REF.md -> Log aggregation check
```
### Database Changes
```
1. DATABASE-GUIDE.md -> Schema sync requirements (CRITICAL)
2. DATABASE.md -> Schema design patterns
3. ADR-002 -> Transaction patterns
4. ADR-034 -> Repository methods
5. ADR-055 -> Normalization rules
```
### Subagent Selection
| Task Type | Subagent | Guide |
| --------------------- | ------------------------- | ------------------------------------------------------------ |
| Write production code | `coder` | [CODER-GUIDE.md](subagents/CODER-GUIDE.md) |
| Database changes | `db-dev` | [DATABASE-GUIDE.md](subagents/DATABASE-GUIDE.md) |
| Create tests | `testwriter` | [TESTER-GUIDE.md](subagents/TESTER-GUIDE.md) |
| Fix failing tests | `tester` | [TESTER-GUIDE.md](subagents/TESTER-GUIDE.md) |
| Container/deployment | `devops` | [DEVOPS-GUIDE.md](subagents/DEVOPS-GUIDE.md) |
| UI components | `frontend-specialist` | [FRONTEND-GUIDE.md](subagents/FRONTEND-GUIDE.md) |
| External APIs | `integrations-specialist` | - |
| Security review | `security-engineer` | [SECURITY-DEBUG-GUIDE.md](subagents/SECURITY-DEBUG-GUIDE.md) |
| Production errors | `log-debug` | [SECURITY-DEBUG-GUIDE.md](subagents/SECURITY-DEBUG-GUIDE.md) |
| AI/Gemini issues | `ai-usage` | [AI-USAGE-GUIDE.md](subagents/AI-USAGE-GUIDE.md) |
---
## Key File Quick Reference
### Configuration
| File | Purpose |
| -------------------------- | ---------------------------- |
| `server.ts` | Express app setup |
| `src/config/env.ts` | Environment validation (Zod) |
| `ecosystem.dev.config.cjs` | PM2 dev config |
| `ecosystem.config.cjs` | PM2 prod config |
| `vite.config.ts` | Vite build config |
### Core Implementation
| File | Purpose |
| ----------------------------------- | ----------------------------------- |
| `src/routes/*.routes.ts` | API route handlers |
| `src/services/db/*.db.ts` | Repository layer |
| `src/services/*.server.ts` | Server-only services |
| `src/services/queues.server.ts` | BullMQ queue definitions |
| `src/services/workers.server.ts` | BullMQ workers |
| `src/utils/apiResponse.ts` | sendSuccess/sendError/sendPaginated |
| `src/services/db/errors.db.ts` | handleDbError, NotFoundError |
| `src/services/db/transaction.db.ts` | withTransaction |
### Database Schema
| File | Purpose |
| ------------------------------ | ----------------------------------- |
| `sql/master_schema_rollup.sql` | Test DB, complete reference |
| `sql/initial_schema.sql` | Fresh install (identical to rollup) |
| `sql/migrations/*.sql` | Production ALTER statements |
### Testing
| File | Purpose |
| ---------------------------------- | ----------------------- |
| `vitest.config.ts` | Unit test config |
| `vitest.config.integration.ts` | Integration test config |
| `vitest.config.e2e.ts` | E2E test config |
| `src/tests/utils/mockFactories.ts` | Mock data factories |
| `src/tests/utils/storeHelpers.ts` | Store test helpers |
---
## ADR Quick Reference
### By Implementation Status
**Implemented**: 001, 002, 003, 004, 006, 008, 009, 010, 016, 017, 020, 021, 028, 032, 033, 034, 035, 036, 037, 038, 040, 041, 043, 044, 045, 046, 050, 051, 052, 055, 057
**Partially Implemented**: 012, 014, 015, 048
**Proposed**: 011, 013, 022, 023, 024, 025, 029, 030, 031, 039, 047, 053, 054, 056
### By Category
| Category | ADRs |
| --------------------- | ------------------------------------------- |
| Core Infrastructure | 002, 007, 020, 030 |
| Data Management | 009, 013, 019, 023, 031, 055 |
| API & Integration | 003, 008, 018, 022, 028 |
| Security | 001, 011, 016, 029, 032, 033, 048 |
| Observability | 004, 015, 050, 051, 052, 056 |
| Deployment & Ops | 006, 014, 017, 024, 037, 038, 053, 054 |
| Frontend/UI | 005, 012, 025, 026, 044 |
| Dev Workflow | 010, 021, 027, 040, 045, 047, 057 |
| Architecture Patterns | 034, 035, 036, 039, 041, 042, 043, 046, 049 |
---
## Essential Commands
```bash
# Run all tests (MUST use container)
podman exec -it flyer-crawler-dev npm test
# Run unit tests
podman exec -it flyer-crawler-dev npm run test:unit
# Run type check
podman exec -it flyer-crawler-dev npm run type-check
# Run integration tests
podman exec -it flyer-crawler-dev npm run test:integration
# PM2 status
podman exec -it flyer-crawler-dev pm2 status
# PM2 logs
podman exec -it flyer-crawler-dev pm2 logs
# Restart all processes
podman exec -it flyer-crawler-dev pm2 restart all
```
---
_This index is optimized for AI agent consumption. Updated: 2026-01-28_

View File

@@ -28,9 +28,28 @@ The `.env.local` file uses `localhost` while `compose.dev.yml` uses `127.0.0.1`.
## HTTPS Setup
- Self-signed certificates auto-generated with mkcert on container startup
- CSRF Protection: Django configured with `SECURE_PROXY_SSL_HEADER` to trust `X-Forwarded-Proto` from nginx
- CSRF Protection: Django configured with `CSRF_TRUSTED_ORIGINS` for both `localhost` and `127.0.0.1` (see below)
- HTTPS proxy: nginx on port 8443 proxies to Bugsink on port 8000
- HTTPS is for UI access only - Sentry SDK uses HTTP directly
### CSRF Configuration
Django 4.0+ requires `CSRF_TRUSTED_ORIGINS` for HTTPS POST requests. The Bugsink configuration (`Dockerfile.dev`) includes:
```python
CSRF_TRUSTED_ORIGINS = [
"https://localhost:8443",
"https://127.0.0.1:8443",
"http://localhost:8000",
"http://127.0.0.1:8000",
]
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
```
**Both hostnames are required** because browsers treat `localhost` and `127.0.0.1` as different origins.
If you get "CSRF verification failed" errors, see [BUGSINK-SETUP.md](tools/BUGSINK-SETUP.md#csrf-verification-failed) for troubleshooting.
## Isolation Benefits
- Dev errors stay local, don't pollute production/test dashboards

View File

@@ -12,6 +12,87 @@ Flyer image and icon URLs are environment-specific to ensure they point to the c
| Test | `https://flyer-crawler-test.projectium.com` | `https://flyer-crawler-test.projectium.com/flyer-images/safeway-flyer.jpg` |
| Production | `https://flyer-crawler.projectium.com` | `https://flyer-crawler.projectium.com/flyer-images/safeway-flyer.jpg` |
**Note:** The dev container accepts connections to **both** `https://localhost/` and `https://127.0.0.1/` thanks to the SSL certificate and NGINX configuration. See [SSL Certificate Configuration](#ssl-certificate-configuration-dev-container) below.
## SSL Certificate Configuration (Dev Container)
The dev container uses self-signed certificates generated by [mkcert](https://github.com/FiloSottile/mkcert) to enable HTTPS locally. This configuration solves a common mixed-origin SSL issue.
### The Problem
When users access the site via `https://localhost/` but image URLs in the database use `https://127.0.0.1/...`, browsers treat these as different origins. This causes `ERR_CERT_AUTHORITY_INVALID` errors when loading images, even though both hostnames point to the same server.
### The Solution
1. **Certificate Generation** (`Dockerfile.dev`):
```bash
mkcert localhost 127.0.0.1 ::1
```
This creates a certificate with Subject Alternative Names (SANs) for all three hostnames.
2. **NGINX Configuration** (`docker/nginx/dev.conf`):
```nginx
server_name localhost 127.0.0.1;
```
NGINX accepts requests to both hostnames using the same SSL certificate.
### How It Works
| Component | Configuration |
| -------------------- | ---------------------------------------------------- |
| SSL Certificate SANs | `localhost`, `127.0.0.1`, `::1` |
| NGINX `server_name` | `localhost 127.0.0.1` |
| Seed Script URLs | Uses `https://127.0.0.1` (works with DB constraints) |
| User Access | Either `https://localhost/` or `https://127.0.0.1/` |
### Why This Matters
- **Database Constraints**: The `flyers` table has CHECK constraints requiring URLs to start with `http://` or `https://`. Relative URLs are not allowed.
- **Consistent Behavior**: Users can access the site using either hostname without SSL warnings.
- **Same Certificate**: Both hostnames use the same self-signed certificate, eliminating mixed-content errors.
### Verifying the Configuration
```bash
# Check certificate SANs
podman exec flyer-crawler-dev openssl x509 -in /app/certs/localhost.crt -text -noout | grep -A1 "Subject Alternative Name"
# Expected output:
# X509v3 Subject Alternative Name:
# DNS:localhost, IP Address:127.0.0.1, IP Address:0:0:0:0:0:0:0:1
# Test both hostnames respond
curl -k https://localhost/health
curl -k https://127.0.0.1/health
```
### Troubleshooting SSL Issues
If you encounter `ERR_CERT_AUTHORITY_INVALID`:
1. **Check NGINX is running**: `podman exec flyer-crawler-dev nginx -t`
2. **Verify certificate exists**: `podman exec flyer-crawler-dev ls -la /app/certs/`
3. **Ensure both hostnames are in server_name**: Check `/etc/nginx/sites-available/default`
4. **Rebuild container if needed**: The certificate is generated at build time
### Permanent Fix: Install CA Certificate (Recommended)
To permanently eliminate SSL certificate warnings, install the mkcert CA certificate on your system. This is optional but provides a better development experience.
The CA certificate is located at `certs/mkcert-ca.crt` in the project root. See [`certs/README.md`](../certs/README.md) for platform-specific installation instructions (Windows, macOS, Linux, Firefox).
After installation:
- Your browser will trust all mkcert certificates without warnings
- Both `https://localhost/` and `https://127.0.0.1/` will work without SSL errors
- Flyer images will load without `ERR_CERT_AUTHORITY_INVALID` errors
See also: [Debugging Guide - SSL Issues](development/DEBUGGING.md#ssl-certificate-issues)
## NGINX Static File Serving
All environments serve flyer images as static files with browser caching:
@@ -41,7 +122,7 @@ Set `FLYER_BASE_URL` in your environment configuration:
```bash
# Dev container (.env)
FLYER_BASE_URL=https://127.0.0.1
FLYER_BASE_URL=https://localhost
# Test environment
FLYER_BASE_URL=https://flyer-crawler-test.projectium.com
@@ -58,7 +139,7 @@ The seed script ([src/db/seed.ts](../src/db/seed.ts)) automatically uses the cor
2. `NODE_ENV` value:
- `production` → `https://flyer-crawler.projectium.com`
- `test` → `https://flyer-crawler-test.projectium.com`
- Default → `https://127.0.0.1`
- Default → `https://localhost`
The seed script also copies test images from `src/tests/assets/` to `public/flyer-images/`:
@@ -78,8 +159,8 @@ podman exec -it flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev
# Run the update (dev container uses HTTPS with self-signed certs)
UPDATE flyers
SET
image_url = REPLACE(image_url, 'example.com', '127.0.0.1'),
icon_url = REPLACE(icon_url, 'example.com', '127.0.0.1')
image_url = REPLACE(image_url, 'example.com', 'localhost'),
icon_url = REPLACE(icon_url, 'example.com', 'localhost')
WHERE
image_url LIKE '%example.com%'
OR icon_url LIKE '%example.com%';
@@ -131,8 +212,10 @@ export const getFlyerBaseUrl = (): string => {
}
// Check if we're in dev container (DB_HOST=postgres is typical indicator)
// Use 'localhost' instead of '127.0.0.1' to match the hostname users access
// This avoids SSL certificate mixed-origin issues in browsers
if (process.env.DB_HOST === 'postgres' || process.env.DB_HOST === '127.0.0.1') {
return 'https://127.0.0.1';
return 'https://localhost';
}
if (process.env.NODE_ENV === 'production') {
@@ -169,13 +252,13 @@ This approach ensures tests work correctly in all environments (dev container, C
| `docker/nginx/dev.conf` | Added `/flyer-images/` location block for static file serving |
| `.env.example` | Added `FLYER_BASE_URL` variable |
| `sql/update_flyer_urls.sql` | SQL script for updating existing data |
| Test files | Updated mock data to use `https://127.0.0.1` |
| Test files | Updated mock data to use `https://localhost` |
## Summary
- Seed script now uses environment-specific HTTPS URLs
- Seed script copies test images from `src/tests/assets/` to `public/flyer-images/`
- NGINX serves `/flyer-images/` as static files with 7-day cache
- Test files updated with `https://127.0.0.1`
- Test files updated with `https://localhost` (not `127.0.0.1` to avoid SSL mixed-origin issues)
- SQL script provided for updating existing data
- Documentation updated for each environment

View File

@@ -32,8 +32,10 @@ Day-to-day development guides:
- [Testing Guide](development/TESTING.md) - Unit, integration, and E2E testing
- [Code Patterns](development/CODE-PATTERNS.md) - Common code patterns and ADR examples
- [API Versioning](development/API-VERSIONING.md) - API versioning infrastructure and workflows
- [Design Tokens](development/DESIGN_TOKENS.md) - UI design system and Neo-Brutalism
- [Debugging Guide](development/DEBUGGING.md) - Common debugging patterns
- [Dev Container](development/DEV-CONTAINER.md) - Development container setup and PM2
### 🔧 Operations

View File

@@ -1,5 +1,21 @@
# DevOps Subagent Reference
## Critical Rule: Server Access is READ-ONLY
**Claude Code has READ-ONLY access to production/test servers.** The `claude-win10` user cannot execute write operations directly.
When working with production/test servers:
1. **Provide commands** for the user to execute (do not attempt SSH)
2. **Wait for user** to report command output
3. **Provide fix commands** 1-3 at a time (errors may cascade)
4. **Verify success** with read-only commands after user executes fixes
5. **Document findings** in relevant documentation
Commands in this reference are for the **user to run on the server**, not for Claude to execute.
---
## Critical Rule: Git Bash Path Conversion
Git Bash on Windows auto-converts Unix paths, breaking container commands.
@@ -69,12 +85,11 @@ MSYS_NO_PATHCONV=1 podman exec -it flyer-crawler-dev psql -U postgres -d flyer_c
## PM2 Commands
### Production Server (via SSH)
### Production Server
> **Note**: These commands are for the **user to execute on the server**. Claude Code provides commands but cannot run them directly. See [Server Access is READ-ONLY](#critical-rule-server-access-is-read-only) above.
```bash
# SSH to server
ssh root@projectium.com
# List all apps
pm2 list
@@ -210,9 +225,10 @@ INFO
### Production
> **Note**: User executes these commands on the server.
```bash
# Via SSH
ssh root@projectium.com
# Access Redis CLI
redis-cli -a $REDIS_PASSWORD
# Flush cache (use with caution)
@@ -278,10 +294,9 @@ Trigger `manual-db-backup.yml` from Gitea Actions UI.
### Manual Backup
```bash
# SSH to server
ssh root@projectium.com
> **Note**: User executes these commands on the server.
```bash
# Backup
PGPASSWORD=$DB_PASSWORD pg_dump -h $DB_HOST -U $DB_USER $DB_NAME > backup_$(date +%Y%m%d).sql
@@ -301,8 +316,10 @@ MSYS_NO_PATHCONV=1 podman exec -e DATABASE_URL=postgresql://bugsink:bugsink_dev_
### Production Token Generation
> **Note**: User executes this command on the server.
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
cd /opt/bugsink && bugsink-manage create_auth_token
```
---

View File

@@ -2,17 +2,408 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Accepted (Phase 2 Complete - All Tasks Done)
**Updated**: 2026-01-27
**Completion Note**: Phase 2 fully complete including test path migration. All 23 integration test files updated to use `/api/v1/` paths. Test suite improved from 274/348 to 345/348 passing (3 remain as todo/skipped for known issues unrelated to versioning).
## Context
As the application grows, the API will need to evolve. Making breaking changes to existing endpoints can disrupt clients (e.g., a mobile app or the web frontend). The current routing has no formal versioning scheme.
### Current State
As of January 2026, the API operates without explicit versioning:
- All routes are mounted under `/api/*` (e.g., `/api/flyers`, `/api/users/profile`)
- The frontend `apiClient.ts` uses `API_BASE_URL = '/api'` as the base
- No version prefix exists in route paths
- Breaking changes would immediately affect all consumers
### Why Version Now?
1. **Future Mobile App**: A native mobile app is planned, which will have slower update cycles than the web frontend
2. **Third-Party Integrations**: Store partners may integrate with our API
3. **Deprecation Path**: Need a clear way to deprecate and remove endpoints
4. **Documentation**: OpenAPI documentation (ADR-018) should reflect versioned endpoints
## Decision
We will adopt a URI-based versioning strategy for the API. All new and existing routes will be prefixed with a version number (e.g., `/api/v1/flyers`). This ADR establishes a clear policy for when to introduce a new version (`v2`) and how to manage deprecation of old versions.
We will adopt a URI-based versioning strategy for the API using a phased rollout approach. All routes will be prefixed with a version number (e.g., `/api/v1/flyers`).
### Versioning Format
```text
/api/v{MAJOR}/resource
```
- **MAJOR**: Incremented for breaking changes (v1, v2, v3...)
- Resource paths remain unchanged within a version
### What Constitutes a Breaking Change?
The following changes require a new API version:
| Change Type | Breaking? | Example |
| ----------------------------- | --------- | ------------------------------------------ |
| Remove endpoint | Yes | DELETE `/api/v1/legacy-feature` |
| Remove response field | Yes | Remove `user.email` from response |
| Change response field type | Yes | `id: number` to `id: string` |
| Change required request field | Yes | Make `email` required when it was optional |
| Rename endpoint | Yes | `/users` to `/accounts` |
| Add optional response field | No | Add `user.avatar_url` |
| Add optional request field | No | Add optional `page` parameter |
| Add new endpoint | No | Add `/api/v1/new-feature` |
| Fix bug in behavior | No\* | Correct calculation error |
\*Bug fixes may warrant version increment if clients depend on the buggy behavior.
## Implementation Phases
### Phase 1: Namespace Migration (Current)
**Goal**: Add `/v1/` prefix to all existing routes without behavioral changes.
**Changes Required**:
1. **server.ts**: Update all route registrations
```typescript
// Before
app.use('/api/auth', authRouter);
// After
app.use('/api/v1/auth', authRouter);
```
2. **apiClient.ts**: Update base URL
```typescript
// Before
const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || '/api';
// After
const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || '/api/v1';
```
3. **swagger.ts**: Update server definition
```typescript
servers: [
{
url: '/api/v1',
description: 'API v1 server',
},
],
```
4. **Redirect Middleware** (optional): Support legacy clients
```typescript
// Redirect unversioned routes to v1
app.use('/api/:resource', (req, res, next) => {
if (req.params.resource !== 'v1') {
return res.redirect(307, `/api/v1/${req.params.resource}${req.url}`);
}
next();
});
```
**Acceptance Criteria**:
- All existing functionality works at `/api/v1/*`
- Frontend makes requests to `/api/v1/*`
- OpenAPI documentation reflects `/api/v1/*` paths
- Integration tests pass with new paths
### Phase 2: Versioning Infrastructure
**Goal**: Build tooling to support multiple API versions.
**Components**:
1. **Version Router Factory**
```typescript
// src/routes/versioned.ts
export function createVersionedRoutes(version: 'v1' | 'v2') {
const router = express.Router();
if (version === 'v1') {
router.use('/auth', authRouterV1);
router.use('/users', userRouterV1);
// ...
} else if (version === 'v2') {
router.use('/auth', authRouterV2);
router.use('/users', userRouterV2);
// ...
}
return router;
}
```
2. **Version Detection Middleware**
```typescript
// Extract version from URL and attach to request
app.use('/api/:version', (req, res, next) => {
req.apiVersion = req.params.version;
next();
});
```
3. **Deprecation Headers**
```typescript
// Middleware to add deprecation headers
function deprecateVersion(sunsetDate: string) {
return (req, res, next) => {
res.set('Deprecation', 'true');
res.set('Sunset', sunsetDate);
res.set('Link', '</api/v2>; rel="successor-version"');
next();
};
}
```
### Phase 3: Version 2 Support
**Goal**: Introduce v2 API when breaking changes are needed.
**Triggers for v2**:
- Major schema changes (e.g., unified item model)
- Response format overhaul
- Authentication mechanism changes
- Significant performance-driven restructuring
**Parallel Support**:
```typescript
app.use('/api/v1', createVersionedRoutes('v1'));
app.use('/api/v2', createVersionedRoutes('v2'));
```
## Migration Path
### For Frontend (Web)
The web frontend is deployed alongside the API, so migration is straightforward:
1. Update `API_BASE_URL` in `apiClient.ts`
2. Update any hardcoded paths in tests
3. Deploy frontend and backend together
### For External Consumers
External consumers (mobile apps, partner integrations) need a transition period:
1. **Announcement**: 30 days before deprecation of v(N-1)
2. **Deprecation Headers**: Add headers 30 days before sunset
3. **Documentation**: Maintain docs for both versions during transition
4. **Sunset**: Remove v(N-1) after grace period
## Deprecation Timeline
| Version | Status | Sunset Date | Notes |
| -------------------- | ---------- | ---------------------- | --------------- |
| Unversioned `/api/*` | Deprecated | Phase 1 completion | Redirect to v1 |
| v1 | Active | TBD (when v2 releases) | Current version |
### Support Policy
- **Current Version (v(N))**: Full support, all features
- **Previous Version (v(N-1))**: Security fixes only for 6 months after v(N) release
- **Older Versions**: No support, endpoints return 410 Gone
## Backwards Compatibility Strategy
### Redirect Middleware
For a smooth transition, implement redirects from unversioned to versioned endpoints:
```typescript
// src/middleware/versionRedirect.ts
import { Request, Response, NextFunction } from 'express';
import { logger } from '../services/logger.server';
/**
* Middleware to redirect unversioned API requests to v1.
* This provides backwards compatibility during the transition period.
*
* Example: /api/flyers -> /api/v1/flyers (307 Temporary Redirect)
*/
export function versionRedirectMiddleware(req: Request, res: Response, next: NextFunction) {
const path = req.path;
// Skip if already versioned
if (path.startsWith('/v1') || path.startsWith('/v2')) {
return next();
}
// Skip health checks and documentation
if (path.startsWith('/health') || path.startsWith('/docs')) {
return next();
}
// Log deprecation warning
logger.warn(
{
path: req.originalUrl,
method: req.method,
ip: req.ip,
},
'Unversioned API request - redirecting to v1',
);
// Use 307 to preserve HTTP method
const redirectUrl = `/api/v1${path}${req.url.includes('?') ? req.url.substring(req.url.indexOf('?')) : ''}`;
return res.redirect(307, redirectUrl);
}
```
### Response Versioning Headers
All API responses include version information:
```typescript
// Middleware to add version headers
app.use('/api/v1', (req, res, next) => {
res.set('X-API-Version', 'v1');
next();
});
```
## Consequences
**Positive**: Establishes a critical pattern for long-term maintainability. Allows the API to evolve without breaking existing clients.
**Negative**: Adds a small amount of complexity to the routing setup. Requires discipline to manage versions and deprecations correctly.
### Positive
- **Clear Evolution Path**: Establishes a critical pattern for long-term maintainability
- **Client Protection**: Allows the API to evolve without breaking existing clients
- **Parallel Development**: Can develop v2 features while maintaining v1 stability
- **Documentation Clarity**: Each version has its own complete documentation
- **Graceful Deprecation**: Clients have clear timelines and migration paths
### Negative
- **Routing Complexity**: Adds complexity to the routing setup
- **Code Duplication**: May need to maintain multiple versions of handlers
- **Testing Overhead**: Tests may need to cover multiple versions
- **Documentation Maintenance**: Must keep docs for multiple versions in sync
### Mitigation
- Use shared business logic with version-specific adapters
- Automate deprecation header addition
- Generate versioned OpenAPI specs from code
- Clear internal guidelines on when to increment versions
## Key Files
| File | Purpose |
| ----------------------------------- | ------------------------------------------- |
| `server.ts` | Route registration with version prefixes |
| `src/services/apiClient.ts` | Frontend API base URL configuration |
| `src/config/swagger.ts` | OpenAPI server URL and version info |
| `src/routes/*.routes.ts` | Individual route handlers |
| `src/middleware/versionRedirect.ts` | Backwards compatibility redirects (Phase 1) |
## Related ADRs
- [ADR-003](./0003-standardized-input-validation-using-middleware.md) - Input Validation (consistent across versions)
- [ADR-018](./0018-api-documentation-strategy.md) - API Documentation Strategy (versioned OpenAPI specs)
- [ADR-028](./0028-api-response-standardization.md) - Response Standardization (envelope pattern applies to all versions)
- [ADR-016](./0016-api-security-hardening.md) - Security Hardening (applies to all versions)
- [ADR-057](./0057-test-remediation-post-api-versioning.md) - Test Remediation Post-API Versioning (documents test migration)
## Implementation Checklist
### Phase 1 Tasks
- [x] Update `server.ts` to mount all routes under `/api/v1/`
- [x] Update `src/services/apiClient.ts` API_BASE_URL to `/api/v1`
- [x] Update `src/config/swagger.ts` server URL to `/api/v1`
- [x] Add redirect middleware for unversioned requests
- [x] Update integration tests to use versioned paths
- [x] Update API documentation examples (Swagger server URL updated)
- [x] Verify all health checks work at `/api/v1/health/*`
### Phase 2 Tasks
**Implementation Guide**: [API Versioning Infrastructure](../architecture/api-versioning-infrastructure.md)
**Developer Guide**: [API Versioning Developer Guide](../development/API-VERSIONING.md)
- [x] Create version router factory (`src/routes/versioned.ts`)
- [x] Implement deprecation header middleware (`src/middleware/deprecation.middleware.ts`)
- [x] Add version detection to request context (`src/middleware/apiVersion.middleware.ts`)
- [x] Add version types to Express Request (`src/types/express.d.ts`)
- [x] Create version constants configuration (`src/config/apiVersions.ts`)
- [x] Update server.ts to use version router factory
- [x] Update swagger.ts for multi-server documentation
- [x] Add unit tests for version middleware
- [x] Add integration tests for versioned router
- [x] Document versioning patterns for developers
- [x] Migrate all test files to use `/api/v1/` paths (23 files, ~70 occurrences)
### Test Path Migration Summary (2026-01-27)
The final cleanup task for Phase 2 was completed by updating all integration test files to use versioned API paths:
| Metric | Value |
| ---------------------------- | ---------------------------------------- |
| Test files updated | 23 |
| Path occurrences changed | ~70 |
| Test failures resolved | 71 (274 -> 345 passing) |
| Tests remaining todo/skipped | 3 (known issues, not versioning-related) |
| Type check | Passing |
| Versioning-specific tests | 82/82 passing |
**Test Results After Migration**:
- Integration tests: 345/348 passing
- Unit tests: 3,375/3,391 passing (16 pre-existing failures unrelated to versioning)
### Unit Test Path Fix (2026-01-27)
Following the test path migration, 16 unit test failures were discovered and fixed. These failures were caused by error log messages using hardcoded `/api/` paths instead of versioned `/api/v1/` paths.
**Root Cause**: Error log messages in route handlers used hardcoded path strings like:
```typescript
// INCORRECT - hardcoded path doesn't reflect actual request URL
req.log.error({ error }, 'Error in /api/flyers/:id:');
```
**Solution**: Updated to use `req.originalUrl` for dynamic path logging:
```typescript
// CORRECT - uses actual request URL including version prefix
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
```
**Files Modified**:
| File | Changes |
| -------------------------------------- | ---------------------------------- |
| `src/routes/recipe.routes.ts` | 3 error log statements updated |
| `src/routes/stats.routes.ts` | 1 error log statement updated |
| `src/routes/flyer.routes.ts` | 2 error logs + 2 test expectations |
| `src/routes/personalization.routes.ts` | 3 error log statements updated |
**Test Results After Fix**:
- Unit tests: 3,382/3,391 passing (0 failures in fixed files)
- Remaining 9 failures are pre-existing, unrelated issues (CSS/mocking)
**Best Practice**: See [Error Logging Path Patterns](../development/ERROR-LOGGING-PATHS.md) for guidance on logging request paths in error handlers.
**Migration Documentation**: [Test Path Migration Guide](../development/test-path-migration.md)
### Phase 3 Tasks (Future)
- [ ] Identify breaking changes requiring v2
- [ ] Create v2 route handlers
- [ ] Set deprecation timeline for v1
- [ ] Migrate documentation to multi-version format

View File

@@ -363,6 +363,13 @@ The following files contain acknowledged code smell violations that are deferred
- `src/tests/utils/mockFactories.ts` - Mock factories (1553 lines)
- `src/tests/utils/testHelpers.ts` - Test utilities
## Related ADRs
- [ADR-014](./0014-containerization-and-deployment-strategy.md) - Containerization (tests must run in dev container)
- [ADR-040](./0040-testing-economics-and-priorities.md) - Testing Economics and Priorities
- [ADR-045](./0045-test-data-factories-and-fixtures.md) - Test Data Factories and Fixtures
- [ADR-057](./0057-test-remediation-post-api-versioning.md) - Test Remediation Post-API Versioning
## Future Enhancements
1. **Browser E2E Tests**: Consider adding Playwright for actual browser testing

View File

@@ -2,7 +2,9 @@
**Date**: 2025-12-12
**Status**: Proposed
**Status**: Superseded by [ADR-023](./0023-database-schema-migration-strategy.md)
**Note**: This ADR was an early draft. ADR-023 provides a more detailed specification for the same topic.
## Context

View File

@@ -1,324 +0,0 @@
# ADR-015: Application Performance Monitoring (APM) and Error Tracking
**Date**: 2025-12-12
**Status**: Accepted
**Updated**: 2026-01-11
## Context
While `ADR-004` established structured logging with Pino, the application lacks a high-level, aggregated view of its health, performance, and errors. It's difficult to spot trends, identify slow API endpoints, or be proactively notified of new types of errors.
Key requirements:
1. **Self-hosted**: No external SaaS dependencies for error tracking
2. **Sentry SDK compatible**: Leverage mature, well-documented SDKs
3. **Lightweight**: Minimal resource overhead in the dev container
4. **Production-ready**: Same architecture works on bare-metal production servers
5. **AI-accessible**: MCP server integration for Claude Code and other AI tools
## Decision
We will implement a self-hosted error tracking stack using **Bugsink** as the Sentry-compatible backend, with the following components:
### 1. Error Tracking Backend: Bugsink
**Bugsink** is a lightweight, self-hosted Sentry alternative that:
- Runs as a single process (no Kafka, Redis, ClickHouse required)
- Is fully compatible with Sentry SDKs
- Supports ARM64 and AMD64 architectures
- Can use SQLite (dev) or PostgreSQL (production)
**Deployment**:
- **Dev container**: Installed as a systemd service inside the container
- **Production**: Runs as a systemd service on bare-metal, listening on localhost only
- **Database**: Uses PostgreSQL with a dedicated `bugsink` user and `bugsink` database (same PostgreSQL instance as the main application)
### 2. Backend Integration: @sentry/node
The Express backend will integrate `@sentry/node` SDK to:
- Capture unhandled exceptions before PM2/process manager restarts
- Report errors with full stack traces and context
- Integrate with Pino logger for breadcrumbs
- Track transaction performance (optional)
### 3. Frontend Integration: @sentry/react
The React frontend will integrate `@sentry/react` SDK to:
- Wrap the app in a Sentry Error Boundary
- Capture unhandled JavaScript errors
- Report errors with component stack traces
- Track user session context
- **Frontend Error Correlation**: The global API client (Axios/Fetch wrapper) MUST intercept 4xx/5xx responses. It MUST extract the `x-request-id` header (if present) and attach it to the Sentry scope as a tag `api_request_id` before re-throwing the error. This allows developers to copy the ID from Sentry and search for it in backend logs.
### 4. Log Aggregation: Logstash
**Logstash** parses application and infrastructure logs, forwarding error patterns to Bugsink:
- **Installation**: Installed inside the dev container (and on bare-metal prod servers)
- **Inputs**:
- Pino JSON logs from the Node.js application
- Redis logs (connection errors, memory warnings, slow commands)
- PostgreSQL function logs (future - see Implementation Steps)
- **Filter**: Identifies error-level logs (5xx responses, unhandled exceptions, Redis errors)
- **Output**: Sends to Bugsink via Sentry-compatible HTTP API
This provides a secondary error capture path for:
- Errors that occur before Sentry SDK initialization
- Log-based errors that don't throw exceptions
- Redis connection/performance issues
- Database function errors and slow queries
- Historical error analysis from log files
### 5. MCP Server Integration: bugsink-mcp
For AI tool integration (Claude Code, Cursor, etc.), we use the open-source [bugsink-mcp](https://github.com/j-shelfwood/bugsink-mcp) server:
- **No code changes required**: Configurable via environment variables
- **Capabilities**: List projects, get issues, view events, get stacktraces, manage releases
- **Configuration**:
- `BUGSINK_URL`: Points to Bugsink instance (`http://localhost:8000` for dev, `https://bugsink.projectium.com` for prod)
- `BUGSINK_API_TOKEN`: API token from Bugsink (created via Django management command)
- `BUGSINK_ORG_SLUG`: Organization identifier (usually "sentry")
**Note:** Despite the name `sentry-selfhosted-mcp` mentioned in earlier drafts of this ADR, the actual MCP server used is `bugsink-mcp` which is specifically designed for Bugsink's API structure.
## Architecture
```text
┌─────────────────────────────────────────────────────────────────────────┐
│ Dev Container / Production Server │
├─────────────────────────────────────────────────────────────────────────┤
│ │
│ ┌──────────────────┐ ┌──────────────────┐ │
│ │ Frontend │ │ Backend │ │
│ │ (React) │ │ (Express) │ │
│ │ @sentry/react │ │ @sentry/node │ │
│ └────────┬─────────┘ └────────┬─────────┘ │
│ │ │ │
│ │ Sentry SDK Protocol │ │
│ └───────────┬───────────────┘ │
│ │ │
│ ▼ │
│ ┌──────────────────────┐ │
│ │ Bugsink │ │
│ │ (localhost:8000) │◄──────────────────┐ │
│ │ │ │ │
│ │ PostgreSQL backend │ │ │
│ └──────────────────────┘ │ │
│ │ │
│ ┌──────────────────────┐ │ │
│ │ Logstash │───────────────────┘ │
│ │ (Log Aggregator) │ Sentry Output │
│ │ │ │
│ │ Inputs: │ │
│ │ - Pino app logs │ │
│ │ - Redis logs │ │
│ │ - PostgreSQL (future) │
│ └──────────────────────┘ │
│ ▲ ▲ ▲ │
│ │ │ │ │
│ ┌───────────┘ │ └───────────┐ │
│ │ │ │ │
│ ┌────┴─────┐ ┌─────┴────┐ ┌──────┴─────┐ │
│ │ Pino │ │ Redis │ │ PostgreSQL │ │
│ │ Logs │ │ Logs │ │ Logs (TBD) │ │
│ └──────────┘ └──────────┘ └────────────┘ │
│ │
│ ┌──────────────────────┐ │
│ │ PostgreSQL │ │
│ │ ┌────────────────┐ │ │
│ │ │ flyer_crawler │ │ (main app database) │
│ │ ├────────────────┤ │ │
│ │ │ bugsink │ │ (error tracking database) │
│ │ └────────────────┘ │ │
│ └──────────────────────┘ │
│ │
└─────────────────────────────────────────────────────────────────────────┘
External (Developer Machine):
┌──────────────────────────────────────┐
│ Claude Code / Cursor / VS Code │
│ ┌────────────────────────────────┐ │
│ │ bugsink-mcp │ │
│ │ (MCP Server) │ │
│ │ │ │
│ │ BUGSINK_URL=http://localhost:8000
│ │ BUGSINK_API_TOKEN=... │ │
│ │ BUGSINK_ORG_SLUG=... │ │
│ └────────────────────────────────┘ │
└──────────────────────────────────────┘
```
## Configuration
### Environment Variables
| Variable | Description | Default (Dev) |
| ------------------ | ------------------------------ | -------------------------- |
| `BUGSINK_DSN` | Sentry-compatible DSN for SDKs | Set after project creation |
| `BUGSINK_ENABLED` | Enable/disable error reporting | `true` |
| `BUGSINK_BASE_URL` | Bugsink web UI URL (internal) | `http://localhost:8000` |
### PostgreSQL Setup
```sql
-- Create dedicated Bugsink database and user
CREATE USER bugsink WITH PASSWORD 'bugsink_dev_password';
CREATE DATABASE bugsink OWNER bugsink;
GRANT ALL PRIVILEGES ON DATABASE bugsink TO bugsink;
```
### Bugsink Configuration
```bash
# Environment variables for Bugsink service
SECRET_KEY=<random-50-char-string>
DATABASE_URL=postgresql://bugsink:bugsink_dev_password@localhost:5432/bugsink
BASE_URL=http://localhost:8000
PORT=8000
```
### Logstash Pipeline
```conf
# /etc/logstash/conf.d/bugsink.conf
# === INPUTS ===
input {
# Pino application logs
file {
path => "/app/logs/*.log"
codec => json
type => "pino"
tags => ["app"]
}
# Redis logs
file {
path => "/var/log/redis/*.log"
type => "redis"
tags => ["redis"]
}
# PostgreSQL logs (for function logging - future)
# file {
# path => "/var/log/postgresql/*.log"
# type => "postgres"
# tags => ["postgres"]
# }
}
# === FILTERS ===
filter {
# Pino error detection (level 50 = error, 60 = fatal)
if [type] == "pino" and [level] >= 50 {
mutate { add_tag => ["error"] }
}
# Redis error detection
if [type] == "redis" {
grok {
match => { "message" => "%{POSINT:pid}:%{WORD:role} %{MONTHDAY} %{MONTH} %{TIME} %{WORD:loglevel} %{GREEDYDATA:redis_message}" }
}
if [loglevel] in ["WARNING", "ERROR"] {
mutate { add_tag => ["error"] }
}
}
# PostgreSQL function error detection (future)
# if [type] == "postgres" {
# # Parse PostgreSQL log format and detect ERROR/FATAL levels
# }
}
# === OUTPUT ===
output {
if "error" in [tags] {
http {
url => "http://localhost:8000/api/store/"
http_method => "post"
format => "json"
# Sentry envelope format
}
}
}
```
## Implementation Steps
1. **Update Dockerfile.dev**:
- Install Bugsink (pip package or binary)
- Install Logstash (Elastic APT repository)
- Add systemd service files for both
2. **PostgreSQL initialization**:
- Add Bugsink user/database creation to `sql/00-init-extensions.sql`
3. **Backend SDK integration**:
- Install `@sentry/node`
- Initialize in `server.ts` before Express app
- Configure error handler middleware integration
4. **Frontend SDK integration**:
- Install `@sentry/react`
- Wrap `App` component with `Sentry.ErrorBoundary`
- Configure in `src/index.tsx`
5. **Environment configuration**:
- Add Bugsink variables to `src/config/env.ts`
- Update `.env.example` and `compose.dev.yml`
6. **Logstash configuration**:
- Create pipeline config for Pino → Bugsink
- Configure Pino to write to log file in addition to stdout
- Configure Redis log monitoring (connection errors, slow commands)
7. **MCP server documentation**:
- Document `bugsink-mcp` setup in CLAUDE.md
8. **PostgreSQL function logging** (future):
- Configure PostgreSQL to log function execution errors
- Add Logstash input for PostgreSQL logs
- Define filter rules for function-level error detection
- _Note: Ask for implementation details when this step is reached_
## Consequences
### Positive
- **Full observability**: Aggregated view of errors, trends, and performance
- **Self-hosted**: No external SaaS dependencies or subscription costs
- **SDK compatibility**: Leverages mature Sentry SDKs with excellent documentation
- **AI integration**: MCP server enables Claude Code to query and analyze errors
- **Unified architecture**: Same setup works in dev container and production
- **Lightweight**: Bugsink runs in a single process, unlike full Sentry (16GB+ RAM)
### Negative
- **Additional services**: Bugsink and Logstash add complexity to the container
- **PostgreSQL overhead**: Additional database for error tracking
- **Initial setup**: Requires configuration of multiple components
- **Logstash learning curve**: Pipeline configuration requires Logstash knowledge
## Alternatives Considered
1. **Full Sentry self-hosted**: Rejected due to complexity (Kafka, Redis, ClickHouse, 16GB+ RAM minimum)
2. **GlitchTip**: Considered, but Bugsink is lighter weight and easier to deploy
3. **Sentry SaaS**: Rejected due to self-hosted requirement
4. **Custom error aggregation**: Rejected in favor of proven Sentry SDK ecosystem
## References
- [Bugsink Documentation](https://www.bugsink.com/docs/)
- [Bugsink Docker Install](https://www.bugsink.com/docs/docker-install/)
- [@sentry/node Documentation](https://docs.sentry.io/platforms/javascript/guides/node/)
- [@sentry/react Documentation](https://docs.sentry.io/platforms/javascript/guides/react/)
- [bugsink-mcp](https://github.com/j-shelfwood/bugsink-mcp)
- [Logstash Reference](https://www.elastic.co/guide/en/logstash/current/index.html)

View File

@@ -0,0 +1,272 @@
# ADR-015: Error Tracking and Observability
**Date**: 2025-12-12
**Status**: Accepted (Fully Implemented)
**Updated**: 2026-01-26 (user context integration completed)
**Related**: [ADR-056](./0056-application-performance-monitoring.md) (Application Performance Monitoring)
## Context
While ADR-004 established structured logging with Pino, the application lacks a high-level, aggregated view of its health and errors. It's difficult to spot trends, identify recurring issues, or be proactively notified of new types of errors.
Key requirements:
1. **Self-hosted**: No external SaaS dependencies for error tracking
2. **Sentry SDK compatible**: Leverage mature, well-documented SDKs
3. **Lightweight**: Minimal resource overhead in the dev container
4. **Production-ready**: Same architecture works on bare-metal production servers
5. **AI-accessible**: MCP server integration for Claude Code and other AI tools
**Note**: Application Performance Monitoring (APM) and distributed tracing are covered separately in [ADR-056](./0056-application-performance-monitoring.md).
## Decision
We implement a self-hosted error tracking stack using **Bugsink** as the Sentry-compatible backend, with the following components:
### 1. Error Tracking Backend: Bugsink
**Bugsink** is a lightweight, self-hosted Sentry alternative that:
- Runs as a single process (no Kafka, Redis, ClickHouse required)
- Is fully compatible with Sentry SDKs
- Supports ARM64 and AMD64 architectures
- Can use SQLite (dev) or PostgreSQL (production)
**Deployment**:
- **Dev container**: Installed as a systemd service inside the container
- **Production**: Runs as a systemd service on bare-metal, listening on localhost only
- **Database**: Uses PostgreSQL with a dedicated `bugsink` user and `bugsink` database (same PostgreSQL instance as the main application)
### 2. Backend Integration: @sentry/node
The Express backend integrates `@sentry/node` SDK to:
- Capture unhandled exceptions before PM2/process manager restarts
- Report errors with full stack traces and context
- Integrate with Pino logger for breadcrumbs
- Filter errors by severity (only 5xx errors sent by default)
### 3. Frontend Integration: @sentry/react
The React frontend integrates `@sentry/react` SDK to:
- Wrap the app in an Error Boundary for graceful error handling
- Capture unhandled JavaScript errors
- Report errors with component stack traces
- Filter out browser extension errors
- **Frontend Error Correlation**: The global API client intercepts 4xx/5xx responses and can attach the `x-request-id` header to Sentry scope for correlation with backend logs
### 4. Log Aggregation: Logstash
**Logstash** parses application and infrastructure logs, forwarding error patterns to Bugsink:
- **Installation**: Installed inside the dev container (and on bare-metal prod servers)
- **Inputs**:
- Pino JSON logs from the Node.js application (PM2 managed)
- Redis logs (connection errors, memory warnings, slow commands)
- PostgreSQL function logs (via `fn_log()` - see ADR-050)
- NGINX access/error logs
- **Filter**: Identifies error-level logs (5xx responses, unhandled exceptions, Redis errors)
- **Output**: Sends to Bugsink via Sentry-compatible HTTP API
This provides a secondary error capture path for:
- Errors that occur before Sentry SDK initialization
- Log-based errors that don't throw exceptions
- Redis connection/performance issues
- Database function errors and slow queries
- Historical error analysis from log files
### 5. MCP Server Integration: bugsink-mcp
For AI tool integration (Claude Code, Cursor, etc.), we use the open-source [bugsink-mcp](https://github.com/j-shelfwood/bugsink-mcp) server:
- **No code changes required**: Configurable via environment variables
- **Capabilities**: List projects, get issues, view events, get stacktraces, manage releases
- **Configuration**:
- `BUGSINK_URL`: Points to Bugsink instance (`http://localhost:8000` for dev, `https://bugsink.projectium.com` for prod)
- `BUGSINK_API_TOKEN`: API token from Bugsink (created via Django management command)
- `BUGSINK_ORG_SLUG`: Organization identifier (usually "sentry")
## Architecture
```text
+---------------------------------------------------------------------------+
| Dev Container / Production Server |
+---------------------------------------------------------------------------+
| |
| +------------------+ +------------------+ |
| | Frontend | | Backend | |
| | (React) | | (Express) | |
| | @sentry/react | | @sentry/node | |
| +--------+---------+ +--------+---------+ |
| | | |
| | Sentry SDK Protocol | |
| +-----------+---------------+ |
| | |
| v |
| +----------------------+ |
| | Bugsink | |
| | (localhost:8000) |<------------------+ |
| | | | |
| | PostgreSQL backend | | |
| +----------------------+ | |
| | |
| +----------------------+ | |
| | Logstash |-------------------+ |
| | (Log Aggregator) | Sentry Output |
| | | |
| | Inputs: | |
| | - PM2/Pino logs | |
| | - Redis logs | |
| | - PostgreSQL logs | |
| | - NGINX logs | |
| +----------------------+ |
| ^ ^ ^ ^ |
| | | | | |
| +-----------+ | | +-----------+ |
| | | | | |
| +----+-----+ +-----+----+ +-----+----+ +-----+----+ |
| | PM2 | | Redis | | PostgreSQL| | NGINX | |
| | Logs | | Logs | | Logs | | Logs | |
| +----------+ +----------+ +-----------+ +---------+ |
| |
| +----------------------+ |
| | PostgreSQL | |
| | +----------------+ | |
| | | flyer_crawler | | (main app database) |
| | +----------------+ | |
| | | bugsink | | (error tracking database) |
| | +----------------+ | |
| +----------------------+ |
| |
+---------------------------------------------------------------------------+
External (Developer Machine):
+--------------------------------------+
| Claude Code / Cursor / VS Code |
| +--------------------------------+ |
| | bugsink-mcp | |
| | (MCP Server) | |
| | | |
| | BUGSINK_URL=http://localhost:8000
| | BUGSINK_API_TOKEN=... | |
| | BUGSINK_ORG_SLUG=... | |
| +--------------------------------+ |
+--------------------------------------+
```
## Implementation Status
### Completed
- [x] Bugsink installed and configured in dev container
- [x] PostgreSQL `bugsink` database and user created
- [x] `@sentry/node` SDK integrated in backend (`src/services/sentry.server.ts`)
- [x] `@sentry/react` SDK integrated in frontend (`src/services/sentry.client.ts`)
- [x] ErrorBoundary component created (`src/components/ErrorBoundary.tsx`)
- [x] ErrorBoundary wrapped around app (`src/providers/AppProviders.tsx`)
- [x] Logstash pipeline configured for PM2/Pino, Redis, PostgreSQL, NGINX logs
- [x] MCP server (`bugsink-mcp`) documented and configured
- [x] Environment variables added to `src/config/env.ts` and frontend `src/config.ts`
- [x] Browser extension errors filtered in `beforeSend`
- [x] 5xx error filtering in backend error handler
### Recently Completed (2026-01-26)
- [x] **User context after authentication**: Integrated `setUser()` calls in `AuthProvider.tsx` to associate errors with authenticated users
- Called on profile fetch from query (line 44-49)
- Called on direct login with profile (line 94-99)
- Called on login with profile fetch (line 124-129)
- Cleared on logout (line 76-77)
- Maps `user_id``id`, `email``email`, `full_name``username`
This completes the error tracking implementation - all errors are now associated with the authenticated user who encountered them, enabling user-specific error analysis and debugging.
## Configuration
### Environment Variables
| Variable | Description | Default (Dev) |
| -------------------- | -------------------------------- | -------------------------- |
| `SENTRY_DSN` | Sentry-compatible DSN (backend) | Set after project creation |
| `VITE_SENTRY_DSN` | Sentry-compatible DSN (frontend) | Set after project creation |
| `SENTRY_ENVIRONMENT` | Environment name | `development` |
| `SENTRY_DEBUG` | Enable debug logging | `false` |
| `SENTRY_ENABLED` | Enable/disable error reporting | `true` |
### PostgreSQL Setup
```sql
-- Create dedicated Bugsink database and user
CREATE USER bugsink WITH PASSWORD 'bugsink_dev_password';
CREATE DATABASE bugsink OWNER bugsink;
GRANT ALL PRIVILEGES ON DATABASE bugsink TO bugsink;
```
### Bugsink Configuration
```bash
# Environment variables for Bugsink service
SECRET_KEY=<random-50-char-string>
DATABASE_URL=postgresql://bugsink:bugsink_dev_password@localhost:5432/bugsink
BASE_URL=http://localhost:8000
PORT=8000
```
### Logstash Pipeline
See `docker/logstash/bugsink.conf` for the full pipeline configuration.
Key routing:
| Source | Bugsink Project |
| --------------- | --------------- |
| Backend (Pino) | Backend API |
| Worker (Pino) | Backend API |
| PostgreSQL logs | Backend API |
| Vite logs | Infrastructure |
| Redis logs | Infrastructure |
| NGINX logs | Infrastructure |
| Frontend errors | Frontend |
## Consequences
### Positive
- **Full observability**: Aggregated view of errors and trends
- **Self-hosted**: No external SaaS dependencies or subscription costs
- **SDK compatibility**: Leverages mature Sentry SDKs with excellent documentation
- **AI integration**: MCP server enables Claude Code to query and analyze errors
- **Unified architecture**: Same setup works in dev container and production
- **Lightweight**: Bugsink runs in a single process, unlike full Sentry (16GB+ RAM)
- **Error correlation**: Request IDs allow correlation between frontend errors and backend logs
### Negative
- **Additional services**: Bugsink and Logstash add complexity to the container
- **PostgreSQL overhead**: Additional database for error tracking
- **Initial setup**: Requires configuration of multiple components
- **Logstash learning curve**: Pipeline configuration requires Logstash knowledge
## Alternatives Considered
1. **Full Sentry self-hosted**: Rejected due to complexity (Kafka, Redis, ClickHouse, 16GB+ RAM minimum)
2. **GlitchTip**: Considered, but Bugsink is lighter weight and easier to deploy
3. **Sentry SaaS**: Rejected due to self-hosted requirement
4. **Custom error aggregation**: Rejected in favor of proven Sentry SDK ecosystem
## References
- [Bugsink Documentation](https://www.bugsink.com/docs/)
- [Bugsink Docker Install](https://www.bugsink.com/docs/docker-install/)
- [@sentry/node Documentation](https://docs.sentry.io/platforms/javascript/guides/node/)
- [@sentry/react Documentation](https://docs.sentry.io/platforms/javascript/guides/react/)
- [bugsink-mcp](https://github.com/j-shelfwood/bugsink-mcp)
- [Logstash Reference](https://www.elastic.co/guide/en/logstash/current/index.html)
- [ADR-050: PostgreSQL Function Observability](./0050-postgresql-function-observability.md)
- [ADR-056: Application Performance Monitoring](./0056-application-performance-monitoring.md)

View File

@@ -4,6 +4,8 @@
**Status**: Proposed
**Supersedes**: [ADR-013](./0013-database-schema-migration-strategy.md)
## Context
The `README.md` indicates that the database schema is managed by manually running a large `schema.sql.txt` file. This approach is highly error-prone, makes tracking changes difficult, and is not feasible for updating a live production database without downtime or data loss.

View File

@@ -1,18 +1,333 @@
# ADR-024: Feature Flagging Strategy
**Date**: 2025-12-12
**Status**: Accepted
**Implemented**: 2026-01-28
**Implementation Plan**: [2026-01-28-adr-024-feature-flags-implementation.md](../plans/2026-01-28-adr-024-feature-flags-implementation.md)
**Status**: Proposed
## Implementation Summary
Feature flag infrastructure fully implemented with 89 new tests (all passing). Total test suite: 3,616 tests passing.
**Backend**:
- Zod-validated schema in `src/config/env.ts` with 6 feature flags
- Service module `src/services/featureFlags.server.ts` with `isFeatureEnabled()`, `getFeatureFlags()`, `getEnabledFeatureFlags()`
- Admin endpoint `GET /api/v1/admin/feature-flags` (requires admin authentication)
- Convenience exports for direct boolean access
**Frontend**:
- Config section in `src/config.ts` with `VITE_FEATURE_*` environment variables
- Type declarations in `src/vite-env.d.ts`
- React hook `useFeatureFlag()` and `useAllFeatureFlags()` in `src/hooks/useFeatureFlag.ts`
- Declarative component `<FeatureFlag>` in `src/components/FeatureFlag.tsx`
**Current Flags**: `bugsinkSync`, `advancedRbac`, `newDashboard`, `betaRecipes`, `experimentalAi`, `debugMode`
---
## Context
As the application grows, there is no way to roll out new features to a subset of users (e.g., for beta testing) or to quickly disable a problematic feature in production without a full redeployment.
Application lacks controlled feature rollout capability. No mechanism for beta testing, quick production disablement, or gradual rollouts without full redeployment. Need type-safe, configuration-based system integrating with ADR-007 Zod validation.
## Decision
We will implement a feature flagging system. This could start with a simple configuration-based approach (defined in `ADR-007`) and evolve to use a dedicated service like **Flagsmith** or **LaunchDarkly**. This ADR will define how feature flags are created, managed, and checked in both the backend and frontend code.
Implement environment-variable-based feature flag system. Backend: Zod-validated schema in `src/config/env.ts` + dedicated service. Frontend: Vite env vars + React hook + declarative component. All flags default `false` (opt-in model). Future migration path to Flagsmith/LaunchDarkly preserved via abstraction layer.
## Consequences
**Positive**: Decouples feature releases from code deployments, reducing risk and allowing for more controlled, gradual rollouts and A/B testing. Enables easier experimentation and faster iteration.
**Negative**: Adds complexity to the codebase with conditional logic around features. Requires careful management of feature flag states to avoid technical debt.
- **Positive**: Decouples releases from deployments reduced risk, gradual rollouts, A/B testing capability
- **Negative**: Conditional logic complexity → requires sunset policy (3-month max after full rollout)
- **Neutral**: Restart required for flag changes (acceptable for current scale, external service removes this constraint)
---
## Implementation Details
### Architecture Overview
```text
Environment Variables (FEATURE_*, VITE_FEATURE_*)
├── Backend ──► src/config/env.ts (Zod) ──► src/services/featureFlags.server.ts
│ │
│ ┌──────────┴──────────┐
│ │ │
│ isFeatureEnabled() getAllFeatureFlags()
│ │
│ Routes/Services
└── Frontend ─► src/config.ts ──► src/hooks/useFeatureFlag.ts
┌──────────────┼──────────────┐
│ │ │
useFeatureFlag() useAllFeatureFlags() <FeatureFlag>
│ Component
Components
```
### File Structure
| File | Purpose | Layer |
| ------------------------------------- | ------------------------ | ---------------- |
| `src/config/env.ts` | Zod schema + env loading | Backend config |
| `src/services/featureFlags.server.ts` | Flag access service | Backend runtime |
| `src/config.ts` | Vite env parsing | Frontend config |
| `src/vite-env.d.ts` | TypeScript declarations | Frontend types |
| `src/hooks/useFeatureFlag.ts` | React hook | Frontend runtime |
| `src/components/FeatureFlag.tsx` | Declarative wrapper | Frontend UI |
### Naming Convention
| Context | Pattern | Example |
| ------------------- | ------------------------- | ---------------------------------- |
| Backend env var | `FEATURE_SNAKE_CASE` | `FEATURE_NEW_DASHBOARD` |
| Frontend env var | `VITE_FEATURE_SNAKE_CASE` | `VITE_FEATURE_NEW_DASHBOARD` |
| Config property | `camelCase` | `config.featureFlags.newDashboard` |
| Hook/function param | `camelCase` literal | `isFeatureEnabled('newDashboard')` |
### Backend Implementation
#### Schema Definition (`src/config/env.ts`)
```typescript
/**
* Feature flags schema (ADR-024).
* All flags default false (disabled) for safety.
*/
const featureFlagsSchema = z.object({
newDashboard: booleanString(false), // FEATURE_NEW_DASHBOARD
betaRecipes: booleanString(false), // FEATURE_BETA_RECIPES
experimentalAi: booleanString(false), // FEATURE_EXPERIMENTAL_AI
debugMode: booleanString(false), // FEATURE_DEBUG_MODE
});
// In loadEnvVars():
featureFlags: {
newDashboard: process.env.FEATURE_NEW_DASHBOARD,
betaRecipes: process.env.FEATURE_BETA_RECIPES,
experimentalAi: process.env.FEATURE_EXPERIMENTAL_AI,
debugMode: process.env.FEATURE_DEBUG_MODE,
},
```
#### Service Module (`src/services/featureFlags.server.ts`)
```typescript
import { config, isDevelopment } from '../config/env';
import { logger } from './logger.server';
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Check feature flag state. Logs in development mode.
*/
export function isFeatureEnabled(flagName: FeatureFlagName): boolean {
const enabled = config.featureFlags[flagName];
if (isDevelopment) {
logger.debug({ flag: flagName, enabled }, 'Feature flag checked');
}
return enabled;
}
/**
* Get all flags (admin/debug endpoints).
*/
export function getAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return { ...config.featureFlags };
}
// Convenience exports (evaluated once at startup)
export const isNewDashboardEnabled = config.featureFlags.newDashboard;
export const isBetaRecipesEnabled = config.featureFlags.betaRecipes;
```
#### Usage in Routes
```typescript
import { isFeatureEnabled } from '../services/featureFlags.server';
router.get('/dashboard', async (req, res) => {
if (isFeatureEnabled('newDashboard')) {
return sendSuccess(res, { version: 'v2', data: await getNewDashboardData() });
}
return sendSuccess(res, { version: 'v1', data: await getLegacyDashboardData() });
});
```
### Frontend Implementation
#### Config (`src/config.ts`)
```typescript
const config = {
// ... existing sections ...
featureFlags: {
newDashboard: import.meta.env.VITE_FEATURE_NEW_DASHBOARD === 'true',
betaRecipes: import.meta.env.VITE_FEATURE_BETA_RECIPES === 'true',
experimentalAi: import.meta.env.VITE_FEATURE_EXPERIMENTAL_AI === 'true',
debugMode: import.meta.env.VITE_FEATURE_DEBUG_MODE === 'true',
},
};
```
#### Type Declarations (`src/vite-env.d.ts`)
```typescript
interface ImportMetaEnv {
readonly VITE_FEATURE_NEW_DASHBOARD?: string;
readonly VITE_FEATURE_BETA_RECIPES?: string;
readonly VITE_FEATURE_EXPERIMENTAL_AI?: string;
readonly VITE_FEATURE_DEBUG_MODE?: string;
}
```
#### React Hook (`src/hooks/useFeatureFlag.ts`)
```typescript
import { useMemo } from 'react';
import config from '../config';
export type FeatureFlagName = keyof typeof config.featureFlags;
export function useFeatureFlag(flagName: FeatureFlagName): boolean {
return useMemo(() => config.featureFlags[flagName], [flagName]);
}
export function useAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return useMemo(() => ({ ...config.featureFlags }), []);
}
```
#### Declarative Component (`src/components/FeatureFlag.tsx`)
```typescript
import { ReactNode } from 'react';
import { useFeatureFlag, FeatureFlagName } from '../hooks/useFeatureFlag';
interface FeatureFlagProps {
name: FeatureFlagName;
children: ReactNode;
fallback?: ReactNode;
}
export function FeatureFlag({ name, children, fallback = null }: FeatureFlagProps) {
const isEnabled = useFeatureFlag(name);
return <>{isEnabled ? children : fallback}</>;
}
```
#### Usage in Components
```tsx
// Declarative approach
<FeatureFlag name="newDashboard" fallback={<LegacyDashboard />}>
<NewDashboard />
</FeatureFlag>;
// Hook approach (for logic beyond rendering)
const isNewDashboard = useFeatureFlag('newDashboard');
useEffect(() => {
if (isNewDashboard) analytics.track('new_dashboard_viewed');
}, [isNewDashboard]);
```
### Testing Patterns
#### Backend Test Setup
```typescript
// Reset modules to test different flag states
beforeEach(() => {
vi.resetModules();
process.env.FEATURE_NEW_DASHBOARD = 'true';
});
// src/services/featureFlags.server.test.ts
describe('isFeatureEnabled', () => {
it('returns false for disabled flags', () => {
expect(isFeatureEnabled('newDashboard')).toBe(false);
});
});
```
#### Frontend Test Setup
```typescript
// Mock config module
vi.mock('../config', () => ({
default: {
featureFlags: {
newDashboard: true,
betaRecipes: false,
},
},
}));
// Component test
describe('FeatureFlag', () => {
it('renders fallback when disabled', () => {
render(
<FeatureFlag name="betaRecipes" fallback={<div>Old</div>}>
<div>New</div>
</FeatureFlag>
);
expect(screen.getByText('Old')).toBeInTheDocument();
});
});
```
### Flag Lifecycle
| Phase | Actions |
| ---------- | -------------------------------------------------------------------------------------------- |
| **Add** | 1. Add to both schemas (backend + frontend) 2. Default `false` 3. Document in `.env.example` |
| **Enable** | Set env var `='true'` → restart application |
| **Remove** | 1. Remove conditional code 2. Remove from schemas 3. Remove env vars |
| **Sunset** | Max 3 months after full rollout → remove flag |
### Admin Endpoint (Optional)
```typescript
// GET /api/v1/admin/feature-flags (admin-only)
router.get('/feature-flags', requireAdmin, async (req, res) => {
sendSuccess(res, { flags: getAllFeatureFlags() });
});
```
### Integration with ADR-007
Feature flags extend existing Zod configuration pattern:
- **Validation**: Same `booleanString()` transform used by other config
- **Loading**: Same `loadEnvVars()` function loads `FEATURE_*` vars
- **Type Safety**: `FeatureFlagName` type derived from config schema
- **Fail-Fast**: Invalid flag values fail at startup (Zod validation)
### Future Migration Path
Current implementation abstracts flag access via `isFeatureEnabled()` function and `useFeatureFlag()` hook. External service migration requires:
1. Replace implementation internals of these functions
2. Add API client for Flagsmith/LaunchDarkly
3. No changes to consuming code (routes/components)
### Explicitly Out of Scope
- External service integration (Flagsmith/LaunchDarkly)
- Database-stored flags
- Real-time flag updates (WebSocket/SSE)
- User-specific flags (A/B testing percentages)
- Flag inheritance/hierarchy
- Flag audit logging
### Key Files Reference
| Action | Files |
| --------------------- | ------------------------------------------------------------------------------------------------- |
| Add new flag | `src/config/env.ts`, `src/config.ts`, `src/vite-env.d.ts`, `.env.example` |
| Check flag (backend) | Import from `src/services/featureFlags.server.ts` |
| Check flag (frontend) | Import hook from `src/hooks/useFeatureFlag.ts` or component from `src/components/FeatureFlag.tsx` |
| Test flag behavior | Mock via `vi.resetModules()` (backend) or `vi.mock('../config')` (frontend) |

View File

@@ -195,6 +195,12 @@ Do NOT add tests:
- Coverage percentages may not satisfy external audits
- Requires judgment calls that may be inconsistent
## Related ADRs
- [ADR-010](./0010-testing-strategy-and-standards.md) - Testing Strategy and Standards (this ADR extends ADR-010)
- [ADR-045](./0045-test-data-factories-and-fixtures.md) - Test Data Factories and Fixtures
- [ADR-057](./0057-test-remediation-post-api-versioning.md) - Test Remediation Post-API Versioning
## Key Files
- `docs/adr/0010-testing-strategy-and-standards.md` - Testing mechanics

View File

@@ -2,22 +2,22 @@
**Date**: 2026-01-09
**Status**: Partially Implemented
**Status**: Accepted (Fully Implemented)
**Implemented**: 2026-01-09 (Local auth only)
**Implemented**: 2026-01-09 (Local auth + JWT), 2026-01-26 (OAuth enabled)
## Context
The application requires a secure authentication system that supports both traditional email/password login and social OAuth providers (Google, GitHub). The system must handle user sessions, token refresh, account security (lockout after failed attempts), and integrate seamlessly with the existing Express middleware pipeline.
Currently, **only local authentication is enabled**. OAuth strategies are fully implemented but commented out, pending configuration of OAuth provider credentials.
**All authentication methods are now fully implemented**: Local authentication (email/password), JWT tokens, and OAuth (Google + GitHub). OAuth strategies use conditional registration - they activate automatically when the corresponding environment variables are configured.
## Decision
We will implement a stateless JWT-based authentication system with the following components:
1. **Local Authentication**: Email/password login with bcrypt hashing.
2. **OAuth Authentication**: Google and GitHub OAuth 2.0 (currently disabled).
2. **OAuth Authentication**: Google and GitHub OAuth 2.0 (conditionally enabled via environment variables).
3. **JWT Access Tokens**: Short-lived tokens (15 minutes) for API authentication.
4. **Refresh Tokens**: Long-lived tokens (7 days) stored in HTTP-only cookies.
5. **Account Security**: Lockout after 5 failed login attempts for 15 minutes.
@@ -59,7 +59,7 @@ We will implement a stateless JWT-based authentication system with the following
│ │ │ │ │
│ │ ┌──────────┐ │ │ │
│ └────────>│ OAuth │─────────────┘ │ │
(disabled) │ Provider │ │ │
│ Provider │ │ │
│ └──────────┘ │ │
│ │ │
│ ┌──────────┐ ┌──────────┐ │ │
@@ -130,72 +130,139 @@ passport.use(
- Refresh token: 7 days expiry, 64-byte random hex
- Refresh token stored in HTTP-only cookie with `secure` flag in production
### OAuth Strategies (Disabled)
### OAuth Strategies (Conditionally Enabled)
OAuth strategies are **fully implemented** and activate automatically when the corresponding environment variables are set. The strategies use conditional registration to gracefully handle missing credentials.
#### Google OAuth
Located in `src/routes/passport.routes.ts` (lines 167-217, commented):
Located in `src/config/passport.ts` (lines 167-235):
```typescript
// passport.use(new GoogleStrategy({
// clientID: process.env.GOOGLE_CLIENT_ID!,
// clientSecret: process.env.GOOGLE_CLIENT_SECRET!,
// callbackURL: '/api/auth/google/callback',
// scope: ['profile', 'email']
// },
// async (accessToken, refreshToken, profile, done) => {
// const email = profile.emails?.[0]?.value;
// const user = await db.findUserByEmail(email);
// if (user) {
// return done(null, user);
// }
// // Create new user with null password_hash
// const newUser = await db.createUser(email, null, {
// full_name: profile.displayName,
// avatar_url: profile.photos?.[0]?.value
// });
// return done(null, newUser);
// }
// ));
// Only register the strategy if the required environment variables are set.
if (process.env.GOOGLE_CLIENT_ID && process.env.GOOGLE_CLIENT_SECRET) {
passport.use(
new GoogleStrategy(
{
clientID: process.env.GOOGLE_CLIENT_ID,
clientSecret: process.env.GOOGLE_CLIENT_SECRET,
callbackURL: '/api/auth/google/callback',
scope: ['profile', 'email'],
},
async (_accessToken, _refreshToken, profile, done) => {
const email = profile.emails?.[0]?.value;
if (!email) {
return done(new Error('No email found in Google profile.'), false);
}
const existingUserProfile = await db.userRepo.findUserWithProfileByEmail(email, logger);
if (existingUserProfile) {
// User exists, log them in (strip sensitive fields)
return done(null, cleanUserProfile);
} else {
// Create new user with null password_hash for OAuth users
const newUserProfile = await db.userRepo.createUser(
email,
null,
{
full_name: profile.displayName,
avatar_url: profile.photos?.[0]?.value,
},
logger,
);
return done(null, newUserProfile);
}
},
),
);
logger.info('[Passport] Google OAuth strategy registered.');
} else {
logger.warn('[Passport] Google OAuth strategy NOT registered: credentials not set.');
}
```
#### GitHub OAuth
Located in `src/routes/passport.routes.ts` (lines 219-269, commented):
Located in `src/config/passport.ts` (lines 237-310):
```typescript
// passport.use(new GitHubStrategy({
// clientID: process.env.GITHUB_CLIENT_ID!,
// clientSecret: process.env.GITHUB_CLIENT_SECRET!,
// callbackURL: '/api/auth/github/callback',
// scope: ['user:email']
// },
// async (accessToken, refreshToken, profile, done) => {
// const email = profile.emails?.[0]?.value;
// // Similar flow to Google OAuth
// }
// ));
// Only register the strategy if the required environment variables are set.
if (process.env.GITHUB_CLIENT_ID && process.env.GITHUB_CLIENT_SECRET) {
passport.use(
new GitHubStrategy(
{
clientID: process.env.GITHUB_CLIENT_ID,
clientSecret: process.env.GITHUB_CLIENT_SECRET,
callbackURL: '/api/auth/github/callback',
scope: ['user:email'],
},
async (_accessToken, _refreshToken, profile, done) => {
const email = profile.emails?.[0]?.value;
if (!email) {
return done(new Error('No public email found in GitHub profile.'), false);
}
// Same flow as Google OAuth - find or create user
},
),
);
logger.info('[Passport] GitHub OAuth strategy registered.');
} else {
logger.warn('[Passport] GitHub OAuth strategy NOT registered: credentials not set.');
}
```
#### OAuth Routes (Disabled)
#### OAuth Routes (Active)
Located in `src/routes/auth.routes.ts` (lines 289-315, commented):
Located in `src/routes/auth.routes.ts` (lines 587-609):
```typescript
// const handleOAuthCallback = (req, res) => {
// const user = req.user;
// const accessToken = jwt.sign(payload, JWT_SECRET, { expiresIn: '15m' });
// const refreshToken = crypto.randomBytes(64).toString('hex');
//
// await db.saveRefreshToken(user.user_id, refreshToken);
// res.cookie('refreshToken', refreshToken, { httpOnly: true, secure: true });
// res.redirect(`${FRONTEND_URL}/auth/callback?token=${accessToken}`);
// };
// Google OAuth routes
router.get('/google', passport.authenticate('google', { session: false }));
router.get(
'/google/callback',
passport.authenticate('google', {
session: false,
failureRedirect: '/?error=google_auth_failed',
}),
createOAuthCallbackHandler('google'),
);
// router.get('/google', passport.authenticate('google', { session: false }));
// router.get('/google/callback', passport.authenticate('google', { ... }), handleOAuthCallback);
// router.get('/github', passport.authenticate('github', { session: false }));
// router.get('/github/callback', passport.authenticate('github', { ... }), handleOAuthCallback);
// GitHub OAuth routes
router.get('/github', passport.authenticate('github', { session: false }));
router.get(
'/github/callback',
passport.authenticate('github', {
session: false,
failureRedirect: '/?error=github_auth_failed',
}),
createOAuthCallbackHandler('github'),
);
```
#### OAuth Callback Handler
The callback handler generates tokens and redirects to the frontend:
```typescript
const createOAuthCallbackHandler = (provider: 'google' | 'github') => {
return async (req: Request, res: Response) => {
const userProfile = req.user as UserProfile;
const { accessToken, refreshToken } = await authService.handleSuccessfulLogin(
userProfile,
req.log,
);
res.cookie('refreshToken', refreshToken, {
httpOnly: true,
secure: process.env.NODE_ENV === 'production',
maxAge: 30 * 24 * 60 * 60 * 1000, // 30 days
});
// Redirect to frontend with provider-specific token param
const tokenParam = provider === 'google' ? 'googleAuthToken' : 'githubAuthToken';
res.redirect(`${process.env.FRONTEND_URL}/?${tokenParam}=${accessToken}`);
};
};
```
### Database Schema
@@ -248,11 +315,13 @@ export const mockAuth = (req, res, next) => {
};
```
## Enabling OAuth
## Configuring OAuth Providers
OAuth is fully implemented and activates automatically when credentials are provided. No code changes are required.
### Step 1: Set Environment Variables
Add to `.env`:
Add to your environment (`.env.local` for development, Gitea secrets for production):
```bash
# Google OAuth
@@ -283,54 +352,29 @@ GITHUB_CLIENT_SECRET=your-github-client-secret
- Development: `http://localhost:3001/api/auth/github/callback`
- Production: `https://your-domain.com/api/auth/github/callback`
### Step 3: Uncomment Backend Code
### Step 3: Restart the Application
**In `src/routes/passport.routes.ts`**:
After setting the environment variables, restart PM2:
1. Uncomment import statements (lines 5-6):
```typescript
import { Strategy as GoogleStrategy } from 'passport-google-oauth20';
import { Strategy as GitHubStrategy } from 'passport-github2';
```
2. Uncomment Google strategy (lines 167-217)
3. Uncomment GitHub strategy (lines 219-269)
**In `src/routes/auth.routes.ts`**:
1. Uncomment `handleOAuthCallback` function (lines 291-309)
2. Uncomment OAuth routes (lines 311-315)
### Step 4: Add Frontend OAuth Buttons
Create login buttons that redirect to:
- Google: `GET /api/auth/google`
- GitHub: `GET /api/auth/github`
Handle callback at `/auth/callback?token=<accessToken>`:
1. Extract token from URL
2. Store in client-side token storage
3. Redirect to dashboard
### Step 5: Handle OAuth Callback Page
Create `src/pages/AuthCallback.tsx`:
```typescript
const AuthCallback = () => {
const token = new URLSearchParams(location.search).get('token');
if (token) {
setToken(token);
navigate('/dashboard');
} else {
navigate('/login?error=auth_failed');
}
};
```bash
podman exec -it flyer-crawler-dev pm2 restart all
```
The Passport configuration will automatically register the OAuth strategies when it detects the credentials. Check the logs for confirmation:
```text
[Passport] Google OAuth strategy registered.
[Passport] GitHub OAuth strategy registered.
```
### Frontend Integration
OAuth login buttons are implemented in `src/client/pages/AuthView.tsx`. The frontend:
1. Redirects users to `/api/auth/google` or `/api/auth/github`
2. Handles the callback via the `useAppInitialization` hook which looks for `googleAuthToken` or `githubAuthToken` query parameters
3. Stores the token and redirects to the dashboard
## Known Limitations
1. **No OAuth Provider ID Mapping**: Users are identified by email only. If a user has accounts with different emails on Google and GitHub, they create separate accounts.
@@ -372,31 +416,32 @@ const AuthCallback = () => {
- **Stateless Architecture**: No session storage required; scales horizontally.
- **Secure by Default**: HTTP-only cookies, short token expiry, bcrypt hashing.
- **Account Protection**: Lockout prevents brute-force attacks.
- **Flexible OAuth**: Can enable/disable OAuth without code changes (just env vars + uncommenting).
- **Graceful Degradation**: System works with local auth only.
- **Flexible OAuth**: OAuth activates automatically when credentials are set - no code changes needed.
- **Graceful Degradation**: System works with local auth only when OAuth credentials are not configured.
- **Full Feature Set**: Both local and OAuth authentication are production-ready.
### Negative
- **OAuth Disabled by Default**: Requires manual uncommenting to enable.
- **No Account Linking**: Multiple OAuth providers create separate accounts.
- **Frontend Work Required**: OAuth login buttons don't exist yet.
- **Token in URL**: OAuth callback passes token in URL (visible in browser history).
- **No Account Linking**: Multiple OAuth providers create separate accounts if emails differ.
- **Token in URL**: OAuth callback passes token in URL query parameter (visible in browser history).
- **Email-Based Identity**: OAuth users are identified by email only, not provider-specific IDs.
### Mitigation
- Document OAuth enablement steps clearly (see [../architecture/AUTHENTICATION.md](../architecture/AUTHENTICATION.md)).
- Document OAuth configuration steps clearly (see [../architecture/AUTHENTICATION.md](../architecture/AUTHENTICATION.md)).
- Consider adding OAuth provider ID columns for future account linking.
- Use URL fragment (`#token=`) instead of query parameter for callback.
- Consider using URL fragment (`#token=`) instead of query parameter for callback in future enhancement.
## Key Files
| File | Purpose |
| ------------------------------------------------------ | ------------------------------------------------ |
| `src/routes/passport.routes.ts` | Passport strategies (local, JWT, OAuth) |
| `src/config/passport.ts` | Passport strategies (local, JWT, OAuth) |
| `src/routes/auth.routes.ts` | Auth endpoints (login, register, refresh, OAuth) |
| `src/services/authService.ts` | Auth business logic |
| `src/services/db/user.db.ts` | User database operations |
| `src/config/env.ts` | Environment variable validation |
| `src/client/pages/AuthView.tsx` | Frontend login/register UI with OAuth buttons |
| [AUTHENTICATION.md](../architecture/AUTHENTICATION.md) | OAuth setup guide |
| `.env.example` | Environment variable template |
@@ -409,11 +454,11 @@ const AuthCallback = () => {
## Future Enhancements
1. **Enable OAuth**: Uncomment strategies and configure providers.
2. **Add OAuth Provider Mapping Table**: Store `googleId`, `githubId` for account linking.
3. **Implement Account Linking**: Allow users to connect multiple OAuth providers.
4. **Add Password to OAuth Users**: Allow OAuth users to set a password.
5. **Implement PKCE**: Add PKCE flow for enhanced OAuth security.
6. **Token in Fragment**: Use URL fragment for OAuth callback token.
7. **OAuth Token Storage**: Store OAuth refresh tokens for provider API access.
8. **Magic Link Login**: Add passwordless email login option.
1. **Add OAuth Provider Mapping Table**: Store `googleId`, `githubId` for account linking.
2. **Implement Account Linking**: Allow users to connect multiple OAuth providers.
3. **Add Password to OAuth Users**: Allow OAuth users to set a password for local login.
4. **Implement PKCE**: Add PKCE flow for enhanced OAuth security.
5. **Token in Fragment**: Use URL fragment for OAuth callback token instead of query parameter.
6. **OAuth Token Storage**: Store OAuth refresh tokens for provider API access.
7. **Magic Link Login**: Add passwordless email login option.
8. **Additional OAuth Providers**: Support for Apple, Microsoft, or other providers.

View File

@@ -2,9 +2,9 @@
**Date**: 2026-01-11
**Status**: Proposed
**Status**: Accepted (Fully Implemented)
**Related**: [ADR-015](0015-application-performance-monitoring-and-error-tracking.md), [ADR-004](0004-standardized-application-wide-structured-logging.md)
**Related**: [ADR-015](0015-error-tracking-and-observability.md), [ADR-004](0004-standardized-application-wide-structured-logging.md)
## Context
@@ -335,7 +335,7 @@ SELECT award_achievement('user-uuid', 'Nonexistent Badge');
## References
- [ADR-015: Application Performance Monitoring](0015-application-performance-monitoring-and-error-tracking.md)
- [ADR-015: Error Tracking and Observability](0015-error-tracking-and-observability.md)
- [ADR-004: Standardized Structured Logging](0004-standardized-application-wide-structured-logging.md)
- [PostgreSQL RAISE Documentation](https://www.postgresql.org/docs/current/plpgsql-errors-and-messages.html)
- [PostgreSQL Logging Configuration](https://www.postgresql.org/docs/current/runtime-config-logging.html)

View File

@@ -2,7 +2,9 @@
**Date**: 2026-01-11
**Status**: Proposed
**Status**: Accepted (Fully Implemented)
**Related**: [ADR-004](0004-standardized-application-wide-structured-logging.md)
## Context
@@ -17,7 +19,9 @@ We will adopt a namespace-based debug filter pattern, similar to the `debug` npm
## Implementation
In `src/services/logger.server.ts`:
### Core Implementation (Completed 2026-01-11)
Implemented in [src/services/logger.server.ts:140-150](src/services/logger.server.ts#L140-L150):
```typescript
const debugModules = (process.env.DEBUG_MODULES || '').split(',').map((s) => s.trim());
@@ -33,10 +37,100 @@ export const createScopedLogger = (moduleName: string) => {
};
```
### Adopted Services (Completed 2026-01-26)
Services currently using `createScopedLogger`:
- `ai-service` - AI/Gemini integration ([src/services/aiService.server.ts:1020](src/services/aiService.server.ts#L1020))
- `flyer-processing-service` - Flyer upload and processing ([src/services/flyerProcessingService.server.ts:20](src/services/flyerProcessingService.server.ts#L20))
## Usage
To debug only AI and Database interactions:
### Enable Debug Logging for Specific Modules
To debug only AI and flyer processing:
```bash
DEBUG_MODULES=ai-service,db-repo npm run dev
DEBUG_MODULES=ai-service,flyer-processing-service npm run dev
```
### Enable All Debug Logging
Use wildcard to enable debug logging for all modules:
```bash
DEBUG_MODULES=* npm run dev
```
### Common Module Names
| Module Name | Purpose | File |
| -------------------------- | ---------------------------------------- | ----------------------------------------------- |
| `ai-service` | AI/Gemini API interactions | `src/services/aiService.server.ts` |
| `flyer-processing-service` | Flyer upload, validation, and processing | `src/services/flyerProcessingService.server.ts` |
## Best Practices
1. **Use Scoped Loggers for Long-Running Services**: Services with complex workflows or external API calls should use `createScopedLogger` to allow targeted debugging.
2. **Use Child Loggers for Contextual Data**: Even within scoped loggers, create child loggers with job/request-specific context:
```typescript
const logger = createScopedLogger('my-service');
async function processJob(job: Job) {
const jobLogger = logger.child({ jobId: job.id, jobName: job.name });
jobLogger.debug('Starting job processing');
}
```
3. **Module Naming Convention**: Use kebab-case suffixed with `-service` or `-worker` (e.g., `ai-service`, `email-worker`).
4. **Production Usage**: `DEBUG_MODULES` can be set in production for temporary debugging, but should not be used continuously due to increased log volume.
## Examples
### Development Debugging
Debug AI service issues during development:
```bash
# Dev container
DEBUG_MODULES=ai-service npm run dev
# Or via PM2
DEBUG_MODULES=ai-service pm2 restart flyer-crawler-api-dev
```
### Production Troubleshooting
Temporarily enable debug logging for a specific subsystem:
```bash
# SSH into production server
ssh root@projectium.com
# Set environment variable and restart
DEBUG_MODULES=ai-service pm2 restart flyer-crawler-api
# View logs
pm2 logs flyer-crawler-api --lines 100
# Disable debug logging
pm2 unset DEBUG_MODULES flyer-crawler-api
pm2 restart flyer-crawler-api
```
## Consequences
**Positive**:
- Developers can inspect detailed logs for specific subsystems without log flooding
- Production debugging becomes more targeted and efficient
- No performance impact when debug logging is disabled
- Compatible with existing Pino logging infrastructure
**Negative**:
- Requires developers to know module names (mitigated by documentation above)
- Not all services have adopted scoped loggers yet (gradual migration)

View File

@@ -2,7 +2,14 @@
**Date**: 2026-01-11
**Status**: Proposed
**Status**: Accepted (Fully Implemented)
**Implementation Status**:
- ✅ BullMQ worker stall configuration (complete)
- ✅ Basic health endpoints (/live, /ready, /redis, etc.)
- ✅ /health/queues endpoint (complete)
- ✅ Worker heartbeat mechanism (complete)
## Context
@@ -60,3 +67,76 @@ The `/health/queues` endpoint will:
**Negative**:
- Requires configuring external monitoring to poll the new endpoint.
## Implementation Notes
### Completed (2026-01-11)
1. **BullMQ Stall Configuration** - `src/config/workerOptions.ts`
- All workers use `defaultWorkerOptions` with:
- `stalledInterval: 30000` (30s)
- `maxStalledCount: 3`
- `lockDuration: 30000` (30s)
- Applied to all 9 workers: flyer, email, analytics, cleanup, weekly-analytics, token-cleanup, receipt, expiry-alert, barcode
2. **Basic Health Endpoints** - `src/routes/health.routes.ts`
- `/health/live` - Liveness probe
- `/health/ready` - Readiness probe (checks DB, Redis, storage)
- `/health/startup` - Startup probe
- `/health/redis` - Redis connectivity
- `/health/db-pool` - Database connection pool status
### Implementation Completed (2026-01-26)
1. **`/health/queues` Endpoint** ✅
- Added route to `src/routes/health.routes.ts:511-674`
- Iterates through all 9 queues from `src/services/queues.server.ts`
- Fetches job counts using BullMQ Queue API: `getJobCounts()`
- Returns structured response including both queue metrics and worker heartbeats:
```typescript
{
status: 'healthy' | 'unhealthy',
timestamp: string,
queues: {
[queueName]: {
waiting: number,
active: number,
failed: number,
delayed: number
}
},
workers: {
[workerName]: {
alive: boolean,
lastSeen?: string,
pid?: number,
host?: string
}
}
}
```
- Returns 200 OK if all healthy, 503 if any queue/worker unavailable
- Full OpenAPI documentation included
2. **Worker Heartbeat Mechanism** ✅
- Added `updateWorkerHeartbeat()` and `startWorkerHeartbeat()` in `src/services/workers.server.ts:100-149`
- Key pattern: `worker:heartbeat:<worker-name>`
- Stores: `{ timestamp: ISO8601, pid: number, host: string }`
- Updates every 30s with 90s TTL
- Integrated with `/health/queues` endpoint (checks if heartbeat < 60s old)
- Heartbeat intervals properly cleaned up in `closeWorkers()` and `gracefulShutdown()`
3. **Comprehensive Tests** ✅
- Added 5 test cases in `src/routes/health.routes.test.ts:623-858`
- Tests cover: healthy state, queue failures, stale heartbeats, missing heartbeats, Redis errors
- All tests follow existing patterns with proper mocking
### Future Enhancements (Not Implemented)
1. **Queue Depth Alerting** (Low Priority)
- Add configurable thresholds per queue type
- Return 500 if `waiting` count exceeds threshold for extended period
- Consider using Redis for storing threshold breach timestamps
- **Estimate**: 1-2 hours

View File

@@ -332,6 +332,6 @@ Response:
## References
- [ADR-006: Background Job Processing](./0006-background-job-processing-and-task-queues.md)
- [ADR-015: Application Performance Monitoring](./0015-application-performance-monitoring-and-error-tracking.md)
- [ADR-015: Error Tracking and Observability](./0015-error-tracking-and-observability.md)
- [Bugsink API Documentation](https://bugsink.com/docs/api/)
- [Gitea API Documentation](https://docs.gitea.io/en-us/api-usage/)

View File

@@ -1,4 +1,4 @@
# ADR-023: Database Normalization and Referential Integrity
# ADR-055: Database Normalization and Referential Integrity
**Date:** 2026-01-19
**Status:** Accepted

View File

@@ -0,0 +1,262 @@
# ADR-056: Application Performance Monitoring (APM)
**Date**: 2026-01-26
**Status**: Proposed
**Related**: [ADR-015](./0015-error-tracking-and-observability.md) (Error Tracking and Observability)
## Context
Application Performance Monitoring (APM) provides visibility into application behavior through:
- **Distributed Tracing**: Track requests across services, queues, and database calls
- **Performance Metrics**: Response times, throughput, error rates
- **Resource Monitoring**: Memory usage, CPU, database connections
- **Transaction Analysis**: Identify slow endpoints and bottlenecks
While ADR-015 covers error tracking and observability, APM is a distinct concern focused on performance rather than errors. The Sentry SDK supports APM through its tracing features, but this capability is currently **intentionally disabled** in our application.
### Current State
The Sentry SDK is installed and configured for error tracking (see ADR-015), but APM features are disabled:
```typescript
// src/services/sentry.client.ts
Sentry.init({
dsn: config.sentry.dsn,
environment: config.sentry.environment,
// Performance monitoring - disabled for now to keep it simple
tracesSampleRate: 0,
// ...
});
```
```typescript
// src/services/sentry.server.ts
Sentry.init({
dsn: config.sentry.dsn,
environment: config.sentry.environment || config.server.nodeEnv,
// Performance monitoring - disabled for now to keep it simple
tracesSampleRate: 0,
// ...
});
```
### Why APM is Currently Disabled
1. **Complexity**: APM adds overhead and complexity to debugging
2. **Bugsink Limitations**: Bugsink's APM support is less mature than its error tracking
3. **Resource Overhead**: Tracing adds memory and CPU overhead
4. **Focus**: Error tracking provides more immediate value for our current scale
5. **Cost**: High sample rates can significantly increase storage requirements
## Decision
We propose a **staged approach** to APM implementation:
### Phase 1: Selective Backend Tracing (Low Priority)
Enable tracing for specific high-value operations:
```typescript
// Enable tracing for specific transactions only
Sentry.init({
dsn: config.sentry.dsn,
tracesSampleRate: 0, // Keep default at 0
// Trace only specific high-value transactions
tracesSampler: (samplingContext) => {
const transactionName = samplingContext.transactionContext?.name;
// Always trace flyer processing jobs
if (transactionName?.includes('flyer-processing')) {
return 0.1; // 10% sample rate
}
// Always trace AI/Gemini calls
if (transactionName?.includes('gemini')) {
return 0.5; // 50% sample rate
}
// Trace slow endpoints (determined by custom logic)
if (samplingContext.parentSampled) {
return 0.1; // 10% for child transactions
}
return 0; // Don't trace other transactions
},
});
```
### Phase 2: Custom Performance Metrics
Add custom metrics without full tracing overhead:
```typescript
// Custom metric for slow database queries
import { metrics } from '@sentry/node';
// In repository methods
const startTime = performance.now();
const result = await pool.query(sql, params);
const duration = performance.now() - startTime;
metrics.distribution('db.query.duration', duration, {
tags: { query_type: 'select', table: 'flyers' },
});
if (duration > 1000) {
logger.warn({ duration, sql }, 'Slow query detected');
}
```
### Phase 3: Full APM Integration (Future)
When/if full APM is needed:
```typescript
Sentry.init({
dsn: config.sentry.dsn,
tracesSampleRate: 0.1, // 10% of transactions
profilesSampleRate: 0.1, // 10% of traced transactions get profiled
integrations: [
// Database tracing
Sentry.postgresIntegration(),
// Redis tracing
Sentry.redisIntegration(),
// BullMQ job tracing
Sentry.prismaIntegration(), // or custom BullMQ integration
],
});
```
## Implementation Steps
### To Enable Basic APM
1. **Update Sentry Configuration**:
- Set `tracesSampleRate` > 0 in `src/services/sentry.server.ts`
- Set `tracesSampleRate` > 0 in `src/services/sentry.client.ts`
- Add environment variable `SENTRY_TRACES_SAMPLE_RATE` (default: 0)
2. **Add Instrumentation**:
- Enable automatic Express instrumentation
- Add manual spans for BullMQ job processing
- Add database query instrumentation
3. **Frontend Tracing**:
- Add Browser Tracing integration
- Configure page load and navigation tracing
4. **Environment Variables**:
```bash
SENTRY_TRACES_SAMPLE_RATE=0.1 # 10% sampling
SENTRY_PROFILES_SAMPLE_RATE=0 # Profiling disabled
```
5. **Bugsink Configuration**:
- Verify Bugsink supports performance data ingestion
- Configure retention policies for performance data
### Configuration Changes Required
```typescript
// src/config/env.ts - Add new config
sentry: {
dsn: env.SENTRY_DSN,
environment: env.SENTRY_ENVIRONMENT,
debug: env.SENTRY_DEBUG === 'true',
tracesSampleRate: parseFloat(env.SENTRY_TRACES_SAMPLE_RATE || '0'),
profilesSampleRate: parseFloat(env.SENTRY_PROFILES_SAMPLE_RATE || '0'),
},
```
```typescript
// src/services/sentry.server.ts - Updated init
Sentry.init({
dsn: config.sentry.dsn,
environment: config.sentry.environment,
tracesSampleRate: config.sentry.tracesSampleRate,
profilesSampleRate: config.sentry.profilesSampleRate,
// ... rest of config
});
```
## Trade-offs
### Enabling APM
**Benefits**:
- Identify performance bottlenecks
- Track distributed transactions across services
- Profile slow endpoints
- Monitor resource utilization trends
**Costs**:
- Increased memory usage (~5-15% overhead)
- Additional CPU for trace processing
- Increased storage in Bugsink/Sentry
- More complex debugging (noise in traces)
- Potential latency from tracing overhead
### Keeping APM Disabled
**Benefits**:
- Simpler operation and debugging
- Lower resource overhead
- Focused on error tracking (higher priority)
- No additional storage costs
**Costs**:
- No automated performance insights
- Manual profiling required for bottleneck detection
- Limited visibility into slow transactions
## Alternatives Considered
1. **OpenTelemetry**: More vendor-neutral, but adds another dependency and complexity
2. **Prometheus + Grafana**: Good for metrics, but doesn't provide distributed tracing
3. **Jaeger/Zipkin**: Purpose-built for tracing, but requires additional infrastructure
4. **New Relic/Datadog SaaS**: Full-featured but conflicts with self-hosted requirement
## Current Recommendation
**Keep APM disabled** (`tracesSampleRate: 0`) until:
1. Specific performance issues are identified that require tracing
2. Bugsink's APM support is verified and tested
3. Infrastructure can support the additional overhead
4. There is a clear business need for performance visibility
When enabling APM becomes necessary, start with Phase 1 (selective tracing) to minimize overhead while gaining targeted insights.
## Consequences
### Positive (When Implemented)
- Automated identification of slow endpoints
- Distributed trace visualization across async operations
- Correlation between errors and performance issues
- Proactive alerting on performance degradation
### Negative
- Additional infrastructure complexity
- Storage overhead for trace data
- Potential performance impact from tracing itself
- Learning curve for trace analysis
## References
- [Sentry Performance Monitoring](https://docs.sentry.io/product/performance/)
- [@sentry/node Performance](https://docs.sentry.io/platforms/javascript/guides/node/performance/)
- [@sentry/react Performance](https://docs.sentry.io/platforms/javascript/guides/react/performance/)
- [OpenTelemetry](https://opentelemetry.io/) (alternative approach)
- [ADR-015: Error Tracking and Observability](./0015-error-tracking-and-observability.md)

View File

@@ -0,0 +1,367 @@
# ADR-057: Test Remediation Post-API Versioning and Frontend Rework
**Date**: 2026-01-28
**Status**: Accepted
**Context**: Major test remediation effort completed after ADR-008 API versioning implementation and frontend style rework
## Context
Following the completion of ADR-008 Phase 2 (API Versioning Strategy) and a concurrent frontend style/design rework, the test suite experienced 105 test failures across unit tests and E2E tests. This ADR documents the systematic remediation effort, root cause analysis, and lessons learned to prevent similar issues in future migrations.
### Scope of Failures
| Test Type | Failures | Total Tests | Pass Rate After Fix |
| ---------- | -------- | ----------- | ------------------- |
| Unit Tests | 69 | 3,392 | 100% |
| E2E Tests | 36 | 36 | 100% |
| **Total** | **105** | **3,428** | **100%** |
### Root Causes Identified
The failures were categorized into six distinct categories:
1. **API Versioning Path Mismatches** (71 failures)
- Test files using `/api/` instead of `/api/v1/`
- Environment variables not set for API base URL
- Integration and E2E tests calling unversioned endpoints
2. **Dark Mode Class Assertion Failures** (8 failures)
- Frontend rework changed Tailwind dark mode utility classes
- Test assertions checking for outdated class names
3. **Selected Item Styling Changes** (6 failures)
- Component styling refactored to new design tokens
- Test assertions expecting old CSS class combinations
4. **Admin-Only Component Visibility** (12 failures)
- MainLayout tests not properly mocking admin role
- ActivityLog component visibility tied to role-based access
5. **Mock Hoisting Issues** (5 failures)
- Queue mocks not available during module initialization
- Vitest's module hoisting order causing mock setup failures
6. **Error Log Path Hardcoding** (3 failures)
- Route handlers logging hardcoded paths like `/api/flyers`
- Test assertions expecting versioned paths `/api/v1/flyers`
## Decision
We implemented a systematic remediation approach addressing each failure category with targeted fixes while establishing patterns to prevent regression.
### 1. API Versioning Configuration Updates
**Files Modified**:
- `vite.config.ts`
- `vitest.config.e2e.ts`
- `vitest.config.integration.ts`
**Pattern Applied**: Centralize API base URL in Vitest environment variables
```typescript
// vite.config.ts - Unit test configuration
test: {
env: {
// ADR-008: Ensure API versioning is correctly set for unit tests
VITE_API_BASE_URL: '/api/v1',
},
// ...
}
// vitest.config.e2e.ts - E2E test configuration
test: {
env: {
// ADR-008: API versioning - all routes use /api/v1 prefix
VITE_API_BASE_URL: 'http://localhost:3098/api/v1',
},
// ...
}
// vitest.config.integration.ts - Integration test configuration
test: {
env: {
// ADR-008: API versioning - all routes use /api/v1 prefix
VITE_API_BASE_URL: 'http://localhost:3099/api/v1',
},
// ...
}
```
### 2. E2E Test URL Path Updates
**Files Modified** (7 files, 31 URL occurrences):
- `src/tests/e2e/budget-journey.e2e.test.ts`
- `src/tests/e2e/deals-journey.e2e.test.ts`
- `src/tests/e2e/flyer-upload.e2e.test.ts`
- `src/tests/e2e/inventory-journey.e2e.test.ts`
- `src/tests/e2e/receipt-journey.e2e.test.ts`
- `src/tests/e2e/upc-journey.e2e.test.ts`
- `src/tests/e2e/user-journey.e2e.test.ts`
**Pattern Applied**: Update all hardcoded API paths to versioned endpoints
```typescript
// Before
const response = await getRequest().post('/api/auth/register').send({...});
// After
const response = await getRequest().post('/api/v1/auth/register').send({...});
```
### 3. Unit Test Assertion Updates for UI Changes
**Files Modified**:
- `src/features/flyer/FlyerDisplay.test.tsx`
- `src/features/flyer/FlyerList.test.tsx`
**Pattern Applied**: Update CSS class assertions to match new design system
```typescript
// FlyerDisplay.test.tsx - Dark mode class update
// Before
expect(image).toHaveClass('dark:brightness-75');
// After
expect(image).toHaveClass('dark:brightness-90');
// FlyerList.test.tsx - Selected item styling update
// Before
expect(selectedItem).toHaveClass('ring-2', 'ring-brand-primary');
// After
expect(selectedItem).toHaveClass('border-brand-primary', 'bg-teal-50/50', 'dark:bg-teal-900/10');
```
### 4. Admin-Only Component Test Separation
**File Modified**: `src/layouts/MainLayout.test.tsx`
**Pattern Applied**: Separate test cases for admin vs. regular user visibility
```typescript
describe('for authenticated users', () => {
beforeEach(() => {
mockedUseAuth.mockReturnValue({
...defaultUseAuthReturn,
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ user: mockUser }),
});
});
it('renders auth-gated components for regular users (PriceHistoryChart, Leaderboard)', () => {
renderWithRouter(<MainLayout {...defaultProps} />);
expect(screen.getByTestId('price-history-chart')).toBeInTheDocument();
expect(screen.getByTestId('leaderboard')).toBeInTheDocument();
// ActivityLog is admin-only, should NOT be present for regular users
expect(screen.queryByTestId('activity-log')).not.toBeInTheDocument();
});
it('renders ActivityLog for admin users', () => {
mockedUseAuth.mockReturnValue({
...defaultUseAuthReturn,
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ user: mockUser, role: 'admin' }),
});
renderWithRouter(<MainLayout {...defaultProps} />);
expect(screen.getByTestId('activity-log')).toBeInTheDocument();
});
});
```
### 5. vi.hoisted() Pattern for Queue Mocks
**File Modified**: `src/routes/health.routes.test.ts`
**Pattern Applied**: Use `vi.hoisted()` to ensure mocks are available during module hoisting
```typescript
// Use vi.hoisted to create mock queue objects that are available during vi.mock hoisting.
// This ensures the mock objects exist when the factory function runs.
const { mockQueuesModule } = vi.hoisted(() => {
// Helper function to create a mock queue object with vi.fn()
const createMockQueue = () => ({
getJobCounts: vi.fn().mockResolvedValue({
waiting: 0,
active: 0,
failed: 0,
delayed: 0,
}),
});
return {
mockQueuesModule: {
flyerQueue: createMockQueue(),
emailQueue: createMockQueue(),
// ... additional queues
},
};
});
// Mock the queues.server module BEFORE the health router imports it.
vi.mock('../services/queues.server', () => mockQueuesModule);
// Import the router AFTER all mocks are defined.
import healthRouter from './health.routes';
```
### 6. Dynamic Error Log Paths
**Pattern Applied**: Use `req.originalUrl` instead of hardcoded paths in error handlers
```typescript
// Before (INCORRECT - hardcoded path)
req.log.error({ error }, 'Error in /api/flyers/:id:');
// After (CORRECT - dynamic path)
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
```
## Implementation Summary
### Files Modified (14 total)
| Category | Files | Changes |
| -------------------- | ----- | ------------------------------------------------- |
| Vitest Configuration | 3 | Added `VITE_API_BASE_URL` environment variables |
| E2E Tests | 7 | Updated 31 API endpoint URLs |
| Unit Tests | 4 | Updated assertions for UI, mocks, and admin roles |
### Verification Results
After remediation, all tests pass in the dev container environment:
```text
Unit Tests: 3,392 passing
E2E Tests: 36 passing
Integration: 345/348 passing (3 known issues, unrelated)
Type Check: Passing
```
## Consequences
### Positive
1. **Test Suite Stability**: All tests now pass consistently in the dev container
2. **API Versioning Compliance**: Tests enforce the `/api/v1/` path requirement
3. **Pattern Documentation**: Clear patterns established for future test maintenance
4. **Separation of Concerns**: Admin vs. user test cases properly separated
5. **Mock Reliability**: `vi.hoisted()` pattern prevents mock timing issues
### Negative
1. **Maintenance Overhead**: Future API version changes will require test updates
2. **Manual Migration**: No automated tool to update test paths during versioning
### Neutral
1. **Test Execution Time**: No significant impact on test execution duration
2. **Coverage Metrics**: Coverage percentages unchanged
## Best Practices Established
### 1. API Versioning in Tests
**Always use versioned API paths in tests**:
```typescript
// Good
const response = await request.get('/api/v1/users/profile');
// Bad
const response = await request.get('/api/users/profile');
```
**Configure environment variables centrally in Vitest configs** rather than in individual test files.
### 2. vi.hoisted() for Module-Level Mocks
When mocking modules that are imported at the top level of other modules:
```typescript
// Pattern: Define mocks with vi.hoisted() BEFORE vi.mock() calls
const { mockModule } = vi.hoisted(() => ({
mockModule: {
someFunction: vi.fn(),
},
}));
vi.mock('./some-module', () => mockModule);
// Import AFTER mocks
import { something } from './module-that-imports-some-module';
```
### 3. Testing Conditional Component Rendering
When testing components that render differently based on user role:
1. Create separate `describe` blocks for each role
2. Set up role-specific mocks in `beforeEach`
3. Explicitly test both presence AND absence of role-gated components
### 4. CSS Class Assertions After UI Refactors
After frontend style changes:
1. Review component implementation for new class names
2. Update test assertions to match actual CSS classes
3. Consider using partial matching for complex class combinations:
```typescript
// Flexible matching for Tailwind classes
expect(element).toHaveClass('border-brand-primary');
// vs exact matching
expect(element).toHaveClass('border-brand-primary', 'bg-teal-50/50', 'dark:bg-teal-900/10');
```
### 5. Error Logging Paths
**Always use dynamic paths in error logs**:
```typescript
// Pattern: Use req.originalUrl for request path logging
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
```
This ensures error logs reflect the actual request URL including version prefixes.
## Migration Checklist for Future API Version Changes
When implementing a new API version (e.g., v2), follow this checklist:
- [ ] Update `vite.config.ts` test environment `VITE_API_BASE_URL`
- [ ] Update `vitest.config.e2e.ts` test environment `VITE_API_BASE_URL`
- [ ] Update `vitest.config.integration.ts` test environment `VITE_API_BASE_URL`
- [ ] Search and replace `/api/v1/` with `/api/v2/` in E2E test files
- [ ] Search and replace `/api/v1/` with `/api/v2/` in integration test files
- [ ] Verify route handler error logs use `req.originalUrl`
- [ ] Run full test suite in dev container to verify
**Search command for finding hardcoded paths**:
```bash
grep -r "/api/v1/" src/tests/
grep -r "'/api/" src/routes/*.ts
```
## Related ADRs
- [ADR-008](./0008-api-versioning-strategy.md) - API Versioning Strategy
- [ADR-010](./0010-testing-strategy-and-standards.md) - Testing Strategy and Standards
- [ADR-014](./0014-containerization-and-deployment-strategy.md) - Platform: Linux Only
- [ADR-040](./0040-testing-economics-and-priorities.md) - Testing Economics and Priorities
- [ADR-012](./0012-frontend-component-library-and-design-system.md) - Frontend Component Library
## Key Files
| File | Purpose |
| ------------------------------ | -------------------------------------------- |
| `vite.config.ts` | Unit test environment configuration |
| `vitest.config.e2e.ts` | E2E test environment configuration |
| `vitest.config.integration.ts` | Integration test environment configuration |
| `src/tests/e2e/*.e2e.test.ts` | E2E test files with versioned API paths |
| `src/routes/*.routes.test.ts` | Route test files with `vi.hoisted()` pattern |
| `docs/development/TESTING.md` | Testing guide with best practices |

View File

@@ -15,9 +15,10 @@ This document tracks the implementation status and estimated effort for all Arch
| Status | Count |
| ---------------------------- | ----- |
| Accepted (Fully Implemented) | 30 |
| Accepted (Fully Implemented) | 42 |
| Partially Implemented | 2 |
| Proposed (Not Started) | 16 |
| Proposed (Not Started) | 12 |
| Superseded | 1 |
---
@@ -34,23 +35,23 @@ This document tracks the implementation status and estimated effort for all Arch
### Category 2: Data Management
| ADR | Title | Status | Effort | Notes |
| --------------------------------------------------------------- | ------------------------ | -------- | ------ | ------------------------------ |
| [ADR-009](./0009-caching-strategy-for-read-heavy-operations.md) | Caching Strategy | Accepted | - | Fully implemented |
| [ADR-013](./0013-database-schema-migration-strategy.md) | Schema Migrations v1 | Proposed | M | Superseded by ADR-023 |
| [ADR-019](./0019-data-backup-and-recovery-strategy.md) | Backup & Recovery | Accepted | - | Fully implemented |
| [ADR-023](./0023-database-schema-migration-strategy.md) | Schema Migrations v2 | Proposed | L | Requires tooling setup |
| [ADR-031](./0031-data-retention-and-privacy-compliance.md) | Data Retention & Privacy | Proposed | XL | Legal/compliance review needed |
| ADR | Title | Status | Effort | Notes |
| --------------------------------------------------------------- | ------------------------ | ---------- | ------ | ------------------------------ |
| [ADR-009](./0009-caching-strategy-for-read-heavy-operations.md) | Caching Strategy | Accepted | - | Fully implemented |
| [ADR-013](./0013-database-schema-migration-strategy.md) | Schema Migrations v1 | Superseded | - | Superseded by ADR-023 |
| [ADR-019](./0019-data-backup-and-recovery-strategy.md) | Backup & Recovery | Accepted | - | Fully implemented |
| [ADR-023](./0023-database-schema-migration-strategy.md) | Schema Migrations v2 | Proposed | L | Requires tooling setup |
| [ADR-031](./0031-data-retention-and-privacy-compliance.md) | Data Retention & Privacy | Proposed | XL | Legal/compliance review needed |
### Category 3: API & Integration
| ADR | Title | Status | Effort | Notes |
| ------------------------------------------------------------------- | ------------------------ | ----------- | ------ | ------------------------------------- |
| [ADR-003](./0003-standardized-input-validation-using-middleware.md) | Input Validation | Accepted | - | Fully implemented |
| [ADR-008](./0008-api-versioning-strategy.md) | API Versioning | Proposed | L | Major URL/routing changes |
| [ADR-018](./0018-api-documentation-strategy.md) | API Documentation | Accepted | - | OpenAPI/Swagger implemented |
| [ADR-022](./0022-real-time-notification-system.md) | Real-time Notifications | Proposed | XL | WebSocket infrastructure |
| [ADR-028](./0028-api-response-standardization.md) | Response Standardization | Implemented | L | Completed (routes, middleware, tests) |
| ADR | Title | Status | Effort | Notes |
| ------------------------------------------------------------------- | ------------------------ | -------- | ------ | ------------------------------------- |
| [ADR-003](./0003-standardized-input-validation-using-middleware.md) | Input Validation | Accepted | - | Fully implemented |
| [ADR-008](./0008-api-versioning-strategy.md) | API Versioning | Accepted | - | Phase 2 complete, tests migrated |
| [ADR-018](./0018-api-documentation-strategy.md) | API Documentation | Accepted | - | OpenAPI/Swagger implemented |
| [ADR-022](./0022-real-time-notification-system.md) | Real-time Notifications | Accepted | - | Fully implemented |
| [ADR-028](./0028-api-response-standardization.md) | Response Standardization | Accepted | - | Completed (routes, middleware, tests) |
### Category 4: Security & Compliance
@@ -62,25 +63,31 @@ This document tracks the implementation status and estimated effort for all Arch
| [ADR-029](./0029-secret-rotation-and-key-management.md) | Secret Rotation | Proposed | L | Infrastructure changes needed |
| [ADR-032](./0032-rate-limiting-strategy.md) | Rate Limiting | Accepted | - | Fully implemented |
| [ADR-033](./0033-file-upload-and-storage-strategy.md) | File Upload & Storage | Accepted | - | Fully implemented |
| [ADR-048](./0048-authentication-strategy.md) | Authentication | Accepted | - | Fully implemented |
### Category 5: Observability & Monitoring
| ADR | Title | Status | Effort | Notes |
| -------------------------------------------------------------------------- | --------------------------- | -------- | ------ | --------------------------------- |
| [ADR-004](./0004-standardized-application-wide-structured-logging.md) | Structured Logging | Accepted | - | Fully implemented |
| [ADR-015](./0015-application-performance-monitoring-and-error-tracking.md) | APM & Error Tracking | Proposed | M | Third-party integration |
| [ADR-050](./0050-postgresql-function-observability.md) | PostgreSQL Fn Observability | Proposed | M | Depends on ADR-015 implementation |
| ADR | Title | Status | Effort | Notes |
| --------------------------------------------------------------------- | --------------------------- | -------- | ------ | ------------------------------------------ |
| [ADR-004](./0004-standardized-application-wide-structured-logging.md) | Structured Logging | Accepted | - | Fully implemented |
| [ADR-015](./0015-error-tracking-and-observability.md) | Error Tracking | Accepted | - | Fully implemented |
| [ADR-050](./0050-postgresql-function-observability.md) | PostgreSQL Fn Observability | Accepted | - | Fully implemented |
| [ADR-051](./0051-asynchronous-context-propagation.md) | Context Propagation | Accepted | - | Fully implemented |
| [ADR-052](./0052-granular-debug-logging-strategy.md) | Granular Debug Logging | Accepted | - | Fully implemented |
| [ADR-056](./0056-application-performance-monitoring.md) | APM (Performance) | Proposed | M | tracesSampleRate=0, intentionally disabled |
### Category 6: Deployment & Operations
| ADR | Title | Status | Effort | Notes |
| -------------------------------------------------------------- | ----------------- | -------- | ------ | -------------------------- |
| [ADR-006](./0006-background-job-processing-and-task-queues.md) | Background Jobs | Accepted | - | Fully implemented |
| [ADR-014](./0014-containerization-and-deployment-strategy.md) | Containerization | Partial | M | Docker done, K8s pending |
| [ADR-017](./0017-ci-cd-and-branching-strategy.md) | CI/CD & Branching | Accepted | - | Fully implemented |
| [ADR-024](./0024-feature-flagging-strategy.md) | Feature Flags | Proposed | M | New service/library needed |
| [ADR-037](./0037-scheduled-jobs-and-cron-pattern.md) | Scheduled Jobs | Accepted | - | Fully implemented |
| [ADR-038](./0038-graceful-shutdown-pattern.md) | Graceful Shutdown | Accepted | - | Fully implemented |
| ADR | Title | Status | Effort | Notes |
| -------------------------------------------------------------- | ------------------ | -------- | ------ | ------------------------ |
| [ADR-006](./0006-background-job-processing-and-task-queues.md) | Background Jobs | Accepted | - | Fully implemented |
| [ADR-014](./0014-containerization-and-deployment-strategy.md) | Containerization | Partial | M | Docker done, K8s pending |
| [ADR-017](./0017-ci-cd-and-branching-strategy.md) | CI/CD & Branching | Accepted | - | Fully implemented |
| [ADR-024](./0024-feature-flagging-strategy.md) | Feature Flags | Accepted | - | Fully implemented |
| [ADR-037](./0037-scheduled-jobs-and-cron-pattern.md) | Scheduled Jobs | Accepted | - | Fully implemented |
| [ADR-038](./0038-graceful-shutdown-pattern.md) | Graceful Shutdown | Accepted | - | Fully implemented |
| [ADR-053](./0053-worker-health-checks.md) | Worker Health | Accepted | - | Fully implemented |
| [ADR-054](./0054-bugsink-gitea-issue-sync.md) | Bugsink-Gitea Sync | Proposed | L | Automated issue creation |
### Category 7: Frontend / User Interface
@@ -99,61 +106,82 @@ This document tracks the implementation status and estimated effort for all Arch
| [ADR-010](./0010-testing-strategy-and-standards.md) | Testing Strategy | Accepted | - | Fully implemented |
| [ADR-021](./0021-code-formatting-and-linting-unification.md) | Formatting & Linting | Accepted | - | Fully implemented |
| [ADR-027](./0027-standardized-naming-convention-for-ai-and-database-types.md) | Naming Conventions | Accepted | - | Fully implemented |
| [ADR-040](./0040-testing-economics-and-priorities.md) | Testing Economics | Accepted | - | Fully implemented |
| [ADR-045](./0045-test-data-factories-and-fixtures.md) | Test Data Factories | Accepted | - | Fully implemented |
| [ADR-047](./0047-project-file-and-folder-organization.md) | Project Organization | Proposed | XL | Major reorganization |
| [ADR-057](./0057-test-remediation-post-api-versioning.md) | Test Remediation | Accepted | - | Fully implemented |
### Category 9: Architecture Patterns
| ADR | Title | Status | Effort | Notes |
| -------------------------------------------------------- | --------------------- | -------- | ------ | ----------------- |
| [ADR-034](./0034-repository-pattern-standards.md) | Repository Pattern | Accepted | - | Fully implemented |
| [ADR-035](./0035-service-layer-architecture.md) | Service Layer | Accepted | - | Fully implemented |
| [ADR-036](./0036-event-bus-and-pub-sub-pattern.md) | Event Bus | Accepted | - | Fully implemented |
| [ADR-039](./0039-dependency-injection-pattern.md) | Dependency Injection | Accepted | - | Fully implemented |
| [ADR-041](./0041-ai-gemini-integration-architecture.md) | AI/Gemini Integration | Accepted | - | Fully implemented |
| [ADR-042](./0042-email-and-notification-architecture.md) | Email & Notifications | Accepted | - | Fully implemented |
| [ADR-043](./0043-express-middleware-pipeline.md) | Middleware Pipeline | Accepted | - | Fully implemented |
| [ADR-046](./0046-image-processing-pipeline.md) | Image Processing | Accepted | - | Fully implemented |
| [ADR-049](./0049-gamification-and-achievement-system.md) | Gamification System | Accepted | - | Fully implemented |
| ADR | Title | Status | Effort | Notes |
| --------------------------------------------------------------------- | --------------------- | -------- | ------ | ------------------------- |
| [ADR-034](./0034-repository-pattern-standards.md) | Repository Pattern | Accepted | - | Fully implemented |
| [ADR-035](./0035-service-layer-architecture.md) | Service Layer | Accepted | - | Fully implemented |
| [ADR-036](./0036-event-bus-and-pub-sub-pattern.md) | Event Bus | Accepted | - | Fully implemented |
| [ADR-039](./0039-dependency-injection-pattern.md) | Dependency Injection | Accepted | - | Fully implemented |
| [ADR-041](./0041-ai-gemini-integration-architecture.md) | AI/Gemini Integration | Accepted | - | Fully implemented |
| [ADR-042](./0042-email-and-notification-architecture.md) | Email & Notifications | Accepted | - | Fully implemented |
| [ADR-043](./0043-express-middleware-pipeline.md) | Middleware Pipeline | Accepted | - | Fully implemented |
| [ADR-046](./0046-image-processing-pipeline.md) | Image Processing | Accepted | - | Fully implemented |
| [ADR-049](./0049-gamification-and-achievement-system.md) | Gamification System | Accepted | - | Fully implemented |
| [ADR-055](./0055-database-normalization-and-referential-integrity.md) | DB Normalization | Accepted | M | API uses IDs, not strings |
---
## Work Still To Be Completed (Priority Order)
These ADRs are proposed but not yet implemented, ordered by suggested implementation priority:
These ADRs are proposed or partially implemented, ordered by suggested implementation priority:
| Priority | ADR | Title | Effort | Rationale |
| -------- | ------- | --------------------------- | ------ | ------------------------------------------------- |
| 1 | ADR-015 | APM & Error Tracking | M | Production visibility, debugging |
| 1b | ADR-050 | PostgreSQL Fn Observability | M | Database function visibility (depends on ADR-015) |
| 2 | ADR-024 | Feature Flags | M | Safer deployments, A/B testing |
| 3 | ADR-023 | Schema Migrations v2 | L | Database evolution support |
| 4 | ADR-029 | Secret Rotation | L | Security improvement |
| 5 | ADR-008 | API Versioning | L | Future API evolution |
| 6 | ADR-030 | Circuit Breaker | L | Resilience improvement |
| 7 | ADR-022 | Real-time Notifications | XL | Major feature enhancement |
| 8 | ADR-011 | Authorization & RBAC | XL | Advanced permission system |
| 9 | ADR-025 | i18n & l10n | XL | Multi-language support |
| 10 | ADR-031 | Data Retention & Privacy | XL | Compliance requirements |
| Priority | ADR | Title | Status | Effort | Rationale |
| -------- | ------- | ------------------------ | -------- | ------ | ------------------------------------ |
| 1 | ADR-054 | Bugsink-Gitea Sync | Proposed | L | Automated issue tracking from errors |
| 2 | ADR-023 | Schema Migrations v2 | Proposed | L | Database evolution support |
| 3 | ADR-029 | Secret Rotation | Proposed | L | Security improvement |
| 4 | ADR-030 | Circuit Breaker | Proposed | L | Resilience improvement |
| 5 | ADR-056 | APM (Performance) | Proposed | M | Enable when performance issues arise |
| 6 | ADR-011 | Authorization & RBAC | Proposed | XL | Advanced permission system |
| 7 | ADR-025 | i18n & l10n | Proposed | XL | Multi-language support |
| 8 | ADR-031 | Data Retention & Privacy | Proposed | XL | Compliance requirements |
---
## Recent Implementation History
| Date | ADR | Change |
| ---------- | ------- | ---------------------------------------------------------------------- |
| 2026-01-11 | ADR-050 | Created - PostgreSQL function observability with fn_log() and Logstash |
| 2026-01-11 | ADR-018 | Implemented - OpenAPI/Swagger documentation at /docs/api-docs |
| 2026-01-11 | ADR-049 | Created - Gamification system, achievements, and testing requirements |
| 2026-01-09 | ADR-047 | Created - Project file/folder organization with migration plan |
| 2026-01-09 | ADR-041 | Created - AI/Gemini integration with model fallback and rate limiting |
| 2026-01-09 | ADR-042 | Created - Email and notification architecture with BullMQ queuing |
| 2026-01-09 | ADR-043 | Created - Express middleware pipeline ordering and patterns |
| 2026-01-09 | ADR-044 | Created - Frontend feature-based folder organization |
| 2026-01-09 | ADR-045 | Created - Test data factory pattern for mock generation |
| 2026-01-09 | ADR-046 | Created - Image processing pipeline with Sharp and EXIF stripping |
| 2026-01-09 | ADR-026 | Fully implemented - client-side structured logger |
| 2026-01-09 | ADR-028 | Fully implemented - all routes, middleware, and tests updated |
| Date | ADR | Change |
| ---------- | ------- | ----------------------------------------------------------------------------------- |
| 2026-01-28 | ADR-024 | Fully implemented - Backend/frontend feature flags, 89 tests, admin endpoint |
| 2026-01-28 | ADR-057 | Created - Test remediation documentation for ADR-008 Phase 2 migration |
| 2026-01-28 | ADR-013 | Marked as Superseded by ADR-023 |
| 2026-01-27 | ADR-008 | Test path migration complete - 23 files, ~70 paths updated, 274->345 tests passing |
| 2026-01-27 | ADR-008 | Phase 2 Complete - Version router factory, deprecation headers, 82 versioning tests |
| 2026-01-26 | ADR-015 | Completed - Added Sentry user context in AuthProvider, now fully implemented |
| 2026-01-26 | ADR-056 | Created - APM split from ADR-015, status Proposed (tracesSampleRate=0) |
| 2026-01-26 | ADR-015 | Refactored to focus on error tracking only, temporarily status Partial |
| 2026-01-26 | ADR-048 | Verified as fully implemented - JWT + OAuth authentication complete |
| 2026-01-26 | ADR-022 | Verified as fully implemented - WebSocket notifications complete |
| 2026-01-26 | ADR-052 | Marked as fully implemented - createScopedLogger complete |
| 2026-01-26 | ADR-053 | Marked as fully implemented - /health/queues endpoint complete |
| 2026-01-26 | ADR-050 | Marked as fully implemented - PostgreSQL function observability |
| 2026-01-26 | ADR-055 | Created (renumbered from duplicate ADR-023) - DB normalization |
| 2026-01-26 | ADR-054 | Added to tracker - Bugsink to Gitea issue synchronization |
| 2026-01-26 | ADR-053 | Added to tracker - Worker health checks and monitoring |
| 2026-01-26 | ADR-052 | Added to tracker - Granular debug logging strategy |
| 2026-01-26 | ADR-051 | Added to tracker - Asynchronous context propagation |
| 2026-01-26 | ADR-048 | Added to tracker - Authentication strategy |
| 2026-01-26 | ADR-040 | Added to tracker - Testing economics and priorities |
| 2026-01-17 | ADR-054 | Created - Bugsink-Gitea sync worker proposal |
| 2026-01-11 | ADR-050 | Created - PostgreSQL function observability with fn_log() |
| 2026-01-11 | ADR-018 | Implemented - OpenAPI/Swagger documentation at /docs/api-docs |
| 2026-01-11 | ADR-049 | Created - Gamification system, achievements, and testing |
| 2026-01-09 | ADR-047 | Created - Project file/folder organization with migration plan |
| 2026-01-09 | ADR-041 | Created - AI/Gemini integration with model fallback |
| 2026-01-09 | ADR-042 | Created - Email and notification architecture with BullMQ |
| 2026-01-09 | ADR-043 | Created - Express middleware pipeline ordering and patterns |
| 2026-01-09 | ADR-044 | Created - Frontend feature-based folder organization |
| 2026-01-09 | ADR-045 | Created - Test data factory pattern for mock generation |
| 2026-01-09 | ADR-046 | Created - Image processing pipeline with Sharp and EXIF stripping |
| 2026-01-09 | ADR-026 | Fully implemented - client-side structured logger |
| 2026-01-09 | ADR-028 | Fully implemented - all routes, middleware, and tests updated |
---

View File

@@ -2,6 +2,8 @@
This directory contains a log of the architectural decisions made for the Flyer Crawler project.
**[Implementation Tracker](./adr-implementation-tracker.md)**: Track implementation status and effort estimates for all ADRs.
## 1. Foundational / Core Infrastructure
**[ADR-002](./0002-standardized-transaction-management.md)**: Standardized Transaction Management and Unit of Work Pattern (Accepted)
@@ -12,7 +14,7 @@ This directory contains a log of the architectural decisions made for the Flyer
## 2. Data Management
**[ADR-009](./0009-caching-strategy-for-read-heavy-operations.md)**: Caching Strategy for Read-Heavy Operations (Accepted)
**[ADR-013](./0013-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Proposed)
**[ADR-013](./0013-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Superseded by ADR-023)
**[ADR-019](./0019-data-backup-and-recovery-strategy.md)**: Data Backup and Recovery Strategy (Accepted)
**[ADR-023](./0023-database-schema-migration-strategy.md)**: Database Schema Migration Strategy (Proposed)
**[ADR-031](./0031-data-retention-and-privacy-compliance.md)**: Data Retention and Privacy Compliance (Proposed)
@@ -20,9 +22,9 @@ This directory contains a log of the architectural decisions made for the Flyer
## 3. API & Integration
**[ADR-003](./0003-standardized-input-validation-using-middleware.md)**: Standardized Input Validation using Middleware (Accepted)
**[ADR-008](./0008-api-versioning-strategy.md)**: API Versioning Strategy (Proposed)
**[ADR-018](./0018-api-documentation-strategy.md)**: API Documentation Strategy (Proposed)
**[ADR-022](./0022-real-time-notification-system.md)**: Real-time Notification System (Proposed)
**[ADR-008](./0008-api-versioning-strategy.md)**: API Versioning Strategy (Accepted - Phase 2 Complete)
**[ADR-018](./0018-api-documentation-strategy.md)**: API Documentation Strategy (Accepted)
**[ADR-022](./0022-real-time-notification-system.md)**: Real-time Notification System (Accepted)
**[ADR-028](./0028-api-response-standardization.md)**: API Response Standardization and Envelope Pattern (Implemented)
## 4. Security & Compliance
@@ -33,12 +35,16 @@ This directory contains a log of the architectural decisions made for the Flyer
**[ADR-029](./0029-secret-rotation-and-key-management.md)**: Secret Rotation and Key Management Strategy (Proposed)
**[ADR-032](./0032-rate-limiting-strategy.md)**: Rate Limiting Strategy (Accepted)
**[ADR-033](./0033-file-upload-and-storage-strategy.md)**: File Upload and Storage Strategy (Accepted)
**[ADR-048](./0048-authentication-strategy.md)**: Authentication Strategy (Partially Implemented)
**[ADR-048](./0048-authentication-strategy.md)**: Authentication Strategy (Accepted)
## 5. Observability & Monitoring
**[ADR-004](./0004-standardized-application-wide-structured-logging.md)**: Standardized Application-Wide Structured Logging (Accepted)
**[ADR-015](./0015-application-performance-monitoring-and-error-tracking.md)**: Application Performance Monitoring (APM) and Error Tracking (Proposed)
**[ADR-015](./0015-error-tracking-and-observability.md)**: Error Tracking and Observability (Accepted)
**[ADR-050](./0050-postgresql-function-observability.md)**: PostgreSQL Function Observability (Accepted)
**[ADR-051](./0051-asynchronous-context-propagation.md)**: Asynchronous Context Propagation (Accepted)
**[ADR-052](./0052-granular-debug-logging-strategy.md)**: Granular Debug Logging Strategy (Accepted)
**[ADR-056](./0056-application-performance-monitoring.md)**: Application Performance Monitoring (Proposed)
## 6. Deployment & Operations
@@ -48,13 +54,15 @@ This directory contains a log of the architectural decisions made for the Flyer
**[ADR-024](./0024-feature-flagging-strategy.md)**: Feature Flagging Strategy (Proposed)
**[ADR-037](./0037-scheduled-jobs-and-cron-pattern.md)**: Scheduled Jobs and Cron Pattern (Accepted)
**[ADR-038](./0038-graceful-shutdown-pattern.md)**: Graceful Shutdown Pattern (Accepted)
**[ADR-053](./0053-worker-health-checks.md)**: Worker Health Checks and Stalled Job Monitoring (Accepted)
**[ADR-054](./0054-bugsink-gitea-issue-sync.md)**: Bugsink to Gitea Issue Synchronization (Proposed)
## 7. Frontend / User Interface
**[ADR-005](./0005-frontend-state-management-and-server-cache-strategy.md)**: Frontend State Management and Server Cache Strategy (Accepted)
**[ADR-012](./0012-frontend-component-library-and-design-system.md)**: Frontend Component Library and Design System (Partially Implemented)
**[ADR-025](./0025-internationalization-and-localization-strategy.md)**: Internationalization (i18n) and Localization (l10n) Strategy (Proposed)
**[ADR-026](./0026-standardized-client-side-structured-logging.md)**: Standardized Client-Side Structured Logging (Proposed)
**[ADR-026](./0026-standardized-client-side-structured-logging.md)**: Standardized Client-Side Structured Logging (Accepted)
**[ADR-044](./0044-frontend-feature-organization.md)**: Frontend Feature Organization Pattern (Accepted)
## 8. Development Workflow & Quality
@@ -65,6 +73,7 @@ This directory contains a log of the architectural decisions made for the Flyer
**[ADR-040](./0040-testing-economics-and-priorities.md)**: Testing Economics and Priorities (Accepted)
**[ADR-045](./0045-test-data-factories-and-fixtures.md)**: Test Data Factories and Fixtures (Accepted)
**[ADR-047](./0047-project-file-and-folder-organization.md)**: Project File and Folder Organization (Proposed)
**[ADR-057](./0057-test-remediation-post-api-versioning.md)**: Test Remediation Post-API Versioning (Accepted)
## 9. Architecture Patterns
@@ -76,3 +85,5 @@ This directory contains a log of the architectural decisions made for the Flyer
**[ADR-042](./0042-email-and-notification-architecture.md)**: Email and Notification Architecture (Accepted)
**[ADR-043](./0043-express-middleware-pipeline.md)**: Express Middleware Pipeline Architecture (Accepted)
**[ADR-046](./0046-image-processing-pipeline.md)**: Image Processing Pipeline (Accepted)
**[ADR-049](./0049-gamification-and-achievement-system.md)**: Gamification and Achievement System (Accepted)
**[ADR-055](./0055-database-normalization-and-referential-integrity.md)**: Database Normalization and Referential Integrity (Accepted)

View File

@@ -1,10 +1,168 @@
# Database Setup
# Database Architecture
Flyer Crawler uses PostgreSQL with several extensions for full-text search, geographic data, and UUID generation.
**Version**: 0.12.20
**Last Updated**: 2026-01-28
Flyer Crawler uses PostgreSQL 16 with PostGIS for geographic data, pg_trgm for fuzzy text search, and uuid-ossp for UUID generation. The database contains 65 tables organized into logical domains.
## Table of Contents
1. [Schema Overview](#schema-overview)
2. [Database Setup](#database-setup)
3. [Schema Reference](#schema-reference)
4. [Related Documentation](#related-documentation)
---
## Required Extensions
## Schema Overview
The database is organized into the following domains:
### Core Infrastructure (6 tables)
| Table | Purpose | Primary Key |
| ----------------------- | ----------------------------------------- | ----------------- |
| `users` | Authentication credentials and login data | `user_id` (UUID) |
| `profiles` | Public user data, preferences, points | `user_id` (UUID) |
| `addresses` | Normalized address storage with geocoding | `address_id` |
| `activity_log` | User activity audit trail | `activity_log_id` |
| `password_reset_tokens` | Temporary tokens for password reset | `token_id` |
| `schema_info` | Schema deployment metadata | `environment` |
### Stores and Locations (4 tables)
| Table | Purpose | Primary Key |
| ------------------------ | --------------------------------------- | ------------------- |
| `stores` | Grocery store chains (Safeway, Kroger) | `store_id` |
| `store_locations` | Physical store locations with addresses | `store_location_id` |
| `favorite_stores` | User store favorites | `user_id, store_id` |
| `store_receipt_patterns` | Receipt text patterns for store ID | `pattern_id` |
### Flyers and Items (7 tables)
| Table | Purpose | Primary Key |
| ----------------------- | -------------------------------------- | ------------------------ |
| `flyers` | Uploaded flyer metadata and status | `flyer_id` |
| `flyer_items` | Individual deals extracted from flyers | `flyer_item_id` |
| `flyer_locations` | Flyer-to-location associations | `flyer_location_id` |
| `categories` | Item categorization (Produce, Dairy) | `category_id` |
| `master_grocery_items` | Canonical grocery item dictionary | `master_grocery_item_id` |
| `master_item_aliases` | Alternative names for master items | `alias_id` |
| `unmatched_flyer_items` | Items pending master item matching | `unmatched_item_id` |
### Products and Brands (2 tables)
| Table | Purpose | Primary Key |
| ---------- | ---------------------------------------------- | ------------ |
| `brands` | Brand names (Coca-Cola, Kraft) | `brand_id` |
| `products` | Specific products (master item + brand + size) | `product_id` |
### Price Tracking (3 tables)
| Table | Purpose | Primary Key |
| ----------------------- | ---------------------------------- | ------------------ |
| `item_price_history` | Historical prices for master items | `price_history_id` |
| `user_submitted_prices` | User-contributed price reports | `submission_id` |
| `suggested_corrections` | Suggested edits to flyer items | `correction_id` |
### User Features (8 tables)
| Table | Purpose | Primary Key |
| -------------------- | ------------------------------------ | --------------------------- |
| `user_watched_items` | Items user wants to track prices for | `user_watched_item_id` |
| `user_alerts` | Price alert thresholds | `alert_id` |
| `notifications` | User notifications | `notification_id` |
| `user_item_aliases` | User-defined item name aliases | `alias_id` |
| `user_follows` | User-to-user follow relationships | `follower_id, following_id` |
| `user_reactions` | Reactions to content (likes, etc.) | `reaction_id` |
| `budgets` | User-defined spending budgets | `budget_id` |
| `search_queries` | Search history for analytics | `query_id` |
### Shopping Lists (4 tables)
| Table | Purpose | Primary Key |
| ----------------------- | ------------------------ | ------------------------- |
| `shopping_lists` | User shopping lists | `shopping_list_id` |
| `shopping_list_items` | Items on shopping lists | `shopping_list_item_id` |
| `shared_shopping_lists` | Shopping list sharing | `shared_shopping_list_id` |
| `shopping_trips` | Completed shopping trips | `trip_id` |
| `shopping_trip_items` | Items purchased on trips | `trip_item_id` |
### Recipes (11 tables)
| Table | Purpose | Primary Key |
| --------------------------------- | -------------------------------- | ------------------------- |
| `recipes` | User recipes with metadata | `recipe_id` |
| `recipe_ingredients` | Recipe ingredient list | `recipe_ingredient_id` |
| `recipe_ingredient_substitutions` | Ingredient alternatives | `substitution_id` |
| `tags` | Recipe tags (vegan, quick, etc.) | `tag_id` |
| `recipe_tags` | Recipe-to-tag associations | `recipe_id, tag_id` |
| `appliances` | Kitchen appliances | `appliance_id` |
| `recipe_appliances` | Appliances needed for recipes | `recipe_id, appliance_id` |
| `recipe_ratings` | User ratings for recipes | `rating_id` |
| `recipe_comments` | User comments on recipes | `comment_id` |
| `favorite_recipes` | User recipe favorites | `user_id, recipe_id` |
| `recipe_collections` | User recipe collections | `collection_id` |
### Meal Planning (3 tables)
| Table | Purpose | Primary Key |
| ------------------- | -------------------------- | ----------------- |
| `menu_plans` | Weekly/monthly meal plans | `menu_plan_id` |
| `shared_menu_plans` | Menu plan sharing | `share_id` |
| `planned_meals` | Individual meals in a plan | `planned_meal_id` |
### Pantry and Inventory (4 tables)
| Table | Purpose | Primary Key |
| -------------------- | ------------------------------------ | ----------------- |
| `pantry_items` | User pantry inventory | `pantry_item_id` |
| `pantry_locations` | Storage locations (fridge, freezer) | `location_id` |
| `expiry_date_ranges` | Reference shelf life data | `expiry_range_id` |
| `expiry_alerts` | User expiry notification preferences | `expiry_alert_id` |
| `expiry_alert_log` | Sent expiry notifications | `alert_log_id` |
### Receipts (4 tables)
| Table | Purpose | Primary Key |
| ------------------------ | ----------------------------- | ----------------- |
| `receipts` | Scanned receipt metadata | `receipt_id` |
| `receipt_items` | Items parsed from receipts | `receipt_item_id` |
| `receipt_processing_log` | OCR/AI processing audit trail | `log_id` |
### UPC Scanning (2 tables)
| Table | Purpose | Primary Key |
| ---------------------- | ------------------------------- | ----------- |
| `upc_scan_history` | User barcode scan history | `scan_id` |
| `upc_external_lookups` | External UPC API response cache | `lookup_id` |
### Gamification (2 tables)
| Table | Purpose | Primary Key |
| ------------------- | ---------------------------- | ------------------------- |
| `achievements` | Defined achievements | `achievement_id` |
| `user_achievements` | Achievements earned by users | `user_id, achievement_id` |
### User Preferences (3 tables)
| Table | Purpose | Primary Key |
| --------------------------- | ---------------------------- | ------------------------- |
| `dietary_restrictions` | Defined dietary restrictions | `restriction_id` |
| `user_dietary_restrictions` | User dietary preferences | `user_id, restriction_id` |
| `user_appliances` | Appliances user owns | `user_id, appliance_id` |
### Reference Data (1 table)
| Table | Purpose | Primary Key |
| ------------------ | ----------------------- | --------------- |
| `unit_conversions` | Unit conversion factors | `conversion_id` |
---
## Database Setup
### Required Extensions
| Extension | Purpose |
| ----------- | ------------------------------------------- |
@@ -14,7 +172,7 @@ Flyer Crawler uses PostgreSQL with several extensions for full-text search, geog
---
## Database Users
### Database Users
This project uses **environment-specific database users** to isolate production and test environments:

View File

@@ -1,7 +1,7 @@
# Flyer Crawler - System Architecture Overview
**Version**: 0.12.5
**Last Updated**: 2026-01-22
**Version**: 0.12.20
**Last Updated**: 2026-01-28
**Platform**: Linux (Production and Development)
---
@@ -41,7 +41,7 @@
## System Architecture Diagram
```
```text
+-----------------------------------------------------------------------------------+
| CLIENT LAYER |
+-----------------------------------------------------------------------------------+
@@ -153,10 +153,10 @@
| Component | Technology | Version | Purpose |
| ---------------------- | ---------- | -------- | -------------------------------- |
| **Runtime** | Node.js | 22.x LTS | Server-side JavaScript runtime |
| **Language** | TypeScript | 5.9.x | Type-safe JavaScript superset |
| **Web Framework** | Express.js | 5.1.x | HTTP server and routing |
| **Frontend Framework** | React | 19.2.x | UI component library |
| **Build Tool** | Vite | 7.2.x | Frontend bundling and dev server |
| **Language** | TypeScript | 5.9.3 | Type-safe JavaScript superset |
| **Web Framework** | Express.js | 5.1.0 | HTTP server and routing |
| **Frontend Framework** | React | 19.2.0 | UI component library |
| **Build Tool** | Vite | 7.2.4 | Frontend bundling and dev server |
### Data Storage
@@ -176,23 +176,23 @@
| **OAuth** | Google, GitHub | Social authentication |
| **Email** | Nodemailer (SMTP) | Transactional emails |
### Background Processing
### Background Processing Stack
| Component | Technology | Version | Purpose |
| ------------------- | ---------- | ------- | --------------------------------- |
| **Job Queues** | BullMQ | 5.65.x | Reliable async job processing |
| **Job Queues** | BullMQ | 5.65.1 | Reliable async job processing |
| **Process Manager** | PM2 | Latest | Process management and clustering |
| **Scheduler** | node-cron | 4.2.x | Scheduled tasks |
| **Scheduler** | node-cron | 4.2.1 | Scheduled tasks |
### Frontend Stack
| Component | Technology | Version | Purpose |
| -------------------- | -------------- | ------- | ---------------------------------------- |
| **State Management** | TanStack Query | 5.90.x | Server state caching and synchronization |
| **Routing** | React Router | 7.9.x | Client-side routing |
| **Styling** | Tailwind CSS | 4.1.x | Utility-first CSS framework |
| **Icons** | Lucide React | 0.555.x | Icon components |
| **Charts** | Recharts | 3.4.x | Data visualization |
| **State Management** | TanStack Query | 5.90.12 | Server state caching and synchronization |
| **Routing** | React Router | 7.9.6 | Client-side routing |
| **Styling** | Tailwind CSS | 4.1.17 | Utility-first CSS framework |
| **Icons** | Lucide React | 0.555.0 | Icon components |
| **Charts** | Recharts | 3.4.1 | Data visualization |
### Observability and Quality
@@ -221,7 +221,7 @@ The frontend is a single-page application (SPA) built with React 19 and Vite.
**Directory Structure**:
```
```text
src/
+-- components/ # Reusable UI components
+-- contexts/ # React context providers
@@ -244,17 +244,30 @@ The backend is a RESTful API server built with Express.js 5.
- Structured logging with Pino
- Standardized error handling (ADR-001)
**API Route Modules**:
| Route | Purpose |
|-------|---------|
| `/api/auth` | Authentication (login, register, OAuth) |
| `/api/users` | User profile management |
| `/api/flyers` | Flyer CRUD and processing |
| `/api/recipes` | Recipe management |
| `/api/deals` | Best prices and deal discovery |
| `/api/stores` | Store management |
| `/api/admin` | Administrative functions |
| `/api/health` | Health checks and monitoring |
**API Route Modules** (all versioned under `/api/v1/*`):
| Route | Purpose |
| ------------------------- | ----------------------------------------------- |
| `/api/v1/auth` | Authentication (login, register, OAuth) |
| `/api/v1/health` | Health checks and monitoring |
| `/api/v1/system` | System administration (PM2 status, server info) |
| `/api/v1/users` | User profile management |
| `/api/v1/ai` | AI-powered features and flyer processing |
| `/api/v1/admin` | Administrative functions |
| `/api/v1/budgets` | Budget management and spending analysis |
| `/api/v1/achievements` | Gamification and achievement system |
| `/api/v1/flyers` | Flyer CRUD and processing |
| `/api/v1/recipes` | Recipe management and recommendations |
| `/api/v1/personalization` | Master items and user preferences |
| `/api/v1/price-history` | Price tracking and trend analysis |
| `/api/v1/stats` | Public statistics and analytics |
| `/api/v1/upc` | UPC barcode scanning and product lookup |
| `/api/v1/inventory` | Inventory and expiry tracking |
| `/api/v1/receipts` | Receipt scanning and purchase history |
| `/api/v1/deals` | Best prices and deal discovery |
| `/api/v1/reactions` | Social features (reactions, sharing) |
| `/api/v1/stores` | Store management and location services |
| `/api/v1/categories` | Category browsing and product categorization |
### Database (PostgreSQL/PostGIS)
@@ -293,7 +306,23 @@ Google Gemini powers the AI extraction capabilities.
### Background Workers (BullMQ/PM2)
BullMQ workers handle asynchronous processing tasks.
BullMQ workers handle asynchronous processing tasks. PM2 manages both the API server and worker processes in production and development environments (ADR-014).
**Dev Container PM2 Processes**:
| Process | Script | Purpose |
| -------------------------- | ---------------------------------- | -------------------------- |
| `flyer-crawler-api-dev` | `tsx watch server.ts` | API server with hot reload |
| `flyer-crawler-worker-dev` | `tsx watch src/services/worker.ts` | Background job processing |
| `flyer-crawler-vite-dev` | `vite --host` | Frontend dev server |
**Production PM2 Processes**:
| Process | Script | Purpose |
| -------------------------------- | ------------------ | --------------------------- |
| `flyer-crawler-api` | `tsx server.ts` | API server (cluster mode) |
| `flyer-crawler-worker` | `tsx worker.ts` | Background job processing |
| `flyer-crawler-analytics-worker` | `tsx analytics.ts` | Analytics report generation |
**Job Queues**:
@@ -315,7 +344,7 @@ BullMQ workers handle asynchronous processing tasks.
### Flyer Processing Pipeline
```
```text
+-------------+ +----------------+ +------------------+ +---------------+
| User | | Express | | BullMQ | | PostgreSQL |
| Upload +---->+ Route +---->+ Queue +---->+ Storage |
@@ -379,7 +408,7 @@ BullMQ workers handle asynchronous processing tasks.
The application follows a strict layered architecture as defined in ADR-035.
```
```text
+-----------------------------------------------------------------------+
| ROUTES LAYER |
| Responsibilities: |
@@ -442,7 +471,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### Entity Relationship Overview
```
```text
+------------------+ +------------------+ +------------------+
| users | | profiles | | addresses |
|------------------| |------------------| |------------------|
@@ -521,7 +550,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### JWT Token Architecture
```
```text
+-------------------+ +-------------------+ +-------------------+
| Login Request | | Server | | Database |
| (email/pass) +---->+ Validates +---->+ Verify User |
@@ -560,7 +589,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### Protected Route Flow
```
```text
+-------------------+ +-------------------+ +-------------------+
| API Request | | requireAuth | | JWT Strategy |
| + Bearer Token +---->+ Middleware +---->+ Validate |
@@ -587,7 +616,7 @@ The application follows a strict layered architecture as defined in ADR-035.
### Worker Architecture
```
```text
+-------------------+ +-------------------+ +-------------------+
| API Server | | Redis | | Worker Process |
| (Queue Producer)| | (Job Storage) | | (Consumer) |
@@ -619,7 +648,7 @@ The application follows a strict layered architecture as defined in ADR-035.
Jobs use exponential backoff for retries:
```
```text
Attempt 1: Immediate
Attempt 2: Initial delay (e.g., 5 seconds)
Attempt 3: 2x delay (e.g., 10 seconds)
@@ -642,7 +671,7 @@ Attempt 4: 4x delay (e.g., 20 seconds)
### Environment Overview
```
```text
+-----------------------------------------------------------------------------------+
| DEVELOPMENT |
+-----------------------------------------------------------------------------------+
@@ -651,9 +680,11 @@ Attempt 4: 4x delay (e.g., 20 seconds)
| | Windows Host Machine | | Linux Dev Container | |
| | - VS Code | | (flyer-crawler-dev) | |
| | - Podman Desktop +---->+ - Node.js 22 | |
| | - Git | | - PostgreSQL 16 | |
| +-----------------------------------+ | - Redis 7 | |
| | - Git | | - PM2 (process manager) | |
| +-----------------------------------+ | - PostgreSQL 16 | |
| | - Redis 7 | |
| | - Bugsink (local) | |
| | - Logstash (log aggregation) | |
| +-----------------------------------+ |
+-----------------------------------------------------------------------------------+
@@ -692,7 +723,7 @@ Attempt 4: 4x delay (e.g., 20 seconds)
### Deployment Pipeline (ADR-017)
```
```text
+------------+ +------------+ +------------+ +------------+
| Push to | | Gitea | | Build & | | Deploy |
| main +---->+ Actions +---->+ Test +---->+ to Prod |
@@ -744,11 +775,14 @@ The system architecture is governed by Architecture Decision Records (ADRs). Key
### API and Integration
| ADR | Title | Status |
| ------- | ----------------------------- | ----------- |
| ADR-003 | Standardized Input Validation | Accepted |
| ADR-022 | Real-time Notification System | Proposed |
| ADR-028 | API Response Standardization | Implemented |
| ADR | Title | Status |
| ------- | ----------------------------- | ---------------- |
| ADR-003 | Standardized Input Validation | Accepted |
| ADR-008 | API Versioning Strategy | Phase 1 Complete |
| ADR-022 | Real-time Notification System | Proposed |
| ADR-028 | API Response Standardization | Implemented |
**Implementation Guide**: [API Versioning Infrastructure](./api-versioning-infrastructure.md) (Phase 2)
### Security
@@ -818,22 +852,55 @@ The system architecture is governed by Architecture Decision Records (ADRs). Key
| File | Purpose |
| ----------------------------------------------- | --------------------------------------- |
| `src/services/flyerProcessingService.server.ts` | Flyer processing pipeline orchestration |
| `src/services/flyerAiProcessor.server.ts` | AI extraction for flyers |
| `src/services/aiService.server.ts` | Google Gemini AI integration |
| `src/services/cacheService.server.ts` | Redis caching abstraction |
| `src/services/emailService.server.ts` | Email sending |
| `src/services/queues.server.ts` | BullMQ queue definitions |
| `src/services/queueService.server.ts` | Queue management and scheduling |
| `src/services/workers.server.ts` | BullMQ worker definitions |
| `src/services/websocketService.server.ts` | Real-time WebSocket notifications |
| `src/services/receiptService.server.ts` | Receipt scanning and OCR |
| `src/services/upcService.server.ts` | UPC barcode lookup |
| `src/services/expiryService.server.ts` | Pantry expiry tracking |
| `src/services/geocodingService.server.ts` | Address geocoding |
| `src/services/analyticsService.server.ts` | Analytics and reporting |
| `src/services/monitoringService.server.ts` | Health monitoring |
| `src/services/barcodeService.server.ts` | Barcode detection |
| `src/services/logger.server.ts` | Structured logging (Pino) |
| `src/services/redis.server.ts` | Redis connection management |
| `src/services/sentry.server.ts` | Error tracking (Sentry/Bugsink) |
### Database Files
| File | Purpose |
| ---------------------------------- | -------------------------------------------- |
| `src/services/db/connection.db.ts` | Database pool and transaction management |
| `src/services/db/errors.db.ts` | Database error types |
| `src/services/db/user.db.ts` | User repository |
| `src/services/db/flyer.db.ts` | Flyer repository |
| `sql/master_schema_rollup.sql` | Complete database schema (for test DB setup) |
| `sql/initial_schema.sql` | Fresh installation schema |
| File | Purpose |
| --------------------------------------- | -------------------------------------------- |
| `src/services/db/connection.db.ts` | Database pool and transaction management |
| `src/services/db/errors.db.ts` | Database error types |
| `src/services/db/index.db.ts` | Repository exports |
| `src/services/db/user.db.ts` | User repository |
| `src/services/db/flyer.db.ts` | Flyer repository |
| `src/services/db/store.db.ts` | Store repository |
| `src/services/db/storeLocation.db.ts` | Store location repository |
| `src/services/db/recipe.db.ts` | Recipe repository |
| `src/services/db/category.db.ts` | Category repository |
| `src/services/db/personalization.db.ts` | Master items and personalization |
| `src/services/db/shopping.db.ts` | Shopping lists repository |
| `src/services/db/deals.db.ts` | Deals and best prices repository |
| `src/services/db/price.db.ts` | Price history repository |
| `src/services/db/receipt.db.ts` | Receipt repository |
| `src/services/db/upc.db.ts` | UPC scan history repository |
| `src/services/db/expiry.db.ts` | Expiry tracking repository |
| `src/services/db/gamification.db.ts` | Achievements repository |
| `src/services/db/budget.db.ts` | Budget repository |
| `src/services/db/reaction.db.ts` | User reactions repository |
| `src/services/db/notification.db.ts` | Notifications repository |
| `src/services/db/address.db.ts` | Address repository |
| `src/services/db/admin.db.ts` | Admin operations repository |
| `src/services/db/conversion.db.ts` | Unit conversion repository |
| `src/services/db/flyerLocation.db.ts` | Flyer locations repository |
| `sql/master_schema_rollup.sql` | Complete database schema (for test DB setup) |
| `sql/initial_schema.sql` | Fresh installation schema |
### Type Definitions

View File

@@ -0,0 +1,521 @@
# API Versioning Infrastructure (ADR-008 Phase 2)
**Status**: Complete
**Date**: 2026-01-27
**Prerequisite**: ADR-008 Phase 1 Complete (all routes at `/api/v1/*`)
## Implementation Summary
Phase 2 has been fully implemented with the following results:
| Metric | Value |
| ------------------ | -------------------------------------- |
| New Files Created | 5 |
| Files Modified | 2 (server.ts, express.d.ts) |
| Unit Tests | 82 passing (100%) |
| Integration Tests | 48 new versioning tests |
| RFC Compliance | RFC 8594 (Sunset), RFC 8288 (Link) |
| Supported Versions | v1 (active), v2 (infrastructure ready) |
**Developer Guide**: [API-VERSIONING.md](../development/API-VERSIONING.md)
## Purpose
Build infrastructure to support parallel API versions, version detection, and deprecation workflows. Enables future v2 API without breaking existing clients.
## Architecture Overview
```text
Request → Version Router → Version Middleware → Domain Router → Handler
↓ ↓
createVersionedRouter() attachVersionInfo()
↓ ↓
/api/v1/* | /api/v2/* req.apiVersion = 'v1'|'v2'
```
## Key Components
| Component | File | Responsibility |
| ---------------------- | ------------------------------------------ | ------------------------------------------ |
| Version Router Factory | `src/routes/versioned.ts` | Create version-specific Express routers |
| Version Middleware | `src/middleware/apiVersion.middleware.ts` | Extract version, attach to request context |
| Deprecation Middleware | `src/middleware/deprecation.middleware.ts` | Add RFC 8594 deprecation headers |
| Version Types | `src/types/express.d.ts` | Extend Express Request with apiVersion |
| Version Constants | `src/config/apiVersions.ts` | Centralized version definitions |
## Implementation Tasks
### Task 1: Version Types (Foundation)
**File**: `src/types/express.d.ts`
```typescript
declare global {
namespace Express {
interface Request {
apiVersion?: 'v1' | 'v2';
versionDeprecated?: boolean;
}
}
}
```
**Dependencies**: None
**Testing**: Type-check only (`npm run type-check`)
---
### Task 2: Version Constants
**File**: `src/config/apiVersions.ts`
```typescript
export const API_VERSIONS = ['v1', 'v2'] as const;
export type ApiVersion = (typeof API_VERSIONS)[number];
export const CURRENT_VERSION: ApiVersion = 'v1';
export const DEFAULT_VERSION: ApiVersion = 'v1';
export interface VersionConfig {
version: ApiVersion;
status: 'active' | 'deprecated' | 'sunset';
sunsetDate?: string; // ISO 8601
successorVersion?: ApiVersion;
}
export const VERSION_CONFIG: Record<ApiVersion, VersionConfig> = {
v1: { version: 'v1', status: 'active' },
v2: { version: 'v2', status: 'active' },
};
```
**Dependencies**: None
**Testing**: Unit test for version validation
---
### Task 3: Version Detection Middleware
**File**: `src/middleware/apiVersion.middleware.ts`
```typescript
import { Request, Response, NextFunction } from 'express';
import { API_VERSIONS, ApiVersion, DEFAULT_VERSION } from '../config/apiVersions';
export function extractApiVersion(req: Request, _res: Response, next: NextFunction) {
// Extract from URL path: /api/v1/... → 'v1'
const pathMatch = req.path.match(/^\/v(\d+)\//);
if (pathMatch) {
const version = `v${pathMatch[1]}` as ApiVersion;
if (API_VERSIONS.includes(version)) {
req.apiVersion = version;
}
}
// Fallback to default if not detected
req.apiVersion = req.apiVersion || DEFAULT_VERSION;
next();
}
```
**Pattern**: Attach to request before route handlers
**Integration Point**: `server.ts` before versioned route mounting
**Testing**: Unit tests for path extraction, default fallback
---
### Task 4: Deprecation Headers Middleware
**File**: `src/middleware/deprecation.middleware.ts`
Implements RFC 8594 (Sunset Header) and draft-ietf-httpapi-deprecation-header.
```typescript
import { Request, Response, NextFunction } from 'express';
import { VERSION_CONFIG, ApiVersion } from '../config/apiVersions';
import { logger } from '../services/logger.server';
export function deprecationHeaders(version: ApiVersion) {
const config = VERSION_CONFIG[version];
return (req: Request, res: Response, next: NextFunction) => {
if (config.status === 'deprecated') {
res.set('Deprecation', 'true');
if (config.sunsetDate) {
res.set('Sunset', config.sunsetDate);
}
if (config.successorVersion) {
res.set('Link', `</api/${config.successorVersion}>; rel="successor-version"`);
}
req.versionDeprecated = true;
// Log deprecation access for monitoring
logger.warn(
{
apiVersion: version,
path: req.path,
method: req.method,
sunsetDate: config.sunsetDate,
},
'Deprecated API version accessed',
);
}
// Always set version header
res.set('X-API-Version', version);
next();
};
}
```
**RFC Compliance**:
- `Deprecation: true` (draft-ietf-httpapi-deprecation-header)
- `Sunset: <date>` (RFC 8594)
- `Link: <url>; rel="successor-version"` (RFC 8288)
**Testing**: Unit tests for header presence, version status variations
---
### Task 5: Version Router Factory
**File**: `src/routes/versioned.ts`
```typescript
import { Router } from 'express';
import { ApiVersion } from '../config/apiVersions';
import { extractApiVersion } from '../middleware/apiVersion.middleware';
import { deprecationHeaders } from '../middleware/deprecation.middleware';
// Import domain routers
import authRouter from './auth.routes';
import userRouter from './user.routes';
import flyerRouter from './flyer.routes';
// ... all domain routers
interface VersionedRouters {
v1: Record<string, Router>;
v2: Record<string, Router>;
}
const ROUTERS: VersionedRouters = {
v1: {
auth: authRouter,
users: userRouter,
flyers: flyerRouter,
// ... all v1 routers (current implementation)
},
v2: {
// Future: v2-specific routers
// auth: authRouterV2,
// For now, fallback to v1
},
};
export function createVersionedRouter(version: ApiVersion): Router {
const router = Router();
// Apply version middleware
router.use(extractApiVersion);
router.use(deprecationHeaders(version));
// Get routers for this version, fallback to v1
const versionRouters = ROUTERS[version] || ROUTERS.v1;
// Mount domain routers
Object.entries(versionRouters).forEach(([path, domainRouter]) => {
router.use(`/${path}`, domainRouter);
});
return router;
}
```
**Pattern**: Factory function returns configured Router
**Fallback Strategy**: v2 uses v1 routers until v2-specific handlers exist
**Testing**: Integration test verifying route mounting
---
### Task 6: Server Integration
**File**: `server.ts` (modifications)
```typescript
// Before (current implementation - Phase 1):
app.use('/api/v1/auth', authRouter);
app.use('/api/v1/users', userRouter);
// ... individual route mounting
// After (Phase 2):
import { createVersionedRouter } from './src/routes/versioned';
// Mount versioned API routers
app.use('/api/v1', createVersionedRouter('v1'));
app.use('/api/v2', createVersionedRouter('v2')); // Placeholder for future
// Keep redirect middleware for unversioned requests
app.use('/api', versionRedirectMiddleware);
```
**Breaking Change Risk**: Low (same routes, different mounting)
**Rollback**: Revert to individual `app.use()` calls
**Testing**: Full integration test suite must pass
---
### Task 7: Request Context Propagation
**Pattern**: Version flows through request lifecycle for conditional logic.
```typescript
// In any route handler or service:
function handler(req: Request, res: Response) {
if (req.apiVersion === 'v2') {
// v2-specific behavior
return sendSuccess(res, transformV2(data));
}
// v1 behavior (default)
return sendSuccess(res, data);
}
```
**Use Cases**:
- Response transformation based on version
- Feature flags per version
- Metric tagging by version
---
### Task 8: Documentation Update
**File**: `src/config/swagger.ts` (modifications)
```typescript
const swaggerDefinition: OpenAPIV3.Document = {
// ...
servers: [
{
url: '/api/v1',
description: 'API v1 (Current)',
},
{
url: '/api/v2',
description: 'API v2 (Future)',
},
],
// ...
};
```
**File**: `docs/adr/0008-api-versioning-strategy.md` (update checklist)
---
### Task 9: Unit Tests
**File**: `src/middleware/apiVersion.middleware.test.ts`
```typescript
describe('extractApiVersion', () => {
it('extracts v1 from /api/v1/users', () => {
/* ... */
});
it('extracts v2 from /api/v2/users', () => {
/* ... */
});
it('defaults to v1 for unversioned paths', () => {
/* ... */
});
it('ignores invalid version numbers', () => {
/* ... */
});
});
```
**File**: `src/middleware/deprecation.middleware.test.ts`
```typescript
describe('deprecationHeaders', () => {
it('adds no headers for active version', () => {
/* ... */
});
it('adds Deprecation header for deprecated version', () => {
/* ... */
});
it('adds Sunset header when sunsetDate configured', () => {
/* ... */
});
it('adds Link header for successor version', () => {
/* ... */
});
it('always sets X-API-Version header', () => {
/* ... */
});
});
```
---
### Task 10: Integration Tests
**File**: `src/routes/versioned.test.ts`
```typescript
describe('Versioned Router Integration', () => {
it('mounts all v1 routes correctly', () => {
/* ... */
});
it('v2 falls back to v1 handlers', () => {
/* ... */
});
it('sets X-API-Version response header', () => {
/* ... */
});
it('deprecation headers appear when configured', () => {
/* ... */
});
});
```
**Run in Container**: `podman exec -it flyer-crawler-dev npm test -- versioned`
## Implementation Sequence
```text
[Task 1] → [Task 2] → [Task 3] → [Task 4] → [Task 5] → [Task 6]
Types Config Middleware Middleware Factory Server
↓ ↓ ↓ ↓
[Task 7] [Task 9] [Task 10] [Task 8]
Context Unit Integ Docs
```
**Critical Path**: 1 → 2 → 3 → 5 → 6 (server integration)
## File Structure After Implementation
```text
src/
├── config/
│ ├── apiVersions.ts # NEW: Version constants
│ └── swagger.ts # MODIFIED: Multi-server
├── middleware/
│ ├── apiVersion.middleware.ts # NEW: Version extraction
│ ├── apiVersion.middleware.test.ts # NEW: Unit tests
│ ├── deprecation.middleware.ts # NEW: RFC 8594 headers
│ └── deprecation.middleware.test.ts # NEW: Unit tests
├── routes/
│ ├── versioned.ts # NEW: Router factory
│ ├── versioned.test.ts # NEW: Integration tests
│ └── *.routes.ts # UNCHANGED: Domain routers
├── types/
│ └── express.d.ts # MODIFIED: Add apiVersion
server.ts # MODIFIED: Use versioned router
```
## Risk Assessment
| Risk | Likelihood | Impact | Mitigation |
| ------------------------------------ | ---------- | ------ | ----------------------------------- |
| Route registration order breaks | Medium | High | Full integration test suite |
| Middleware not applied to all routes | Low | Medium | Factory pattern ensures consistency |
| Performance impact from middleware | Low | Low | Minimal overhead (path regex) |
| Type errors in extended Request | Low | Medium | TypeScript strict mode catches |
## Rollback Procedure
1. Revert `server.ts` to individual route mounting
2. Remove new middleware files (not breaking)
3. Remove version types from `express.d.ts`
4. Run `npm run type-check && npm test` to verify
## Success Criteria
- [x] All existing tests pass (`npm test` in container)
- [x] `X-API-Version: v1` header on all `/api/v1/*` responses
- [x] TypeScript compiles without errors (`npm run type-check`)
- [x] No performance regression (< 5ms added latency)
- [x] Deprecation headers work when v1 marked deprecated (manual test)
## Known Issues and Follow-up Work
### Integration Tests Using Unversioned Paths
**Issue**: Some existing integration tests make requests to unversioned paths (e.g., `/api/flyers` instead of `/api/v1/flyers`). These tests now receive 301 redirects due to the backwards compatibility middleware.
**Impact**: 74 integration tests may need updates to use versioned paths explicitly.
**Workaround Options**:
1. Update test paths to use `/api/v1/*` explicitly (recommended)
2. Configure supertest to follow redirects automatically
3. Accept 301 as valid response in affected tests
**Resolution**: Phase 3 work item - update integration tests to use versioned endpoints consistently.
### Phase 3 Prerequisites
Before marking v1 as deprecated and implementing v2:
1. Update all integration tests to use versioned paths
2. Define breaking changes requiring v2
3. Create v2-specific route handlers where needed
4. Set deprecation timeline for v1
## Related ADRs
| ADR | Relationship |
| ------- | ------------------------------------------------- |
| ADR-008 | Parent decision (this implements Phase 2) |
| ADR-003 | Validation middleware pattern applies per-version |
| ADR-028 | Response format consistent across versions |
| ADR-018 | OpenAPI docs reflect versioned endpoints |
| ADR-043 | Middleware pipeline order considerations |
## Usage Examples
### Checking Version in Handler
```typescript
// src/routes/flyer.routes.ts
router.get('/', async (req, res) => {
const flyers = await flyerRepo.getFlyers(req.log);
// Version-specific response transformation
if (req.apiVersion === 'v2') {
return sendSuccess(res, flyers.map(transformFlyerV2));
}
return sendSuccess(res, flyers);
});
```
### Marking Version as Deprecated
```typescript
// src/config/apiVersions.ts
export const VERSION_CONFIG = {
v1: {
version: 'v1',
status: 'deprecated',
sunsetDate: '2027-01-01T00:00:00Z',
successorVersion: 'v2',
},
v2: { version: 'v2', status: 'active' },
};
```
### Testing Deprecation Headers
```bash
curl -I https://localhost:3001/api/v1/flyers
# When v1 deprecated:
# Deprecation: true
# Sunset: 2027-01-01T00:00:00Z
# Link: </api/v2>; rel="successor-version"
# X-API-Version: v1
```

View File

@@ -0,0 +1,844 @@
# API Versioning Developer Guide
**Status**: Complete (Phase 2)
**Last Updated**: 2026-01-27
**Implements**: ADR-008 Phase 2
**Architecture**: [api-versioning-infrastructure.md](../architecture/api-versioning-infrastructure.md)
This guide covers the API versioning infrastructure for the Flyer Crawler application. It explains how versioning works, how to add new versions, and how to deprecate old ones.
## Implementation Status
| Component | Status | Tests |
| ------------------------------ | -------- | -------------------- |
| Version Constants | Complete | Unit tests |
| Version Detection Middleware | Complete | 25 unit tests |
| Deprecation Headers Middleware | Complete | 30 unit tests |
| Version Router Factory | Complete | Integration tests |
| Server Integration | Complete | 48 integration tests |
| Developer Documentation | Complete | This guide |
**Total Tests**: 82 versioning-specific tests (100% passing)
---
## Table of Contents
1. [Overview](#overview)
2. [Architecture](#architecture)
3. [Key Concepts](#key-concepts)
4. [Developer Workflows](#developer-workflows)
5. [Version Headers](#version-headers)
6. [Testing Versioned Endpoints](#testing-versioned-endpoints)
7. [Migration Guide: v1 to v2](#migration-guide-v1-to-v2)
8. [Troubleshooting](#troubleshooting)
9. [Related Documentation](#related-documentation)
---
## Overview
The API uses URI-based versioning with the format `/api/v{MAJOR}/resource`. All endpoints are accessible at versioned paths like `/api/v1/flyers` or `/api/v2/users`.
### Current Version Status
| Version | Status | Description |
| ------- | ------ | ------------------------------------- |
| v1 | Active | Current production version |
| v2 | Active | Future version (infrastructure ready) |
### Key Features
- **Automatic version detection** from URL path
- **RFC 8594 compliant deprecation headers** when versions are deprecated
- **Backwards compatibility** via 301 redirects from unversioned paths
- **Version-aware request context** for conditional logic in handlers
- **Centralized configuration** for version lifecycle management
---
## Architecture
### Request Flow
```text
Client Request: GET /api/v1/flyers
|
v
+------+-------+
| server.ts |
| - Redirect |
| middleware |
+------+-------+
|
v
+------+-------+
| createApi |
| Router() |
+------+-------+
|
v
+------+-------+
| detectApi |
| Version |
| middleware |
+------+-------+
| req.apiVersion = 'v1'
v
+------+-------+
| Versioned |
| Router |
| (v1) |
+------+-------+
|
v
+------+-------+
| addDepreca |
| tionHeaders |
| middleware |
+------+-------+
| X-API-Version: v1
v
+------+-------+
| Domain |
| Router |
| (flyers) |
+------+-------+
|
v
Response
```
### Component Overview
| Component | File | Purpose |
| ------------------- | ------------------------------------------ | ----------------------------------------------------- |
| Version Constants | `src/config/apiVersions.ts` | Type definitions, version configs, utility functions |
| Version Detection | `src/middleware/apiVersion.middleware.ts` | Extract version from URL, validate, attach to request |
| Deprecation Headers | `src/middleware/deprecation.middleware.ts` | Add RFC 8594 headers for deprecated versions |
| Router Factory | `src/routes/versioned.ts` | Create version-specific Express routers |
| Type Extensions | `src/types/express.d.ts` | Add `apiVersion` and `versionDeprecation` to Request |
---
## Key Concepts
### 1. Version Configuration
All version definitions live in `src/config/apiVersions.ts`:
```typescript
// src/config/apiVersions.ts
// Supported versions as a const tuple
export const API_VERSIONS = ['v1', 'v2'] as const;
// Union type: 'v1' | 'v2'
export type ApiVersion = (typeof API_VERSIONS)[number];
// Version lifecycle status
export type VersionStatus = 'active' | 'deprecated' | 'sunset';
// Configuration for each version
export const VERSION_CONFIGS: Record<ApiVersion, VersionConfig> = {
v1: {
version: 'v1',
status: 'active',
},
v2: {
version: 'v2',
status: 'active',
},
};
```
### 2. Version Detection
The `detectApiVersion` middleware extracts the version from `req.params.version` and validates it:
```typescript
// How it works (src/middleware/apiVersion.middleware.ts)
// For valid versions:
// GET /api/v1/flyers -> req.apiVersion = 'v1'
// For invalid versions:
// GET /api/v99/flyers -> 404 with UNSUPPORTED_VERSION error
```
### 3. Request Context
After middleware runs, the request object has version information:
```typescript
// In any route handler
router.get('/flyers', async (req, res) => {
// Access the detected version
const version = req.apiVersion; // 'v1' | 'v2'
// Check deprecation status
if (req.versionDeprecation?.deprecated) {
req.log.warn(
{
sunset: req.versionDeprecation.sunsetDate,
},
'Client using deprecated API',
);
}
// Version-specific behavior
if (req.apiVersion === 'v2') {
return sendSuccess(res, transformV2(data));
}
return sendSuccess(res, data);
});
```
### 4. Route Registration
Routes are registered in `src/routes/versioned.ts` with version availability:
```typescript
// src/routes/versioned.ts
export const ROUTES: RouteRegistration[] = [
{
path: 'auth',
router: authRouter,
description: 'Authentication routes',
// Available in all versions (no versions array)
},
{
path: 'flyers',
router: flyerRouter,
description: 'Flyer management',
// Available in all versions
},
{
path: 'new-feature',
router: newFeatureRouter,
description: 'New feature only in v2',
versions: ['v2'], // Only available in v2
},
];
```
---
## Developer Workflows
### Adding a New API Version (e.g., v3)
**Step 1**: Add version to constants (`src/config/apiVersions.ts`)
```typescript
// Before
export const API_VERSIONS = ['v1', 'v2'] as const;
// After
export const API_VERSIONS = ['v1', 'v2', 'v3'] as const;
// Add configuration
export const VERSION_CONFIGS: Record<ApiVersion, VersionConfig> = {
v1: { version: 'v1', status: 'active' },
v2: { version: 'v2', status: 'active' },
v3: { version: 'v3', status: 'active' }, // NEW
};
```
**Step 2**: Router cache auto-updates (no changes needed)
The versioned router cache in `src/routes/versioned.ts` automatically creates routers for all versions defined in `API_VERSIONS`.
**Step 3**: Update OpenAPI documentation (`src/config/swagger.ts`)
```typescript
servers: [
{ url: '/api/v1', description: 'API v1' },
{ url: '/api/v2', description: 'API v2' },
{ url: '/api/v3', description: 'API v3 (New)' }, // NEW
],
```
**Step 4**: Test the new version
```bash
# In dev container
podman exec -it flyer-crawler-dev npm test
# Manual verification
curl -i http://localhost:3001/api/v3/health
# Should return 200 with X-API-Version: v3 header
```
### Marking a Version as Deprecated
**Step 1**: Update version config (`src/config/apiVersions.ts`)
```typescript
export const VERSION_CONFIGS: Record<ApiVersion, VersionConfig> = {
v1: {
version: 'v1',
status: 'deprecated', // Changed from 'active'
sunsetDate: '2027-01-01T00:00:00Z', // When it will be removed
successorVersion: 'v2', // Migration target
},
v2: {
version: 'v2',
status: 'active',
},
};
```
**Step 2**: Verify deprecation headers
```bash
curl -I http://localhost:3001/api/v1/health
# Expected headers:
# X-API-Version: v1
# Deprecation: true
# Sunset: 2027-01-01T00:00:00Z
# Link: </api/v2>; rel="successor-version"
# X-API-Deprecation-Notice: API v1 is deprecated and will be sunset...
```
**Step 3**: Monitor deprecation usage
Check logs for `Deprecated API version accessed` messages with context about which clients are still using deprecated versions.
### Adding Version-Specific Routes
**Scenario**: Add a new endpoint only available in v2+
**Step 1**: Create the route handler (new or existing file)
```typescript
// src/routes/newFeature.routes.ts
import { Router } from 'express';
import { sendSuccess } from '../utils/apiResponse';
const router = Router();
router.get('/', async (req, res) => {
// This endpoint only exists in v2+
sendSuccess(res, { feature: 'new-feature-data' });
});
export default router;
```
**Step 2**: Register with version restriction (`src/routes/versioned.ts`)
```typescript
import newFeatureRouter from './newFeature.routes';
export const ROUTES: RouteRegistration[] = [
// ... existing routes ...
{
path: 'new-feature',
router: newFeatureRouter,
description: 'New feature only available in v2+',
versions: ['v2'], // Not available in v1
},
];
```
**Step 3**: Verify route availability
```bash
# v1 - should return 404
curl -i http://localhost:3001/api/v1/new-feature
# HTTP/1.1 404 Not Found
# v2 - should work
curl -i http://localhost:3001/api/v2/new-feature
# HTTP/1.1 200 OK
# X-API-Version: v2
```
### Adding Version-Specific Behavior in Existing Routes
For routes that exist in multiple versions but behave differently:
```typescript
// src/routes/flyer.routes.ts
router.get('/:id', async (req, res) => {
const flyer = await flyerService.getFlyer(req.params.id, req.log);
// Different response format per version
if (req.apiVersion === 'v2') {
// v2 returns expanded store data
return sendSuccess(res, {
...flyer,
store: await storeService.getStore(flyer.store_id, req.log),
});
}
// v1 returns just the flyer
return sendSuccess(res, flyer);
});
```
---
## Version Headers
### Response Headers
All versioned API responses include these headers:
| Header | Always Present | Description |
| -------------------------- | ------------------ | ------------------------------------------------------- |
| `X-API-Version` | Yes | The API version handling the request |
| `Deprecation` | Only if deprecated | `true` when version is deprecated |
| `Sunset` | Only if configured | ISO 8601 date when version will be removed |
| `Link` | Only if configured | URL to successor version with `rel="successor-version"` |
| `X-API-Deprecation-Notice` | Only if deprecated | Human-readable deprecation message |
### Example: Active Version Response
```http
HTTP/1.1 200 OK
X-API-Version: v2
Content-Type: application/json
```
### Example: Deprecated Version Response
```http
HTTP/1.1 200 OK
X-API-Version: v1
Deprecation: true
Sunset: 2027-01-01T00:00:00Z
Link: </api/v2>; rel="successor-version"
X-API-Deprecation-Notice: API v1 is deprecated and will be sunset on 2027-01-01T00:00:00Z. Please migrate to v2.
Content-Type: application/json
```
### RFC Compliance
The deprecation headers follow these standards:
- **RFC 8594**: The "Sunset" HTTP Header Field
- **draft-ietf-httpapi-deprecation-header**: The "Deprecation" HTTP Header Field
- **RFC 8288**: Web Linking (for `rel="successor-version"`)
---
## Testing Versioned Endpoints
### Unit Testing Middleware
See test files for patterns:
- `src/middleware/apiVersion.middleware.test.ts`
- `src/middleware/deprecation.middleware.test.ts`
**Testing version detection**:
```typescript
// src/middleware/apiVersion.middleware.test.ts
import { detectApiVersion } from './apiVersion.middleware';
import { createMockRequest } from '../tests/utils/createMockRequest';
describe('detectApiVersion', () => {
it('should extract v1 from req.params.version', () => {
const mockRequest = createMockRequest({
params: { version: 'v1' },
});
const mockResponse = { status: vi.fn().mockReturnThis(), json: vi.fn() };
const mockNext = vi.fn();
detectApiVersion(mockRequest, mockResponse, mockNext);
expect(mockRequest.apiVersion).toBe('v1');
expect(mockNext).toHaveBeenCalled();
});
it('should return 404 for invalid version', () => {
const mockRequest = createMockRequest({
params: { version: 'v99' },
});
const mockResponse = {
status: vi.fn().mockReturnThis(),
json: vi.fn(),
};
const mockNext = vi.fn();
detectApiVersion(mockRequest, mockResponse, mockNext);
expect(mockNext).not.toHaveBeenCalled();
expect(mockResponse.status).toHaveBeenCalledWith(404);
});
});
```
**Testing deprecation headers**:
```typescript
// src/middleware/deprecation.middleware.test.ts
import { addDeprecationHeaders } from './deprecation.middleware';
import { VERSION_CONFIGS } from '../config/apiVersions';
describe('addDeprecationHeaders', () => {
beforeEach(() => {
// Mark v1 as deprecated for test
VERSION_CONFIGS.v1 = {
version: 'v1',
status: 'deprecated',
sunsetDate: '2027-01-01T00:00:00Z',
successorVersion: 'v2',
};
});
it('should add all deprecation headers', () => {
const setHeader = vi.fn();
const middleware = addDeprecationHeaders('v1');
middleware(mockRequest, { set: setHeader }, mockNext);
expect(setHeader).toHaveBeenCalledWith('Deprecation', 'true');
expect(setHeader).toHaveBeenCalledWith('Sunset', '2027-01-01T00:00:00Z');
expect(setHeader).toHaveBeenCalledWith('Link', '</api/v2>; rel="successor-version"');
});
});
```
### Integration Testing
**Test versioned endpoints**:
```typescript
import request from 'supertest';
import app from '../../server';
describe('API Versioning Integration', () => {
it('should return X-API-Version header for v1', async () => {
const response = await request(app).get('/api/v1/health').expect(200);
expect(response.headers['x-api-version']).toBe('v1');
});
it('should return 404 for unsupported version', async () => {
const response = await request(app).get('/api/v99/health').expect(404);
expect(response.body.error.code).toBe('UNSUPPORTED_VERSION');
});
it('should redirect unversioned paths to v1', async () => {
const response = await request(app).get('/api/health').expect(301);
expect(response.headers.location).toBe('/api/v1/health');
});
});
```
### Running Tests
```bash
# Run all tests in container (required)
podman exec -it flyer-crawler-dev npm test
# Run only middleware tests
podman exec -it flyer-crawler-dev npm test -- apiVersion
podman exec -it flyer-crawler-dev npm test -- deprecation
# Type check
podman exec -it flyer-crawler-dev npm run type-check
```
---
## Migration Guide: v1 to v2
When v2 is introduced with breaking changes, follow this migration process.
### For API Consumers (Frontend/Mobile)
**Step 1**: Check current API version usage
```typescript
// Frontend apiClient.ts
const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || '/api/v1';
```
**Step 2**: Monitor deprecation headers
When v1 is deprecated, responses will include:
```http
Deprecation: true
Sunset: 2027-01-01T00:00:00Z
Link: </api/v2>; rel="successor-version"
```
**Step 3**: Update to v2
```typescript
// Change API base URL
const API_BASE_URL = import.meta.env.VITE_API_BASE_URL || '/api/v2';
```
**Step 4**: Handle response format changes
If v2 changes response formats, update your type definitions and parsing logic:
```typescript
// v1 response
interface FlyerResponseV1 {
id: number;
store_id: number;
}
// v2 response (example: includes embedded store)
interface FlyerResponseV2 {
id: string; // Changed to UUID
store: {
id: string;
name: string;
};
}
```
### For Backend Developers
**Step 1**: Create v2-specific handlers (if needed)
For breaking changes, create version-specific route files:
```text
src/routes/
flyer.routes.ts # Shared/v1 handlers
flyer.v2.routes.ts # v2-specific handlers (if significantly different)
```
**Step 2**: Register version-specific routes
```typescript
// src/routes/versioned.ts
export const ROUTES: RouteRegistration[] = [
{
path: 'flyers',
router: flyerRouter,
description: 'Flyer routes (v1)',
versions: ['v1'],
},
{
path: 'flyers',
router: flyerRouterV2,
description: 'Flyer routes (v2 with breaking changes)',
versions: ['v2'],
},
];
```
**Step 3**: Document changes
Update OpenAPI documentation to reflect v2 changes and mark v1 as deprecated.
### Timeline Example
| Date | Action |
| ---------- | ------------------------------------------ |
| T+0 | v2 released, v1 marked deprecated |
| T+0 | Deprecation headers added to v1 responses |
| T+30 days | Sunset warning emails to known integrators |
| T+90 days | v1 returns 410 Gone |
| T+120 days | v1 code removed |
---
## Troubleshooting
### Issue: "UNSUPPORTED_VERSION" Error
**Symptom**: Request to `/api/v3/...` returns 404 with `UNSUPPORTED_VERSION`
**Cause**: Version `v3` is not defined in `API_VERSIONS`
**Solution**: Add the version to `src/config/apiVersions.ts`:
```typescript
export const API_VERSIONS = ['v1', 'v2', 'v3'] as const;
export const VERSION_CONFIGS = {
// ...
v3: { version: 'v3', status: 'active' },
};
```
### Issue: Missing X-API-Version Header
**Symptom**: Response doesn't include `X-API-Version` header
**Cause**: Request didn't go through versioned router
**Solution**: Ensure the route is registered in `src/routes/versioned.ts` and mounted under `/api/:version`
### Issue: Deprecation Headers Not Appearing
**Symptom**: Deprecated version works but no deprecation headers
**Cause**: Version status not set to `'deprecated'` in config
**Solution**: Update `VERSION_CONFIGS`:
```typescript
v1: {
version: 'v1',
status: 'deprecated', // Must be 'deprecated', not 'active'
sunsetDate: '2027-01-01T00:00:00Z',
successorVersion: 'v2',
},
```
### Issue: Route Available in Wrong Version
**Symptom**: Route works in v1 but should only be in v2
**Cause**: Missing `versions` restriction in route registration
**Solution**: Add `versions` array:
```typescript
{
path: 'new-feature',
router: newFeatureRouter,
versions: ['v2'], // Add this to restrict availability
},
```
### Issue: Unversioned Paths Not Redirecting
**Symptom**: `/api/flyers` returns 404 instead of redirecting to `/api/v1/flyers`
**Cause**: Redirect middleware order issue in `server.ts`
**Solution**: Ensure redirect middleware is mounted BEFORE `createApiRouter()`:
```typescript
// server.ts - correct order
app.use('/api', redirectMiddleware); // First
app.use('/api', createApiRouter()); // Second
```
### Issue: TypeScript Errors on req.apiVersion
**Symptom**: `Property 'apiVersion' does not exist on type 'Request'`
**Cause**: Type extensions not being picked up
**Solution**: Ensure `src/types/express.d.ts` is included in tsconfig:
```json
{
"compilerOptions": {
"typeRoots": ["./node_modules/@types", "./src/types"]
},
"include": ["src/**/*"]
}
```
### Issue: Router Cache Stale After Config Change
**Symptom**: Version behavior doesn't update after changing `VERSION_CONFIGS`
**Cause**: Routers are cached at startup
**Solution**: Use `refreshRouterCache()` or restart the server:
```typescript
import { refreshRouterCache } from './src/routes/versioned';
// After config changes
refreshRouterCache();
```
---
## Related Documentation
### Architecture Decision Records
| ADR | Title |
| ------------------------------------------------------------------------ | ---------------------------- |
| [ADR-008](../adr/0008-api-versioning-strategy.md) | API Versioning Strategy |
| [ADR-003](../adr/0003-standardized-input-validation-using-middleware.md) | Input Validation |
| [ADR-028](../adr/0028-api-response-standardization.md) | API Response Standardization |
| [ADR-018](../adr/0018-api-documentation-strategy.md) | API Documentation Strategy |
### Implementation Files
| File | Description |
| -------------------------------------------------------------------------------------------- | ---------------------------- |
| [`src/config/apiVersions.ts`](../../src/config/apiVersions.ts) | Version constants and config |
| [`src/middleware/apiVersion.middleware.ts`](../../src/middleware/apiVersion.middleware.ts) | Version detection |
| [`src/middleware/deprecation.middleware.ts`](../../src/middleware/deprecation.middleware.ts) | Deprecation headers |
| [`src/routes/versioned.ts`](../../src/routes/versioned.ts) | Router factory |
| [`src/types/express.d.ts`](../../src/types/express.d.ts) | Request type extensions |
| [`server.ts`](../../server.ts) | Application entry point |
### Test Files
| File | Description |
| ------------------------------------------------------------------------------------------------------ | ------------------------ |
| [`src/middleware/apiVersion.middleware.test.ts`](../../src/middleware/apiVersion.middleware.test.ts) | Version detection tests |
| [`src/middleware/deprecation.middleware.test.ts`](../../src/middleware/deprecation.middleware.test.ts) | Deprecation header tests |
### External References
- [RFC 8594: The "Sunset" HTTP Header Field](https://datatracker.ietf.org/doc/html/rfc8594)
- [draft-ietf-httpapi-deprecation-header](https://datatracker.ietf.org/doc/draft-ietf-httpapi-deprecation-header/)
- [RFC 8288: Web Linking](https://datatracker.ietf.org/doc/html/rfc8288)
---
## Quick Reference
### Files to Modify for Common Tasks
| Task | Files |
| ------------------------------ | ---------------------------------------------------- |
| Add new version | `src/config/apiVersions.ts`, `src/config/swagger.ts` |
| Deprecate version | `src/config/apiVersions.ts` |
| Add version-specific route | `src/routes/versioned.ts` |
| Version-specific handler logic | Route file (e.g., `src/routes/flyer.routes.ts`) |
### Key Functions
```typescript
// Check if version is valid
isValidApiVersion('v1'); // true
isValidApiVersion('v99'); // false
// Get version from request with fallback
getRequestApiVersion(req); // Returns 'v1' | 'v2'
// Check if request has valid version
hasApiVersion(req); // boolean
// Get deprecation info
getVersionDeprecation('v1'); // { deprecated: false, ... }
```
### Commands
```bash
# Run all tests
podman exec -it flyer-crawler-dev npm test
# Type check
podman exec -it flyer-crawler-dev npm run type-check
# Check version headers manually
curl -I http://localhost:3001/api/v1/health
# Test deprecation (after marking v1 deprecated)
curl -I http://localhost:3001/api/v1/health | grep -E "(Deprecation|Sunset|Link|X-API)"
```
curl -I http://localhost:3001/api/v1/health | grep -E "(Deprecation|Sunset|Link|X-API)"
```

View File

@@ -2,6 +2,22 @@
Common code patterns extracted from Architecture Decision Records (ADRs). Use these as templates when writing new code.
## Quick Reference
| Pattern | Key Function/Class | Import From |
| ------------------ | ------------------------------------------------- | ------------------------------------- |
| Error Handling | `handleDbError()`, `NotFoundError` | `src/services/db/errors.db.ts` |
| Repository Methods | `get*`, `find*`, `list*` | `src/services/db/*.db.ts` |
| API Responses | `sendSuccess()`, `sendPaginated()`, `sendError()` | `src/utils/apiResponse.ts` |
| Transactions | `withTransaction()` | `src/services/db/connection.db.ts` |
| Validation | `validateRequest()` | `src/middleware/validation.ts` |
| Authentication | `authenticateJWT` | `src/middleware/auth.ts` |
| Caching | `cacheService` | `src/services/cache.server.ts` |
| Background Jobs | Queue classes | `src/services/queues.server.ts` |
| Feature Flags | `isFeatureEnabled()`, `useFeatureFlag()` | `src/services/featureFlags.server.ts` |
---
## Table of Contents
- [Error Handling](#error-handling)
@@ -12,12 +28,13 @@ Common code patterns extracted from Architecture Decision Records (ADRs). Use th
- [Authentication](#authentication)
- [Caching](#caching)
- [Background Jobs](#background-jobs)
- [Feature Flags](#feature-flags)
---
## Error Handling
**ADR**: [ADR-001](../adr/0001-standardized-error-handling-for-database-operations.md)
**ADR**: [ADR-001](../adr/0001-standardized-error-handling.md)
### Repository Layer Error Handling
@@ -47,16 +64,20 @@ export async function getFlyerById(id: number, client?: PoolClient): Promise<Fly
```typescript
import { sendError } from '../utils/apiResponse';
app.get('/api/flyers/:id', async (req, res) => {
app.get('/api/v1/flyers/:id', async (req, res) => {
try {
const flyer = await flyerDb.getFlyerById(parseInt(req.params.id));
return sendSuccess(res, flyer);
} catch (error) {
// IMPORTANT: Use req.originalUrl for dynamic path logging (not hardcoded paths)
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
return sendError(res, error);
}
});
```
**Best Practice**: Always use `req.originalUrl.split('?')[0]` in error log messages instead of hardcoded paths. This ensures logs reflect the actual request URL including version prefixes (`/api/v1/`). See [Error Logging Path Patterns](ERROR-LOGGING-PATHS.md) for details.
### Custom Error Types
```typescript
@@ -74,7 +95,7 @@ throw new DatabaseError('Failed to insert flyer', originalError);
## Repository Patterns
**ADR**: [ADR-034](../adr/0034-repository-layer-method-naming-conventions.md)
**ADR**: [ADR-034](../adr/0034-repository-pattern-standards.md)
### Method Naming Conventions
@@ -151,16 +172,17 @@ export async function listActiveFlyers(client?: PoolClient): Promise<Flyer[]> {
## API Response Patterns
**ADR**: [ADR-028](../adr/0028-consistent-api-response-format.md)
**ADR**: [ADR-028](../adr/0028-api-response-standardization.md)
### Success Response
```typescript
import { sendSuccess } from '../utils/apiResponse';
app.post('/api/flyers', async (req, res) => {
app.post('/api/v1/flyers', async (req, res) => {
const flyer = await flyerService.createFlyer(req.body);
return sendSuccess(res, flyer, 'Flyer created successfully', 201);
// sendSuccess(res, data, statusCode?, meta?)
return sendSuccess(res, flyer, 201);
});
```
@@ -169,30 +191,32 @@ app.post('/api/flyers', async (req, res) => {
```typescript
import { sendPaginated } from '../utils/apiResponse';
app.get('/api/flyers', async (req, res) => {
const { page = 1, pageSize = 20 } = req.query;
const { items, total } = await flyerService.listFlyers(page, pageSize);
app.get('/api/v1/flyers', async (req, res) => {
const page = parseInt(req.query.page as string) || 1;
const limit = parseInt(req.query.limit as string) || 20;
const { items, total } = await flyerService.listFlyers(page, limit);
return sendPaginated(res, {
items,
total,
page: parseInt(page),
pageSize: parseInt(pageSize),
});
// sendPaginated(res, data[], { page, limit, total }, meta?)
return sendPaginated(res, items, { page, limit, total });
});
```
### Error Response
```typescript
import { sendError } from '../utils/apiResponse';
import { sendError, sendSuccess, ErrorCode } from '../utils/apiResponse';
app.get('/api/flyers/:id', async (req, res) => {
app.get('/api/v1/flyers/:id', async (req, res) => {
try {
const flyer = await flyerDb.getFlyerById(parseInt(req.params.id));
return sendSuccess(res, flyer);
} catch (error) {
return sendError(res, error); // Automatically maps error to correct status
// sendError(res, code, message, statusCode?, details?, meta?)
if (error instanceof NotFoundError) {
return sendError(res, ErrorCode.NOT_FOUND, error.message, 404);
}
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
return sendError(res, ErrorCode.INTERNAL_ERROR, 'An error occurred', 500);
}
});
```
@@ -201,12 +225,12 @@ app.get('/api/flyers/:id', async (req, res) => {
## Transaction Management
**ADR**: [ADR-002](../adr/0002-transaction-management-pattern.md)
**ADR**: [ADR-002](../adr/0002-standardized-transaction-management.md)
### Basic Transaction
```typescript
import { withTransaction } from '../services/db/transaction.db';
import { withTransaction } from '../services/db/connection.db';
export async function createFlyerWithItems(
flyerData: FlyerInput,
@@ -258,7 +282,7 @@ export async function bulkImportFlyers(flyersData: FlyerInput[]): Promise<Import
## Input Validation
**ADR**: [ADR-003](../adr/0003-input-validation-framework.md)
**ADR**: [ADR-003](../adr/0003-standardized-input-validation-using-middleware.md)
### Zod Schema Definition
@@ -294,10 +318,10 @@ export type CreateFlyerInput = z.infer<typeof createFlyerSchema>;
import { validateRequest } from '../middleware/validation';
import { createFlyerSchema } from '../schemas/flyer.schemas';
app.post('/api/flyers', validateRequest(createFlyerSchema), async (req, res) => {
app.post('/api/v1/flyers', validateRequest(createFlyerSchema), async (req, res) => {
// req.body is now type-safe and validated
const flyer = await flyerService.createFlyer(req.body);
return sendSuccess(res, flyer, 'Flyer created successfully', 201);
return sendSuccess(res, flyer, 201);
});
```
@@ -327,7 +351,7 @@ export async function processFlyer(data: unknown): Promise<Flyer> {
import { authenticateJWT } from '../middleware/auth';
app.get(
'/api/profile',
'/api/v1/profile',
authenticateJWT, // Middleware adds req.user
async (req, res) => {
// req.user is guaranteed to exist
@@ -343,7 +367,7 @@ app.get(
import { optionalAuth } from '../middleware/auth';
app.get(
'/api/flyers',
'/api/v1/flyers',
optionalAuth, // req.user may or may not exist
async (req, res) => {
const flyers = req.user
@@ -370,7 +394,7 @@ export function generateToken(user: User): string {
## Caching
**ADR**: [ADR-029](../adr/0029-redis-caching-strategy.md)
**ADR**: [ADR-009](../adr/0009-caching-strategy-for-read-heavy-operations.md)
### Cache Pattern
@@ -410,7 +434,7 @@ export async function updateFlyer(id: number, data: UpdateFlyerInput): Promise<F
## Background Jobs
**ADR**: [ADR-036](../adr/0036-background-job-processing-architecture.md)
**ADR**: [ADR-006](../adr/0006-background-job-processing-and-task-queues.md)
### Queue Job
@@ -469,6 +493,153 @@ const flyerWorker = new Worker(
---
## Feature Flags
**ADR**: [ADR-024](../adr/0024-feature-flagging-strategy.md)
Feature flags enable controlled feature rollout, A/B testing, and quick production disablement without redeployment. All flags default to `false` (opt-in model).
### Backend Usage
```typescript
import { isFeatureEnabled, getFeatureFlags } from '../services/featureFlags.server';
// Check a specific flag in route handler
router.get('/dashboard', async (req, res) => {
if (isFeatureEnabled('newDashboard')) {
return sendSuccess(res, { version: 'v2', data: await getNewDashboardData() });
}
return sendSuccess(res, { version: 'v1', data: await getLegacyDashboardData() });
});
// Check flag in service layer
function processFlyer(flyer: Flyer): ProcessedFlyer {
if (isFeatureEnabled('experimentalAi')) {
return processWithExperimentalAi(flyer);
}
return processWithStandardAi(flyer);
}
// Get all flags (admin endpoint)
router.get('/admin/feature-flags', requireAdmin, async (req, res) => {
sendSuccess(res, { flags: getFeatureFlags() });
});
```
### Frontend Usage
```tsx
import { useFeatureFlag, useAllFeatureFlags } from '../hooks/useFeatureFlag';
import { FeatureFlag } from '../components/FeatureFlag';
// Hook approach - for logic beyond rendering
function Dashboard() {
const isNewDashboard = useFeatureFlag('newDashboard');
useEffect(() => {
if (isNewDashboard) {
analytics.track('new_dashboard_viewed');
}
}, [isNewDashboard]);
return isNewDashboard ? <NewDashboard /> : <LegacyDashboard />;
}
// Declarative component approach
function App() {
return (
<FeatureFlag feature="newDashboard" fallback={<LegacyDashboard />}>
<NewDashboard />
</FeatureFlag>
);
}
// Debug panel showing all flags
function DebugPanel() {
const flags = useAllFeatureFlags();
return (
<ul>
{Object.entries(flags).map(([name, enabled]) => (
<li key={name}>
{name}: {enabled ? 'ON' : 'OFF'}
</li>
))}
</ul>
);
}
```
### Adding a New Flag
1. **Backend** (`src/config/env.ts`):
```typescript
// In featureFlagsSchema
myNewFeature: booleanString(false), // FEATURE_MY_NEW_FEATURE
// In loadEnvVars()
myNewFeature: process.env.FEATURE_MY_NEW_FEATURE,
```
2. **Frontend** (`src/config.ts` and `src/vite-env.d.ts`):
```typescript
// In config.ts featureFlags section
myNewFeature: import.meta.env.VITE_FEATURE_MY_NEW_FEATURE === 'true',
// In vite-env.d.ts
readonly VITE_FEATURE_MY_NEW_FEATURE?: string;
```
3. **Environment** (`.env.example`):
```bash
# FEATURE_MY_NEW_FEATURE=false
# VITE_FEATURE_MY_NEW_FEATURE=false
```
### Testing Feature Flags
```typescript
// Backend - reset modules to test different states
beforeEach(() => {
vi.resetModules();
process.env.FEATURE_NEW_DASHBOARD = 'true';
});
// Frontend - mock config module
vi.mock('../config', () => ({
default: {
featureFlags: {
newDashboard: true,
betaRecipes: false,
},
},
}));
```
### Flag Lifecycle
| Phase | Actions |
| ---------- | -------------------------------------------------------------- |
| **Add** | Add to schemas (backend + frontend), default `false`, document |
| **Enable** | Set env var `='true'`, restart application |
| **Remove** | Remove conditional code, remove from schemas, remove env vars |
| **Sunset** | Max 3 months after full rollout - remove flag |
### Current Flags
| Flag | Backend Env Var | Frontend Env Var | Purpose |
| ---------------- | ------------------------- | ------------------------------ | ------------------------ |
| `bugsinkSync` | `FEATURE_BUGSINK_SYNC` | `VITE_FEATURE_BUGSINK_SYNC` | Bugsink error sync |
| `advancedRbac` | `FEATURE_ADVANCED_RBAC` | `VITE_FEATURE_ADVANCED_RBAC` | Advanced RBAC features |
| `newDashboard` | `FEATURE_NEW_DASHBOARD` | `VITE_FEATURE_NEW_DASHBOARD` | New dashboard experience |
| `betaRecipes` | `FEATURE_BETA_RECIPES` | `VITE_FEATURE_BETA_RECIPES` | Beta recipe features |
| `experimentalAi` | `FEATURE_EXPERIMENTAL_AI` | `VITE_FEATURE_EXPERIMENTAL_AI` | Experimental AI features |
| `debugMode` | `FEATURE_DEBUG_MODE` | `VITE_FEATURE_DEBUG_MODE` | Debug mode |
---
## Related Documentation
- [ADR Index](../adr/index.md) - All architecture decision records

View File

@@ -11,6 +11,7 @@ Common debugging strategies and troubleshooting patterns for Flyer Crawler.
- [API Errors](#api-errors)
- [Authentication Problems](#authentication-problems)
- [Background Job Issues](#background-job-issues)
- [SSL Certificate Issues](#ssl-certificate-issues)
- [Frontend Issues](#frontend-issues)
- [Performance Problems](#performance-problems)
- [Debugging Tools](#debugging-tools)
@@ -43,14 +44,24 @@ When something breaks, check these first:
4. **Are there recent errors in logs?**
```bash
# Application logs
# PM2 logs (dev container)
podman exec -it flyer-crawler-dev pm2 logs
# Container logs
podman logs -f flyer-crawler-dev
# PM2 logs (production)
pm2 logs flyer-crawler-api
```
5. **Is Redis accessible?**
5. **Are PM2 processes running?**
```bash
podman exec -it flyer-crawler-dev pm2 status
```
6. **Is Redis accessible?**
```bash
podman exec flyer-crawler-redis redis-cli ping
```
@@ -218,7 +229,7 @@ SELECT * FROM flyers WHERE store_id = 1;
- Add missing indexes
- Optimize WHERE clauses
- Use connection pooling
- See [ADR-034](../adr/0034-repository-layer-method-naming-conventions.md)
- See [ADR-034](../adr/0034-repository-pattern-standards.md)
---
@@ -226,7 +237,7 @@ SELECT * FROM flyers WHERE store_id = 1;
### Tests Pass on Windows, Fail in Container
**Cause**: Platform-specific behavior (ADR-014)
**Cause**: Platform-specific behavior ([ADR-014](../adr/0014-containerization-and-deployment-strategy.md))
**Rule**: Container results are authoritative. Windows results are unreliable.
@@ -426,6 +437,92 @@ console.log('Expired:', decoded.exp < Date.now() / 1000);
---
## PM2 Process Issues (Dev Container)
### PM2 Process Not Starting
**Symptom**: `pm2 status` shows process as "errored" or "stopped"
**Debug**:
```bash
# Check process status
podman exec -it flyer-crawler-dev pm2 status
# View process logs
podman exec -it flyer-crawler-dev pm2 logs flyer-crawler-api-dev --lines 50
# Show detailed process info
podman exec -it flyer-crawler-dev pm2 show flyer-crawler-api-dev
# Check if port is in use
podman exec -it flyer-crawler-dev netstat -tlnp | grep 3001
```
**Common Causes**:
- Port already in use by another process
- Missing environment variables
- Syntax error in code
- Database connection failure
**Solutions**:
```bash
# Restart specific process
podman exec -it flyer-crawler-dev pm2 restart flyer-crawler-api-dev
# Restart all processes
podman exec -it flyer-crawler-dev pm2 restart all
# Delete and recreate processes
podman exec -it flyer-crawler-dev pm2 delete all
podman exec -it flyer-crawler-dev pm2 start /app/ecosystem.dev.config.cjs
```
### PM2 Log Access
**View logs in real-time**:
```bash
# All processes
podman exec -it flyer-crawler-dev pm2 logs
# Specific process
podman exec -it flyer-crawler-dev pm2 logs flyer-crawler-api-dev
# Last 100 lines
podman exec -it flyer-crawler-dev pm2 logs --lines 100
```
**View log files directly**:
```bash
# API logs
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/pm2/api-out.log
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/pm2/api-error.log
# Worker logs
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/pm2/worker-out.log
# Vite logs
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/pm2/vite-out.log
```
### Redis Log Access
**View Redis logs**:
```bash
# View Redis log file
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-redis cat /var/log/redis/redis-server.log
# View from dev container (shared volume)
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/redis/redis-server.log
```
---
## Background Job Issues
### Jobs Not Processing
@@ -433,10 +530,16 @@ console.log('Expired:', decoded.exp < Date.now() / 1000);
**Debug**:
```bash
# Check if worker is running
# Check if worker is running (dev container)
podman exec -it flyer-crawler-dev pm2 status
# Check worker logs (dev container)
podman exec -it flyer-crawler-dev pm2 logs flyer-crawler-worker-dev
# Check if worker is running (production)
pm2 list
# Check worker logs
# Check worker logs (production)
pm2 logs flyer-crawler-worker
# Check Redis connection
@@ -494,6 +597,77 @@ pm2 logs flyer-crawler-worker --lines 100
---
## SSL Certificate Issues
### Images Not Loading (ERR_CERT_AUTHORITY_INVALID)
**Symptom**: Flyer images fail to load with `ERR_CERT_AUTHORITY_INVALID` in browser console
**Cause**: Mixed hostname origins - user accesses via `localhost` but images use `127.0.0.1` (or vice versa)
**Debug**:
```bash
# Check which hostname images are using
podman exec flyer-crawler-dev psql -U postgres -d flyer_crawler_dev \
-c "SELECT image_url FROM flyers LIMIT 1;"
# Verify certificate includes both hostnames
podman exec flyer-crawler-dev openssl x509 -in /app/certs/localhost.crt -text -noout | grep -A1 "Subject Alternative Name"
# Check NGINX accepts both hostnames
podman exec flyer-crawler-dev grep "server_name" /etc/nginx/sites-available/default
```
**Solution**: The dev container is configured to handle both hostnames:
1. Certificate includes SANs for `localhost`, `127.0.0.1`, and `::1`
2. NGINX `server_name` directive includes both `localhost` and `127.0.0.1`
If you still see errors:
```bash
# Rebuild container to regenerate certificate
podman-compose down
podman-compose build --no-cache flyer-crawler-dev
podman-compose up -d
```
See [FLYER-URL-CONFIGURATION.md](../FLYER-URL-CONFIGURATION.md#ssl-certificate-configuration-dev-container) for full details.
### Self-Signed Certificate Not Trusted
**Symptom**: Browser shows security warning for `https://localhost`
**Temporary Workaround**: Accept the warning by clicking "Advanced" > "Proceed to localhost"
**Permanent Fix (Recommended)**: Install the mkcert CA certificate to eliminate all SSL warnings.
The CA certificate is located at `certs/mkcert-ca.crt` in the project root. See [`certs/README.md`](../../certs/README.md) for platform-specific installation instructions (Windows, macOS, Linux, Firefox).
After installation:
- Your browser will trust all mkcert certificates without warnings
- Both `https://localhost/` and `https://127.0.0.1/` will work without SSL errors
- Flyer images will load without `ERR_CERT_AUTHORITY_INVALID` errors
### NGINX SSL Configuration Test
**Debug**:
```bash
# Test NGINX configuration
podman exec flyer-crawler-dev nginx -t
# Check if NGINX is listening on 443
podman exec flyer-crawler-dev netstat -tlnp | grep 443
# Verify certificate files exist
podman exec flyer-crawler-dev ls -la /app/certs/
```
---
## Frontend Issues
### Hot Reload Not Working
@@ -661,8 +835,10 @@ podman exec flyer-crawler-redis redis-cli info stats
## See Also
- [DEV-CONTAINER.md](DEV-CONTAINER.md) - Dev container guide (PM2, Logstash)
- [TESTING.md](TESTING.md) - Testing strategies
- [CODE-PATTERNS.md](CODE-PATTERNS.md) - Common patterns
- [MONITORING.md](../operations/MONITORING.md) - Production monitoring
- [LOGSTASH-QUICK-REF.md](../operations/LOGSTASH-QUICK-REF.md) - Log aggregation
- [Bugsink Setup](../tools/BUGSINK-SETUP.md) - Error tracking
- [DevOps Guide](../subagents/DEVOPS-GUIDE.md) - Container debugging

View File

@@ -0,0 +1,408 @@
# Dev Container Guide
Comprehensive documentation for the Flyer Crawler development container.
**Last Updated**: 2026-01-22
---
## Table of Contents
1. [Overview](#overview)
2. [Architecture](#architecture)
3. [PM2 Process Management](#pm2-process-management)
4. [Log Aggregation](#log-aggregation)
5. [Quick Reference](#quick-reference)
6. [Troubleshooting](#troubleshooting)
---
## Overview
The dev container provides a production-like environment for local development. Key features:
- **PM2 Process Management** - Matches production architecture (ADR-014)
- **Logstash Integration** - All logs forwarded to Bugsink (ADR-050)
- **HTTPS by Default** - Self-signed certificates via mkcert
- **Hot Reloading** - tsx watch mode for API and worker processes
### Container Services
| Service | Container Name | Purpose |
| ----------- | ------------------------ | ------------------------------ |
| Application | `flyer-crawler-dev` | Node.js app, Bugsink, Logstash |
| PostgreSQL | `flyer-crawler-postgres` | Primary database with PostGIS |
| Redis | `flyer-crawler-redis` | Cache and job queue backing |
### Access Points
| Service | URL | Notes |
| ----------- | ------------------------ | ------------------------- |
| Frontend | `https://localhost` | NGINX proxies Vite (5173) |
| Backend API | `http://localhost:3001` | Express server |
| Bugsink | `https://localhost:8443` | Error tracking UI |
| PostgreSQL | `localhost:5432` | Direct database access |
| Redis | `localhost:6379` | Direct Redis access |
---
## Architecture
### Production vs Development Parity
The dev container was updated to match production architecture (ADR-014):
| Component | Production | Dev Container (OLD) | Dev Container (NEW) |
| ------------ | ---------------------- | ---------------------- | ----------------------- |
| API Server | PM2 cluster mode | `npm run dev` (inline) | PM2 fork + tsx watch |
| Worker | PM2 process | Inline with API | PM2 process + tsx watch |
| Frontend | Static files via NGINX | Vite standalone | PM2 + Vite dev server |
| Logs | PM2 logs -> Logstash | Console only | PM2 logs -> Logstash |
| Process Mgmt | PM2 | None | PM2 |
### Container Startup Flow
When the container starts (`scripts/dev-entrypoint.sh`):
1. **NGINX** starts (HTTPS proxy for Vite and Bugsink)
2. **Bugsink** starts (error tracking on port 8000)
3. **Logstash** starts (log aggregation)
4. **PM2** starts with `ecosystem.dev.config.cjs`:
- `flyer-crawler-api-dev` - API server
- `flyer-crawler-worker-dev` - Background worker
- `flyer-crawler-vite-dev` - Vite dev server
5. Container tails PM2 logs to stay alive
### Key Configuration Files
| File | Purpose |
| ------------------------------ | ---------------------------------- |
| `ecosystem.dev.config.cjs` | PM2 development configuration |
| `ecosystem.config.cjs` | PM2 production configuration |
| `scripts/dev-entrypoint.sh` | Container startup script |
| `docker/logstash/bugsink.conf` | Logstash pipeline configuration |
| `docker/nginx/dev.conf` | NGINX development configuration |
| `compose.dev.yml` | Docker Compose service definitions |
| `Dockerfile.dev` | Container image definition |
---
## PM2 Process Management
### Process Overview
PM2 manages three processes in the dev container:
```text
+--------------------+ +------------------------+ +--------------------+
| flyer-crawler- | | flyer-crawler- | | flyer-crawler- |
| api-dev | | worker-dev | | vite-dev |
+--------------------+ +------------------------+ +--------------------+
| tsx watch | | tsx watch | | vite --host |
| server.ts | | src/services/worker.ts | | |
| Port: 3001 | | No port | | Port: 5173 |
+--------------------+ +------------------------+ +--------------------+
| | |
v v v
+------------------------------------------------------------------------+
| /var/log/pm2/*.log |
| (Logstash picks up for Bugsink) |
+------------------------------------------------------------------------+
```
### PM2 Commands
All commands should be run inside the container:
```bash
# View process status
podman exec -it flyer-crawler-dev pm2 status
# View all logs (tail -f style)
podman exec -it flyer-crawler-dev pm2 logs
# View specific process logs
podman exec -it flyer-crawler-dev pm2 logs flyer-crawler-api-dev
# Restart all processes
podman exec -it flyer-crawler-dev pm2 restart all
# Restart specific process
podman exec -it flyer-crawler-dev pm2 restart flyer-crawler-api-dev
# Stop all processes
podman exec -it flyer-crawler-dev pm2 delete all
# Show detailed process info
podman exec -it flyer-crawler-dev pm2 show flyer-crawler-api-dev
# Monitor processes in real-time
podman exec -it flyer-crawler-dev pm2 monit
```
### PM2 Log Locations
| Process | stdout Log | stderr Log |
| -------------------------- | ----------------------------- | ------------------------------- |
| `flyer-crawler-api-dev` | `/var/log/pm2/api-out.log` | `/var/log/pm2/api-error.log` |
| `flyer-crawler-worker-dev` | `/var/log/pm2/worker-out.log` | `/var/log/pm2/worker-error.log` |
| `flyer-crawler-vite-dev` | `/var/log/pm2/vite-out.log` | `/var/log/pm2/vite-error.log` |
### NPM Scripts for PM2
The following npm scripts are available for PM2 management:
```bash
# Start PM2 with dev config (inside container)
npm run dev:pm2
# Restart all PM2 processes
npm run dev:pm2:restart
# Stop all PM2 processes
npm run dev:pm2:stop
# View PM2 status
npm run dev:pm2:status
# View PM2 logs
npm run dev:pm2:logs
```
---
## Log Aggregation
### Log Flow Architecture (ADR-050)
All application logs flow through Logstash to Bugsink using a 3-project architecture:
```text
+------------------+ +------------------+ +------------------+
| PM2 Logs | | PostgreSQL | | Redis/NGINX |
| /var/log/pm2/ | | /var/log/ | | /var/log/redis/ |
| (API + Worker) | | postgresql/ | | /var/log/nginx/ |
+--------+---------+ +--------+---------+ +--------+---------+
| | |
v v v
+------------------------------------------------------------------------+
| LOGSTASH |
| /etc/logstash/conf.d/bugsink.conf |
| (Routes by log type) |
+------------------------------------------------------------------------+
| | |
v v v
+------------------+ +------------------+ +------------------+
| Backend API | | Frontend (Dev) | | Infrastructure |
| (Project 1) | | (Project 2) | | (Project 4) |
| - Pino errors | | - Browser SDK | | - Redis warnings |
| - PostgreSQL | | (not Logstash) | | - NGINX errors |
+------------------+ +------------------+ | - Vite errors |
+------------------+
```
### Log Sources
| Source | Log Path | Format | Errors To Bugsink |
| ------------ | --------------------------------- | ---------- | ----------------- |
| API Server | `/var/log/pm2/api-*.log` | Pino JSON | Yes |
| Worker | `/var/log/pm2/worker-*.log` | Pino JSON | Yes |
| Vite | `/var/log/pm2/vite-*.log` | Plain text | Yes (if error) |
| PostgreSQL | `/var/log/postgresql/*.log` | PostgreSQL | Yes (ERROR/FATAL) |
| Redis | `/var/log/redis/redis-server.log` | Redis | Yes (warnings) |
| NGINX Access | `/var/log/nginx/access.log` | Combined | Yes (5xx only) |
| NGINX Error | `/var/log/nginx/error.log` | NGINX | Yes |
### Viewing Logs
```bash
# View Logstash processed logs
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/logstash/pm2-api-$(date +%Y-%m-%d).log
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/logstash/redis-operational-$(date +%Y-%m-%d).log
# View raw Redis logs
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-redis cat /var/log/redis/redis-server.log
# Check Logstash status
podman exec flyer-crawler-dev curl -s localhost:9600/_node/stats/pipelines?pretty
```
### Bugsink Access
- **URL**: `https://localhost:8443`
- **Login**: `admin@localhost` / `admin`
- **Projects**:
- Project 1: Backend API (Dev) - Pino app errors, PostgreSQL errors
- Project 2: Frontend (Dev) - Browser errors via Sentry SDK
- Project 4: Infrastructure (Dev) - Redis warnings, NGINX errors, Vite build errors
**Note**: Frontend DSN uses nginx proxy (`/bugsink-api/`) because browsers cannot reach `localhost:8000` directly. See [BUGSINK-SETUP.md](../tools/BUGSINK-SETUP.md#frontend-nginx-proxy) for details.
---
## Quick Reference
### Starting the Dev Container
```bash
# Start all services
podman-compose -f compose.dev.yml up -d
# View container logs
podman-compose -f compose.dev.yml logs -f
# Stop all services
podman-compose -f compose.dev.yml down
```
### Common Tasks
| Task | Command |
| -------------------- | ------------------------------------------------------------------------------ |
| Run tests | `podman exec -it flyer-crawler-dev npm test` |
| Run type check | `podman exec -it flyer-crawler-dev npm run type-check` |
| View PM2 status | `podman exec -it flyer-crawler-dev pm2 status` |
| View PM2 logs | `podman exec -it flyer-crawler-dev pm2 logs` |
| Restart API | `podman exec -it flyer-crawler-dev pm2 restart flyer-crawler-api-dev` |
| Access PostgreSQL | `podman exec -it flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev` |
| Access Redis CLI | `podman exec -it flyer-crawler-redis redis-cli` |
| Shell into container | `podman exec -it flyer-crawler-dev bash` |
### Environment Variables
Key environment variables are set in `compose.dev.yml`:
| Variable | Value | Purpose |
| ----------------- | ----------------------------- | --------------------------- |
| `TZ` | `America/Los_Angeles` | Timezone (PST) for all logs |
| `NODE_ENV` | `development` | Environment mode |
| `DB_HOST` | `postgres` | PostgreSQL hostname |
| `REDIS_URL` | `redis://redis:6379` | Redis connection URL |
| `FRONTEND_URL` | `https://localhost` | CORS origin |
| `SENTRY_DSN` | `http://...@127.0.0.1:8000/1` | Backend Bugsink DSN |
| `VITE_SENTRY_DSN` | `http://...@127.0.0.1:8000/2` | Frontend Bugsink DSN |
### Timezone Configuration
All dev container services are configured to use PST (America/Los_Angeles) timezone for consistent log timestamps:
| Service | Configuration | Notes |
| ---------- | ------------------------------------------------ | ------------------------------ |
| App | `TZ=America/Los_Angeles` in compose.dev.yml | Also set via dev-entrypoint.sh |
| PostgreSQL | `timezone` and `log_timezone` in postgres config | Logs timestamps in PST |
| Redis | `TZ=America/Los_Angeles` in compose.dev.yml | Alpine uses TZ env var |
| PM2 | `TZ` in ecosystem.dev.config.cjs | Pino timestamps use local time |
**Verifying Timezone**:
```bash
# Check container timezone
podman exec flyer-crawler-dev date
# Check PostgreSQL timezone
podman exec flyer-crawler-postgres psql -U postgres -c "SHOW timezone;"
# Check Redis log timestamps
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-redis cat /var/log/redis/redis-server.log | head -5
```
**Note**: If you need UTC timestamps for production compatibility, change `TZ=UTC` in compose.dev.yml and restart containers.
---
## Troubleshooting
### PM2 Process Not Starting
**Symptom**: `pm2 status` shows process as "errored" or "stopped"
**Debug**:
```bash
# Check process logs
podman exec -it flyer-crawler-dev pm2 logs flyer-crawler-api-dev --lines 50
# Check if port is in use
podman exec -it flyer-crawler-dev netstat -tlnp | grep 3001
# Try manual start to see errors
podman exec -it flyer-crawler-dev tsx server.ts
```
**Common Causes**:
- Port already in use
- Missing environment variables
- Syntax error in code
### Logs Not Appearing in Bugsink
**Symptom**: Errors in application but nothing in Bugsink UI
**Debug**:
```bash
# Check Logstash is running
podman exec flyer-crawler-dev curl -s localhost:9600/_node/stats/pipelines?pretty
# Check Logstash logs for errors
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/logstash/logstash.log
# Verify PM2 logs exist
podman exec flyer-crawler-dev ls -la /var/log/pm2/
```
**Common Causes**:
- Logstash not started
- Log file permissions
- Bugsink not running
### Redis Logs Not Captured
**Symptom**: Redis warnings not appearing in Bugsink
**Debug**:
```bash
# Verify Redis logs exist
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-redis cat /var/log/redis/redis-server.log
# Verify shared volume is mounted
podman exec flyer-crawler-dev ls -la /var/log/redis/
```
**Common Causes**:
- `redis_logs` volume not mounted
- Redis not configured to write to file
### Hot Reload Not Working
**Symptom**: Code changes not reflected in running application
**Debug**:
```bash
# Check tsx watch is running
podman exec -it flyer-crawler-dev pm2 logs flyer-crawler-api-dev
# Manually restart process
podman exec -it flyer-crawler-dev pm2 restart flyer-crawler-api-dev
```
**Common Causes**:
- File watcher limit reached
- Volume mount issues on Windows
---
## Related Documentation
- [QUICKSTART.md](../getting-started/QUICKSTART.md) - Getting started guide
- [DEBUGGING.md](DEBUGGING.md) - Debugging strategies
- [LOGSTASH-QUICK-REF.md](../operations/LOGSTASH-QUICK-REF.md) - Logstash quick reference
- [DEV-CONTAINER-BUGSINK.md](../DEV-CONTAINER-BUGSINK.md) - Bugsink setup in dev container
- [ADR-014](../adr/0014-containerization-and-deployment-strategy.md) - Containerization and deployment strategy
- [ADR-050](../adr/0050-postgresql-function-observability.md) - PostgreSQL function observability (includes log aggregation)

View File

@@ -0,0 +1,153 @@
# Error Logging Path Patterns
## Overview
This document describes the correct pattern for logging request paths in error handlers within Express route files. Following this pattern ensures that error logs accurately reflect the actual request URL, including any API version prefixes.
## The Problem
When ADR-008 (API Versioning Strategy) was implemented, all routes were moved from `/api/*` to `/api/v1/*`. However, some error log messages contained hardcoded paths that did not update automatically:
```typescript
// INCORRECT - hardcoded path
req.log.error({ error }, 'Error in /api/flyers/:id:');
```
This caused 16 unit test failures because tests expected the error log message to contain `/api/v1/flyers/:id` but received `/api/flyers/:id`.
## The Solution
Always use `req.originalUrl` to dynamically capture the actual request path in error logs:
```typescript
// CORRECT - dynamic path from request
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
```
### Why `req.originalUrl`?
| Property | Value for `/api/v1/flyers/123?active=true` | Use Case |
| ----------------- | ------------------------------------------ | ----------------------------------- |
| `req.url` | `/123?active=true` | Path relative to router mount point |
| `req.path` | `/123` | Path without query string |
| `req.originalUrl` | `/api/v1/flyers/123?active=true` | Full original request URL |
| `req.baseUrl` | `/api/v1/flyers` | Router mount path |
`req.originalUrl` is the correct choice because:
1. It contains the full path including version prefix (`/api/v1/`)
2. It reflects what the client actually requested
3. It makes log messages searchable by the actual endpoint path
4. It automatically adapts when routes are mounted at different paths
### Stripping Query Parameters
Use `.split('?')[0]` to remove query parameters from log messages:
```typescript
// Request: /api/v1/flyers?page=1&limit=20
req.originalUrl.split('?')[0]; // Returns: /api/v1/flyers
```
This keeps log messages clean and prevents sensitive query parameters from appearing in logs.
## Standard Error Logging Pattern
### Basic Pattern
```typescript
router.get('/:id', async (req, res) => {
try {
const result = await someService.getData(req.params.id);
return sendSuccess(res, result);
} catch (error) {
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
return sendError(res, error);
}
});
```
### With Additional Context
```typescript
router.post('/', async (req, res) => {
try {
const result = await someService.createItem(req.body);
return sendSuccess(res, result, 'Item created', 201);
} catch (error) {
req.log.error(
{ error, userId: req.user?.id, body: req.body },
`Error creating item in ${req.originalUrl.split('?')[0]}:`,
);
return sendError(res, error);
}
});
```
### Descriptive Messages
For clarity, include a brief description of the operation:
```typescript
// Good - describes the operation
req.log.error({ error }, `Error fetching recipes in ${req.originalUrl.split('?')[0]}:`);
req.log.error({ error }, `Error updating user profile in ${req.originalUrl.split('?')[0]}:`);
// Acceptable - just the path
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
// Bad - hardcoded path
req.log.error({ error }, 'Error in /api/recipes:');
```
## Files Updated in Initial Fix (2026-01-27)
The following files were updated to use this pattern:
| File | Error Log Statements Fixed |
| -------------------------------------- | -------------------------- |
| `src/routes/recipe.routes.ts` | 3 |
| `src/routes/stats.routes.ts` | 1 |
| `src/routes/flyer.routes.ts` | 2 |
| `src/routes/personalization.routes.ts` | 3 |
## Testing Error Log Messages
When writing tests that verify error log messages, use flexible matchers that account for versioned paths:
```typescript
// Good - matches any version prefix
expect(logSpy).toHaveBeenCalledWith(
expect.objectContaining({ error: expect.any(Error) }),
expect.stringContaining('/flyers'),
);
// Good - explicit version match
expect(logSpy).toHaveBeenCalledWith(
expect.objectContaining({ error: expect.any(Error) }),
expect.stringContaining('/api/v1/flyers'),
);
// Bad - hardcoded unversioned path (will fail)
expect(logSpy).toHaveBeenCalledWith(
expect.objectContaining({ error: expect.any(Error) }),
'Error in /api/flyers:',
);
```
## Checklist for New Routes
When creating new route handlers:
- [ ] Use `req.originalUrl.split('?')[0]` in all error log messages
- [ ] Include descriptive text about the operation being performed
- [ ] Add structured context (userId, relevant IDs) to the log object
- [ ] Write tests that verify error logs contain the versioned path
## Related Documentation
- [ADR-008: API Versioning Strategy](../adr/0008-api-versioning-strategy.md) - Versioning implementation details
- [ADR-057: Test Remediation Post-API Versioning](../adr/0057-test-remediation-post-api-versioning.md) - Comprehensive remediation guide
- [ADR-004: Structured Logging](../adr/0004-standardized-application-wide-structured-logging.md) - Logging standards
- [CODE-PATTERNS.md](CODE-PATTERNS.md) - General code patterns
- [TESTING.md](TESTING.md) - Testing guidelines

View File

@@ -1,5 +1,19 @@
# Testing Guide
## Quick Reference
| Command | Purpose |
| ------------------------------------------------------------ | ---------------------------- |
| `podman exec -it flyer-crawler-dev npm test` | Run all tests |
| `podman exec -it flyer-crawler-dev npm run test:unit` | Unit tests (~2900) |
| `podman exec -it flyer-crawler-dev npm run test:integration` | Integration tests (28 files) |
| `podman exec -it flyer-crawler-dev npm run test:e2e` | E2E tests (11 files) |
| `podman exec -it flyer-crawler-dev npm run type-check` | TypeScript check |
**Critical**: Always run tests in the dev container. Windows results are unreliable.
---
## Overview
This project has comprehensive test coverage including unit tests, integration tests, and E2E tests. All tests must be run in the **Linux dev container environment** for reliable results.
@@ -76,7 +90,7 @@ To verify type-check is working correctly:
Example error output:
```
```text
src/pages/MyDealsPage.tsx:68:31 - error TS2339: Property 'store_name' does not exist on type 'WatchedItemDeal'.
68 <span>{deal.store_name}</span>
@@ -113,15 +127,26 @@ Located throughout `src/` directory alongside source files with `.test.ts` or `.
npm run test:unit
```
### Integration Tests (5 test files)
### Integration Tests (28 test files)
Located in `src/tests/integration/`:
Located in `src/tests/integration/`. Key test files include:
- `admin.integration.test.ts`
- `flyer.integration.test.ts`
- `price.integration.test.ts`
- `public.routes.integration.test.ts`
- `receipt.integration.test.ts`
| Test File | Domain |
| -------------------------------------- | -------------------------- |
| `admin.integration.test.ts` | Admin dashboard operations |
| `auth.integration.test.ts` | Authentication flows |
| `budget.integration.test.ts` | Budget management |
| `flyer.integration.test.ts` | Flyer CRUD operations |
| `flyer-processing.integration.test.ts` | AI flyer processing |
| `gamification.integration.test.ts` | Achievements and points |
| `inventory.integration.test.ts` | Inventory management |
| `notification.integration.test.ts` | User notifications |
| `receipt.integration.test.ts` | Receipt processing |
| `recipe.integration.test.ts` | Recipe management |
| `shopping-list.integration.test.ts` | Shopping list operations |
| `user.integration.test.ts` | User profile operations |
See `src/tests/integration/` for the complete list.
Requires PostgreSQL and Redis services running.
@@ -129,13 +154,23 @@ Requires PostgreSQL and Redis services running.
npm run test:integration
```
### E2E Tests (3 test files)
### E2E Tests (11 test files)
Located in `src/tests/e2e/`:
Located in `src/tests/e2e/`. Full user journey tests:
- `deals-journey.e2e.test.ts`
- `budget-journey.e2e.test.ts`
- `receipt-journey.e2e.test.ts`
| Test File | Journey |
| --------------------------------- | ----------------------------- |
| `admin-authorization.e2e.test.ts` | Admin access control |
| `admin-dashboard.e2e.test.ts` | Admin dashboard flows |
| `auth.e2e.test.ts` | Login/logout/registration |
| `budget-journey.e2e.test.ts` | Budget tracking workflow |
| `deals-journey.e2e.test.ts` | Finding and saving deals |
| `error-reporting.e2e.test.ts` | Error handling verification |
| `flyer-upload.e2e.test.ts` | Flyer upload and processing |
| `inventory-journey.e2e.test.ts` | Pantry management |
| `receipt-journey.e2e.test.ts` | Receipt scanning and tracking |
| `upc-journey.e2e.test.ts` | UPC barcode scanning |
| `user-journey.e2e.test.ts` | User profile management |
Requires all services (PostgreSQL, Redis, BullMQ workers) running.
@@ -157,20 +192,18 @@ Located in `src/tests/utils/storeHelpers.ts`:
```typescript
// Create a store with a location in one call
const store = await createStoreWithLocation({
storeName: 'Test Store',
address: {
address_line_1: '123 Main St',
city: 'Toronto',
province_state: 'ON',
postal_code: 'M1M 1M1',
},
pool,
log,
const store = await createStoreWithLocation(pool, {
name: 'Test Store',
address: '123 Main St',
city: 'Toronto',
province: 'ON',
postalCode: 'M1M 1M1',
});
// Returns: { storeId, addressId, storeLocationId }
// Cleanup stores and their locations
await cleanupStoreLocations([storeId1, storeId2], pool, log);
await cleanupStoreLocation(pool, store);
```
### Mock Factories
@@ -261,3 +294,214 @@ Opens a browser-based test runner with filtering and debugging capabilities.
5. **Verify cache invalidation** - tests that insert data directly must invalidate cache
6. **Use unique filenames** - file upload tests need timestamp-based filenames
7. **Check exit codes** - `npm run type-check` returns 0 on success, non-zero on error
8. **Use `req.originalUrl` in error logs** - never hardcode API paths in error messages
9. **Use versioned API paths** - always use `/api/v1/` prefix in test requests
10. **Use `vi.hoisted()` for module mocks** - ensure mocks are available during module initialization
## Testing Error Log Messages
When testing route error handlers, ensure assertions account for versioned API paths.
### Problem: Hardcoded Paths Break Tests
Error log messages with hardcoded paths cause test failures when API versions change:
```typescript
// Production code (INCORRECT - hardcoded path)
req.log.error({ error }, 'Error in /api/flyers/:id:');
// Test expects versioned path
expect(logSpy).toHaveBeenCalledWith(
expect.objectContaining({ error: expect.any(Error) }),
expect.stringContaining('/api/v1/flyers'), // FAILS - actual log has /api/flyers
);
```
### Solution: Dynamic Paths with `req.originalUrl`
Production code should use `req.originalUrl` for dynamic path logging:
```typescript
// Production code (CORRECT - dynamic path)
req.log.error({ error }, `Error in ${req.originalUrl.split('?')[0]}:`);
```
### Writing Robust Test Assertions
```typescript
// Good - matches versioned path
expect(logSpy).toHaveBeenCalledWith(
expect.objectContaining({ error: expect.any(Error) }),
expect.stringContaining('/api/v1/flyers'),
);
// Good - flexible match for any version
expect(logSpy).toHaveBeenCalledWith(
expect.objectContaining({ error: expect.any(Error) }),
expect.stringMatching(/\/api\/v\d+\/flyers/),
);
// Bad - hardcoded unversioned path
expect(logSpy).toHaveBeenCalledWith(
expect.objectContaining({ error: expect.any(Error) }),
'Error in /api/flyers:', // Will fail with versioned routes
);
```
See [Error Logging Path Patterns](ERROR-LOGGING-PATHS.md) for complete documentation.
## API Versioning in Tests (ADR-008, ADR-057)
All API endpoints use the `/api/v1/` prefix. Tests must use versioned paths.
### Configuration
API base URLs are configured centrally in Vitest config files:
| Config File | Environment Variable | Value |
| ------------------------------ | -------------------- | ------------------------------ |
| `vite.config.ts` | `VITE_API_BASE_URL` | `/api/v1` |
| `vitest.config.e2e.ts` | `VITE_API_BASE_URL` | `http://localhost:3098/api/v1` |
| `vitest.config.integration.ts` | `VITE_API_BASE_URL` | `http://localhost:3099/api/v1` |
### Writing API Tests
```typescript
// Good - versioned path
const response = await request.post('/api/v1/auth/login').send({...});
// Bad - unversioned path (will fail)
const response = await request.post('/api/auth/login').send({...});
```
### Migration Checklist
When API version changes (e.g., v1 to v2):
1. Update all Vitest config `VITE_API_BASE_URL` values
2. Search and replace API paths in E2E tests: `grep -r "/api/v1/" src/tests/e2e/`
3. Search and replace API paths in integration tests
4. Verify route handler error logs use `req.originalUrl`
5. Run full test suite in dev container
See [ADR-057](../adr/0057-test-remediation-post-api-versioning.md) for complete migration guidance.
## vi.hoisted() Pattern for Module Mocks
When mocking modules that are imported at module initialization time (like queues or database connections), use `vi.hoisted()` to ensure mocks are available during hoisting.
### Problem: Mock Not Available During Import
```typescript
// BAD: Mock might not be ready when module imports it
vi.mock('../services/queues.server', () => ({
flyerQueue: { getJobCounts: vi.fn() }, // May not exist yet
}));
import healthRouter from './health.routes'; // Imports queues.server
```
### Solution: Use vi.hoisted()
```typescript
// GOOD: Mocks are created during hoisting, before vi.mock runs
const { mockQueuesModule } = vi.hoisted(() => {
const createMockQueue = () => ({
getJobCounts: vi.fn().mockResolvedValue({
waiting: 0,
active: 0,
failed: 0,
delayed: 0,
}),
});
return {
mockQueuesModule: {
flyerQueue: createMockQueue(),
emailQueue: createMockQueue(),
// ... additional queues
},
};
});
// Now the mock object exists when vi.mock factory runs
vi.mock('../services/queues.server', () => mockQueuesModule);
// Safe to import after mocks are defined
import healthRouter from './health.routes';
```
See [ADR-057](../adr/0057-test-remediation-post-api-versioning.md) for additional patterns.
## Testing Role-Based Component Visibility
When testing components that render differently based on user roles:
### Pattern: Separate Test Cases by Role
```typescript
describe('for authenticated users', () => {
beforeEach(() => {
mockedUseAuth.mockReturnValue({
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ role: 'user' }),
});
});
it('renders user-accessible components', () => {
render(<MyComponent />);
expect(screen.getByTestId('user-component')).toBeInTheDocument();
// Admin-only should NOT be present
expect(screen.queryByTestId('admin-only')).not.toBeInTheDocument();
});
});
describe('for admin users', () => {
beforeEach(() => {
mockedUseAuth.mockReturnValue({
authStatus: 'AUTHENTICATED',
userProfile: createMockUserProfile({ role: 'admin' }),
});
});
it('renders admin-only components', () => {
render(<MyComponent />);
expect(screen.getByTestId('admin-only')).toBeInTheDocument();
});
});
```
### Key Points
1. Create separate `describe` blocks for each role
2. Set up role-specific mocks in `beforeEach`
3. Test both presence AND absence of role-gated components
4. Use `screen.queryByTestId()` for elements that should NOT exist
## CSS Class Assertions After UI Refactors
After frontend style changes, update test assertions to match new CSS classes.
### Handling Tailwind Class Changes
```typescript
// Before refactor
expect(selectedItem).toHaveClass('ring-2', 'ring-brand-primary');
// After refactor - update to new classes
expect(selectedItem).toHaveClass('border-brand-primary', 'bg-teal-50/50');
```
### Flexible Matching
For complex class combinations, consider partial matching:
```typescript
// Check for key classes, ignore utility classes
expect(element).toHaveClass('border-brand-primary');
// Or use regex for patterns
expect(element.className).toMatch(/dark:bg-teal-\d+/);
```
See [ADR-057](../adr/0057-test-remediation-post-api-versioning.md) for lessons learned from the test remediation effort.

View File

@@ -0,0 +1,272 @@
# Test Path Migration: Unversioned to Versioned API Paths
**Status**: Complete
**Created**: 2026-01-27
**Completed**: 2026-01-27
**Related**: ADR-008 (API Versioning Strategy)
## Summary
All integration test files have been successfully migrated to use versioned API paths (`/api/v1/`). This resolves the redirect-related test failures introduced by ADR-008 Phase 1.
### Results
| Metric | Value |
| ------------------------- | ---------------------------------------- |
| Test files updated | 23 |
| Path occurrences changed | ~70 |
| Tests before migration | 274/348 passing |
| Tests after migration | 345/348 passing |
| Test failures resolved | 71 |
| Remaining todo/skipped | 3 (known issues, not versioning-related) |
| Type check | Passing |
| Versioning-specific tests | 82/82 passing |
### Key Outcomes
- No `301 Moved Permanently` responses in test output
- All redirect-related failures resolved
- No regressions introduced
- Unit tests unaffected (3,375/3,391 passing, pre-existing failures)
---
## Original Problem Statement
Integration tests failed due to redirect middleware (ADR-008 Phase 1). Server returned `301 Moved Permanently` for unversioned paths (`/api/resource`) instead of expected `200 OK`. Redirect targets versioned paths (`/api/v1/resource`).
**Root Cause**: Backwards-compatibility redirect in `server.ts`:
```typescript
app.use('/api', (req, res, next) => {
const versionPattern = /^\/v\d+/;
if (!versionPattern.test(req.path)) {
return res.redirect(301, `/api/v1${req.path}`);
}
next();
});
```
**Impact**: ~70 test path occurrences across 23 files returning 301 instead of expected status codes.
## Solution
Update all test API paths from `/api/{resource}` to `/api/v1/{resource}`.
## Files Requiring Updates
### Integration Tests (16 files)
| File | Occurrences | Domains |
| ------------------------------------------------------------ | ----------- | ---------------------- |
| `src/tests/integration/inventory.integration.test.ts` | 14 | inventory |
| `src/tests/integration/receipt.integration.test.ts` | 17 | receipts |
| `src/tests/integration/recipe.integration.test.ts` | 17 | recipes, users/recipes |
| `src/tests/integration/user.routes.integration.test.ts` | 10 | users/shopping-lists |
| `src/tests/integration/admin.integration.test.ts` | 7 | admin |
| `src/tests/integration/flyer-processing.integration.test.ts` | 6 | ai/jobs |
| `src/tests/integration/budget.integration.test.ts` | 5 | budgets |
| `src/tests/integration/notification.integration.test.ts` | 3 | users/notifications |
| `src/tests/integration/data-integrity.integration.test.ts` | 3 | users, admin |
| `src/tests/integration/upc.integration.test.ts` | 3 | upc |
| `src/tests/integration/edge-cases.integration.test.ts` | 3 | users/shopping-lists |
| `src/tests/integration/user.integration.test.ts` | 2 | users |
| `src/tests/integration/public.routes.integration.test.ts` | 2 | flyers, recipes |
| `src/tests/integration/flyer.integration.test.ts` | 1 | flyers |
| `src/tests/integration/category.routes.test.ts` | 1 | categories |
| `src/tests/integration/gamification.integration.test.ts` | 1 | ai/jobs |
### E2E Tests (7 files)
| File | Occurrences | Domains |
| --------------------------------------------- | ----------- | -------------------- |
| `src/tests/e2e/inventory-journey.e2e.test.ts` | 9 | inventory |
| `src/tests/e2e/receipt-journey.e2e.test.ts` | 9 | receipts |
| `src/tests/e2e/budget-journey.e2e.test.ts` | 6 | budgets |
| `src/tests/e2e/upc-journey.e2e.test.ts` | 3 | upc |
| `src/tests/e2e/deals-journey.e2e.test.ts` | 2 | categories, users |
| `src/tests/e2e/user-journey.e2e.test.ts` | 1 | users/shopping-lists |
| `src/tests/e2e/flyer-upload.e2e.test.ts` | 1 | jobs |
## Update Pattern
### Find/Replace Rules
**Template literals** (most common):
```
OLD: .get(`/api/resource/${id}`)
NEW: .get(`/api/v1/resource/${id}`)
```
**String literals**:
```
OLD: .get('/api/resource')
NEW: .get('/api/v1/resource')
```
### Regex Pattern for Batch Updates
```regex
Find: (\.(get|post|put|delete|patch)\([`'"])/api/([a-z])
Replace: $1/api/v1/$3
```
**Explanation**: Captures HTTP method call, inserts `/v1/` after `/api/`.
## Files to EXCLUDE
These files intentionally test unversioned path behavior:
| File | Reason |
| ---------------------------------------------------- | ------------------------------------ |
| `src/routes/versioning.integration.test.ts` | Tests redirect behavior itself |
| `src/services/apiClient.test.ts` | Mock server URLs, not real API calls |
| `src/services/aiApiClient.test.ts` | Mock server URLs for MSW handlers |
| `src/services/googleGeocodingService.server.test.ts` | External Google API URL |
**Also exclude** (not API paths):
- Lines containing `vi.mock('@bull-board/api` (import mocks)
- Lines containing `/api/v99` (intentional unsupported version tests)
- `describe()` and `it()` block descriptions
- Comment lines (`// `)
## Execution Batches
### Batch 1: High-Impact Integration (4 files, ~58 occurrences)
```bash
# Files with most occurrences
src/tests/integration/inventory.integration.test.ts
src/tests/integration/receipt.integration.test.ts
src/tests/integration/recipe.integration.test.ts
src/tests/integration/user.routes.integration.test.ts
```
### Batch 2: Medium Integration (6 files, ~27 occurrences)
```bash
src/tests/integration/admin.integration.test.ts
src/tests/integration/flyer-processing.integration.test.ts
src/tests/integration/budget.integration.test.ts
src/tests/integration/notification.integration.test.ts
src/tests/integration/data-integrity.integration.test.ts
src/tests/integration/upc.integration.test.ts
```
### Batch 3: Low Integration (6 files, ~10 occurrences)
```bash
src/tests/integration/edge-cases.integration.test.ts
src/tests/integration/user.integration.test.ts
src/tests/integration/public.routes.integration.test.ts
src/tests/integration/flyer.integration.test.ts
src/tests/integration/category.routes.test.ts
src/tests/integration/gamification.integration.test.ts
```
### Batch 4: E2E Tests (7 files, ~31 occurrences)
```bash
src/tests/e2e/inventory-journey.e2e.test.ts
src/tests/e2e/receipt-journey.e2e.test.ts
src/tests/e2e/budget-journey.e2e.test.ts
src/tests/e2e/upc-journey.e2e.test.ts
src/tests/e2e/deals-journey.e2e.test.ts
src/tests/e2e/user-journey.e2e.test.ts
src/tests/e2e/flyer-upload.e2e.test.ts
```
## Verification Strategy
### Per-Batch Verification
After each batch:
```bash
# Type check
podman exec -it flyer-crawler-dev npm run type-check
# Run specific test file
podman exec -it flyer-crawler-dev npx vitest run <file-path> --reporter=verbose
```
### Full Verification
After all batches:
```bash
# Full integration test suite
podman exec -it flyer-crawler-dev npm run test:integration
# Full E2E test suite
podman exec -it flyer-crawler-dev npm run test:e2e
```
### Success Criteria
- [x] No `301 Moved Permanently` responses in test output
- [x] All tests pass or fail for expected reasons (not redirect-related)
- [x] Type check passes
- [x] No regressions in unmodified tests
## Edge Cases
### Describe Block Text
Do NOT modify describe/it block descriptions:
```typescript
// KEEP AS-IS (documentation only):
describe('GET /api/users/profile', () => { ... });
// UPDATE (actual API call):
const response = await request.get('/api/v1/users/profile');
```
### Console Logging
Do NOT modify debug/error logging paths:
```typescript
// KEEP AS-IS:
console.error('[DEBUG] GET /api/admin/stats failed:', ...);
```
### Query Parameters
Include query parameters in update:
```typescript
// OLD:
.get(`/api/budgets/spending-analysis?startDate=${start}&endDate=${end}`)
// NEW:
.get(`/api/v1/budgets/spending-analysis?startDate=${start}&endDate=${end}`)
```
## Post-Completion Checklist
- [x] All 23 files updated
- [x] ~70 path occurrences migrated
- [x] Exclusion files unchanged
- [x] Type check passes
- [x] Integration tests pass (345/348)
- [x] E2E tests pass
- [x] Commit with message: `fix(tests): Update API paths to use /api/v1/ prefix (ADR-008)`
## Rollback
If issues arise:
```bash
git checkout HEAD -- src/tests/
```
## Related Documentation
- ADR-008: API Versioning Strategy
- `docs/architecture/api-versioning-infrastructure.md`
- `src/routes/versioning.integration.test.ts` (reference for expected behavior)

View File

@@ -2,134 +2,259 @@
Complete guide to environment variables used in Flyer Crawler.
---
## Quick Reference
### Minimum Required Variables (Development)
| Variable | Example | Purpose |
| ---------------- | ------------------------ | -------------------- |
| `DB_HOST` | `localhost` | PostgreSQL host |
| `DB_USER` | `postgres` | PostgreSQL username |
| `DB_PASSWORD` | `postgres` | PostgreSQL password |
| `DB_NAME` | `flyer_crawler_dev` | Database name |
| `REDIS_URL` | `redis://localhost:6379` | Redis connection URL |
| `JWT_SECRET` | (32+ character string) | JWT signing key |
| `GEMINI_API_KEY` | `AIzaSy...` | Google Gemini API |
### Source of Truth
The Zod schema at `src/config/env.ts` is the authoritative source for all environment variables. If a variable is not in this file, it is not used by the application.
---
## Configuration by Environment
### Production
**Location**: Gitea CI/CD secrets injected during deployment
**Path**: `/var/www/flyer-crawler.projectium.com/`
**Note**: No `.env` file exists - all variables come from CI/CD
| Aspect | Details |
| -------- | ------------------------------------------ |
| Location | Gitea CI/CD secrets injected at deployment |
| Path | `/var/www/flyer-crawler.projectium.com/` |
| File | No `.env` file - all from CI/CD secrets |
### Test
**Location**: Gitea CI/CD secrets + `.env.test` file
**Path**: `/var/www/flyer-crawler-test.projectium.com/`
**Note**: `.env.test` overrides for test-specific values
| Aspect | Details |
| -------- | --------------------------------------------- |
| Location | Gitea CI/CD secrets + `.env.test` overrides |
| Path | `/var/www/flyer-crawler-test.projectium.com/` |
| File | `.env.test` for test-specific values |
### Development Container
**Location**: `.env.local` file in project root
**Note**: Overrides default DSNs in `compose.dev.yml`
| Aspect | Details |
| -------- | --------------------------------------- |
| Location | `.env.local` file in project root |
| Priority | Overrides defaults in `compose.dev.yml` |
| File | `.env.local` (gitignored) |
## Required Variables
---
### Database
## Complete Variable Reference
| Variable | Description | Example |
| ------------------ | ---------------------------- | ------------------------------------------ |
| `DB_HOST` | PostgreSQL host | `localhost` (dev), `projectium.com` (prod) |
| `DB_PORT` | PostgreSQL port | `5432` |
| `DB_USER_PROD` | Production database user | `flyer_crawler_prod` |
| `DB_PASSWORD_PROD` | Production database password | (secret) |
| `DB_DATABASE_PROD` | Production database name | `flyer-crawler-prod` |
| `DB_USER_TEST` | Test database user | `flyer_crawler_test` |
| `DB_PASSWORD_TEST` | Test database password | (secret) |
| `DB_DATABASE_TEST` | Test database name | `flyer-crawler-test` |
| `DB_USER` | Dev database user | `postgres` |
| `DB_PASSWORD` | Dev database password | `postgres` |
| `DB_NAME` | Dev database name | `flyer_crawler_dev` |
### Database Configuration
**Note**: Production and test use separate `_PROD` and `_TEST` suffixed variables. Development uses unsuffixed variables.
| Variable | Required | Default | Description |
| ------------- | -------- | ------- | ----------------- |
| `DB_HOST` | Yes | - | PostgreSQL host |
| `DB_PORT` | No | `5432` | PostgreSQL port |
| `DB_USER` | Yes | - | Database username |
| `DB_PASSWORD` | Yes | - | Database password |
| `DB_NAME` | Yes | - | Database name |
### Redis
**Environment-Specific Variables** (Gitea Secrets):
| Variable | Description | Example |
| --------------------- | ------------------------- | ------------------------------ |
| `REDIS_URL` | Redis connection URL | `redis://localhost:6379` (dev) |
| `REDIS_PASSWORD_PROD` | Production Redis password | (secret) |
| `REDIS_PASSWORD_TEST` | Test Redis password | (secret) |
| Variable | Environment | Description |
| ------------------ | ----------- | ------------------------ |
| `DB_USER_PROD` | Production | Production database user |
| `DB_PASSWORD_PROD` | Production | Production database pass |
| `DB_DATABASE_PROD` | Production | Production database name |
| `DB_USER_TEST` | Test | Test database user |
| `DB_PASSWORD_TEST` | Test | Test database password |
| `DB_DATABASE_TEST` | Test | Test database name |
### Redis Configuration
| Variable | Required | Default | Description |
| ---------------- | -------- | ------- | ------------------------- |
| `REDIS_URL` | Yes | - | Redis connection URL |
| `REDIS_PASSWORD` | No | - | Redis password (optional) |
**URL Format**: `redis://[user:password@]host:port`
**Examples**:
```bash
# Development (no auth)
REDIS_URL=redis://localhost:6379
# Production (with auth)
REDIS_URL=redis://:${REDIS_PASSWORD_PROD}@localhost:6379
```
### Authentication
| Variable | Description | Example |
| ---------------------- | -------------------------- | -------------------------------- |
| `JWT_SECRET` | JWT token signing key | (minimum 32 characters) |
| `SESSION_SECRET` | Session encryption key | (minimum 32 characters) |
| `GOOGLE_CLIENT_ID` | Google OAuth client ID | `xxx.apps.googleusercontent.com` |
| `GOOGLE_CLIENT_SECRET` | Google OAuth client secret | (secret) |
| `GH_CLIENT_ID` | GitHub OAuth client ID | `xxx` |
| `GH_CLIENT_SECRET` | GitHub OAuth client secret | (secret) |
| Variable | Required | Min Length | Description |
| ---------------------- | -------- | ---------- | ----------------------- |
| `JWT_SECRET` | Yes | 32 chars | JWT token signing key |
| `JWT_SECRET_PREVIOUS` | No | - | Previous key (rotation) |
| `GOOGLE_CLIENT_ID` | No | - | Google OAuth client ID |
| `GOOGLE_CLIENT_SECRET` | No | - | Google OAuth secret |
| `GITHUB_CLIENT_ID` | No | - | GitHub OAuth client ID |
| `GITHUB_CLIENT_SECRET` | No | - | GitHub OAuth secret |
**Generate Secure Secret**:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### AI Services
| Variable | Description | Example |
| -------------------------------- | ---------------------------- | ----------- |
| `VITE_GOOGLE_GENAI_API_KEY` | Google Gemini API key (prod) | `AIzaSy...` |
| `VITE_GOOGLE_GENAI_API_KEY_TEST` | Google Gemini API key (test) | `AIzaSy...` |
| `GOOGLE_MAPS_API_KEY` | Google Maps Geocoding API | `AIzaSy...` |
| Variable | Required | Description |
| ---------------------------- | -------- | -------------------------------- |
| `GEMINI_API_KEY` | Yes\* | Google Gemini API key |
| `GEMINI_RPM` | No | Rate limit (default: 5) |
| `AI_PRICE_QUALITY_THRESHOLD` | No | Quality threshold (default: 0.5) |
### Application
\*Required for flyer processing. Application works without it but cannot extract flyer data.
| Variable | Description | Example |
| -------------- | ------------------------ | ----------------------------------- |
| `NODE_ENV` | Environment mode | `development`, `test`, `production` |
| `PORT` | Backend server port | `3001` |
| `FRONTEND_URL` | Frontend application URL | `http://localhost:5173` (dev) |
**Get API Key**: [Google AI Studio](https://aistudio.google.com/app/apikey)
### Error Tracking
### Google Services
| Variable | Description | Example |
| ---------------------- | -------------------------------- | --------------------------- |
| `SENTRY_DSN` | Sentry DSN (production) | `https://xxx@sentry.io/xxx` |
| `VITE_SENTRY_DSN` | Frontend Sentry DSN (production) | `https://xxx@sentry.io/xxx` |
| `SENTRY_DSN_TEST` | Sentry DSN (test) | `https://xxx@sentry.io/xxx` |
| `VITE_SENTRY_DSN_TEST` | Frontend Sentry DSN (test) | `https://xxx@sentry.io/xxx` |
| `SENTRY_AUTH_TOKEN` | Sentry API token for releases | (secret) |
| Variable | Required | Description |
| ---------------------- | -------- | -------------------------------- |
| `GOOGLE_MAPS_API_KEY` | No | Google Maps Geocoding API |
| `GOOGLE_CLIENT_ID` | No | OAuth (see Authentication above) |
| `GOOGLE_CLIENT_SECRET` | No | OAuth (see Authentication above) |
## Optional Variables
### UPC Lookup APIs
| Variable | Description | Default |
| ------------------- | ----------------------- | ----------------- |
| `LOG_LEVEL` | Logging verbosity | `info` |
| `REDIS_TTL` | Cache TTL in seconds | `3600` |
| `MAX_UPLOAD_SIZE` | Max file upload size | `10mb` |
| `RATE_LIMIT_WINDOW` | Rate limit window (ms) | `900000` (15 min) |
| `RATE_LIMIT_MAX` | Max requests per window | `100` |
| Variable | Required | Description |
| ------------------------ | -------- | ---------------------- |
| `UPC_ITEM_DB_API_KEY` | No | UPC Item DB API key |
| `BARCODE_LOOKUP_API_KEY` | No | Barcode Lookup API key |
### Application Settings
| Variable | Required | Default | Description |
| -------------- | -------- | ------------- | ------------------------ |
| `NODE_ENV` | No | `development` | Environment mode |
| `PORT` | No | `3001` | Backend server port |
| `FRONTEND_URL` | No | - | Frontend URL (CORS) |
| `BASE_URL` | No | - | API base URL |
| `STORAGE_PATH` | No | (see below) | Flyer image storage path |
**NODE_ENV Values**: `development`, `test`, `staging`, `production`
**Default STORAGE_PATH**: `/var/www/flyer-crawler.projectium.com/flyer-images`
### Email/SMTP Configuration
| Variable | Required | Default | Description |
| ----------------- | -------- | ------- | ----------------------- |
| `SMTP_HOST` | No | - | SMTP server hostname |
| `SMTP_PORT` | No | `587` | SMTP server port |
| `SMTP_USER` | No | - | SMTP username |
| `SMTP_PASS` | No | - | SMTP password |
| `SMTP_SECURE` | No | `false` | Use TLS |
| `SMTP_FROM_EMAIL` | No | - | From address for emails |
**Note**: Email functionality degrades gracefully if not configured.
### Worker Configuration
| Variable | Default | Description |
| ------------------------------------- | ------- | ---------------------------- |
| `WORKER_CONCURRENCY` | `1` | Main worker concurrency |
| `WORKER_LOCK_DURATION` | `30000` | Lock duration (ms) |
| `EMAIL_WORKER_CONCURRENCY` | `10` | Email worker concurrency |
| `ANALYTICS_WORKER_CONCURRENCY` | `1` | Analytics worker concurrency |
| `CLEANUP_WORKER_CONCURRENCY` | `10` | Cleanup worker concurrency |
| `WEEKLY_ANALYTICS_WORKER_CONCURRENCY` | `1` | Weekly analytics concurrency |
### Error Tracking (Bugsink/Sentry)
| Variable | Required | Default | Description |
| --------------------- | -------- | -------- | ------------------------------- |
| `SENTRY_DSN` | No | - | Backend Sentry DSN |
| `SENTRY_ENABLED` | No | `true` | Enable error tracking |
| `SENTRY_ENVIRONMENT` | No | NODE_ENV | Environment name for errors |
| `SENTRY_DEBUG` | No | `false` | Enable Sentry SDK debug logging |
| `VITE_SENTRY_DSN` | No | - | Frontend Sentry DSN |
| `VITE_SENTRY_ENABLED` | No | `true` | Enable frontend error tracking |
| `VITE_SENTRY_DEBUG` | No | `false` | Frontend SDK debug logging |
**DSN Format**: `http://[key]@[host]:[port]/[project_id]`
**Dev Container DSNs**:
```bash
# Backend (internal)
SENTRY_DSN=http://<key>@localhost:8000/1
# Frontend (via nginx proxy)
VITE_SENTRY_DSN=https://<key>@localhost/bugsink-api/2
```
---
## Configuration Files
| File | Purpose |
| ------------------------------------- | ------------------------------------------- |
| `src/config/env.ts` | Zod schema validation - **source of truth** |
| `ecosystem.config.cjs` | PM2 process manager config |
| `ecosystem.config.cjs` | PM2 process manager (production) |
| `ecosystem.dev.config.cjs` | PM2 process manager (development) |
| `.gitea/workflows/deploy-to-prod.yml` | Production deployment workflow |
| `.gitea/workflows/deploy-to-test.yml` | Test deployment workflow |
| `.env.example` | Template with all variables |
| `.env.local` | Dev container overrides (not in git) |
| `.env.test` | Test environment overrides (not in git) |
---
## Adding New Variables
### 1. Update Zod Schema
### Checklist
1. [ ] **Update Zod Schema** - Edit `src/config/env.ts`
2. [ ] **Add to Gitea Secrets** - For prod/test environments
3. [ ] **Update Deployment Workflows** - `.gitea/workflows/*.yml`
4. [ ] **Update PM2 Config** - `ecosystem.config.cjs`
5. [ ] **Update .env.example** - Template for developers
6. [ ] **Update this document** - Add to appropriate section
### Step-by-Step
#### 1. Update Zod Schema
Edit `src/config/env.ts`:
```typescript
const envSchema = z.object({
// ... existing variables ...
NEW_VARIABLE: z.string().min(1),
newSection: z.object({
newVariable: z.string().min(1, 'NEW_VARIABLE is required'),
}),
});
// In loadEnvVars():
newSection: {
newVariable: process.env.NEW_VARIABLE,
},
```
### 2. Add to Gitea Secrets
For prod/test environments:
#### 2. Add to Gitea Secrets
1. Go to Gitea repository Settings > Secrets
2. Add `NEW_VARIABLE` with value
2. Add `NEW_VARIABLE` with production value
3. Add `NEW_VARIABLE_TEST` if test needs different value
### 3. Update Deployment Workflows
#### 3. Update Deployment Workflows
Edit `.gitea/workflows/deploy-to-prod.yml`:
@@ -145,7 +270,7 @@ env:
NEW_VARIABLE: ${{ secrets.NEW_VARIABLE_TEST }}
```
### 4. Update PM2 Config
#### 4. Update PM2 Config
Edit `ecosystem.config.cjs`:
@@ -161,31 +286,36 @@ module.exports = {
};
```
### 5. Update Documentation
- Add to `.env.example`
- Update this document
- Document in relevant feature docs
---
## Security Best Practices
### Secrets Management
### Do
- **NEVER** commit secrets to git
- Use Gitea Secrets for prod/test
- Use `.env.local` for dev (gitignored)
- Generate secrets with cryptographic randomness
- Rotate secrets regularly
- Use environment-specific database users
### Do Not
- Commit secrets to git
- Use short or predictable secrets
- Share secrets across environments
- Log sensitive values
### Secret Generation
```bash
# Generate secure random secrets
# Generate secure random secrets (64 hex characters)
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
# Example output:
# a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2
```
### Database Users
Each environment has its own PostgreSQL user:
### Database Users by Environment
| Environment | User | Database |
| ----------- | -------------------- | -------------------- |
@@ -193,44 +323,61 @@ Each environment has its own PostgreSQL user:
| Test | `flyer_crawler_test` | `flyer-crawler-test` |
| Development | `postgres` | `flyer_crawler_dev` |
**Setup Commands** (as postgres superuser):
```sql
-- Production
CREATE DATABASE "flyer-crawler-prod";
CREATE USER flyer_crawler_prod WITH PASSWORD 'secure-password';
ALTER DATABASE "flyer-crawler-prod" OWNER TO flyer_crawler_prod;
\c "flyer-crawler-prod"
ALTER SCHEMA public OWNER TO flyer_crawler_prod;
GRANT CREATE, USAGE ON SCHEMA public TO flyer_crawler_prod;
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
-- Test (similar commands with _test suffix)
```
---
## Validation
Environment variables are validated at startup via `src/config/env.ts`. If validation fails:
Environment variables are validated at startup via `src/config/env.ts`.
1. Check the error message for missing/invalid variables
2. Verify `.env.local` (dev) or Gitea Secrets (prod/test)
3. Ensure values match schema requirements (min length, format, etc.)
### Startup Validation
If validation fails, you will see:
```text
╔════════════════════════════════════════════════════════════════╗
║ CONFIGURATION ERROR - APPLICATION STARTUP ║
╚════════════════════════════════════════════════════════════════╝
The following environment variables are missing or invalid:
- database.host: DB_HOST is required
- auth.jwtSecret: JWT_SECRET must be at least 32 characters
Please check your .env file or environment configuration.
```
### Debugging Configuration
```bash
# Check what variables are set (dev container)
podman exec flyer-crawler-dev env | grep -E "^(DB_|REDIS_|JWT_|SENTRY_)"
# Test database connection
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "SELECT 1;"
# Test Redis connection
podman exec flyer-crawler-redis redis-cli ping
```
---
## Troubleshooting
### Variable Not Found
```
```text
Error: Missing required environment variable: JWT_SECRET
```
**Solution**: Add the variable to your environment configuration.
**Solutions**:
1. Check `.env.local` exists and has the variable
2. Verify variable name matches schema exactly
3. Restart the application after changes
### Invalid Value
```
```text
Error: JWT_SECRET must be at least 32 characters
```
@@ -240,32 +387,36 @@ Error: JWT_SECRET must be at least 32 characters
Check `NODE_ENV` is set correctly:
- `development` - Local dev container
- `test` - CI/CD test server
- `production` - Production server
| Value | Purpose |
| ------------- | ---------------------- |
| `development` | Local dev container |
| `test` | CI/CD test server |
| `staging` | Pre-production testing |
| `production` | Production server |
### Database Connection Issues
Verify database credentials:
```bash
# Development
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "SELECT 1;"
# Production (via SSH)
ssh root@projectium.com "psql -U flyer_crawler_prod -d flyer-crawler-prod -c 'SELECT 1;'"
# If connection fails, check:
# 1. Container is running: podman ps
# 2. DB_HOST matches container network
# 3. DB_PASSWORD is correct
```
## Reference
---
- **Validation Schema**: [src/config/env.ts](../../src/config/env.ts)
- **Template**: [.env.example](../../.env.example)
- **Deployment Workflows**: [.gitea/workflows/](../../.gitea/workflows/)
- **PM2 Config**: [ecosystem.config.cjs](../../ecosystem.config.cjs)
## See Also
## Related Documentation
- [QUICKSTART.md](QUICKSTART.md) - Quick setup guide
- [INSTALL.md](INSTALL.md) - Detailed installation
- [DEV-CONTAINER.md](../development/DEV-CONTAINER.md) - Dev container setup
- [DEPLOYMENT.md](../operations/DEPLOYMENT.md) - Production deployment
- [AUTHENTICATION.md](../architecture/AUTHENTICATION.md) - OAuth setup
- [ADR-007](../adr/0007-configuration-and-secrets-management.md) - Configuration decisions
---
Last updated: January 2026

View File

@@ -1,178 +1,453 @@
# Installation Guide
This guide covers setting up a local development environment for Flyer Crawler.
Complete setup instructions for the Flyer Crawler local development environment.
---
## Quick Reference
| Setup Method | Best For | Time | Document Section |
| ----------------- | --------------------------- | ------ | --------------------------------------------------- |
| Quick Start | Already have Postgres/Redis | 5 min | [Quick Start](#quick-start) |
| Dev Container | Full production-like setup | 15 min | [Dev Container](#development-container-recommended) |
| Manual Containers | Learning the components | 20 min | [Podman Setup](#podman-setup-manual) |
---
## Prerequisites
- Node.js 20.x or later
- Access to a PostgreSQL database (local or remote)
- Redis instance (for session management)
- Google Gemini API key
- Google Maps API key (for geocoding)
### Required Software
| Software | Minimum Version | Purpose | Download |
| -------------- | --------------- | -------------------- | ----------------------------------------------- |
| Node.js | 20.x | Runtime | [nodejs.org](https://nodejs.org/) |
| Podman Desktop | 4.x | Container management | [podman-desktop.io](https://podman-desktop.io/) |
| Git | 2.x | Version control | [git-scm.com](https://git-scm.com/) |
### Windows-Specific Requirements
| Requirement | Purpose | Setup Command |
| ----------- | ------------------------------ | ---------------------------------- |
| WSL 2 | Linux compatibility for Podman | `wsl --install` (admin PowerShell) |
### Verify Installation
```bash
# Check all prerequisites
node --version # Expected: v20.x or higher
podman --version # Expected: podman version 4.x or higher
git --version # Expected: git version 2.x or higher
wsl --list -v # Expected: Shows WSL 2 distro
```
---
## Quick Start
If you already have PostgreSQL and Redis configured:
If you already have PostgreSQL and Redis configured externally:
```bash
# Install dependencies
# 1. Clone the repository
git clone https://gitea.projectium.com/flyer-crawler/flyer-crawler.git
cd flyer-crawler
# 2. Install dependencies
npm install
# Run in development mode
# 3. Create .env.local (see Environment section below)
# 4. Run in development mode
npm run dev
```
**Access Points**:
- Frontend: `http://localhost:5173`
- Backend API: `http://localhost:3001`
---
## Development Environment with Podman (Recommended for Windows)
## Development Container (Recommended)
This approach uses Podman with an Ubuntu container for a consistent development environment.
The dev container provides a complete, production-like environment.
### What's Included
| Service | Purpose | Port |
| ---------- | ------------------------ | ---------- |
| Node.js | API server, worker, Vite | 3001, 5173 |
| PostgreSQL | Database with PostGIS | 5432 |
| Redis | Cache and job queues | 6379 |
| NGINX | HTTPS reverse proxy | 443 |
| Bugsink | Error tracking | 8443 |
| Logstash | Log aggregation | - |
| PM2 | Process management | - |
### Setup Steps
#### Step 1: Initialize Podman
```bash
# Windows: Start Podman Desktop, or from terminal:
podman machine init
podman machine start
```
#### Step 2: Start Dev Container
```bash
# Start all services
podman-compose -f compose.dev.yml up -d
# View logs (optional)
podman-compose -f compose.dev.yml logs -f
```
**Expected Output**:
```text
[+] Running 3/3
- Container flyer-crawler-postgres Started
- Container flyer-crawler-redis Started
- Container flyer-crawler-dev Started
```
#### Step 3: Verify Services
```bash
# Check containers are running
podman ps
# Check PM2 processes
podman exec -it flyer-crawler-dev pm2 status
```
**Expected PM2 Status**:
```text
+---------------------------+--------+-------+
| name | status | cpu |
+---------------------------+--------+-------+
| flyer-crawler-api-dev | online | 0% |
| flyer-crawler-worker-dev | online | 0% |
| flyer-crawler-vite-dev | online | 0% |
+---------------------------+--------+-------+
```
#### Step 4: Access Application
| Service | URL | Notes |
| ----------- | ------------------------ | ---------------------------- |
| Frontend | `https://localhost` | NGINX proxies to Vite |
| Backend API | `http://localhost:3001` | Express server |
| Bugsink | `https://localhost:8443` | Login: admin@localhost/admin |
### SSL Certificate Setup (Optional but Recommended)
To eliminate browser security warnings:
**Windows**:
1. Double-click `certs/mkcert-ca.crt`
2. Click "Install Certificate..."
3. Select "Local Machine" > Next
4. Select "Place all certificates in the following store"
5. Browse > Select "Trusted Root Certification Authorities" > OK
6. Click Next > Finish
7. Restart browser
**Other Platforms**: See [`certs/README.md`](../../certs/README.md)
### Managing the Dev Container
| Action | Command |
| --------- | ------------------------------------------- |
| Start | `podman-compose -f compose.dev.yml up -d` |
| Stop | `podman-compose -f compose.dev.yml down` |
| View logs | `podman-compose -f compose.dev.yml logs -f` |
| Restart | `podman-compose -f compose.dev.yml restart` |
| Rebuild | `podman-compose -f compose.dev.yml build` |
---
## Podman Setup (Manual)
For understanding the individual components or custom configurations.
### Step 1: Install Prerequisites on Windows
1. **Install WSL 2**: Podman on Windows relies on the Windows Subsystem for Linux.
```powershell
# Run in administrator PowerShell
wsl --install
```
```powershell
wsl --install
```
Restart computer after WSL installation.
Run this in an administrator PowerShell.
### Step 2: Initialize Podman
2. **Install Podman Desktop**: Download and install [Podman Desktop for Windows](https://podman-desktop.io/).
1. Launch **Podman Desktop**
2. Follow the setup wizard to initialize Podman machine
3. Start the Podman machine
### Step 2: Set Up Podman
1. **Initialize Podman**: Launch Podman Desktop. It will automatically set up its WSL 2 machine.
2. **Start Podman**: Ensure the Podman machine is running from the Podman Desktop interface.
### Step 3: Set Up the Ubuntu Container
1. **Pull Ubuntu Image**:
```bash
podman pull ubuntu:latest
```
2. **Create a Podman Volume** (persists node_modules between container restarts):
```bash
podman volume create node_modules_cache
```
3. **Run the Ubuntu Container**:
Open a terminal in your project's root directory and run:
```bash
podman run -it -p 3001:3001 -p 5173:5173 --name flyer-dev \
-v "$(pwd):/app" \
-v "node_modules_cache:/app/node_modules" \
ubuntu:latest
```
| Flag | Purpose |
| ------------------------------------------- | ------------------------------------------------ |
| `-p 3001:3001` | Forwards the backend server port |
| `-p 5173:5173` | Forwards the Vite frontend server port |
| `--name flyer-dev` | Names the container for easy reference |
| `-v "...:/app"` | Mounts your project directory into the container |
| `-v "node_modules_cache:/app/node_modules"` | Mounts the named volume for node_modules |
### Step 4: Configure the Ubuntu Environment
You are now inside the Ubuntu container's shell.
1. **Update Package Lists**:
```bash
apt-get update
```
2. **Install Dependencies**:
```bash
apt-get install -y curl git
curl -sL https://deb.nodesource.com/setup_20.x | bash -
apt-get install -y nodejs
```
3. **Navigate to Project Directory**:
```bash
cd /app
```
4. **Install Project Dependencies**:
```bash
npm install
```
### Step 5: Run the Development Server
Or from terminal:
```bash
podman machine init
podman machine start
```
### Step 3: Create Podman Network
```bash
podman network create flyer-crawler-net
```
### Step 4: Create PostgreSQL Container
```bash
podman run -d \
--name flyer-crawler-postgres \
--network flyer-crawler-net \
-e POSTGRES_USER=postgres \
-e POSTGRES_PASSWORD=postgres \
-e POSTGRES_DB=flyer_crawler_dev \
-p 5432:5432 \
-v flyer-crawler-pgdata:/var/lib/postgresql/data \
docker.io/postgis/postgis:15-3.3
```
### Step 5: Create Redis Container
```bash
podman run -d \
--name flyer-crawler-redis \
--network flyer-crawler-net \
-p 6379:6379 \
-v flyer-crawler-redis:/data \
docker.io/library/redis:alpine
```
### Step 6: Initialize Database
```bash
# Wait for PostgreSQL to be ready
podman exec flyer-crawler-postgres pg_isready -U postgres
# Install required extensions
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";
"
# Apply schema
podman exec -i flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev < sql/master_schema_rollup.sql
```
### Step 7: Create Node.js Container
```bash
# Create volume for node_modules
podman volume create node_modules_cache
# Run Ubuntu container with project mounted
podman run -it \
--name flyer-dev \
--network flyer-crawler-net \
-p 3001:3001 \
-p 5173:5173 \
-v "$(pwd):/app" \
-v "node_modules_cache:/app/node_modules" \
ubuntu:latest
```
### Step 8: Configure Container Environment
Inside the container:
```bash
# Update and install dependencies
apt-get update
apt-get install -y curl git
# Install Node.js 20
curl -sL https://deb.nodesource.com/setup_20.x | bash -
apt-get install -y nodejs
# Navigate to project and install
cd /app
npm install
# Start development server
npm run dev
```
### Step 6: Access the Application
### Container Management Commands
- **Frontend**: http://localhost:5173
- **Backend API**: http://localhost:3001
### Managing the Container
| Action | Command |
| --------------------- | -------------------------------- |
| Stop the container | Press `Ctrl+C`, then type `exit` |
| Restart the container | `podman start -a -i flyer-dev` |
| Remove the container | `podman rm flyer-dev` |
| Action | Command |
| -------------- | ------------------------------ |
| Stop container | Press `Ctrl+C`, then `exit` |
| Restart | `podman start -a -i flyer-dev` |
| Remove | `podman rm flyer-dev` |
| List running | `podman ps` |
| List all | `podman ps -a` |
---
## Environment Variables
## Environment Configuration
This project is configured to run in a CI/CD environment and does not use `.env` files. All configuration must be provided as environment variables.
### Create .env.local
For local development, you can export these in your shell or use your IDE's environment configuration:
Create `.env.local` in the project root with your configuration:
| Variable | Description |
| --------------------------- | ------------------------------------- |
| `DB_HOST` | PostgreSQL server hostname |
| `DB_USER` | PostgreSQL username |
| `DB_PASSWORD` | PostgreSQL password |
| `DB_DATABASE_PROD` | Production database name |
| `JWT_SECRET` | Secret string for signing auth tokens |
| `VITE_GOOGLE_GENAI_API_KEY` | Google Gemini API key |
| `GOOGLE_MAPS_API_KEY` | Google Maps Geocoding API key |
| `REDIS_PASSWORD_PROD` | Production Redis password |
| `REDIS_PASSWORD_TEST` | Test Redis password |
```bash
# Database (adjust host based on your setup)
DB_HOST=localhost # Use 'postgres' if inside dev container
DB_PORT=5432
DB_USER=postgres
DB_PASSWORD=postgres
DB_NAME=flyer_crawler_dev
# Redis (adjust host based on your setup)
REDIS_URL=redis://localhost:6379 # Use 'redis://redis:6379' inside container
# Application
NODE_ENV=development
PORT=3001
FRONTEND_URL=http://localhost:5173
# Authentication (generate secure values)
JWT_SECRET=your-secret-at-least-32-characters-long
# AI Services
GEMINI_API_KEY=your-google-gemini-api-key
GOOGLE_MAPS_API_KEY=your-google-maps-api-key # Optional
```
**Generate Secure Secrets**:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### Environment Differences
| Variable | Host Development | Inside Dev Container |
| ----------- | ------------------------ | -------------------- |
| `DB_HOST` | `localhost` | `postgres` |
| `REDIS_URL` | `redis://localhost:6379` | `redis://redis:6379` |
See [ENVIRONMENT.md](ENVIRONMENT.md) for complete variable reference.
---
## Seeding Development Data
To create initial test accounts (`admin@example.com` and `user@example.com`) and sample data:
Create test accounts and sample data:
```bash
npm run seed
```
The seed script performs the following actions:
### What the Seed Script Does
1. Rebuilds the database schema from `sql/master_schema_rollup.sql`
2. Creates test user accounts (admin and regular user)
3. Copies test flyer images from `src/tests/assets/` to `public/flyer-images/`
4. Creates a sample flyer with items linked to the test images
5. Seeds watched items and a shopping list for the test user
1. Rebuilds database schema from `sql/master_schema_rollup.sql`
2. Creates test user accounts:
- `admin@example.com` (admin user)
- `user@example.com` (regular user)
3. Copies test flyer images to `public/flyer-images/`
4. Creates sample flyer with items
5. Seeds watched items and shopping list
**Test Images**: The seed script copies `test-flyer-image.jpg` and `test-flyer-icon.png` to the `public/flyer-images/` directory, which is served by NGINX at `/flyer-images/`.
### Test Images
After running, you may need to restart your IDE's TypeScript server to pick up any generated types.
The seed script copies these files from `src/tests/assets/`:
- `test-flyer-image.jpg`
- `test-flyer-icon.png`
Images are served by NGINX at `/flyer-images/`.
---
## Verification Checklist
After installation, verify everything works:
- [ ] **Containers running**: `podman ps` shows postgres and redis
- [ ] **Database accessible**: `podman exec flyer-crawler-postgres psql -U postgres -c "SELECT 1;"`
- [ ] **Frontend loads**: Open `http://localhost:5173` (or `https://localhost` for dev container)
- [ ] **API responds**: `curl http://localhost:3001/health`
- [ ] **Tests pass**: `npm run test:unit` (or in container: `podman exec -it flyer-crawler-dev npm run test:unit`)
- [ ] **Type check passes**: `npm run type-check`
---
## Troubleshooting
### Podman Machine Won't Start
```bash
# Reset Podman machine
podman machine rm
podman machine init
podman machine start
```
### Port Already in Use
```bash
# Find process using port
netstat -ano | findstr :5432
# Option: Use different port
podman run -d --name flyer-crawler-postgres -p 5433:5432 ...
# Then set DB_PORT=5433 in .env.local
```
### Database Extensions Missing
```bash
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "
CREATE EXTENSION IF NOT EXISTS postgis;
CREATE EXTENSION IF NOT EXISTS pg_trgm;
CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";
"
```
### Permission Denied on Windows Paths
Use `MSYS_NO_PATHCONV=1` prefix:
```bash
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev /path/to/script.sh
```
### Tests Fail with Timezone Errors
Tests must run in the dev container, not on Windows host:
```bash
# CORRECT
podman exec -it flyer-crawler-dev npm test
# INCORRECT (may fail with TZ errors)
npm test
```
---
## Next Steps
- [Database Setup](DATABASE.md) - Set up PostgreSQL with required extensions
- [Authentication Setup](AUTHENTICATION.md) - Configure OAuth providers
- [Deployment Guide](DEPLOYMENT.md) - Deploy to production
| Goal | Document |
| --------------------- | ------------------------------------------------------ |
| Quick setup guide | [QUICKSTART.md](QUICKSTART.md) |
| Environment variables | [ENVIRONMENT.md](ENVIRONMENT.md) |
| Database schema | [DATABASE.md](../architecture/DATABASE.md) |
| Authentication setup | [AUTHENTICATION.md](../architecture/AUTHENTICATION.md) |
| Dev container details | [DEV-CONTAINER.md](../development/DEV-CONTAINER.md) |
| Deployment | [DEPLOYMENT.md](../operations/DEPLOYMENT.md) |
---
Last updated: January 2026

View File

@@ -2,13 +2,38 @@
Get Flyer Crawler running in 5 minutes.
## Prerequisites
---
- **Windows 10/11** with WSL 2
- **Podman Desktop** installed
- **Node.js 20+** installed
## Prerequisites Checklist
## 1. Start Containers (1 minute)
Before starting, verify you have:
- [ ] **Windows 10/11** with WSL 2 enabled
- [ ] **Podman Desktop** installed ([download](https://podman-desktop.io/))
- [ ] **Node.js 20+** installed
- [ ] **Git** for cloning the repository
**Verify Prerequisites**:
```bash
# Check Podman
podman --version
# Expected: podman version 4.x or higher
# Check Node.js
node --version
# Expected: v20.x or higher
# Check WSL
wsl --list --verbose
# Expected: Shows WSL 2 distro
```
---
## Quick Setup (5 Steps)
### Step 1: Start Containers (1 minute)
```bash
# Start PostgreSQL and Redis
@@ -27,11 +52,18 @@ podman run -d --name flyer-crawler-redis \
docker.io/library/redis:alpine
```
## 2. Initialize Database (2 minutes)
**Expected Output**:
```text
# Container IDs displayed, no errors
```
### Step 2: Initialize Database (2 minutes)
```bash
# Wait for PostgreSQL to be ready
podman exec flyer-crawler-postgres pg_isready -U postgres
# Expected: localhost:5432 - accepting connections
# Install extensions
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev \
@@ -41,7 +73,17 @@ podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev \
podman exec -i flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev < sql/master_schema_rollup.sql
```
## 3. Configure Environment (1 minute)
**Expected Output**:
```text
CREATE EXTENSION
CREATE EXTENSION
CREATE EXTENSION
CREATE TABLE
... (many tables created)
```
### Step 3: Configure Environment (1 minute)
Create `.env.local` in the project root:
@@ -61,16 +103,22 @@ NODE_ENV=development
PORT=3001
FRONTEND_URL=http://localhost:5173
# Secrets (generate your own)
# Secrets (generate your own - see command below)
JWT_SECRET=your-dev-jwt-secret-at-least-32-chars-long
SESSION_SECRET=your-dev-session-secret-at-least-32-chars-long
# AI Services (get your own keys)
VITE_GOOGLE_GENAI_API_KEY=your-google-genai-api-key
GEMINI_API_KEY=your-google-gemini-api-key
GOOGLE_MAPS_API_KEY=your-google-maps-api-key
```
## 4. Install & Run (1 minute)
**Generate Secure Secrets**:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### Step 4: Install and Run (1 minute)
```bash
# Install dependencies (first time only)
@@ -80,69 +128,222 @@ npm install
npm run dev
```
## 5. Access Application
**Expected Output**:
- **Frontend**: http://localhost:5173
- **Backend API**: http://localhost:3001
- **Health Check**: http://localhost:3001/health
```text
> flyer-crawler@x.x.x dev
> concurrently ...
## Verify Installation
[API] Server listening on port 3001
[Vite] VITE ready at http://localhost:5173
```
### Step 5: Verify Installation
| Check | URL/Command | Expected Result |
| ----------- | ------------------------------ | ----------------------------------- |
| Frontend | `http://localhost:5173` | Flyer Crawler app loads |
| Backend API | `http://localhost:3001/health` | `{ "status": "ok", ... }` |
| Database | `podman exec ... psql -c ...` | `SELECT version()` returns Postgres |
| Containers | `podman ps` | Shows postgres and redis running |
---
## Full Dev Container (Recommended)
For a production-like environment with NGINX, Bugsink error tracking, and PM2 process management:
### Starting the Dev Container
```bash
# Start all services
podman-compose -f compose.dev.yml up -d
# View logs
podman-compose -f compose.dev.yml logs -f
```
### Access Points
| Service | URL | Notes |
| ----------- | ------------------------ | ---------------------------- |
| Frontend | `https://localhost` | NGINX proxy to Vite |
| Backend API | `http://localhost:3001` | Express server |
| Bugsink | `https://localhost:8443` | Error tracking (admin/admin) |
| PostgreSQL | `localhost:5432` | Database |
| Redis | `localhost:6379` | Cache |
**SSL Certificate Setup (Recommended)**:
To eliminate browser security warnings, install the mkcert CA certificate:
```bash
# Windows: Double-click certs/mkcert-ca.crt and install to Trusted Root CAs
# See certs/README.md for detailed instructions per platform
```
### PM2 Commands
```bash
# View process status
podman exec -it flyer-crawler-dev pm2 status
# View logs (all processes)
podman exec -it flyer-crawler-dev pm2 logs
# Restart all processes
podman exec -it flyer-crawler-dev pm2 restart all
# Restart specific process
podman exec -it flyer-crawler-dev pm2 restart flyer-crawler-api-dev
```
### Dev Container Processes
| Process | Description | Port |
| -------------------------- | ------------------------ | ---- |
| `flyer-crawler-api-dev` | API server (tsx watch) | 3001 |
| `flyer-crawler-worker-dev` | Background job worker | - |
| `flyer-crawler-vite-dev` | Vite frontend dev server | 5173 |
---
## Verification Commands
Run these to confirm everything is working:
```bash
# Check containers are running
podman ps
# Expected: flyer-crawler-postgres and flyer-crawler-redis both running
# Test database connection
podman exec flyer-crawler-postgres psql -U postgres -d flyer_crawler_dev -c "SELECT version();"
# Expected: PostgreSQL 15.x with PostGIS
# Run tests (in dev container)
podman exec -it flyer-crawler-dev npm run test:unit
# Expected: All tests pass
# Run type check
podman exec -it flyer-crawler-dev npm run type-check
# Expected: No type errors
```
## Common Issues
---
## Common Issues and Solutions
### "Unable to connect to Podman socket"
**Cause**: Podman machine not running
**Solution**:
```bash
podman machine start
```
### "Connection refused" to PostgreSQL
Wait a few seconds for PostgreSQL to initialize:
**Cause**: PostgreSQL still initializing
**Solution**:
```bash
# Wait for PostgreSQL to be ready
podman exec flyer-crawler-postgres pg_isready -U postgres
# Retry after "accepting connections" message
```
### Port 5432 or 6379 already in use
Stop conflicting services or change port mappings:
**Cause**: Another service using the port
**Solution**:
```bash
# Use different host port
# Option 1: Stop conflicting service
# Option 2: Use different host port
podman run -d --name flyer-crawler-postgres -p 5433:5432 ...
# Then update DB_PORT=5433 in .env.local
```
Then update `DB_PORT=5433` in `.env.local`.
### "JWT_SECRET must be at least 32 characters"
**Cause**: Secret too short in .env.local
**Solution**: Generate a longer secret:
```bash
node -e "console.log(require('crypto').randomBytes(32).toString('hex'))"
```
### Tests fail with "TZ environment variable" errors
**Cause**: Timezone setting interfering with Node.js async hooks
**Solution**: Tests must run in dev container (not Windows host):
```bash
# CORRECT - run in container
podman exec -it flyer-crawler-dev npm test
# INCORRECT - do not run on Windows host
npm test
```
---
## Next Steps
- **Read the docs**: [docs/README.md](../README.md)
- **Understand the architecture**: [docs/architecture/DATABASE.md](../architecture/DATABASE.md)
- **Learn testing**: [docs/development/TESTING.md](../development/TESTING.md)
- **Explore ADRs**: [docs/adr/index.md](../adr/index.md)
- **Contributing**: [CONTRIBUTING.md](../../CONTRIBUTING.md)
| Goal | Document |
| ----------------------- | ----------------------------------------------------- |
| Understand the codebase | [Architecture Overview](../architecture/OVERVIEW.md) |
| Configure environment | [Environment Variables](ENVIRONMENT.md) |
| Set up MCP tools | [MCP Configuration](../tools/MCP-CONFIGURATION.md) |
| Learn testing | [Testing Guide](../development/TESTING.md) |
| Understand DB schema | [Database Documentation](../architecture/DATABASE.md) |
| Read ADRs | [ADR Index](../adr/index.md) |
| Full installation guide | [Installation Guide](INSTALL.md) |
## Development Workflow
---
## Daily Development Workflow
```bash
# Daily workflow
# 1. Start containers
podman start flyer-crawler-postgres flyer-crawler-redis
# 2. Start dev server
npm run dev
# ... make changes ...
# 3. Make changes and test
npm test
# 4. Type check before commit
npm run type-check
# 5. Commit changes
git commit
```
For detailed setup instructions, see [INSTALL.md](INSTALL.md).
**For dev container users**:
```bash
# 1. Start dev container
podman-compose -f compose.dev.yml up -d
# 2. View logs
podman exec -it flyer-crawler-dev pm2 logs
# 3. Run tests
podman exec -it flyer-crawler-dev npm test
# 4. Stop when done
podman-compose -f compose.dev.yml down
```
---
Last updated: January 2026

View File

@@ -2,8 +2,68 @@
This guide covers the manual installation of Flyer Crawler and its dependencies on a bare-metal Ubuntu server (e.g., a colocation server). This is the definitive reference for setting up a production environment without containers.
**Last verified**: 2026-01-28
**Target Environment**: Ubuntu 22.04 LTS (or newer)
**Related documentation**:
- [ADR-014: Containerization and Deployment Strategy](../adr/0014-containerization-and-deployment-strategy.md)
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [Deployment Guide](DEPLOYMENT.md)
- [Monitoring Guide](MONITORING.md)
---
## Quick Reference
### Installation Time Estimates
| Component | Estimated Time | Notes |
| ----------- | --------------- | ----------------------------- |
| PostgreSQL | 10-15 minutes | Including PostGIS extensions |
| Redis | 5 minutes | Quick install |
| Node.js | 5 minutes | Via NodeSource repository |
| Application | 15-20 minutes | Clone, install, build |
| PM2 | 5 minutes | Global install + config |
| NGINX | 10-15 minutes | Including SSL via Certbot |
| Bugsink | 20-30 minutes | Python venv, systemd services |
| Logstash | 15-20 minutes | Including pipeline config |
| **Total** | **~90 minutes** | For complete fresh install |
### Post-Installation Verification
After completing setup, verify all services:
```bash
# Check all services are running
systemctl status postgresql nginx redis-server gunicorn-bugsink snappea logstash
# Verify application health
curl -s https://flyer-crawler.projectium.com/api/health/ready | jq .
# Check PM2 processes
pm2 list
# Verify Bugsink is accessible
curl -s https://bugsink.projectium.com/accounts/login/ | head -5
```
---
## Server Access Model
All commands in this guide are intended for the **system administrator** to execute directly on the server. Claude Code and AI tools have **READ-ONLY** access to production servers and cannot execute these commands directly.
When Claude assists with server setup or troubleshooting:
1. Claude provides commands for the administrator to execute
2. Administrator runs commands and reports output
3. Claude analyzes results and provides next steps (1-3 commands at a time)
4. Administrator executes and reports results
5. Claude provides verification commands to confirm success
---
## Table of Contents

View File

@@ -2,14 +2,97 @@
This guide covers deploying Flyer Crawler to a production server.
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-014: Containerization and Deployment Strategy](../adr/0014-containerization-and-deployment-strategy.md)
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [Bare-Metal Setup Guide](BARE-METAL-SETUP.md)
- [Monitoring Guide](MONITORING.md)
---
## Quick Reference
### Command Reference Table
| Task | Command |
| -------------------- | ----------------------------------------------------------------------- |
| Deploy to production | Gitea Actions workflow (manual trigger) |
| Deploy to test | Automatic on push to `main` |
| Check PM2 status | `pm2 list` |
| View logs | `pm2 logs flyer-crawler-api --lines 100` |
| Restart all | `pm2 restart all` |
| Check NGINX | `sudo nginx -t && sudo systemctl status nginx` |
| Check health | `curl -s https://flyer-crawler.projectium.com/api/health/ready \| jq .` |
### Deployment URLs
| Environment | URL | API Port |
| ------------- | ------------------------------------------- | -------- |
| Production | `https://flyer-crawler.projectium.com` | 3001 |
| Test | `https://flyer-crawler-test.projectium.com` | 3002 |
| Dev Container | `https://localhost` | 3001 |
---
## Server Access Model
**Important**: Claude Code (and AI tools) have **READ-ONLY** access to production/test servers. The deployment workflow is:
| Actor | Capability |
| ------------ | --------------------------------------------------------------- |
| Gitea CI/CD | Automated deployments via workflows (has write access) |
| User (human) | Manual server access for troubleshooting and emergency fixes |
| Claude Code | Provides commands for user to execute; cannot run them directly |
When troubleshooting deployment issues:
1. Claude provides **diagnostic commands** for the user to run
2. User executes commands and reports output
3. Claude analyzes results and provides **fix commands** (1-3 at a time)
4. User executes fixes and reports results
5. Claude provides **verification commands** to confirm success
---
## Prerequisites
- Ubuntu server (22.04 LTS recommended)
- PostgreSQL 14+ with PostGIS extension
- Redis
- Node.js 20.x
- NGINX (reverse proxy)
- PM2 (process manager)
| Component | Version | Purpose |
| ---------- | --------- | ------------------------------- |
| Ubuntu | 22.04 LTS | Operating system |
| PostgreSQL | 14+ | Database with PostGIS extension |
| Redis | 6+ | Caching and job queues |
| Node.js | 20.x LTS | Application runtime |
| NGINX | 1.18+ | Reverse proxy and static files |
| PM2 | Latest | Process manager |
**Verify prerequisites**:
```bash
node --version # Should be v20.x.x
psql --version # Should be 14+
redis-cli ping # Should return PONG
nginx -v # Should be 1.18+
pm2 --version # Any recent version
```
## Dev Container Parity (ADR-014)
The development container now matches production architecture:
| Component | Production | Dev Container |
| ------------ | ---------------- | -------------------------- |
| Process Mgmt | PM2 | PM2 |
| API Server | PM2 cluster mode | PM2 fork + tsx watch |
| Worker | PM2 process | PM2 process + tsx watch |
| Logs | PM2 -> Logstash | PM2 -> Logstash -> Bugsink |
| HTTPS | Let's Encrypt | Self-signed (mkcert) |
This ensures issues caught in dev will behave the same in production.
**Dev Container Guide**: See [docs/development/DEV-CONTAINER.md](../development/DEV-CONTAINER.md)
---
@@ -174,7 +257,7 @@ types {
**Option 2**: Edit `/etc/nginx/mime.types` globally:
```
```text
# Change this line:
application/javascript js;
@@ -305,9 +388,78 @@ The Sentry SDK v10+ enforces HTTPS-only DSNs by default. Since Bugsink runs loca
---
## Deployment Troubleshooting
### Decision Tree: Deployment Issues
```text
Deployment failed?
|
+-- Build step failed?
| |
| +-- TypeScript errors --> Fix type issues, run `npm run type-check`
| +-- Missing dependencies --> Run `npm ci`
| +-- Out of memory --> Increase Node heap size
|
+-- Tests failed?
| |
| +-- Database connection --> Check DB_HOST, credentials
| +-- Redis connection --> Check REDIS_URL
| +-- Test isolation --> Check for race conditions
|
+-- SSH/Deploy failed?
|
+-- Permission denied --> Check SSH keys in Gitea secrets
+-- Host unreachable --> Check firewall, VPN
+-- PM2 error --> Check PM2 logs on server
```
### Common Deployment Issues
| Symptom | Diagnosis | Solution |
| ------------------------------------ | ----------------------- | ------------------------------------------------ |
| "Connection refused" on health check | API not started | Check `pm2 logs flyer-crawler-api` |
| 502 Bad Gateway | NGINX cannot reach API | Verify API port (3001), restart PM2 |
| CSS/JS not loading | Build artifacts missing | Re-run `npm run build`, check NGINX static paths |
| Database migrations failed | Schema mismatch | Run migrations manually, check DB connectivity |
| "ENOSPC" error | Disk full | Clear old logs: `pm2 flush`, clean npm cache |
| SSL certificate error | Cert expired/missing | Run `certbot renew`, check NGINX config |
### Post-Deployment Verification Checklist
After every deployment, verify:
- [ ] Health check passes: `curl -s https://flyer-crawler.projectium.com/api/health/ready`
- [ ] PM2 processes running: `pm2 list` shows `online` status
- [ ] No recent errors: Check Bugsink for new issues
- [ ] Frontend loads: Browser shows login page
- [ ] API responds: `curl https://flyer-crawler.projectium.com/api/health/ping`
### Rollback Procedure
If deployment causes issues:
```bash
# 1. Check current release
cd /var/www/flyer-crawler.projectium.com
git log --oneline -5
# 2. Revert to previous commit
git checkout HEAD~1
# 3. Rebuild and restart
npm ci && npm run build
pm2 restart all
# 4. Verify health
curl -s http://localhost:3001/api/health/ready | jq .
```
---
## Related Documentation
- [Database Setup](DATABASE.md) - PostgreSQL and PostGIS configuration
- [Authentication Setup](AUTHENTICATION.md) - OAuth provider configuration
- [Installation Guide](INSTALL.md) - Local development setup
- [Bare-Metal Server Setup](docs/BARE-METAL-SETUP.md) - Manual server installation guide
- [Database Setup](../architecture/DATABASE.md) - PostgreSQL and PostGIS configuration
- [Monitoring Guide](MONITORING.md) - Health checks and error tracking
- [Logstash Quick Reference](LOGSTASH-QUICK-REF.md) - Log aggregation
- [Bare-Metal Server Setup](BARE-METAL-SETUP.md) - Manual server installation guide

View File

@@ -2,10 +2,47 @@
Aggregates logs from PostgreSQL, PM2, Redis, NGINX; forwards errors to Bugsink.
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [Monitoring Guide](MONITORING.md)
- [Logstash Troubleshooting Runbook](LOGSTASH-TROUBLESHOOTING.md)
---
## Quick Reference
### Bugsink Project Routing
| Source Type | Environment | Bugsink Project | Project ID |
| -------------- | ----------- | -------------------- | ---------- |
| PM2 API/Worker | Dev | Backend API (Dev) | 1 |
| PostgreSQL | Dev | Backend API (Dev) | 1 |
| Frontend JS | Dev | Frontend (Dev) | 2 |
| Redis/NGINX | Dev | Infrastructure (Dev) | 4 |
| PM2 API/Worker | Production | Backend API (Prod) | 1 |
| PostgreSQL | Production | Backend API (Prod) | 1 |
| PM2 API/Worker | Test | Backend API (Test) | 3 |
### Key DSN Keys (Dev Container)
| Project | DSN Key |
| -------------------- | ---------------------------------- |
| Backend API (Dev) | `cea01396c56246adb5878fa5ee6b1d22` |
| Frontend (Dev) | `d92663cb73cf4145b677b84029e4b762` |
| Infrastructure (Dev) | `14e8791da3d347fa98073261b596cab9` |
---
## Configuration
**Primary config**: `/etc/logstash/conf.d/bugsink.conf`
**Dev container config**: `docker/logstash/bugsink.conf`
### Related Files
| Path | Purpose |
@@ -31,6 +68,8 @@ Aggregates logs from PostgreSQL, PM2, Redis, NGINX; forwards errors to Bugsink.
## Commands
### Production (Bare Metal)
```bash
# Status and control
systemctl status logstash
@@ -55,8 +94,66 @@ tail -f /var/log/logstash/nginx-access-$(date +%Y-%m-%d).log
du -sh /var/log/logstash/
```
### Dev Container
```bash
# View Logstash logs (inside container)
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/logstash/logstash.log
# Check PM2 API logs are being processed (ADR-014)
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/logstash/pm2-api-$(date +%Y-%m-%d).log
# Check PM2 Worker logs are being processed
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/logstash/pm2-worker-$(date +%Y-%m-%d).log
# Check Redis logs are being processed
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/logstash/redis-operational-$(date +%Y-%m-%d).log
# View raw PM2 logs
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/pm2/api-out.log
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev cat /var/log/pm2/worker-out.log
# View raw Redis logs
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-redis cat /var/log/redis/redis-server.log
# Check Logstash stats
podman exec flyer-crawler-dev curl -s localhost:9600/_node/stats/pipelines?pretty
# Verify shared volume mounts
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev ls -la /var/log/pm2/
MSYS_NO_PATHCONV=1 podman exec flyer-crawler-dev ls -la /var/log/redis/
```
## Troubleshooting
### Decision Tree: Logs Not Appearing in Bugsink
```text
Errors not showing in Bugsink?
|
+-- Logstash running?
| |
| +-- No --> systemctl start logstash
| +-- Yes --> Check pipeline stats
| |
| +-- Events in = 0?
| | |
| | +-- Log files exist? --> ls /var/log/pm2/*.log
| | +-- Permissions OK? --> groups logstash
| |
| +-- Events filtered = high?
| | |
| | +-- Grok failures --> Check log format matches pattern
| |
| +-- Events out but no Bugsink?
| |
| +-- 403 error --> Wrong DSN key
| +-- 500 error --> Invalid event format (check sentry_level)
| +-- Connection refused --> Bugsink not running
```
### Common Issues Table
| Issue | Check | Solution |
| --------------------- | ---------------- | ---------------------------------------------------------------------------------------------- |
| No Bugsink errors | Logstash running | `systemctl status logstash` |
@@ -64,12 +161,35 @@ du -sh /var/log/logstash/
| Grok pattern failures | Stats endpoint | `curl localhost:9600/_node/stats/pipelines?pretty \| jq '.pipelines.main.plugins.filters'` |
| Wrong Bugsink project | Env detection | Check tags in logs match expected environment |
| Permission denied | Logstash groups | `groups logstash` should include `postgres`, `adm` |
| PM2 not captured | File paths | `ls /home/gitea-runner/.pm2/logs/flyer-crawler-worker-*.log` |
| PM2 not captured | File paths | Dev: `ls /var/log/pm2/`; Prod: `ls /home/gitea-runner/.pm2/logs/` |
| PM2 logs empty | PM2 running | Dev: `podman exec flyer-crawler-dev pm2 status`; Prod: `pm2 status` |
| NGINX logs missing | Output directory | `ls -lh /var/log/logstash/nginx-access-*.log` |
| Redis logs missing | Shared volume | Dev: Check `redis_logs` volume mounted; Prod: Check `/var/log/redis/redis-server.log` exists |
| High disk usage | Log rotation | Verify `/etc/logrotate.d/logstash` configured |
| varchar(7) error | Level validation | Add Ruby filter to validate/normalize `sentry_level` before output |
### Expected Output Examples
**Successful Logstash pipeline stats**:
```json
{
"in": 1523,
"out": 1520,
"filtered": 1520,
"queue_push_duration_in_millis": 45
}
```
**Healthy Bugsink HTTP response**:
```json
{ "id": "a1b2c3d4e5f6..." }
```
## Related Documentation
- **Dev Container Guide**: [DEV-CONTAINER.md](../development/DEV-CONTAINER.md) - PM2 and log aggregation in dev
- **Full setup**: [BARE-METAL-SETUP.md](BARE-METAL-SETUP.md) - PostgreSQL Function Observability section
- **Architecture**: [adr/0050-postgresql-function-observability.md](adr/0050-postgresql-function-observability.md)
- **Architecture**: [adr/0050-postgresql-function-observability.md](../adr/0050-postgresql-function-observability.md)
- **Troubleshooting details**: [LOGSTASH-TROUBLESHOOTING.md](LOGSTASH-TROUBLESHOOTING.md)

View File

@@ -2,6 +2,16 @@
This runbook provides step-by-step diagnostics and solutions for common Logstash issues in the PostgreSQL observability pipeline (ADR-050).
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [Logstash Quick Reference](LOGSTASH-QUICK-REF.md)
- [Monitoring Guide](MONITORING.md)
---
## Quick Reference
| Symptom | Most Likely Cause | Quick Check |
@@ -11,6 +21,7 @@ This runbook provides step-by-step diagnostics and solutions for common Logstash
| Wrong Bugsink project | Environment detection failed | Verify `pg_database` field extraction |
| 403 authentication error | Missing/wrong DSN key | Check `X-Sentry-Auth` header |
| 500 error from Bugsink | Invalid event format | Verify `event_id` and required fields |
| varchar(7) constraint | Unresolved `%{sentry_level}` | Add Ruby filter for level validation |
---
@@ -385,7 +396,88 @@ systemctl status logstash
---
### Issue 7: Log File Rotation Issues
### Issue 7: Level Field Constraint Violation (varchar(7))
**Symptoms:**
- Bugsink returns HTTP 500 errors
- PostgreSQL errors: `value too long for type character varying(7)`
- Events fail to insert with literal `%{sentry_level}` string (16 characters)
**Root Cause:**
When Logstash cannot determine the log level (no error patterns matched), the `sentry_level` field remains as the unresolved placeholder `%{sentry_level}`. Bugsink's PostgreSQL schema has a `varchar(7)` constraint on the level field.
Valid Sentry levels (all <= 7 characters): `fatal`, `error`, `warning`, `info`, `debug`
**Diagnosis:**
```bash
# Check for HTTP 500 responses in Logstash logs
podman exec flyer-crawler-dev cat /var/log/logstash/logstash.log | grep "500"
# Check Bugsink for constraint violation errors
# Via MCP:
mcp__localerrors__list_issues({ project_id: 1, status: 'unresolved' })
```
**Solution:**
Add a Ruby filter block in `docker/logstash/bugsink.conf` to validate and normalize the `sentry_level` field before sending to Bugsink:
```ruby
# Add this AFTER all mutate filters that set sentry_level
# and BEFORE the output section
ruby {
code => '
level = event.get("sentry_level")
# Check if level is invalid (nil, empty, contains placeholder, or too long)
if level.nil? || level.to_s.empty? || level.to_s.include?("%{") || level.to_s.length > 7
# Default to "error" for error-tagged events, "info" otherwise
if event.get("tags")&.include?("error")
event.set("sentry_level", "error")
else
event.set("sentry_level", "info")
end
else
# Normalize to lowercase and validate
normalized = level.to_s.downcase
valid_levels = ["fatal", "error", "warning", "info", "debug"]
unless valid_levels.include?(normalized)
normalized = "error"
end
event.set("sentry_level", normalized)
end
'
}
```
**Key validations performed:**
1. Checks for nil or empty values
2. Detects unresolved placeholders (`%{...}`)
3. Enforces 7-character maximum length
4. Normalizes to lowercase
5. Validates against allowed Sentry levels
6. Defaults to "error" for error-tagged events, "info" otherwise
**Verification:**
```bash
# Restart Logstash
podman exec flyer-crawler-dev systemctl restart logstash
# Generate a test log that triggers the filter
podman exec flyer-crawler-dev pm2 restart flyer-crawler-api-dev
# Check no new HTTP 500 errors
podman exec flyer-crawler-dev cat /var/log/logstash/logstash.log | tail -50 | grep -E "(500|error)"
```
---
### Issue 8: Log File Rotation Issues
**Symptoms:**

View File

@@ -2,6 +2,72 @@
This guide covers all aspects of monitoring the Flyer Crawler application across development, test, and production environments.
**Last verified**: 2026-01-28
**Related documentation**:
- [ADR-015: Error Tracking and Observability](../adr/0015-error-tracking-and-observability.md)
- [ADR-020: Health Checks](../adr/0020-health-checks-and-liveness-readiness-probes.md)
- [ADR-050: PostgreSQL Function Observability](../adr/0050-postgresql-function-observability.md)
- [Logstash Quick Reference](LOGSTASH-QUICK-REF.md)
- [Deployment Guide](DEPLOYMENT.md)
---
## Quick Reference
### Monitoring URLs
| Service | Production URL | Dev Container URL |
| ------------ | ------------------------------------------------------- | ---------------------------------------- |
| Health Check | `https://flyer-crawler.projectium.com/api/health/ready` | `http://localhost:3001/api/health/ready` |
| Bugsink | `https://bugsink.projectium.com` | `https://localhost:8443` |
| Bull Board | `https://flyer-crawler.projectium.com/api/admin/jobs` | `http://localhost:3001/api/admin/jobs` |
### Quick Diagnostic Commands
```bash
# Check all services at once (production)
curl -s https://flyer-crawler.projectium.com/api/health/ready | jq '.data.services'
# Dev container health check
podman exec flyer-crawler-dev curl -s http://localhost:3001/api/health/ready | jq .
# PM2 process overview
pm2 list
# Recent errors in Bugsink (via MCP)
# mcp__bugsink__list_issues --project_id 1 --status unresolved
```
### Monitoring Decision Tree
```text
Application seems slow or unresponsive?
|
+-- Check health endpoint first
| |
| +-- Returns unhealthy?
| | |
| | +-- Database unhealthy --> Check DB pool, connections
| | +-- Redis unhealthy --> Check Redis memory, connection
| | +-- Storage unhealthy --> Check disk space, permissions
| |
| +-- Returns healthy but slow?
| |
| +-- Check PM2 memory/CPU usage
| +-- Check database slow query log
| +-- Check Redis queue depth
|
+-- Health endpoint not responding?
|
+-- Check PM2 status --> Process crashed?
+-- Check NGINX --> 502 errors?
+-- Check network --> Firewall/DNS issues?
```
---
## Table of Contents
1. [Health Checks](#health-checks)
@@ -276,10 +342,10 @@ Dev Container (in `.mcp.json`):
Bugsink 2.0.11 does not have a UI for API tokens. Create via Django management command.
**Production**:
**Production** (user executes on server):
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
cd /opt/bugsink && bugsink-manage create_auth_token
```
**Dev Container**:
@@ -294,7 +360,7 @@ The command outputs a 40-character hex token.
**Error Anatomy**:
```
```text
TypeError: Cannot read properties of undefined (reading 'map')
├── Exception Type: TypeError
├── Message: Cannot read properties of undefined (reading 'map')
@@ -357,7 +423,7 @@ Logstash aggregates logs from multiple sources and forwards errors to Bugsink (A
### Architecture
```
```text
Log Sources Logstash Outputs
┌──────────────┐ ┌─────────────┐ ┌─────────────┐
│ PostgreSQL │──────────────│ │───────────│ Bugsink │
@@ -388,11 +454,9 @@ Log Sources Logstash Outputs
### Pipeline Status
**Check Logstash Service**:
**Check Logstash Service** (user executes on server):
```bash
ssh root@projectium.com
# Service status
systemctl status logstash
@@ -485,9 +549,11 @@ PM2 manages the Node.js application processes in production.
### Basic Commands
> **Note**: These commands are for the user to execute on the server. Claude Code provides commands but cannot run them directly.
```bash
ssh root@projectium.com
su - gitea-runner # PM2 runs under this user
# Switch to gitea-runner user (PM2 runs under this user)
su - gitea-runner
# List all processes
pm2 list
@@ -520,7 +586,7 @@ pm2 stop flyer-crawler-api
**Healthy Process**:
```
```text
┌─────────────────────┬────┬─────────┬─────────┬───────┬────────┬─────────┬──────────┐
│ Name │ id │ mode │ status │ cpu │ mem │ uptime │ restarts │
├─────────────────────┼────┼─────────┼─────────┼───────┼────────┼─────────┼──────────┤
@@ -833,29 +899,28 @@ Configure alerts in your monitoring tool (UptimeRobot, Datadog, etc.):
2. Review during business hours
3. Create Gitea issue for tracking
### Quick Diagnostic Commands
### On-Call Diagnostic Commands
> **Note**: User executes these commands on the server. Claude Code provides commands but cannot run them directly.
```bash
# Full system health check
ssh root@projectium.com << 'EOF'
echo "=== Service Status ==="
# Service status checks
systemctl status pm2-gitea-runner --no-pager
systemctl status logstash --no-pager
systemctl status redis --no-pager
systemctl status postgresql --no-pager
echo "=== PM2 Processes ==="
# PM2 processes (run as gitea-runner)
su - gitea-runner -c "pm2 list"
echo "=== Disk Space ==="
# Disk space
df -h / /var
echo "=== Memory ==="
# Memory
free -h
echo "=== Recent Errors ==="
# Recent errors
journalctl -p err -n 20 --no-pager
EOF
```
### Runbook Quick Reference

View File

@@ -0,0 +1,161 @@
# Unit Test Fix Plan: Error Log Path Mismatches
**Date**: 2026-01-27
**Type**: Technical Implementation Plan
**Related**: [ADR-008: API Versioning Strategy](../adr/0008-api-versioning-strategy.md)
**Status**: Ready for Implementation
---
## Problem Statement
16 unit tests fail due to error log message assertions expecting versioned paths (`/api/v1/`) while route handlers emit hardcoded unversioned paths (`/api/`).
**Failure Pattern**:
```text
AssertionError: expected "Error PUT /api/users/profile" to contain "/api/v1/users/profile"
```
**Scope**: All failures are `toContain` assertions on `logger.error()` call arguments.
---
## Root Cause Analysis
| Layer | Behavior | Issue |
| ------------------ | ----------------------------------------------------- | ------------------- |
| Route Registration | `server.ts` mounts at `/api/v1/` | Correct |
| Request Path | `req.path` returns `/users/profile` (router-relative) | No version info |
| Error Handlers | Hardcode `"Error PUT /api/users/profile"` | Version mismatch |
| Test Assertions | Expect `"/api/v1/users/profile"` | Correct expectation |
**Root Cause**: Error log statements use template literals with hardcoded `/api/` prefix instead of `req.originalUrl` which contains the full versioned path.
**Example**:
```typescript
// Current (broken)
logger.error(`Error PUT /api/users/profile: ${err}`);
// Expected
logger.error(`Error PUT ${req.originalUrl}: ${err}`);
// Output: "Error PUT /api/v1/users/profile: ..."
```
---
## Solution Approach
Replace hardcoded path strings with `req.originalUrl` in all error log statements.
### Express Request Properties Reference
| Property | Example Value | Use Case |
| ----------------- | ------------------------------- | ----------------------------- |
| `req.originalUrl` | `/api/v1/users/profile?foo=bar` | Full URL with version + query |
| `req.path` | `/profile` | Router-relative path only |
| `req.baseUrl` | `/api/v1/users` | Mount point |
**Decision**: Use `req.originalUrl` for error logging to capture complete request context.
---
## Implementation Plan
### Affected Files
| File | Error Statements | Methods |
| ------------------------------- | ---------------- | ---------------------------------------------------- |
| `src/routes/users.routes.ts` | 3 | `PUT /profile`, `POST /profile/password`, `DELETE /` |
| `src/routes/recipe.routes.ts` | 2 | `POST /import`, `POST /:id/fork` |
| `src/routes/receipts.routes.ts` | 2 | `POST /`, `PATCH /:id` |
| `src/routes/flyers.routes.ts` | 2 | `POST /`, `PUT /:id` |
**Total**: 9 error log statements across 4 route files
### Parallel Implementation Tasks
All 4 files can be modified independently:
**Task 1**: `users.routes.ts`
- Line patterns: `Error PUT /api/users/profile`, `Error POST /api/users/profile/password`, `Error DELETE /api/users`
- Change: Replace with `Error ${req.method} ${req.originalUrl}`
**Task 2**: `recipe.routes.ts`
- Line patterns: `Error POST /api/recipes/import`, `Error POST /api/recipes/:id/fork`
- Change: Replace with `Error ${req.method} ${req.originalUrl}`
**Task 3**: `receipts.routes.ts`
- Line patterns: `Error POST /api/receipts`, `Error PATCH /api/receipts/:id`
- Change: Replace with `Error ${req.method} ${req.originalUrl}`
**Task 4**: `flyers.routes.ts`
- Line patterns: `Error POST /api/flyers`, `Error PUT /api/flyers/:id`
- Change: Replace with `Error ${req.method} ${req.originalUrl}`
### Verification
```bash
podman exec -it flyer-crawler-dev npm run test:unit
```
**Expected**: 16 failures → 0 failures (3,391/3,391 passing)
---
## Test Files Affected
Tests that will pass after fix:
| Test File | Failing Tests |
| ------------------------- | ------------- |
| `users.routes.test.ts` | 6 |
| `recipe.routes.test.ts` | 4 |
| `receipts.routes.test.ts` | 3 |
| `flyers.routes.test.ts` | 3 |
---
## Expected Outcomes
| Metric | Before | After |
| ------------------ | ----------- | ------------------- |
| Unit test failures | 16 | 0 |
| Unit tests passing | 3,375/3,391 | 3,391/3,391 |
| Integration tests | 345/348 | 345/348 (unchanged) |
### Benefits
1. **Version-agnostic logging**: Error messages automatically reflect actual request URL
2. **Future-proof**: No changes needed when v2 API is introduced
3. **Debugging clarity**: Logs show exact URL including query parameters
4. **Consistency**: All error handlers follow same pattern
---
## Implementation Notes
### Pattern to Apply
**Before**:
```typescript
logger.error(`Error PUT /api/users/profile: ${error.message}`);
```
**After**:
```typescript
logger.error(`Error ${req.method} ${req.originalUrl}: ${error.message}`);
```
### Edge Cases
- `req.originalUrl` includes query string if present (acceptable for debugging)
- No sanitization needed as URL is from Express parsed request
- Works correctly with route parameters (`:id` becomes actual value)

View File

@@ -0,0 +1,849 @@
# ADR-024 Implementation Plan: Feature Flagging Strategy
**Date**: 2026-01-28
**Type**: Technical Implementation Plan
**Related**: [ADR-024: Feature Flagging Strategy](../adr/0024-feature-flagging-strategy.md), [ADR-007: Configuration and Secrets Management](../adr/0007-configuration-and-secrets-management.md)
**Status**: Ready for Implementation
---
## Project Overview
Implement a simple, configuration-based feature flag system that integrates with the existing Zod-validated configuration in `src/config/env.ts`. The system will support both backend and frontend feature flags through environment variables, with type-safe access patterns and helper utilities.
### Key Success Criteria
1. Feature flags accessible via type-safe API on both backend and frontend
2. Zero runtime overhead when flag is disabled (compile-time elimination where possible)
3. Consistent naming convention (environment variables and code access)
4. Graceful degradation (missing flag defaults to disabled)
5. Easy migration path to external service (Flagsmith/LaunchDarkly) in the future
6. Full test coverage with mocking utilities
### Estimated Total Effort
| Phase | Estimate |
| --------------------------------- | -------------- |
| Phase 1: Backend Infrastructure | 3-5 hours |
| Phase 2: Frontend Infrastructure | 2-3 hours |
| Phase 3: Documentation & Examples | 1-2 hours |
| **Total** | **6-10 hours** |
---
## Current State Analysis
### Backend Configuration (`src/config/env.ts`)
- Zod-based schema validation at startup
- Organized into logical groups (database, redis, auth, smtp, ai, etc.)
- Helper exports for service availability (`isSmtpConfigured`, `isAiConfigured`, etc.)
- Environment helpers (`isProduction`, `isTest`, `isDevelopment`)
- Fail-fast on invalid configuration
### Frontend Configuration (`src/config.ts`)
- Uses `import.meta.env` (Vite environment variables)
- Organized into sections (app, google, sentry)
- Boolean parsing for string env vars
- Type declarations in `src/vite-env.d.ts`
### Existing Patterns to Follow
```typescript
// Backend - service availability check pattern
export const isSmtpConfigured =
!!config.smtp.host && !!config.smtp.user && !!config.smtp.pass;
// Frontend - boolean parsing pattern
enabled: import.meta.env.VITE_SENTRY_ENABLED !== 'false',
```
---
## Task Breakdown
### Phase 1: Backend Feature Flag Infrastructure
#### [1.1] Define Feature Flag Schema in env.ts
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: None
**Parallelizable**: Yes
**Description**: Add a new `featureFlags` section to the Zod schema in `src/config/env.ts`.
**Acceptance Criteria**:
- [ ] New `featureFlagsSchema` Zod object defined
- [ ] Schema supports boolean flags with defaults to `false` (opt-in model)
- [ ] Schema added to main `envSchema` object
- [ ] Type exported as part of `EnvConfig`
**Implementation Details**:
```typescript
// src/config/env.ts
/**
* Feature flags configuration schema (ADR-024).
* All flags default to false (disabled) for safety.
* Set to 'true' in environment to enable.
*/
const featureFlagsSchema = z.object({
// Example flags - replace with actual feature flags as needed
newDashboard: booleanString(false), // FEATURE_NEW_DASHBOARD
betaRecipes: booleanString(false), // FEATURE_BETA_RECIPES
experimentalAi: booleanString(false), // FEATURE_EXPERIMENTAL_AI
debugMode: booleanString(false), // FEATURE_DEBUG_MODE
});
// In loadEnvVars():
featureFlags: {
newDashboard: process.env.FEATURE_NEW_DASHBOARD,
betaRecipes: process.env.FEATURE_BETA_RECIPES,
experimentalAi: process.env.FEATURE_EXPERIMENTAL_AI,
debugMode: process.env.FEATURE_DEBUG_MODE,
},
```
**Risks/Notes**:
- Naming convention: `FEATURE_*` prefix for all feature flag env vars
- Default to `false` ensures features are opt-in, preventing accidental exposure
---
#### [1.2] Create Feature Flag Service Module
**Complexity**: Medium
**Estimate**: 1-2 hours
**Dependencies**: [1.1]
**Parallelizable**: No (depends on 1.1)
**Description**: Create a dedicated service module for feature flag access with helper functions.
**File**: `src/services/featureFlags.server.ts`
**Acceptance Criteria**:
- [ ] `isFeatureEnabled(flagName)` function for checking flags
- [ ] `getAllFeatureFlags()` function for debugging/admin endpoints
- [ ] Type-safe flag name parameter (union type or enum)
- [ ] Exported helper booleans for common flags (similar to `isSmtpConfigured`)
- [ ] Logging when feature flag is checked in development mode
**Implementation Details**:
```typescript
// src/services/featureFlags.server.ts
import { config, isDevelopment } from '../config/env';
import { logger } from './logger.server';
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Check if a feature flag is enabled.
* @param flagName - The name of the feature flag to check
* @returns boolean indicating if the feature is enabled
*/
export function isFeatureEnabled(flagName: FeatureFlagName): boolean {
const enabled = config.featureFlags[flagName];
if (isDevelopment) {
logger.debug({ flag: flagName, enabled }, 'Feature flag checked');
}
return enabled;
}
/**
* Get all feature flags and their current states.
* Useful for debugging and admin endpoints.
*/
export function getAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return { ...config.featureFlags };
}
// Convenience exports for common flag checks
export const isNewDashboardEnabled = config.featureFlags.newDashboard;
export const isBetaRecipesEnabled = config.featureFlags.betaRecipes;
export const isExperimentalAiEnabled = config.featureFlags.experimentalAi;
export const isDebugModeEnabled = config.featureFlags.debugMode;
```
**Risks/Notes**:
- Keep logging minimal to avoid performance impact
- Convenience exports are evaluated once at startup (not dynamic)
---
#### [1.3] Add Admin Endpoint for Feature Flag Status
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: [1.2]
**Parallelizable**: No (depends on 1.2)
**Description**: Add an admin/health endpoint to view current feature flag states.
**File**: `src/routes/admin.routes.ts` (or `stats.routes.ts` if admin routes don't exist)
**Acceptance Criteria**:
- [ ] `GET /api/v1/admin/feature-flags` endpoint (admin-only)
- [ ] Returns JSON object with all flags and their states
- [ ] Requires admin authentication
- [ ] Endpoint documented in Swagger
**Implementation Details**:
```typescript
// In appropriate routes file
router.get('/feature-flags', requireAdmin, async (req, res) => {
const flags = getAllFeatureFlags();
sendSuccess(res, { flags });
});
```
**Risks/Notes**:
- Ensure endpoint is protected (admin-only)
- Consider caching response if called frequently
---
#### [1.4] Backend Unit Tests
**Complexity**: Medium
**Estimate**: 1-2 hours
**Dependencies**: [1.1], [1.2]
**Parallelizable**: Yes (can start after 1.1, in parallel with 1.3)
**Description**: Write unit tests for feature flag configuration and service.
**Files**:
- `src/config/env.test.ts` (add feature flag tests)
- `src/services/featureFlags.server.test.ts` (new file)
**Acceptance Criteria**:
- [ ] Test default values (all false)
- [ ] Test parsing 'true'/'false' strings
- [ ] Test `isFeatureEnabled()` function
- [ ] Test `getAllFeatureFlags()` function
- [ ] Test type safety (TypeScript compile-time checks)
**Implementation Details**:
```typescript
// src/config/env.test.ts - add to existing file
describe('featureFlags configuration', () => {
it('should default all feature flags to false', async () => {
setValidEnv();
const { config } = await import('./env');
expect(config.featureFlags.newDashboard).toBe(false);
expect(config.featureFlags.betaRecipes).toBe(false);
});
it('should parse FEATURE_NEW_DASHBOARD as true when set', async () => {
setValidEnv({ FEATURE_NEW_DASHBOARD: 'true' });
const { config } = await import('./env');
expect(config.featureFlags.newDashboard).toBe(true);
});
});
// src/services/featureFlags.server.test.ts - new file
describe('featureFlags service', () => {
describe('isFeatureEnabled', () => {
it('should return false for disabled flags', () => {
expect(isFeatureEnabled('newDashboard')).toBe(false);
});
// ... more tests
});
});
```
---
### Phase 2: Frontend Feature Flag Infrastructure
#### [2.1] Add Frontend Feature Flag Config
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: None (can run in parallel with Phase 1)
**Parallelizable**: Yes
**Description**: Add feature flags to the frontend config module.
**Files**:
- `src/config.ts` - Add featureFlags section
- `src/vite-env.d.ts` - Add type declarations
**Acceptance Criteria**:
- [ ] Feature flags section added to `src/config.ts`
- [ ] TypeScript declarations updated in `vite-env.d.ts`
- [ ] Boolean parsing consistent with existing pattern
- [ ] Default to false when env var not set
**Implementation Details**:
```typescript
// src/config.ts
const config = {
// ... existing sections ...
/**
* Feature flags for conditional feature rendering (ADR-024).
* All flags default to false (disabled) when not explicitly set.
*/
featureFlags: {
newDashboard: import.meta.env.VITE_FEATURE_NEW_DASHBOARD === 'true',
betaRecipes: import.meta.env.VITE_FEATURE_BETA_RECIPES === 'true',
experimentalAi: import.meta.env.VITE_FEATURE_EXPERIMENTAL_AI === 'true',
debugMode: import.meta.env.VITE_FEATURE_DEBUG_MODE === 'true',
},
};
// src/vite-env.d.ts
interface ImportMetaEnv {
// ... existing declarations ...
readonly VITE_FEATURE_NEW_DASHBOARD?: string;
readonly VITE_FEATURE_BETA_RECIPES?: string;
readonly VITE_FEATURE_EXPERIMENTAL_AI?: string;
readonly VITE_FEATURE_DEBUG_MODE?: string;
}
```
---
#### [2.2] Create useFeatureFlag React Hook
**Complexity**: Medium
**Estimate**: 1-1.5 hours
**Dependencies**: [2.1]
**Parallelizable**: No (depends on 2.1)
**Description**: Create a React hook for checking feature flags in components.
**File**: `src/hooks/useFeatureFlag.ts`
**Acceptance Criteria**:
- [ ] `useFeatureFlag(flagName)` hook returns boolean
- [ ] Type-safe flag name parameter
- [ ] Memoized to prevent unnecessary re-renders
- [ ] Optional `FeatureFlag` component for conditional rendering
**Implementation Details**:
```typescript
// src/hooks/useFeatureFlag.ts
import { useMemo } from 'react';
import config from '../config';
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Hook to check if a feature flag is enabled.
*
* @param flagName - The name of the feature flag to check
* @returns boolean indicating if the feature is enabled
*
* @example
* const isNewDashboard = useFeatureFlag('newDashboard');
* if (isNewDashboard) {
* return <NewDashboard />;
* }
*/
export function useFeatureFlag(flagName: FeatureFlagName): boolean {
return useMemo(() => config.featureFlags[flagName], [flagName]);
}
/**
* Get all feature flags (useful for debugging).
*/
export function useAllFeatureFlags(): Record<FeatureFlagName, boolean> {
return useMemo(() => ({ ...config.featureFlags }), []);
}
```
---
#### [2.3] Create FeatureFlag Component
**Complexity**: Low
**Estimate**: 30-45 minutes
**Dependencies**: [2.2]
**Parallelizable**: No (depends on 2.2)
**Description**: Create a declarative component for feature flag conditional rendering.
**File**: `src/components/FeatureFlag.tsx`
**Acceptance Criteria**:
- [ ] `<FeatureFlag name="flagName">` component
- [ ] Children rendered only when flag is enabled
- [ ] Optional `fallback` prop for disabled state
- [ ] TypeScript-enforced flag names
**Implementation Details**:
```typescript
// src/components/FeatureFlag.tsx
import { ReactNode } from 'react';
import { useFeatureFlag, FeatureFlagName } from '../hooks/useFeatureFlag';
interface FeatureFlagProps {
/** The name of the feature flag to check */
name: FeatureFlagName;
/** Content to render when feature is enabled */
children: ReactNode;
/** Optional content to render when feature is disabled */
fallback?: ReactNode;
}
/**
* Conditionally renders children based on feature flag state.
*
* @example
* <FeatureFlag name="newDashboard" fallback={<OldDashboard />}>
* <NewDashboard />
* </FeatureFlag>
*/
export function FeatureFlag({ name, children, fallback = null }: FeatureFlagProps) {
const isEnabled = useFeatureFlag(name);
return <>{isEnabled ? children : fallback}</>;
}
```
---
#### [2.4] Frontend Unit Tests
**Complexity**: Medium
**Estimate**: 1-1.5 hours
**Dependencies**: [2.1], [2.2], [2.3]
**Parallelizable**: No (depends on previous frontend tasks)
**Description**: Write unit tests for frontend feature flag utilities.
**Files**:
- `src/config.test.ts` (add feature flag tests)
- `src/hooks/useFeatureFlag.test.ts` (new file)
- `src/components/FeatureFlag.test.tsx` (new file)
**Acceptance Criteria**:
- [ ] Test config structure includes featureFlags
- [ ] Test default values (all false)
- [ ] Test hook returns correct values
- [ ] Test component renders/hides children correctly
- [ ] Test fallback rendering
**Implementation Details**:
```typescript
// src/hooks/useFeatureFlag.test.ts
import { renderHook } from '@testing-library/react';
import { useFeatureFlag, useAllFeatureFlags } from './useFeatureFlag';
describe('useFeatureFlag', () => {
it('should return false for disabled flags', () => {
const { result } = renderHook(() => useFeatureFlag('newDashboard'));
expect(result.current).toBe(false);
});
});
// src/components/FeatureFlag.test.tsx
import { render, screen } from '@testing-library/react';
import { FeatureFlag } from './FeatureFlag';
describe('FeatureFlag', () => {
it('should not render children when flag is disabled', () => {
render(
<FeatureFlag name="newDashboard">
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
});
it('should render fallback when flag is disabled', () => {
render(
<FeatureFlag name="newDashboard" fallback={<div>Old Feature</div>}>
<div>New Feature</div>
</FeatureFlag>
);
expect(screen.getByText('Old Feature')).toBeInTheDocument();
});
});
```
---
### Phase 3: Documentation & Integration
#### [3.1] Update ADR-024 with Implementation Status
**Complexity**: Low
**Estimate**: 30 minutes
**Dependencies**: [1.1], [1.2], [2.1], [2.2]
**Parallelizable**: Yes (can be done after core implementation)
**Description**: Update ADR-024 to mark it as implemented and add implementation details.
**File**: `docs/adr/0024-feature-flagging-strategy.md`
**Acceptance Criteria**:
- [ ] Status changed from "Proposed" to "Accepted"
- [ ] Implementation status section added
- [ ] Key files documented
- [ ] Usage examples included
---
#### [3.2] Update Environment Documentation
**Complexity**: Low
**Estimate**: 30 minutes
**Dependencies**: [1.1], [2.1]
**Parallelizable**: Yes
**Description**: Add feature flag environment variables to documentation.
**Files**:
- `docs/getting-started/ENVIRONMENT.md`
- `.env.example`
**Acceptance Criteria**:
- [ ] Feature flag variables documented in ENVIRONMENT.md
- [ ] New section "Feature Flags" added
- [ ] `.env.example` updated with commented feature flag examples
**Implementation Details**:
```bash
# .env.example addition
# ===================
# Feature Flags (ADR-024)
# ===================
# All feature flags default to disabled (false) when not set.
# Set to 'true' to enable a feature.
#
# FEATURE_NEW_DASHBOARD=false
# FEATURE_BETA_RECIPES=false
# FEATURE_EXPERIMENTAL_AI=false
# FEATURE_DEBUG_MODE=false
#
# Frontend equivalents (prefix with VITE_):
# VITE_FEATURE_NEW_DASHBOARD=false
# VITE_FEATURE_BETA_RECIPES=false
```
---
#### [3.3] Create CODE-PATTERNS Entry
**Complexity**: Low
**Estimate**: 30 minutes
**Dependencies**: All implementation tasks
**Parallelizable**: Yes
**Description**: Add feature flag usage patterns to CODE-PATTERNS.md.
**File**: `docs/development/CODE-PATTERNS.md`
**Acceptance Criteria**:
- [ ] Feature flag section added with examples
- [ ] Backend usage pattern documented
- [ ] Frontend usage pattern documented
- [ ] Testing pattern documented
---
#### [3.4] Update CLAUDE.md Quick Reference
**Complexity**: Low
**Estimate**: 15 minutes
**Dependencies**: All implementation tasks
**Parallelizable**: Yes
**Description**: Add feature flags to the CLAUDE.md quick reference tables.
**File**: `CLAUDE.md`
**Acceptance Criteria**:
- [ ] Feature flags added to "Key Patterns" table
- [ ] Reference to featureFlags service added
---
## Implementation Sequence
### Phase 1 (Backend) - Can Start Immediately
```text
[1.1] Schema ──────────┬──> [1.2] Service ──> [1.3] Admin Endpoint
└──> [1.4] Backend Tests (can start after 1.1)
```
### Phase 2 (Frontend) - Can Start Immediately (Parallel with Phase 1)
```text
[2.1] Config ──> [2.2] Hook ──> [2.3] Component ──> [2.4] Frontend Tests
```
### Phase 3 (Documentation) - After Implementation
```text
All Phase 1 & 2 Tasks ──> [3.1] ADR Update
├──> [3.2] Env Docs
├──> [3.3] Code Patterns
└──> [3.4] CLAUDE.md
```
---
## Critical Path
The minimum path to a working feature flag system:
1. **[1.1] Schema** (30 min) - Required for backend
2. **[1.2] Service** (1.5 hr) - Required for backend access
3. **[2.1] Frontend Config** (30 min) - Required for frontend
4. **[2.2] Hook** (1 hr) - Required for React integration
**Critical path duration**: ~3.5 hours
Non-critical but recommended:
- Admin endpoint (debugging)
- FeatureFlag component (developer convenience)
- Tests (quality assurance)
- Documentation (maintainability)
---
## Scope Recommendations
### MVP (Minimum Viable Implementation)
Include in initial implementation:
- [1.1] Backend schema with 2-3 example flags
- [1.2] Feature flag service
- [2.1] Frontend config
- [2.2] useFeatureFlag hook
- [1.4] Core backend tests
- [2.4] Core frontend tests
### Enhancements (Future Iterations)
Defer to follow-up work:
- Admin endpoint for flag visibility
- FeatureFlag component (nice-to-have)
- Dynamic flag updates without restart (requires external service)
- User-specific flags (A/B testing)
- Flag analytics/usage tracking
- Gradual rollout percentages
### Explicitly Out of Scope
- Integration with Flagsmith/LaunchDarkly (future ADR)
- Database-stored flags (requires schema changes)
- Real-time flag updates (WebSocket/SSE)
- Flag inheritance/hierarchy
- Flag audit logging
---
## Testing Strategy
### Backend Tests
| Test Type | Coverage Target | Location |
| ----------------- | ---------------------------------------- | ------------------------------------------ |
| Schema validation | Parse true/false, defaults | `src/config/env.test.ts` |
| Service functions | `isFeatureEnabled`, `getAllFeatureFlags` | `src/services/featureFlags.server.test.ts` |
| Integration | Admin endpoint (if added) | `src/routes/admin.routes.test.ts` |
### Frontend Tests
| Test Type | Coverage Target | Location |
| ------------------- | --------------------------- | ------------------------------------- |
| Config structure | featureFlags section exists | `src/config.test.ts` |
| Hook behavior | Returns correct values | `src/hooks/useFeatureFlag.test.ts` |
| Component rendering | Conditional children | `src/components/FeatureFlag.test.tsx` |
### Mocking Pattern for Tests
```typescript
// Backend - reset modules to test different flag states
beforeEach(() => {
vi.resetModules();
process.env.FEATURE_NEW_DASHBOARD = 'true';
});
// Frontend - mock config module
vi.mock('../config', () => ({
default: {
featureFlags: {
newDashboard: true,
betaRecipes: false,
},
},
}));
```
---
## Risk Assessment
| Risk | Impact | Likelihood | Mitigation |
| ------------------------------------------- | ------ | ---------- | ------------------------------------------------------------- |
| Flag state inconsistency (backend/frontend) | Medium | Low | Use same env var naming, document sync requirements |
| Performance impact from flag checks | Low | Low | Flags cached at startup, no runtime DB calls |
| Stale flags after deployment | Medium | Medium | Document restart requirement, consider future dynamic loading |
| Feature creep (too many flags) | Medium | Medium | Require ADR for new flags, sunset policy |
| Missing flag causes crash | High | Low | Default to false, graceful degradation |
---
## Files to Create
| File | Purpose |
| ------------------------------------------ | ---------------------------- |
| `src/services/featureFlags.server.ts` | Backend feature flag service |
| `src/services/featureFlags.server.test.ts` | Backend tests |
| `src/hooks/useFeatureFlag.ts` | React hook for flag access |
| `src/hooks/useFeatureFlag.test.ts` | Hook tests |
| `src/components/FeatureFlag.tsx` | Declarative flag component |
| `src/components/FeatureFlag.test.tsx` | Component tests |
## Files to Modify
| File | Changes |
| -------------------------------------------- | ---------------------------------- |
| `src/config/env.ts` | Add featureFlagsSchema and loading |
| `src/config/env.test.ts` | Add feature flag tests |
| `src/config.ts` | Add featureFlags section |
| `src/config.test.ts` | Add feature flag tests |
| `src/vite-env.d.ts` | Add VITE*FEATURE*\* declarations |
| `.env.example` | Add feature flag examples |
| `docs/adr/0024-feature-flagging-strategy.md` | Update status and details |
| `docs/getting-started/ENVIRONMENT.md` | Document feature flag vars |
| `docs/development/CODE-PATTERNS.md` | Add usage patterns |
| `CLAUDE.md` | Add to quick reference |
---
## Verification Commands
After implementation, run these commands in the dev container:
```bash
# Type checking
podman exec -it flyer-crawler-dev npm run type-check
# Backend unit tests
podman exec -it flyer-crawler-dev npm run test:unit -- --grep "featureFlag"
# Frontend tests (includes hook and component tests)
podman exec -it flyer-crawler-dev npm run test:unit -- --grep "FeatureFlag"
# Full test suite
podman exec -it flyer-crawler-dev npm test
```
---
## Example Usage (Post-Implementation)
### Backend Route Handler
```typescript
// src/routes/flyers.routes.ts
import { isFeatureEnabled } from '../services/featureFlags.server';
router.get('/dashboard', async (req, res) => {
if (isFeatureEnabled('newDashboard')) {
// New dashboard logic
return sendSuccess(res, { version: 'v2', data: await getNewDashboardData() });
}
// Legacy dashboard
return sendSuccess(res, { version: 'v1', data: await getLegacyDashboardData() });
});
```
### React Component
```tsx
// src/pages/Dashboard.tsx
import { FeatureFlag } from '../components/FeatureFlag';
import { useFeatureFlag } from '../hooks/useFeatureFlag';
// Option 1: Declarative component
function Dashboard() {
return (
<FeatureFlag name="newDashboard" fallback={<LegacyDashboard />}>
<NewDashboard />
</FeatureFlag>
);
}
// Option 2: Hook for logic
function DashboardWithLogic() {
const isNewDashboard = useFeatureFlag('newDashboard');
useEffect(() => {
if (isNewDashboard) {
analytics.track('new_dashboard_viewed');
}
}, [isNewDashboard]);
return isNewDashboard ? <NewDashboard /> : <LegacyDashboard />;
}
```
---
## Implementation Notes
### Naming Convention
| Context | Pattern | Example |
| ---------------- | ------------------------- | ---------------------------------- |
| Backend env var | `FEATURE_SNAKE_CASE` | `FEATURE_NEW_DASHBOARD` |
| Frontend env var | `VITE_FEATURE_SNAKE_CASE` | `VITE_FEATURE_NEW_DASHBOARD` |
| Config property | `camelCase` | `config.featureFlags.newDashboard` |
| Type/Hook param | `camelCase` | `isFeatureEnabled('newDashboard')` |
### Flag Lifecycle
1. **Adding a flag**: Add to both schemas, set default to `false`, document
2. **Enabling a flag**: Set env var to `'true'`, restart application
3. **Removing a flag**: Remove conditional code first, then remove flag from schemas
4. **Sunset policy**: Flags should be removed within 3 months of full rollout
---
Last updated: 2026-01-28

View File

@@ -2,6 +2,17 @@
The **ai-usage** subagent specializes in LLM APIs (Gemini, Claude), prompt engineering, and AI-powered features in the Flyer Crawler project.
## Quick Reference
| Aspect | Details |
| ------------------ | ----------------------------------------------------------------------------------- |
| **Primary Use** | Gemini API integration, prompt engineering, AI extraction |
| **Key Files** | `src/services/aiService.server.ts`, `src/services/flyerProcessingService.server.ts` |
| **Key ADRs** | ADR-041 (AI Integration), ADR-046 (Image Processing) |
| **API Key Env** | `VITE_GOOGLE_GENAI_API_KEY` (prod), `VITE_GOOGLE_GENAI_API_KEY_TEST` (test) |
| **Error Handling** | Rate limits (429), JSON parse errors, timeout handling |
| **Delegate To** | `coder` (implementation), `testwriter` (tests), `integrations-specialist` |
## When to Use
Use the **ai-usage** subagent when you need to:
@@ -295,6 +306,9 @@ const fixtureResponse = await fs.readFile('fixtures/gemini-response.json');
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing AI features
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Testing AI features
- [INTEGRATIONS-GUIDE.md](./INTEGRATIONS-GUIDE.md) - External API patterns
- [../adr/0041-ai-gemini-integration-architecture.md](../adr/0041-ai-gemini-integration-architecture.md) - AI integration ADR
- [../adr/0046-image-processing-pipeline.md](../adr/0046-image-processing-pipeline.md) - Image processing
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing AI features
- [../getting-started/ENVIRONMENT.md](../getting-started/ENVIRONMENT.md) - Environment configuration

View File

@@ -2,6 +2,17 @@
The **coder** subagent is your primary tool for writing and modifying production Node.js/TypeScript code in the Flyer Crawler project. This guide explains how to work effectively with the coder subagent.
## Quick Reference
| Aspect | Details |
| ---------------- | ------------------------------------------------------------------------ |
| **Primary Use** | Write/modify production TypeScript code |
| **Key Files** | `src/routes/*.routes.ts`, `src/services/**/*.ts`, `src/components/*.tsx` |
| **Key ADRs** | ADR-034 (Repository), ADR-035 (Services), ADR-028 (API Response) |
| **Test Command** | `podman exec -it flyer-crawler-dev npm run test:unit` |
| **Type Check** | `podman exec -it flyer-crawler-dev npm run type-check` |
| **Delegate To** | `db-dev` (database), `frontend-specialist` (UI), `testwriter` (tests) |
## When to Use the Coder Subagent
Use the coder subagent when you need to:
@@ -307,6 +318,8 @@ error classes for all database operations"
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Testing strategies
- [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) - Database development workflows
- [../adr/0034-repository-pattern-standards.md](../adr/0034-repository-pattern-standards.md) - Repository patterns
- [../adr/0035-service-layer-architecture.md](../adr/0035-service-layer-architecture.md) - Service layer architecture
- [../adr/0028-api-response-standardization.md](../adr/0028-api-response-standardization.md) - API response patterns
- [../development/CODE-PATTERNS.md](../development/CODE-PATTERNS.md) - Code patterns reference

View File

@@ -5,6 +5,17 @@ This guide covers two database-focused subagents:
- **db-dev**: Database development - schemas, queries, migrations, optimization
- **db-admin**: Database administration - PostgreSQL/Redis admin, security, backups
## Quick Reference
| Aspect | db-dev | db-admin |
| ---------------- | -------------------------------------------- | ------------------------------------------ |
| **Primary Use** | Schemas, queries, migrations | Performance tuning, backups, security |
| **Key Files** | `src/services/db/*.db.ts`, `sql/migrations/` | `postgresql.conf`, `pg_hba.conf` |
| **Key ADRs** | ADR-034 (Repository), ADR-002 (Transactions) | ADR-019 (Backups), ADR-050 (Observability) |
| **Test Command** | `podman exec -it flyer-crawler-dev npm test` | N/A |
| **MCP Tool** | `mcp__devdb__query` | SSH to production |
| **Delegate To** | `coder` (service layer), `db-admin` (perf) | `devops` (infrastructure) |
## Understanding the Difference
| Aspect | db-dev | db-admin |
@@ -412,8 +423,9 @@ This is useful for:
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - Working with the coder subagent
- [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) - DevOps and deployment workflows
- [../adr/0034-repository-pattern-standards.md](../adr/0034-repository-pattern-standards.md) - Repository patterns
- [../adr/0002-standardized-transaction-management.md](../adr/0002-standardized-transaction-management.md) - Transaction management
- [../adr/0019-data-backup-and-recovery-strategy.md](../adr/0019-data-backup-and-recovery-strategy.md) - Backup strategy
- [../adr/0050-postgresql-function-observability.md](../adr/0050-postgresql-function-observability.md) - Database observability
- [../BARE-METAL-SETUP.md](../BARE-METAL-SETUP.md) - Production database setup
- [../operations/BARE-METAL-SETUP.md](../operations/BARE-METAL-SETUP.md) - Production database setup

View File

@@ -6,6 +6,90 @@ This guide covers DevOps-related subagents for deployment, infrastructure, and o
- **infra-architect**: Resource optimization, capacity planning
- **bg-worker**: Background jobs, PM2 workers, BullMQ queues
## Quick Reference
| Aspect | devops | infra-architect | bg-worker |
| ---------------- | ------------------------------------------ | --------------------------- | ------------------------------- |
| **Primary Use** | Containers, CI/CD, deployments | Resource optimization | BullMQ queues, PM2 workers |
| **Key Files** | `compose.dev.yml`, `.gitea/workflows/` | `ecosystem.config.cjs` | `src/services/queues.server.ts` |
| **Key ADRs** | ADR-014 (Containers), ADR-017 (CI/CD) | N/A | ADR-006 (Background Jobs) |
| **Commands** | `podman-compose`, `pm2` | `pm2 monit`, system metrics | Redis CLI, `pm2 logs` |
| **MCP Tools** | `mcp__podman__*` | N/A | N/A |
| **Access Model** | Read-only on production (provide commands) | Same | Same |
---
## CRITICAL: Server Access Model
**Claude Code has READ-ONLY access to production/test servers.**
The `claude-win10` user cannot execute write operations (PM2 restart, systemctl, file modifications) directly on servers. The devops subagent must **provide commands for the user to execute**, not attempt to run them via SSH.
### Command Delegation Workflow
When troubleshooting or making changes to production/test servers:
| Phase | Actor | Action |
| -------- | ------ | ----------------------------------------------------------- |
| Diagnose | Claude | Provide read-only diagnostic commands |
| Report | User | Execute commands, share output with Claude |
| Analyze | Claude | Interpret results, identify root cause |
| Fix | Claude | Provide 1-3 fix commands (never more, errors may cascade) |
| Execute | User | Run fix commands, report results |
| Verify | Claude | Provide verification commands to confirm success |
| Document | Claude | Update relevant documentation with findings and resolutions |
### Example: PM2 Process Issue
Step 1 - Diagnostic Commands (Claude provides, user runs):
```bash
# Check PM2 process status
pm2 list
# View recent error logs
pm2 logs flyer-crawler-api --err --lines 50
# Check system resources
free -h
df -h /var/www
```
Step 2 - User reports output to Claude
Step 3 - Fix Commands (Claude provides 1-3 at a time):
```bash
# Restart the failing process
pm2 restart flyer-crawler-api
```
Step 4 - User executes and reports result
Step 5 - Verification Commands:
```bash
# Confirm process is running
pm2 list
# Test API health
curl -s https://flyer-crawler.projectium.com/api/health/ready | jq .
```
### What NOT to Do
```bash
# WRONG - Claude cannot execute this directly
ssh root@projectium.com "pm2 restart all"
# WRONG - Providing too many commands at once
pm2 stop all && rm -rf node_modules && npm install && pm2 start all
# WRONG - Assuming commands succeeded without user confirmation
```
---
## The devops Subagent
### When to Use
@@ -372,6 +456,8 @@ redis-cli -a $REDIS_PASSWORD
## Service Management Commands
> **Note**: These commands are for the **user to execute on the server**. Claude Code provides these commands but cannot run them directly due to read-only server access. See [Server Access Model](#critical-server-access-model) above.
### PM2 Commands
```bash
@@ -468,8 +554,13 @@ podman exec -it flyer-crawler-dev npm test
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [../BARE-METAL-SETUP.md](../BARE-METAL-SETUP.md) - Production setup guide
- [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) - Database administration
- [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) - Production debugging
- [../operations/BARE-METAL-SETUP.md](../operations/BARE-METAL-SETUP.md) - Production setup guide
- [../operations/DEPLOYMENT.md](../operations/DEPLOYMENT.md) - Deployment guide
- [../operations/MONITORING.md](../operations/MONITORING.md) - Monitoring guide
- [../development/DEV-CONTAINER.md](../development/DEV-CONTAINER.md) - Dev container guide
- [../adr/0014-containerization-and-deployment-strategy.md](../adr/0014-containerization-and-deployment-strategy.md) - Containerization ADR
- [../adr/0006-background-job-processing-and-task-queues.md](../adr/0006-background-job-processing-and-task-queues.md) - Background jobs ADR
- [../adr/0017-ci-cd-and-branching-strategy.md](../adr/0017-ci-cd-and-branching-strategy.md) - CI/CD strategy
- [../adr/0053-worker-health-checks.md](../adr/0053-worker-health-checks.md) - Worker health checks
- [../adr/0053-worker-health-checks-and-monitoring.md](../adr/0053-worker-health-checks-and-monitoring.md) - Worker health checks

View File

@@ -7,6 +7,15 @@ This guide covers documentation-focused subagents:
- **planner**: Feature breakdown, roadmaps, scope management
- **product-owner**: Requirements, user stories, backlog prioritization
## Quick Reference
| Aspect | documenter | describer-for-ai | planner | product-owner |
| --------------- | -------------------- | ------------------------ | --------------------- | ---------------------- |
| **Primary Use** | User docs, API specs | ADRs, technical specs | Feature breakdown | User stories, backlog |
| **Key Files** | `docs/`, API docs | `docs/adr/`, `CLAUDE.md` | `docs/plans/` | Issue tracker |
| **Output** | Markdown guides | ADRs, context docs | Task lists, roadmaps | User stories, criteria |
| **Delegate To** | `coder` (implement) | `documenter` (user docs) | `coder` (build tasks) | `planner` (breakdown) |
## The documenter Subagent
### When to Use
@@ -437,6 +446,8 @@ Include dates on documentation that may become stale:
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing documented features
- [../adr/index.md](../adr/index.md) - ADR index
- [../TESTING.md](../TESTING.md) - Testing guide
- [../development/TESTING.md](../development/TESTING.md) - Testing guide
- [../development/CODE-PATTERNS.md](../development/CODE-PATTERNS.md) - Code patterns reference
- [../../CLAUDE.md](../../CLAUDE.md) - AI instructions

View File

@@ -5,6 +5,17 @@ This guide covers frontend-focused subagents:
- **frontend-specialist**: UI components, Neo-Brutalism, Core Web Vitals, accessibility
- **uiux-designer**: UI/UX decisions, component design, user experience
## Quick Reference
| Aspect | frontend-specialist | uiux-designer |
| ----------------- | ---------------------------------------------- | -------------------------------------- |
| **Primary Use** | React components, performance, accessibility | Design decisions, user flows |
| **Key Files** | `src/components/`, `src/features/` | Design specs, mockups |
| **Key ADRs** | ADR-012 (Design System), ADR-044 (Feature Org) | ADR-012 (Design System) |
| **Design System** | Neo-Brutalism (bold borders, high contrast) | Same |
| **State Mgmt** | TanStack Query (server), Zustand (client) | N/A |
| **Delegate To** | `coder` (backend), `tester` (test coverage) | `frontend-specialist` (implementation) |
## The frontend-specialist Subagent
### When to Use
@@ -406,7 +417,8 @@ const handleSelect = useCallback((id: string) => {
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - For implementing features
- [../DESIGN_TOKENS.md](../DESIGN_TOKENS.md) - Design token reference
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Component testing patterns
- [../development/DESIGN_TOKENS.md](../development/DESIGN_TOKENS.md) - Design token reference
- [../adr/0012-frontend-component-library-and-design-system.md](../adr/0012-frontend-component-library-and-design-system.md) - Design system ADR
- [../adr/0005-frontend-state-management-and-server-cache-strategy.md](../adr/0005-frontend-state-management-and-server-cache-strategy.md) - State management ADR
- [../adr/0044-frontend-feature-organization.md](../adr/0044-frontend-feature-organization.md) - Feature organization

View File

@@ -0,0 +1,396 @@
# Integrations Subagent Guide
The **integrations-specialist** subagent handles third-party services, webhooks, and external API integrations in the Flyer Crawler project.
## Quick Reference
| Aspect | Details |
| --------------- | --------------------------------------------------------------------------- |
| **Primary Use** | External APIs, webhooks, OAuth, third-party services |
| **Key Files** | `src/services/external/`, `src/routes/webhooks.routes.ts` |
| **Key ADRs** | ADR-041 (AI Integration), ADR-016 (API Security), ADR-048 (Auth) |
| **MCP Tools** | `mcp__gitea-projectium__*`, `mcp__bugsink__*` |
| **Security** | API key storage, webhook signatures, OAuth state param |
| **Delegate To** | `coder` (implementation), `security-engineer` (review), `ai-usage` (Gemini) |
## When to Use
Use the **integrations-specialist** subagent when you need to:
- Integrate with external APIs (OAuth, REST, GraphQL)
- Implement webhook handlers
- Configure third-party services
- Debug external service connectivity
- Handle API authentication flows
- Manage external service rate limits
## What integrations-specialist Knows
The integrations-specialist subagent understands:
- OAuth 2.0 flows (authorization code, client credentials)
- REST API integration patterns
- Webhook security (signature verification)
- External service error handling
- Rate limiting and retry strategies
- API key management
## Current Integrations
| Service | Purpose | Integration Type | Key Files |
| ------------- | ---------------------- | ---------------- | ---------------------------------- |
| Google Gemini | AI flyer extraction | REST API | `src/services/aiService.server.ts` |
| Bugsink | Error tracking | REST API | MCP: `mcp__bugsink__*` |
| Gitea | Repository and CI/CD | REST API | MCP: `mcp__gitea-projectium__*` |
| Redis | Caching and job queues | Native client | `src/services/redis.server.ts` |
| PostgreSQL | Primary database | Native client | `src/services/db/pool.db.ts` |
## Example Requests
### Adding External API Integration
```
"Use integrations-specialist to integrate with the Store API
to automatically fetch store location data. Include proper
error handling, rate limiting, and caching."
```
### OAuth Implementation
```
"Use integrations-specialist to implement Google OAuth for
user authentication. Include token refresh handling and
session management."
```
### Webhook Handler
```
"Use integrations-specialist to create a webhook handler for
receiving store inventory updates. Include signature verification
and idempotency handling."
```
### Debugging External Service Issues
```
"Use integrations-specialist to debug why the Gemini API calls
are intermittently failing with timeout errors. Check connection
pooling, retry logic, and error handling."
```
## Integration Patterns
### REST API Client Pattern
```typescript
// src/services/external/storeApi.server.ts
import { env } from '@/config/env';
import { log } from '@/services/logger.server';
interface StoreApiConfig {
baseUrl: string;
apiKey: string;
timeout: number;
}
class StoreApiClient {
private config: StoreApiConfig;
constructor(config: StoreApiConfig) {
this.config = config;
}
async getStoreLocations(storeId: string): Promise<StoreLocation[]> {
const url = `${this.config.baseUrl}/stores/${storeId}/locations`;
try {
const response = await fetch(url, {
headers: {
Authorization: `Bearer ${this.config.apiKey}`,
'Content-Type': 'application/json',
},
signal: AbortSignal.timeout(this.config.timeout),
});
if (!response.ok) {
throw new ExternalApiError(`Store API error: ${response.status}`, response.status);
}
return response.json();
} catch (error) {
log.error({ error, storeId }, 'Failed to fetch store locations');
throw error;
}
}
}
export const storeApiClient = new StoreApiClient({
baseUrl: env.STORE_API_BASE_URL,
apiKey: env.STORE_API_KEY,
timeout: 10000,
});
```
### Webhook Handler Pattern
```typescript
// src/routes/webhooks.routes.ts
import { Router } from 'express';
import crypto from 'crypto';
import { env } from '@/config/env';
const router = Router();
function verifyWebhookSignature(payload: string, signature: string, secret: string): boolean {
const expected = crypto.createHmac('sha256', secret).update(payload).digest('hex');
return crypto.timingSafeEqual(Buffer.from(signature), Buffer.from(`sha256=${expected}`));
}
router.post('/store-updates', async (req, res, next) => {
try {
const signature = req.headers['x-webhook-signature'] as string;
const payload = JSON.stringify(req.body);
if (!verifyWebhookSignature(payload, signature, env.WEBHOOK_SECRET)) {
return res.status(401).json({ error: 'Invalid signature' });
}
// Process webhook with idempotency check
const eventId = req.headers['x-event-id'] as string;
const alreadyProcessed = await checkIdempotencyKey(eventId);
if (alreadyProcessed) {
return res.status(200).json({ status: 'already_processed' });
}
await processStoreUpdate(req.body);
await markEventProcessed(eventId);
res.status(200).json({ status: 'processed' });
} catch (error) {
next(error);
}
});
```
### OAuth Flow Pattern
```typescript
// src/services/oauth/googleOAuth.server.ts
import { OAuth2Client } from 'google-auth-library';
import { env } from '@/config/env';
const oauth2Client = new OAuth2Client(
env.GOOGLE_CLIENT_ID,
env.GOOGLE_CLIENT_SECRET,
env.GOOGLE_REDIRECT_URI,
);
export function getAuthorizationUrl(): string {
return oauth2Client.generateAuthUrl({
access_type: 'offline',
scope: ['email', 'profile'],
prompt: 'consent',
});
}
export async function exchangeCodeForTokens(code: string) {
const { tokens } = await oauth2Client.getToken(code);
return tokens;
}
export async function refreshAccessToken(refreshToken: string) {
oauth2Client.setCredentials({ refresh_token: refreshToken });
const { credentials } = await oauth2Client.refreshAccessToken();
return credentials;
}
```
## Error Handling for External Services
### Custom Error Classes
```typescript
// src/services/external/errors.ts
export class ExternalApiError extends Error {
constructor(
message: string,
public statusCode: number,
public retryable: boolean = false,
) {
super(message);
this.name = 'ExternalApiError';
}
}
export class RateLimitError extends ExternalApiError {
constructor(
message: string,
public retryAfter: number,
) {
super(message, 429, true);
this.name = 'RateLimitError';
}
}
```
### Retry with Exponential Backoff
```typescript
async function fetchWithRetry<T>(
fn: () => Promise<T>,
options: { maxRetries: number; baseDelay: number },
): Promise<T> {
let lastError: Error;
for (let attempt = 0; attempt <= options.maxRetries; attempt++) {
try {
return await fn();
} catch (error) {
lastError = error as Error;
if (error instanceof ExternalApiError && !error.retryable) {
throw error;
}
if (attempt < options.maxRetries) {
const delay = options.baseDelay * Math.pow(2, attempt);
await new Promise((resolve) => setTimeout(resolve, delay));
}
}
}
throw lastError!;
}
```
## Rate Limiting Strategies
### Token Bucket Pattern
```typescript
class RateLimiter {
private tokens: number;
private lastRefill: number;
private readonly maxTokens: number;
private readonly refillRate: number; // tokens per second
constructor(maxTokens: number, refillRate: number) {
this.maxTokens = maxTokens;
this.tokens = maxTokens;
this.refillRate = refillRate;
this.lastRefill = Date.now();
}
async acquire(): Promise<void> {
this.refill();
if (this.tokens < 1) {
const waitTime = ((1 - this.tokens) / this.refillRate) * 1000;
await new Promise((resolve) => setTimeout(resolve, waitTime));
this.refill();
}
this.tokens -= 1;
}
private refill(): void {
const now = Date.now();
const elapsed = (now - this.lastRefill) / 1000;
this.tokens = Math.min(this.maxTokens, this.tokens + elapsed * this.refillRate);
this.lastRefill = now;
}
}
```
## Testing Integrations
### Mocking External Services
```typescript
// src/tests/mocks/storeApi.mock.ts
import { vi } from 'vitest';
export const mockStoreApiClient = {
getStoreLocations: vi.fn(),
};
vi.mock('@/services/external/storeApi.server', () => ({
storeApiClient: mockStoreApiClient,
}));
```
### Integration Test with Real Service
```typescript
// src/tests/integration/storeApi.integration.test.ts
describe('Store API Integration', () => {
it.skipIf(!env.STORE_API_KEY)('fetches real store locations', async () => {
const locations = await storeApiClient.getStoreLocations('test-store');
expect(locations).toBeInstanceOf(Array);
});
});
```
## MCP Tools for Integrations
### Gitea Integration
```
// List repositories
mcp__gitea-projectium__list_my_repos()
// Create issue
mcp__gitea-projectium__create_issue({
owner: "projectium",
repo: "flyer-crawler",
title: "Issue title",
body: "Issue description"
})
```
### Bugsink Integration
```
// List projects
mcp__bugsink__list_projects()
// Get issue details
mcp__bugsink__get_issue({ issue_id: "..." })
// Get stacktrace
mcp__bugsink__get_stacktrace({ event_id: "..." })
```
## Security Considerations
### API Key Storage
- Never commit API keys to version control
- Use environment variables via `src/config/env.ts`
- Rotate keys periodically
- Use separate keys for dev/test/prod
### Webhook Security
- Always verify webhook signatures
- Use HTTPS for webhook endpoints
- Implement idempotency
- Log webhook events for audit
### OAuth Security
- Use state parameter to prevent CSRF
- Store tokens securely (encrypted at rest)
- Implement token refresh before expiration
- Validate token scopes
## Related Documentation
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) - Security patterns
- [AI-USAGE-GUIDE.md](./AI-USAGE-GUIDE.md) - Gemini API integration
- [../adr/0041-ai-gemini-integration-architecture.md](../adr/0041-ai-gemini-integration-architecture.md) - AI integration ADR
- [../adr/0016-api-security-hardening.md](../adr/0016-api-security-hardening.md) - API security
- [../adr/0048-authentication-strategy.md](../adr/0048-authentication-strategy.md) - Authentication

View File

@@ -89,6 +89,47 @@ Or:
Claude will automatically invoke the appropriate subagent with the relevant context.
## Quick Reference Decision Tree
Use this flowchart to quickly identify the right subagent:
```
What do you need to do?
|
+-- Write/modify code? ----------------> Is it database-related?
| |
| +-- Yes -> db-dev
| +-- No --> Is it frontend?
| |
| +-- Yes -> frontend-specialist
| +-- No --> Is it AI/Gemini?
| |
| +-- Yes -> ai-usage
| +-- No --> coder
|
+-- Test something? -------------------> Write new tests? -> testwriter
| Find bugs/vulnerabilities? -> tester
| Review existing code? -> code-reviewer
|
+-- Debug an issue? -------------------> Production error? -> log-debug
| Database slow? -> db-admin
| External API failing? -> integrations-specialist
| AI extraction failing? -> ai-usage
|
+-- Infrastructure/Deployment? --------> Container/CI/CD? -> devops
| Resource optimization? -> infra-architect
| Background jobs? -> bg-worker
|
+-- Documentation? --------------------> User-facing docs? -> documenter
| ADRs/Technical specs? -> describer-for-ai
| Feature planning? -> planner
| User stories? -> product-owner
|
+-- Security? -------------------------> security-engineer
|
+-- Design/UX? ------------------------> uiux-designer
```
## Subagent Selection Guide
### Which Subagent Should I Use?
@@ -183,12 +224,26 @@ Subagents can pass information back to the main conversation and to each other t
## Related Documentation
- [CODER-GUIDE.md](./CODER-GUIDE.md) - Working with the coder subagent
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Testing strategies and patterns
- [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) - Database development workflows
- [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) - DevOps and deployment workflows
### Subagent Guides
| Guide | Subagents Covered |
| ---------------------------------------------------- | ----------------------------------------------------- |
| [CODER-GUIDE.md](./CODER-GUIDE.md) | coder |
| [TESTER-GUIDE.md](./TESTER-GUIDE.md) | tester, testwriter |
| [DATABASE-GUIDE.md](./DATABASE-GUIDE.md) | db-dev, db-admin |
| [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) | devops, infra-architect, bg-worker |
| [FRONTEND-GUIDE.md](./FRONTEND-GUIDE.md) | frontend-specialist, uiux-designer |
| [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) | security-engineer, log-debug, code-reviewer |
| [AI-USAGE-GUIDE.md](./AI-USAGE-GUIDE.md) | ai-usage |
| [INTEGRATIONS-GUIDE.md](./INTEGRATIONS-GUIDE.md) | integrations-specialist, tools-integration-specialist |
| [DOCUMENTATION-GUIDE.md](./DOCUMENTATION-GUIDE.md) | documenter, describer-for-ai, planner, product-owner |
### Project Documentation
- [../adr/index.md](../adr/index.md) - Architecture Decision Records
- [../TESTING.md](../TESTING.md) - Testing guide
- [../development/TESTING.md](../development/TESTING.md) - Testing guide
- [../development/CODE-PATTERNS.md](../development/CODE-PATTERNS.md) - Code patterns reference
- [../architecture/OVERVIEW.md](../architecture/OVERVIEW.md) - System architecture
## Troubleshooting

View File

@@ -6,6 +6,16 @@ This guide covers security and debugging-focused subagents:
- **log-debug**: Production errors, observability, Bugsink/Sentry analysis
- **code-reviewer**: Code quality, security review, best practices
## Quick Reference
| Aspect | security-engineer | log-debug | code-reviewer |
| --------------- | ---------------------------------- | ---------------------------------------- | --------------------------- |
| **Primary Use** | Security audits, OWASP | Production debugging | Code quality review |
| **Key ADRs** | ADR-016 (Security), ADR-032 (Rate) | ADR-050 (Observability) | ADR-034, ADR-035 (Patterns) |
| **MCP Tools** | N/A | `mcp__bugsink__*`, `mcp__localerrors__*` | N/A |
| **Key Checks** | Auth, input validation, CORS | Logs, stacktraces, error patterns | Patterns, tests, security |
| **Delegate To** | `coder` (fix issues) | `devops` (infra), `coder` (fixes) | `coder`, `testwriter` |
## The security-engineer Subagent
### When to Use
@@ -432,8 +442,10 @@ tail -f /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log | grep "duration:"
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [DEVOPS-GUIDE.md](./DEVOPS-GUIDE.md) - Infrastructure debugging
- [TESTER-GUIDE.md](./TESTER-GUIDE.md) - Security testing
- [../adr/0016-api-security-hardening.md](../adr/0016-api-security-hardening.md) - Security ADR
- [../adr/0032-rate-limiting-strategy.md](../adr/0032-rate-limiting-strategy.md) - Rate limiting
- [../adr/0015-application-performance-monitoring-and-error-tracking.md](../adr/0015-application-performance-monitoring-and-error-tracking.md) - Monitoring ADR
- [../adr/0015-error-tracking-and-observability.md](../adr/0015-error-tracking-and-observability.md) - Monitoring ADR
- [../adr/0050-postgresql-function-observability.md](../adr/0050-postgresql-function-observability.md) - Database observability
- [../BARE-METAL-SETUP.md](../BARE-METAL-SETUP.md) - Production setup
- [../operations/BARE-METAL-SETUP.md](../operations/BARE-METAL-SETUP.md) - Production setup
- [../tools/BUGSINK-SETUP.md](../tools/BUGSINK-SETUP.md) - Bugsink configuration

View File

@@ -5,6 +5,17 @@ This guide covers two related but distinct subagents for testing in the Flyer Cr
- **tester**: Adversarial testing to find edge cases, race conditions, and vulnerabilities
- **testwriter**: Creating comprehensive test suites for features and fixes
## Quick Reference
| Aspect | tester | testwriter |
| ---------------- | -------------------------------------------- | ------------------------------------------ |
| **Primary Use** | Find bugs, security issues, edge cases | Create test suites, improve coverage |
| **Key Files** | N/A (analysis-focused) | `*.test.ts`, `src/tests/utils/` |
| **Key ADRs** | ADR-010 (Testing), ADR-040 (Test Economics) | ADR-010 (Testing), ADR-045 (Test Fixtures) |
| **Test Command** | `podman exec -it flyer-crawler-dev npm test` | Same |
| **Test Stack** | Vitest, Supertest, Testing Library | Same |
| **Delegate To** | `testwriter` (write tests for findings) | `coder` (fix failing tests) |
## Understanding the Difference
| Aspect | tester | testwriter |
@@ -399,6 +410,7 @@ A typical workflow for thorough testing:
- [OVERVIEW.md](./OVERVIEW.md) - Subagent system overview
- [CODER-GUIDE.md](./CODER-GUIDE.md) - Working with the coder subagent
- [../TESTING.md](../TESTING.md) - Testing guide
- [SECURITY-DEBUG-GUIDE.md](./SECURITY-DEBUG-GUIDE.md) - Security testing and code review
- [../development/TESTING.md](../development/TESTING.md) - Testing guide
- [../adr/0010-testing-strategy-and-standards.md](../adr/0010-testing-strategy-and-standards.md) - Testing ADR
- [../adr/0040-testing-economics-and-priorities.md](../adr/0040-testing-economics-and-priorities.md) - Testing priorities

View File

@@ -49,18 +49,24 @@ Bugsink is a lightweight, self-hosted error tracking platform that is fully comp
| Web UI | `https://localhost:8443` (nginx proxy) |
| Internal URL | `http://localhost:8000` (direct) |
| Credentials | `admin@localhost` / `admin` |
| Backend Project | Project ID 1 - `flyer-crawler-dev-backend` |
| Frontend Project | Project ID 2 - `flyer-crawler-dev-frontend` |
| Backend Project | Project ID 1 - `Backend API (Dev)` |
| Frontend Project | Project ID 2 - `Frontend (Dev)` |
| Infra Project | Project ID 4 - `Infrastructure (Dev)` |
| Backend DSN | `http://<key>@localhost:8000/1` |
| Frontend DSN | `http://<key>@localhost:8000/2` |
| Frontend DSN | `https://<key>@localhost/bugsink-api/2` (via nginx proxy) |
| Infra DSN | `http://<key>@localhost:8000/4` (Logstash only) |
| Database | `postgresql://bugsink:bugsink_dev_password@postgres:5432/bugsink` |
**Important:** The Frontend DSN uses an nginx proxy (`/bugsink-api/`) because the browser cannot reach `localhost:8000` directly (container-internal port). See [Frontend Nginx Proxy](#frontend-nginx-proxy) for details.
**Configuration Files:**
| File | Purpose |
| ----------------- | ----------------------------------------------------------------- |
| `compose.dev.yml` | Initial DSNs using `127.0.0.1:8000` (container startup) |
| `.env.local` | **OVERRIDES** compose.dev.yml with `localhost:8000` (app runtime) |
| File | Purpose |
| ------------------------------ | ------------------------------------------------------- |
| `compose.dev.yml` | Initial DSNs using `127.0.0.1:8000` (container startup) |
| `.env.local` | **OVERRIDES** compose.dev.yml (app runtime) |
| `docker/nginx/dev.conf` | Nginx proxy for Bugsink API (frontend error reporting) |
| `docker/logstash/bugsink.conf` | Log routing to Backend/Infrastructure projects |
**Note:** `.env.local` takes precedence over `compose.dev.yml` environment variables.
@@ -103,10 +109,10 @@ MSYS_NO_PATHCONV=1 podman exec -e DATABASE_URL=postgresql://bugsink:bugsink_dev_
### Production Token
SSH into the production server:
User executes this command on the production server:
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage create_auth_token"
cd /opt/bugsink && bugsink-manage create_auth_token
```
**Output:** Same format - 40-character hex token.
@@ -360,75 +366,127 @@ const config = {
---
## Frontend Nginx Proxy
The frontend Sentry SDK runs in the browser, which cannot directly reach `localhost:8000` (the Bugsink container-internal port). To solve this, we use an nginx proxy.
### How It Works
```text
Browser --HTTPS--> https://localhost/bugsink-api/2/store/
|
v (nginx proxy)
http://localhost:8000/api/2/store/
|
v
Bugsink (internal)
```
### Nginx Configuration
Location: `docker/nginx/dev.conf`
```nginx
# Proxy Bugsink Sentry API for frontend error reporting
location /bugsink-api/ {
proxy_pass http://localhost:8000/api/;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Allow large error payloads with stack traces
client_max_body_size 10M;
# Timeouts for error reporting
proxy_connect_timeout 10s;
proxy_send_timeout 30s;
proxy_read_timeout 30s;
}
```
### Frontend DSN Format
```bash
# .env.local
# Uses nginx proxy path instead of direct port
VITE_SENTRY_DSN=https://<key>@localhost/bugsink-api/2
```
### Testing Frontend Error Reporting
1. Open browser console at `https://localhost`
2. Trigger a test error:
```javascript
throw new Error('Test frontend error from browser');
```
3. Check Bugsink Frontend (Dev) project for the error
4. Verify browser console shows Sentry SDK activity (if VITE_SENTRY_DEBUG=true)
---
## Logstash Integration
Logstash aggregates logs from multiple sources and forwards error patterns to Bugsink.
**Note:** See [ADR-015](../adr/0015-application-performance-monitoring-and-error-tracking.md) for the full architecture.
### 3-Project Architecture
Logstash routes errors to different Bugsink projects based on log source:
| Project | ID | Receives |
| -------------------- | --- | --------------------------------------------- |
| Backend API (Dev) | 1 | Pino app errors, PostgreSQL errors |
| Frontend (Dev) | 2 | Browser errors (via Sentry SDK, not Logstash) |
| Infrastructure (Dev) | 4 | Redis warnings, NGINX errors, Vite errors |
### Log Sources
| Source | Log Path | Error Detection |
| ---------- | ---------------------- | ------------------------- |
| Pino (app) | `/app/logs/*.log` | level >= 50 (error/fatal) |
| Redis | `/var/log/redis/*.log` | WARNING/ERROR log levels |
| PostgreSQL | (future) | ERROR/FATAL log levels |
| Source | Log Path | Project Destination | Error Detection |
| ---------- | --------------------------- | ------------------- | ------------------------- |
| PM2 API | `/var/log/pm2/api-*.log` | Backend (1) | level >= 50 (error/fatal) |
| PM2 Worker | `/var/log/pm2/worker-*.log` | Backend (1) | level >= 50 (error/fatal) |
| PM2 Vite | `/var/log/pm2/vite-*.log` | Infrastructure (4) | error keyword patterns |
| PostgreSQL | `/var/log/postgresql/*.log` | Backend (1) | ERROR/FATAL log levels |
| Redis | `/var/log/redis/*.log` | Infrastructure (4) | WARNING level (`#`) |
| NGINX | `/var/log/nginx/error.log` | Infrastructure (4) | error/crit/alert/emerg |
### Pipeline Configuration
**Location:** `/etc/logstash/conf.d/bugsink.conf`
**Location:** `/etc/logstash/conf.d/bugsink.conf` (or `docker/logstash/bugsink.conf` in project)
```conf
# === INPUTS ===
input {
file {
path => "/app/logs/*.log"
codec => json
type => "pino"
tags => ["app"]
}
The configuration:
file {
path => "/var/log/redis/*.log"
type => "redis"
tags => ["redis"]
}
1. **Inputs**: Reads from PM2 logs, PostgreSQL logs, Redis logs, NGINX logs
2. **Filters**: Detects errors and assigns tags based on log type
3. **Outputs**: Routes to appropriate Bugsink project based on log source
**Key Routing Logic:**
```ruby
# Infrastructure logs -> Project 4
if "error" in [tags] and ([type] == "redis" or [type] == "nginx_error" or [type] == "pm2_vite") {
http { url => "http://localhost:8000/api/4/store/" ... }
}
# === FILTERS ===
filter {
if [type] == "pino" and [level] >= 50 {
mutate { add_tag => ["error"] }
}
if [type] == "redis" {
grok {
match => { "message" => "%{POSINT:pid}:%{WORD:role} %{MONTHDAY} %{MONTH} %{TIME} %{WORD:loglevel} %{GREEDYDATA:redis_message}" }
}
if [loglevel] in ["WARNING", "ERROR"] {
mutate { add_tag => ["error"] }
}
}
}
# === OUTPUT ===
output {
if "error" in [tags] {
http {
url => "http://localhost:8000/api/store/"
http_method => "post"
format => "json"
}
}
# Backend logs -> Project 1
else if "error" in [tags] and ([type] in ["pm2_api", "pm2_worker", "pino", "postgres"]) {
http { url => "http://localhost:8000/api/1/store/" ... }
}
```
### Benefits
1. **Secondary Capture Path**: Catches errors before SDK initialization
2. **Log-Based Errors**: Captures errors that don't throw exceptions
3. **Infrastructure Monitoring**: Redis connection issues, slow commands
4. **Historical Analysis**: Process existing log files
1. **Separation of Concerns**: Application errors separate from infrastructure issues
2. **Secondary Capture Path**: Catches errors before SDK initialization
3. **Log-Based Errors**: Captures errors that don't throw exceptions
4. **Infrastructure Monitoring**: Redis, NGINX, build tooling issues
5. **Historical Analysis**: Process existing log files
---
@@ -737,12 +795,232 @@ podman exec flyer-crawler-dev pg_isready -U bugsink -d bugsink -h postgres
podman exec flyer-crawler-dev psql -U postgres -h postgres -c "\l" | grep bugsink
```
**Production:**
**Production** (user executes on server):
```bash
ssh root@projectium.com "cd /opt/bugsink && bugsink-manage check"
cd /opt/bugsink && bugsink-manage check
```
### PostgreSQL Sequence Out of Sync (Duplicate Key Errors)
**Symptoms:**
- Bugsink throws `duplicate key value violates unique constraint "projects_project_pkey"`
- Error detail shows: `Key (id)=(1) already exists`
- New projects or other entities fail to create
**Root Cause:**
PostgreSQL sequences can become out of sync with actual data after:
- Manual data insertion or database seeding
- Restoring from backup
- Copying data between environments
The sequence generates IDs that already exist in the table.
**Diagnosis:**
```bash
# Dev Container - Check sequence vs max ID
podman exec flyer-crawler-dev psql -U bugsink -h postgres -d bugsink -c "
SELECT
(SELECT MAX(id) FROM projects_project) as max_id,
(SELECT last_value FROM projects_project_id_seq) as seq_last_value,
CASE
WHEN (SELECT MAX(id) FROM projects_project) <= (SELECT last_value FROM projects_project_id_seq)
THEN 'OK'
ELSE 'OUT OF SYNC - Needs reset'
END as status;
"
# Production (user executes on server)
cd /opt/bugsink && bugsink-manage dbshell
# Then run: SELECT MAX(id) as max_id, (SELECT last_value FROM projects_project_id_seq) as seq_value FROM projects_project;
```
**Solution:**
Reset the sequence to the maximum existing ID:
```bash
# Dev Container
podman exec flyer-crawler-dev psql -U bugsink -h postgres -d bugsink -c "
SELECT setval('projects_project_id_seq', COALESCE((SELECT MAX(id) FROM projects_project), 1), true);
"
# Production (user executes on server)
cd /opt/bugsink && bugsink-manage dbshell
# Then run: SELECT setval('projects_project_id_seq', COALESCE((SELECT MAX(id) FROM projects_project), 1), true);
```
**Verification:**
After running the fix, verify:
```bash
# Next ID should be max_id + 1
podman exec flyer-crawler-dev psql -U bugsink -h postgres -d bugsink -c "
SELECT nextval('projects_project_id_seq') - 1 as current_seq_value;
"
```
**Prevention:**
When manually inserting data or restoring backups, always reset sequences:
```sql
-- Generic pattern for any table/sequence
SELECT setval('SEQUENCE_NAME', COALESCE((SELECT MAX(id) FROM TABLE_NAME), 1), true);
-- Common Bugsink sequences that may need reset:
SELECT setval('projects_project_id_seq', COALESCE((SELECT MAX(id) FROM projects_project), 1), true);
SELECT setval('teams_team_id_seq', COALESCE((SELECT MAX(id) FROM teams_team), 1), true);
SELECT setval('releases_release_id_seq', COALESCE((SELECT MAX(id) FROM releases_release), 1), true);
```
### Logstash Level Field Constraint Violation
**Symptoms:**
- Bugsink errors: `value too long for type character varying(7)`
- Errors in Backend API project from Logstash
- Log shows `%{sentry_level}` literal string being sent
**Root Cause:**
Logstash sends the literal placeholder `%{sentry_level}` (16 characters) to Bugsink when:
- No error pattern is detected in the log message
- The `sentry_level` field is not properly initialized
- Bugsink's `level` column has a `varchar(7)` constraint
Valid Sentry levels are: `fatal`, `error`, `warning`, `info`, `debug` (all <= 7 characters).
**Diagnosis:**
```bash
# Check for recent level constraint errors in Bugsink
# Via MCP:
mcp__localerrors__list_issues({ project_id: 1, status: 'unresolved' })
# Or check Logstash logs for HTTP 500 responses
podman exec flyer-crawler-dev cat /var/log/logstash/logstash.log | grep "500"
```
**Solution:**
The fix requires updating the Logstash configuration (`docker/logstash/bugsink.conf`) to:
1. Validate `sentry_level` is not nil, empty, or contains placeholder text
2. Set a default value of "error" for any error-tagged event without a valid level
3. Normalize levels to lowercase
**Key filter block (Ruby):**
```ruby
ruby {
code => '
level = event.get("sentry_level")
# Check if level is invalid (nil, empty, contains placeholder, or invalid value)
if level.nil? || level.to_s.empty? || level.to_s.include?("%{") || level.to_s.length > 7
# Default to "error" for error-tagged events, "info" otherwise
if event.get("tags")&.include?("error")
event.set("sentry_level", "error")
else
event.set("sentry_level", "info")
end
else
# Normalize to lowercase and validate
normalized = level.to_s.downcase
valid_levels = ["fatal", "error", "warning", "info", "debug"]
unless valid_levels.include?(normalized)
normalized = "error"
end
event.set("sentry_level", normalized)
end
'
}
```
**Verification:**
After applying the fix:
1. Restart Logstash: `podman exec flyer-crawler-dev systemctl restart logstash`
2. Generate a test error and verify it appears in Bugsink without level errors
3. Check no new "value too long" errors appear in the project
### CSRF Verification Failed
**Symptoms:** "CSRF verification failed. Request aborted." error when performing actions in Bugsink UI (resolving issues, changing settings, etc.)
**Root Cause:**
Django 4.0+ requires `CSRF_TRUSTED_ORIGINS` to be explicitly configured for HTTPS POST requests. The error occurs because:
1. Bugsink is accessed via `https://localhost:8443` (nginx HTTPS proxy)
2. Django's CSRF protection validates the `Origin` header against `CSRF_TRUSTED_ORIGINS`
3. Without explicit configuration, Django rejects POST requests from HTTPS origins
**Why localhost vs 127.0.0.1 Matters:**
- `localhost` and `127.0.0.1` are treated as DIFFERENT origins by browsers
- If you access Bugsink via `https://localhost:8443`, Django must trust `https://localhost:8443`
- If you access via `https://127.0.0.1:8443`, Django must trust `https://127.0.0.1:8443`
- The fix includes BOTH to allow either access pattern
**Configuration (Already Applied):**
The Bugsink Django configuration in `Dockerfile.dev` includes:
```python
# CSRF Trusted Origins (Django 4.0+ requires full origin for HTTPS POST requests)
CSRF_TRUSTED_ORIGINS = [
"https://localhost:8443",
"https://127.0.0.1:8443",
"http://localhost:8000",
"http://127.0.0.1:8000",
]
# HTTPS proxy support (nginx reverse proxy on port 8443)
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
```
**Verification:**
```bash
# Verify CSRF_TRUSTED_ORIGINS is configured
podman exec flyer-crawler-dev sh -c 'cat /opt/bugsink/conf/bugsink_conf.py | grep -A 6 CSRF_TRUSTED'
# Expected output:
# CSRF_TRUSTED_ORIGINS = [
# "https://localhost:8443",
# "https://127.0.0.1:8443",
# "http://localhost:8000",
# "http://127.0.0.1:8000",
# ]
```
**If Issue Persists After Fix:**
1. **Rebuild the container image** (configuration is baked into the image):
```bash
podman-compose -f compose.dev.yml down
podman build -f Dockerfile.dev -t localhost/flyer-crawler-dev:latest .
podman-compose -f compose.dev.yml up -d
```
2. **Clear browser cookies** for localhost:8443
3. **Check nginx X-Forwarded-Proto header** - the nginx config must set this header for Django to recognize HTTPS:
```bash
podman exec flyer-crawler-dev cat /etc/nginx/sites-available/bugsink | grep X-Forwarded-Proto
# Should show: proxy_set_header X-Forwarded-Proto $scheme;
```
---
## Related Documentation

176
ecosystem.dev.config.cjs Normal file
View File

@@ -0,0 +1,176 @@
// ecosystem.dev.config.cjs
// ============================================================================
// DEVELOPMENT PM2 CONFIGURATION
// ============================================================================
// This file mirrors the production ecosystem.config.cjs but with settings
// appropriate for the development environment inside the dev container.
//
// Key differences from production:
// - Single instance for API (not cluster mode) to allow debugger attachment
// - tsx watch mode for hot reloading
// - Development-specific environment variables
// - Logs written to /var/log/pm2 for Logstash integration (ADR-050)
//
// Usage:
// pm2 start ecosystem.dev.config.cjs
// pm2 logs # View all logs
// pm2 logs flyer-crawler-api-dev # View API logs only
// pm2 restart all # Restart all processes
// pm2 delete all # Stop all processes
//
// Related:
// - ecosystem.config.cjs (production configuration)
// - docs/adr/0014-linux-only-platform.md
// - docs/adr/0050-postgresql-function-observability.md
// ============================================================================
// --- Environment Variable Validation ---
// In dev, we warn but don't fail - variables come from compose.dev.yml
const requiredVars = ['DB_HOST', 'JWT_SECRET'];
const missingVars = requiredVars.filter((key) => !process.env[key]);
if (missingVars.length > 0) {
console.warn(
'\n[ecosystem.dev.config.cjs] WARNING: The following environment variables are MISSING:',
);
missingVars.forEach((key) => console.warn(` - ${key}`));
console.warn(
'[ecosystem.dev.config.cjs] These should be set in compose.dev.yml or .env.local\n',
);
} else {
console.log('[ecosystem.dev.config.cjs] Required environment variables are present.');
}
// --- Shared Environment Variables ---
// These come from compose.dev.yml environment section
const sharedEnv = {
// Timezone: PST (America/Los_Angeles) for consistent log timestamps
TZ: process.env.TZ || 'America/Los_Angeles',
NODE_ENV: 'development',
DB_HOST: process.env.DB_HOST || 'postgres',
DB_PORT: process.env.DB_PORT || '5432',
DB_USER: process.env.DB_USER || 'postgres',
DB_PASSWORD: process.env.DB_PASSWORD || 'postgres',
DB_NAME: process.env.DB_NAME || 'flyer_crawler_dev',
REDIS_URL: process.env.REDIS_URL || 'redis://redis:6379',
REDIS_HOST: process.env.REDIS_HOST || 'redis',
REDIS_PORT: process.env.REDIS_PORT || '6379',
FRONTEND_URL: process.env.FRONTEND_URL || 'https://localhost',
JWT_SECRET: process.env.JWT_SECRET || 'dev-jwt-secret-change-in-production',
WORKER_LOCK_DURATION: process.env.WORKER_LOCK_DURATION || '120000',
// Sentry/Bugsink (ADR-015)
SENTRY_DSN: process.env.SENTRY_DSN,
SENTRY_ENVIRONMENT: process.env.SENTRY_ENVIRONMENT || 'development',
SENTRY_ENABLED: process.env.SENTRY_ENABLED || 'true',
SENTRY_DEBUG: process.env.SENTRY_DEBUG || 'true',
// Optional API keys (may not be set in dev)
GEMINI_API_KEY: process.env.GEMINI_API_KEY,
GOOGLE_MAPS_API_KEY: process.env.GOOGLE_MAPS_API_KEY,
GOOGLE_CLIENT_ID: process.env.GOOGLE_CLIENT_ID,
GOOGLE_CLIENT_SECRET: process.env.GOOGLE_CLIENT_SECRET,
};
module.exports = {
apps: [
// =========================================================================
// API SERVER (Development)
// =========================================================================
// Single instance with tsx watch for hot reloading.
// Unlike production, we don't use cluster mode to allow:
// - Debugger attachment
// - Simpler log output
// - Hot reloading via tsx watch
{
name: 'flyer-crawler-api-dev',
script: './node_modules/.bin/tsx',
args: 'watch server.ts',
cwd: '/app',
watch: false, // tsx watch handles this
instances: 1, // Single instance for debugging
exec_mode: 'fork', // Fork mode (not cluster) for debugging
max_memory_restart: '1G', // More generous in dev
kill_timeout: 5000,
// PM2 log configuration (ADR-050 - Logstash integration)
output: '/var/log/pm2/api-out.log',
error: '/var/log/pm2/api-error.log',
log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
merge_logs: true,
// Restart configuration
max_restarts: 10,
exp_backoff_restart_delay: 500,
min_uptime: '5s',
// Environment
env: {
...sharedEnv,
PORT: '3001',
},
},
// =========================================================================
// BACKGROUND WORKER (Development)
// =========================================================================
// Processes background jobs (flyer processing, cleanup, etc.)
// Runs as a separate process like in production.
{
name: 'flyer-crawler-worker-dev',
script: './node_modules/.bin/tsx',
args: 'watch src/services/worker.ts',
cwd: '/app',
watch: false, // tsx watch handles this
instances: 1,
exec_mode: 'fork',
max_memory_restart: '1G',
kill_timeout: 10000, // Workers need more time to finish jobs
// PM2 log configuration (ADR-050)
output: '/var/log/pm2/worker-out.log',
error: '/var/log/pm2/worker-error.log',
log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
merge_logs: true,
// Restart configuration
max_restarts: 10,
exp_backoff_restart_delay: 500,
min_uptime: '5s',
// Environment
env: {
...sharedEnv,
},
},
// =========================================================================
// VITE FRONTEND DEV SERVER (Development)
// =========================================================================
// Vite dev server for frontend hot module replacement.
// Listens on 5173 and is proxied by nginx to 443.
{
name: 'flyer-crawler-vite-dev',
script: './node_modules/.bin/vite',
args: '--host',
cwd: '/app',
watch: false, // Vite handles its own watching
instances: 1,
exec_mode: 'fork',
max_memory_restart: '2G', // Vite can use significant memory
kill_timeout: 5000,
// PM2 log configuration (ADR-050)
output: '/var/log/pm2/vite-out.log',
error: '/var/log/pm2/vite-error.log',
log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
merge_logs: true,
// Restart configuration
max_restarts: 10,
exp_backoff_restart_delay: 500,
min_uptime: '5s',
// Environment
env: {
// Timezone: PST (America/Los_Angeles) for consistent log timestamps
TZ: process.env.TZ || 'America/Los_Angeles',
NODE_ENV: 'development',
// Vite-specific env vars (VITE_ prefix)
VITE_SENTRY_DSN: process.env.VITE_SENTRY_DSN,
VITE_SENTRY_ENVIRONMENT: process.env.VITE_SENTRY_ENVIRONMENT || 'development',
VITE_SENTRY_ENABLED: process.env.VITE_SENTRY_ENABLED || 'true',
VITE_SENTRY_DEBUG: process.env.VITE_SENTRY_DEBUG || 'true',
},
},
],
};

4
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "flyer-crawler",
"version": "0.12.7",
"version": "0.12.22",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "flyer-crawler",
"version": "0.12.7",
"version": "0.12.22",
"dependencies": {
"@bull-board/api": "^6.14.2",
"@bull-board/express": "^6.14.2",

View File

@@ -1,20 +1,25 @@
{
"name": "flyer-crawler",
"private": true,
"version": "0.12.7",
"version": "0.12.22",
"type": "module",
"scripts": {
"dev": "concurrently \"npm:start:dev\" \"vite\"",
"dev:container": "concurrently \"npm:start:dev\" \"vite --host\"",
"dev:pm2": "pm2 start ecosystem.dev.config.cjs && pm2 logs",
"dev:pm2:restart": "pm2 restart ecosystem.dev.config.cjs",
"dev:pm2:stop": "pm2 delete all",
"dev:pm2:status": "pm2 status",
"dev:pm2:logs": "pm2 logs",
"start": "npm run start:prod",
"build": "vite build",
"preview": "vite preview",
"test": "node scripts/check-linux.js && cross-env NODE_ENV=test tsx ./node_modules/vitest/vitest.mjs run",
"test-wsl": "cross-env NODE_ENV=test vitest run",
"test": "node scripts/check-linux.js && cross-env NODE_ENV=test TZ= tsx ./node_modules/vitest/vitest.mjs run",
"test-wsl": "cross-env NODE_ENV=test TZ= vitest run",
"test:coverage": "npm run clean && npm run test:unit -- --coverage && npm run test:integration -- --coverage",
"test:unit": "node scripts/check-linux.js && cross-env NODE_ENV=test tsx --max-old-space-size=8192 ./node_modules/vitest/vitest.mjs run --project unit -c vite.config.ts",
"test:integration": "node scripts/check-linux.js && cross-env NODE_ENV=test tsx --max-old-space-size=8192 ./node_modules/vitest/vitest.mjs run --project integration -c vitest.config.integration.ts",
"test:e2e": "node scripts/check-linux.js && cross-env NODE_ENV=test tsx --max-old-space-size=8192 ./node_modules/vitest/vitest.mjs run --config vitest.config.e2e.ts",
"test:unit": "node scripts/check-linux.js && cross-env NODE_ENV=test TZ= tsx --max-old-space-size=8192 ./node_modules/vitest/vitest.mjs run --project unit -c vite.config.ts",
"test:integration": "node scripts/check-linux.js && cross-env NODE_ENV=test TZ= tsx --max-old-space-size=8192 ./node_modules/vitest/vitest.mjs run --project integration -c vitest.config.integration.ts",
"test:e2e": "node scripts/check-linux.js && cross-env NODE_ENV=test TZ= tsx --max-old-space-size=8192 ./node_modules/vitest/vitest.mjs run --config vitest.config.e2e.ts",
"format": "prettier --write .",
"lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0",
"type-check": "tsc --noEmit",

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -0,0 +1 @@
dummy-image-content

View File

@@ -3,22 +3,48 @@
# ============================================================================
# Development Container Entrypoint
# ============================================================================
# This script starts the development server automatically when the container
# starts, both with VS Code Dev Containers and with plain podman-compose.
# This script starts all development services when the container starts,
# both with VS Code Dev Containers and with plain podman-compose.
#
# Services started:
# - Nginx (proxies Vite 5173 → 3000)
# - Nginx (HTTPS proxy: Vite 5173 -> 443, Bugsink 8000 -> 8443, API 3001)
# - Bugsink (error tracking) on port 8000
# - Logstash (log aggregation)
# - Node.js dev server (API + Frontend) on ports 3001 and 5173
# - PM2 (process manager) running:
# - flyer-crawler-api-dev: API server (port 3001)
# - flyer-crawler-worker-dev: Background job worker
# - flyer-crawler-vite-dev: Vite frontend dev server (port 5173)
#
# ADR-014: This architecture matches production (PM2 managing processes)
# ADR-050: Logs are written to /var/log/pm2 for Logstash integration
# ============================================================================
set -e
echo "🚀 Starting Flyer Crawler Dev Container..."
echo "Starting Flyer Crawler Dev Container..."
# ============================================================================
# Timezone Configuration
# ============================================================================
# Ensure TZ is set for consistent log timestamps across all services.
# TZ should be set via compose.dev.yml environment (default: America/Los_Angeles)
# ============================================================================
if [ -n "$TZ" ]; then
echo "Timezone configured: $TZ"
# Link timezone data if available (for date command and other tools)
if [ -f "/usr/share/zoneinfo/$TZ" ]; then
ln -sf "/usr/share/zoneinfo/$TZ" /etc/localtime
echo "$TZ" > /etc/timezone
echo "System timezone set to: $(date +%Z) ($(date))"
else
echo "Warning: Timezone data not found for $TZ, using TZ environment variable only"
fi
else
echo "Warning: TZ environment variable not set, using container default timezone"
fi
# Configure Bugsink HTTPS (ADR-015)
echo "🔒 Configuring Bugsink HTTPS..."
echo "Configuring Bugsink HTTPS..."
mkdir -p /etc/bugsink/ssl
if [ ! -f "/etc/bugsink/ssl/localhost+2.pem" ]; then
cd /etc/bugsink/ssl && mkcert localhost 127.0.0.1 ::1 > /dev/null 2>&1
@@ -53,52 +79,92 @@ NGINX_EOF
ln -sf /etc/nginx/sites-available/bugsink /etc/nginx/sites-enabled/bugsink
# Copy the dev nginx config from mounted volume to nginx sites-available
echo "📋 Copying nginx dev config..."
echo "Copying nginx dev config..."
cp /app/docker/nginx/dev.conf /etc/nginx/sites-available/default
# Ensure PM2 log directory exists with correct permissions
echo "Setting up PM2 log directory..."
mkdir -p /var/log/pm2
chmod 755 /var/log/pm2
# Start nginx in background (if installed)
if command -v nginx &> /dev/null; then
echo "🌐 Starting nginx (HTTPS: Vite 5173 443, Bugsink 8000 8443, API 3001 /api/)..."
echo "Starting nginx (HTTPS: Vite 5173 -> 443, Bugsink 8000 -> 8443, API 3001 -> /api/)..."
nginx &
fi
# Start Bugsink in background
echo "📊 Starting Bugsink error tracking..."
echo "Starting Bugsink error tracking..."
/usr/local/bin/start-bugsink.sh > /var/log/bugsink/server.log 2>&1 &
# Wait for Bugsink to initialize, then run snappea migrations
echo "Waiting for Bugsink to initialize..."
echo "Waiting for Bugsink to initialize..."
sleep 5
echo "🔧 Running Bugsink snappea database migrations..."
echo "Running Bugsink snappea database migrations..."
cd /opt/bugsink/conf && \
export DATABASE_URL="postgresql://bugsink:bugsink_dev_password@postgres:5432/bugsink" && \
export SECRET_KEY="dev-bugsink-secret-key-minimum-50-characters-for-security" && \
/opt/bugsink/bin/bugsink-manage migrate --database=snappea > /dev/null 2>&1
# Start Snappea task worker
echo "🔄 Starting Snappea task worker..."
echo "Starting Snappea task worker..."
cd /opt/bugsink/conf && \
export DATABASE_URL="postgresql://bugsink:bugsink_dev_password@postgres:5432/bugsink" && \
export SECRET_KEY="dev-bugsink-secret-key-minimum-50-characters-for-security" && \
/opt/bugsink/bin/bugsink-manage runsnappea > /var/log/bugsink/snappea.log 2>&1 &
# Start Logstash in background
echo "📝 Starting Logstash..."
echo "Starting Logstash..."
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/bugsink.conf > /var/log/logstash/logstash.log 2>&1 &
# Wait a few seconds for services to initialize
# Wait for services to initialize
sleep 3
# Change to app directory
cd /app
# Start development server
echo "💻 Starting development server..."
echo " - Frontend: https://localhost (nginx HTTPS → Vite on 5173)"
echo " - Backend API: http://localhost:3001"
echo " - Bugsink: https://localhost:8443 (nginx HTTPS → Bugsink on 8000)"
echo " - Note: Accept the self-signed certificate warnings in your browser"
# ============================================================================
# Start PM2 (ADR-014: Production-like process management)
# ============================================================================
# PM2 manages all Node.js processes:
# - API server (tsx watch server.ts)
# - Background worker (tsx watch src/services/worker.ts)
# - Vite dev server (vite --host)
#
# Logs are written to /var/log/pm2 for Logstash integration (ADR-050)
# ============================================================================
echo "Starting PM2 with development configuration..."
echo " - API Server: http://localhost:3001 (proxied via nginx)"
echo " - Frontend: https://localhost (nginx HTTPS -> Vite on 5173)"
echo " - Bugsink: https://localhost:8443 (nginx HTTPS -> Bugsink on 8000)"
echo " - PM2 Logs: /var/log/pm2/*.log"
echo ""
echo "Note: Accept the self-signed certificate warnings in your browser"
echo ""
# Run npm dev server (this will block and keep container alive)
exec npm run dev:container
# Delete any existing PM2 processes (clean start)
pm2 delete all 2>/dev/null || true
# Start PM2 with the dev ecosystem config
pm2 start /app/ecosystem.dev.config.cjs
# Show PM2 status
echo ""
echo "--- PM2 Process Status ---"
pm2 status
echo "-------------------------"
echo ""
echo "PM2 Commands:"
echo " pm2 logs # View all logs (tail -f style)"
echo " pm2 logs api # View API logs only"
echo " pm2 restart all # Restart all processes"
echo " pm2 status # Show process status"
echo ""
# Keep the container running by tailing PM2 logs
# This ensures:
# 1. Container stays alive
# 2. Logs are visible in docker logs / podman logs
# 3. PM2 processes continue running
exec pm2 logs --raw

101
server.ts
View File

@@ -18,27 +18,8 @@ import { getPool } from './src/services/db/connection.db';
import passport from './src/config/passport';
import { logger } from './src/services/logger.server';
// Import routers
import authRouter from './src/routes/auth.routes';
import userRouter from './src/routes/user.routes';
import adminRouter from './src/routes/admin.routes';
import aiRouter from './src/routes/ai.routes';
import budgetRouter from './src/routes/budget.routes';
import flyerRouter from './src/routes/flyer.routes';
import recipeRouter from './src/routes/recipe.routes';
import personalizationRouter from './src/routes/personalization.routes';
import priceRouter from './src/routes/price.routes';
import statsRouter from './src/routes/stats.routes';
import gamificationRouter from './src/routes/gamification.routes';
import systemRouter from './src/routes/system.routes';
import healthRouter from './src/routes/health.routes';
import upcRouter from './src/routes/upc.routes';
import inventoryRouter from './src/routes/inventory.routes';
import receiptRouter from './src/routes/receipt.routes';
import dealsRouter from './src/routes/deals.routes';
import reactionsRouter from './src/routes/reactions.routes';
import storeRouter from './src/routes/store.routes';
import categoryRouter from './src/routes/category.routes';
// Import the versioned API router factory (ADR-008 Phase 2)
import { createApiRouter } from './src/routes/versioned';
import { errorHandler } from './src/middleware/errorHandler';
import { backgroundJobService, startBackgroundJobs } from './src/services/backgroundJobService';
import { websocketService } from './src/services/websocketService.server';
@@ -235,11 +216,11 @@ if (process.env.NODE_ENV !== 'production') {
logger.info('API Documentation available at /docs/api-docs');
}
// --- API Routes ---
// --- API Routes (ADR-008: API Versioning Strategy - Phase 1) ---
// ADR-053: Worker Health Checks
// Expose queue metrics for monitoring.
app.get('/api/health/queues', async (req, res) => {
// Expose queue metrics for monitoring at versioned endpoint.
app.get('/api/v1/health/queues', async (req, res) => {
try {
const statuses = await monitoringService.getQueueStatuses();
res.json(statuses);
@@ -249,48 +230,36 @@ app.get('/api/health/queues', async (req, res) => {
}
});
// The order of route registration is critical.
// More specific routes should be registered before more general ones.
// 1. Authentication routes for login, registration, etc.
app.use('/api/auth', authRouter); // This was a duplicate, fixed.
// 2. System routes for health checks, etc.
app.use('/api/health', healthRouter);
// 3. System routes for pm2 status, etc.
app.use('/api/system', systemRouter);
// 3. General authenticated user routes.
app.use('/api/users', userRouter);
// 4. AI routes, some of which use optional authentication.
app.use('/api/ai', aiRouter);
// 5. Admin routes, which are all protected by admin-level checks.
app.use('/api/admin', adminRouter); // This seems to be missing from the original file list, but is required.
// 6. Budgeting and spending analysis routes.
app.use('/api/budgets', budgetRouter);
// 7. Gamification routes for achievements.
app.use('/api/achievements', gamificationRouter);
// 8. Public flyer routes.
app.use('/api/flyers', flyerRouter);
// 8. Public recipe routes.
app.use('/api/recipes', recipeRouter);
// 9. Public personalization data routes (master items, etc.).
app.use('/api/personalization', personalizationRouter);
// 9.5. Price history routes.
app.use('/api/price-history', priceRouter);
// 10. Public statistics routes.
app.use('/api/stats', statsRouter);
// 11. UPC barcode scanning routes.
app.use('/api/upc', upcRouter);
// 12. Inventory and expiry tracking routes.
app.use('/api/inventory', inventoryRouter);
// 13. Receipt scanning routes.
app.use('/api/receipts', receiptRouter);
// 14. Deals and best prices routes.
app.use('/api/deals', dealsRouter);
// 15. Reactions/social features routes.
app.use('/api/reactions', reactionsRouter);
// 16. Store management routes.
app.use('/api/stores', storeRouter);
// 17. Category discovery routes (ADR-023: Database Normalization)
app.use('/api/categories', categoryRouter);
// --- Backwards Compatibility Redirect (ADR-008: API Versioning Strategy) ---
// Redirect old /api/* paths to /api/v1/* for backwards compatibility.
// This allows clients to gradually migrate to the versioned API.
// IMPORTANT: This middleware MUST be mounted BEFORE createApiRouter() so that
// unversioned paths like /api/users are redirected to /api/v1/users BEFORE
// the versioned router's detectApiVersion middleware rejects them as invalid versions.
app.use('/api', (req, res, next) => {
// Check if the path starts with a version-like prefix (/v followed by digits).
// This includes both supported versions (v1, v2) and unsupported ones (v99).
// Unsupported versions will be handled by detectApiVersion middleware which returns 404.
// This redirect only handles legacy unversioned paths like /api/users -> /api/v1/users.
const versionPattern = /^\/v\d+/;
const startsWithVersionPattern = versionPattern.test(req.path);
if (!startsWithVersionPattern) {
const newPath = `/api/v1${req.path}`;
logger.info({ oldPath: `/api${req.path}`, newPath }, 'Redirecting to versioned API');
return res.redirect(301, newPath);
}
next();
});
// Mount the versioned API router (ADR-008 Phase 2).
// The createApiRouter() factory handles:
// - Version detection and validation via detectApiVersion middleware
// - Route registration in correct precedence order
// - Version-specific route availability
// - Deprecation headers via addDeprecationHeaders middleware
// - X-API-Version response headers
// All domain routers are registered in versioned.ts with proper ordering.
app.use('/api', createApiRouter());
// --- Error Handling and Server Startup ---

View File

@@ -1,21 +1,31 @@
-- Update flyer URLs from example.com to environment-specific URLs
--
-- This script should be run after determining the correct base URL for the environment:
-- - Dev container: http://127.0.0.1
-- - Dev container: https://localhost (NOT 127.0.0.1 - avoids SSL mixed-origin issues)
-- - Test environment: https://flyer-crawler-test.projectium.com
-- - Production: https://flyer-crawler.projectium.com
-- For dev container (run in dev database):
-- Uses 'localhost' instead of '127.0.0.1' to match how users access the site.
-- This avoids ERR_CERT_AUTHORITY_INVALID errors when images are loaded from a
-- different origin than the page.
UPDATE flyers
SET
image_url = REPLACE(image_url, 'example.com', '127.0.0.1'),
image_url = REPLACE(image_url, 'https://', 'http://'),
icon_url = REPLACE(icon_url, 'example.com', '127.0.0.1'),
icon_url = REPLACE(icon_url, 'https://', 'http://')
image_url = REPLACE(image_url, 'example.com', 'localhost'),
icon_url = REPLACE(icon_url, 'example.com', 'localhost')
WHERE
image_url LIKE '%example.com%'
OR icon_url LIKE '%example.com%';
-- Also fix any existing 127.0.0.1 URLs to use localhost:
UPDATE flyers
SET
image_url = REPLACE(image_url, '127.0.0.1', 'localhost'),
icon_url = REPLACE(icon_url, '127.0.0.1', 'localhost')
WHERE
image_url LIKE '%127.0.0.1%'
OR icon_url LIKE '%127.0.0.1%';
-- For test environment (run in test database):
-- UPDATE flyers
-- SET
@@ -37,7 +47,7 @@ WHERE
-- Verify the changes:
SELECT flyer_id, image_url, icon_url
FROM flyers
WHERE image_url LIKE '%127.0.0.1%'
OR icon_url LIKE '%127.0.0.1%'
WHERE image_url LIKE '%localhost%'
OR icon_url LIKE '%localhost%'
OR image_url LIKE '%flyer-crawler%'
OR icon_url LIKE '%flyer-crawler%';

View File

@@ -21,7 +21,7 @@ export const AppGuard: React.FC<AppGuardProps> = ({ children }) => {
const commitMessage = config.app.commitMessage;
return (
<div className="bg-gray-100 dark:bg-gray-950 min-h-screen font-sans text-gray-800 dark:text-gray-200">
<div className="bg-slate-50 dark:bg-slate-900 min-h-screen font-sans text-gray-800 dark:text-gray-200 bg-[radial-gradient(ellipse_at_top_right,_var(--tw-gradient-stops))] from-slate-50 via-gray-100 to-slate-100 dark:from-slate-800 dark:via-slate-900 dark:to-black">
{/* Toaster component for displaying notifications. It's placed at the top level. */}
<Toaster position="top-center" reverseOrder={false} />
{/* Add CSS variables for toast theming based on dark mode */}

View File

@@ -0,0 +1,378 @@
// src/components/FeatureFlag.test.tsx
/**
* Unit tests for the FeatureFlag component (ADR-024).
*
* These tests verify:
* - Component renders children when feature is enabled
* - Component hides children when feature is disabled
* - Component renders fallback when feature is disabled
* - Component returns null when disabled and no fallback provided
* - All feature flag names are properly handled
*/
import React from 'react';
import { render, screen } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach } from 'vitest';
// Mock the useFeatureFlag hook
const mockUseFeatureFlag = vi.fn();
vi.mock('../hooks/useFeatureFlag', () => ({
useFeatureFlag: (flagName: string) => mockUseFeatureFlag(flagName),
}));
// Import after mocking
import { FeatureFlag } from './FeatureFlag';
describe('FeatureFlag component', () => {
beforeEach(() => {
mockUseFeatureFlag.mockReset();
// Default to disabled
mockUseFeatureFlag.mockReturnValue(false);
});
describe('when feature is enabled', () => {
beforeEach(() => {
mockUseFeatureFlag.mockReturnValue(true);
});
it('should render children', () => {
render(
<FeatureFlag feature="newDashboard">
<div data-testid="new-feature">New Feature Content</div>
</FeatureFlag>,
);
expect(screen.getByTestId('new-feature')).toBeInTheDocument();
expect(screen.getByText('New Feature Content')).toBeInTheDocument();
});
it('should not render fallback', () => {
render(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="fallback">Fallback</div>}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.getByTestId('new-feature')).toBeInTheDocument();
expect(screen.queryByTestId('fallback')).not.toBeInTheDocument();
});
it('should render multiple children', () => {
render(
<FeatureFlag feature="newDashboard">
<div data-testid="child-1">Child 1</div>
<div data-testid="child-2">Child 2</div>
</FeatureFlag>,
);
expect(screen.getByTestId('child-1')).toBeInTheDocument();
expect(screen.getByTestId('child-2')).toBeInTheDocument();
});
it('should render text content', () => {
render(<FeatureFlag feature="newDashboard">Just some text</FeatureFlag>);
expect(screen.getByText('Just some text')).toBeInTheDocument();
});
it('should call useFeatureFlag with correct flag name', () => {
render(
<FeatureFlag feature="betaRecipes">
<div>Content</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('betaRecipes');
});
});
describe('when feature is disabled', () => {
beforeEach(() => {
mockUseFeatureFlag.mockReturnValue(false);
});
it('should not render children', () => {
render(
<FeatureFlag feature="newDashboard">
<div data-testid="new-feature">New Feature Content</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.queryByText('New Feature Content')).not.toBeInTheDocument();
});
it('should render fallback when provided', () => {
render(
<FeatureFlag
feature="newDashboard"
fallback={<div data-testid="fallback">Legacy Feature</div>}
>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.getByTestId('fallback')).toBeInTheDocument();
expect(screen.getByText('Legacy Feature')).toBeInTheDocument();
});
it('should render null when no fallback is provided', () => {
const { container } = render(
<FeatureFlag feature="newDashboard">
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
// Container should be empty (just the wrapper)
expect(container.innerHTML).toBe('');
});
it('should render complex fallback components', () => {
const FallbackComponent = () => (
<div data-testid="complex-fallback">
<h1>Legacy Dashboard</h1>
<p>This is the old version</p>
</div>
);
render(
<FeatureFlag feature="newDashboard" fallback={<FallbackComponent />}>
<div data-testid="new-feature">New Dashboard</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.getByTestId('complex-fallback')).toBeInTheDocument();
expect(screen.getByText('Legacy Dashboard')).toBeInTheDocument();
expect(screen.getByText('This is the old version')).toBeInTheDocument();
});
it('should render text fallback', () => {
render(
<FeatureFlag feature="newDashboard" fallback="Feature not available">
<div>New Feature</div>
</FeatureFlag>,
);
expect(screen.getByText('Feature not available')).toBeInTheDocument();
});
});
describe('with different feature flags', () => {
it('should work with newDashboard flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="newDashboard">
<div data-testid="dashboard">Dashboard</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('newDashboard');
expect(screen.getByTestId('dashboard')).toBeInTheDocument();
});
it('should work with betaRecipes flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="betaRecipes">
<div data-testid="recipes">Recipes</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('betaRecipes');
expect(screen.getByTestId('recipes')).toBeInTheDocument();
});
it('should work with experimentalAi flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="experimentalAi">
<div data-testid="ai">AI Feature</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('experimentalAi');
expect(screen.getByTestId('ai')).toBeInTheDocument();
});
it('should work with debugMode flag', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<FeatureFlag feature="debugMode">
<div data-testid="debug">Debug Panel</div>
</FeatureFlag>,
);
expect(mockUseFeatureFlag).toHaveBeenCalledWith('debugMode');
expect(screen.getByTestId('debug')).toBeInTheDocument();
});
});
describe('real-world usage patterns', () => {
it('should work for A/B testing pattern', () => {
mockUseFeatureFlag.mockReturnValue(false);
render(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="old-ui">Old UI</div>}>
<div data-testid="new-ui">New UI</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-ui')).not.toBeInTheDocument();
expect(screen.getByTestId('old-ui')).toBeInTheDocument();
});
it('should work for gradual rollout pattern', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(
<div>
<nav data-testid="nav">Navigation</nav>
<FeatureFlag feature="betaRecipes">
<aside data-testid="recipe-suggestions">Recipe Suggestions</aside>
</FeatureFlag>
<main data-testid="main">Main Content</main>
</div>,
);
expect(screen.getByTestId('nav')).toBeInTheDocument();
expect(screen.getByTestId('recipe-suggestions')).toBeInTheDocument();
expect(screen.getByTestId('main')).toBeInTheDocument();
});
it('should work nested within conditional logic', () => {
mockUseFeatureFlag.mockReturnValue(true);
const isLoggedIn = true;
render(
<div>
{isLoggedIn && (
<FeatureFlag
feature="experimentalAi"
fallback={<div data-testid="standard">Standard</div>}
>
<div data-testid="ai-search">AI Search</div>
</FeatureFlag>
)}
</div>,
);
expect(screen.getByTestId('ai-search')).toBeInTheDocument();
});
it('should work with multiple FeatureFlag components', () => {
// First call for newDashboard returns true
// Second call for debugMode returns false
mockUseFeatureFlag.mockImplementation((flag: string) => {
if (flag === 'newDashboard') return true;
if (flag === 'debugMode') return false;
return false;
});
render(
<div>
<FeatureFlag feature="newDashboard">
<div data-testid="new-dashboard">New Dashboard</div>
</FeatureFlag>
<FeatureFlag feature="debugMode" fallback={<div data-testid="no-debug">No Debug</div>}>
<div data-testid="debug-panel">Debug Panel</div>
</FeatureFlag>
</div>,
);
expect(screen.getByTestId('new-dashboard')).toBeInTheDocument();
expect(screen.queryByTestId('debug-panel')).not.toBeInTheDocument();
expect(screen.getByTestId('no-debug')).toBeInTheDocument();
});
});
describe('edge cases', () => {
it('should handle undefined fallback gracefully', () => {
mockUseFeatureFlag.mockReturnValue(false);
const { container } = render(
<FeatureFlag feature="newDashboard" fallback={undefined}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(container.innerHTML).toBe('');
});
it('should handle null children gracefully when enabled', () => {
mockUseFeatureFlag.mockReturnValue(true);
const { container } = render(<FeatureFlag feature="newDashboard">{null}</FeatureFlag>);
// Should render nothing (null)
expect(container.innerHTML).toBe('');
});
it('should handle empty children when enabled', () => {
mockUseFeatureFlag.mockReturnValue(true);
const { container } = render(
<FeatureFlag feature="newDashboard">
<></>
</FeatureFlag>,
);
// Should render the empty fragment
expect(container.innerHTML).toBe('');
});
it('should handle boolean children', () => {
mockUseFeatureFlag.mockReturnValue(true);
// React ignores boolean children, so nothing should render
const { container } = render(
<FeatureFlag feature="newDashboard">{true as unknown as React.ReactNode}</FeatureFlag>,
);
expect(container.innerHTML).toBe('');
});
it('should handle number children', () => {
mockUseFeatureFlag.mockReturnValue(true);
render(<FeatureFlag feature="newDashboard">{42}</FeatureFlag>);
expect(screen.getByText('42')).toBeInTheDocument();
});
});
describe('re-rendering behavior', () => {
it('should update when feature flag value changes', () => {
const { rerender } = render(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="fallback">Fallback</div>}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
// Initially disabled
expect(screen.queryByTestId('new-feature')).not.toBeInTheDocument();
expect(screen.getByTestId('fallback')).toBeInTheDocument();
// Enable the flag
mockUseFeatureFlag.mockReturnValue(true);
rerender(
<FeatureFlag feature="newDashboard" fallback={<div data-testid="fallback">Fallback</div>}>
<div data-testid="new-feature">New Feature</div>
</FeatureFlag>,
);
// Now enabled
expect(screen.getByTestId('new-feature')).toBeInTheDocument();
expect(screen.queryByTestId('fallback')).not.toBeInTheDocument();
});
});
});

View File

@@ -0,0 +1,75 @@
// src/components/FeatureFlag.tsx
import type { ReactNode } from 'react';
import { useFeatureFlag, type FeatureFlagName } from '../hooks/useFeatureFlag';
/**
* Props for the FeatureFlag component.
*/
export interface FeatureFlagProps {
/**
* The name of the feature flag to check.
* Must be a valid FeatureFlagName defined in config.featureFlags.
*/
feature: FeatureFlagName;
/**
* Content to render when the feature flag is enabled.
*/
children: ReactNode;
/**
* Optional content to render when the feature flag is disabled.
* If not provided, nothing is rendered when the flag is disabled.
* @default null
*/
fallback?: ReactNode;
}
/**
* Declarative component for conditional rendering based on feature flag state.
*
* This component provides a clean, declarative API for rendering content based
* on whether a feature flag is enabled or disabled. It uses the useFeatureFlag
* hook internally and supports an optional fallback for disabled features.
*
* @param props - Component props
* @param props.feature - The feature flag name to check
* @param props.children - Content rendered when feature is enabled
* @param props.fallback - Content rendered when feature is disabled (default: null)
*
* @example
* // Basic usage - show new feature when enabled
* <FeatureFlag feature="newDashboard">
* <NewDashboard />
* </FeatureFlag>
*
* @example
* // With fallback - show alternative when feature is disabled
* <FeatureFlag feature="newDashboard" fallback={<LegacyDashboard />}>
* <NewDashboard />
* </FeatureFlag>
*
* @example
* // Wrap a section of UI that should only appear when flag is enabled
* <div className="sidebar">
* <Navigation />
* <FeatureFlag feature="betaRecipes">
* <RecipeSuggestions />
* </FeatureFlag>
* <Footer />
* </div>
*
* @example
* // Combine with other conditional logic
* {isLoggedIn && (
* <FeatureFlag feature="experimentalAi" fallback={<StandardSearch />}>
* <AiPoweredSearch />
* </FeatureFlag>
* )}
*
* @see docs/adr/0024-feature-flagging-strategy.md
*/
export function FeatureFlag({ feature, children, fallback = null }: FeatureFlagProps): ReactNode {
const isEnabled = useFeatureFlag(feature);
return isEnabled ? children : fallback;
}

View File

@@ -31,7 +31,7 @@ export const Header: React.FC<HeaderProps> = ({
// The state and handlers for the old AuthModal and SignUpModal have been removed.
return (
<>
<header className="bg-white dark:bg-gray-900 shadow-md sticky top-0 z-20 border-b-2 border-brand-primary dark:border-brand-secondary">
<header className="bg-white/80 dark:bg-slate-900/80 backdrop-blur-md shadow-sm sticky top-0 z-20 border-b border-gray-200/50 dark:border-gray-700/50 supports-[backdrop-filter]:bg-white/60 dark:supports-[backdrop-filter]:bg-slate-900/60">
<div className="max-w-screen-2xl mx-auto px-4 sm:px-6 lg:px-8">
<div className="flex items-center justify-between h-16">
<div className="flex items-center">

View File

@@ -0,0 +1,424 @@
// src/components/NotificationBell.test.tsx
import React from 'react';
import { screen, fireEvent, act } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest';
import { NotificationBell, ConnectionStatus } from './NotificationBell';
import { renderWithProviders } from '../tests/utils/renderWithProviders';
// Mock the useWebSocket hook
vi.mock('../hooks/useWebSocket', () => ({
useWebSocket: vi.fn(),
}));
// Mock the useEventBus hook
vi.mock('../hooks/useEventBus', () => ({
useEventBus: vi.fn(),
}));
// Import the mocked modules
import { useWebSocket } from '../hooks/useWebSocket';
import { useEventBus } from '../hooks/useEventBus';
// Type the mocked functions
const mockUseWebSocket = useWebSocket as Mock;
const mockUseEventBus = useEventBus as Mock;
describe('NotificationBell', () => {
let eventBusCallback: ((data?: unknown) => void) | null = null;
beforeEach(() => {
vi.clearAllMocks();
eventBusCallback = null;
// Default mock: connected state, no error
mockUseWebSocket.mockReturnValue({
isConnected: true,
error: null,
});
// Capture the callback passed to useEventBus
mockUseEventBus.mockImplementation((_event: string, callback: (data?: unknown) => void) => {
eventBusCallback = callback;
});
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('rendering', () => {
it('should render the notification bell button', () => {
renderWithProviders(<NotificationBell />);
const button = screen.getByRole('button', { name: /notifications/i });
expect(button).toBeInTheDocument();
});
it('should render with custom className', () => {
renderWithProviders(<NotificationBell className="custom-class" />);
const container = screen.getByRole('button').parentElement;
expect(container).toHaveClass('custom-class');
});
it('should show connection status indicator by default', () => {
const { container } = renderWithProviders(<NotificationBell />);
// The status indicator is a span with inline style containing backgroundColor
const statusIndicator = container.querySelector('span[title="Connected"]');
expect(statusIndicator).toBeInTheDocument();
});
it('should hide connection status indicator when showConnectionStatus is false', () => {
const { container } = renderWithProviders(<NotificationBell showConnectionStatus={false} />);
// No status indicator should be present (no span with title Connected/Connecting/Disconnected)
const connectedIndicator = container.querySelector('span[title="Connected"]');
const connectingIndicator = container.querySelector('span[title="Connecting"]');
const disconnectedIndicator = container.querySelector('span[title="Disconnected"]');
expect(connectedIndicator).not.toBeInTheDocument();
expect(connectingIndicator).not.toBeInTheDocument();
expect(disconnectedIndicator).not.toBeInTheDocument();
});
});
describe('unread count badge', () => {
it('should not show badge when unread count is zero', () => {
renderWithProviders(<NotificationBell />);
// The badge displays numbers, check that no number badge exists
const badge = screen.queryByText(/^\d+$/);
expect(badge).not.toBeInTheDocument();
});
it('should show badge with count when notifications arrive', () => {
renderWithProviders(<NotificationBell />);
// Simulate a notification arriving via event bus
expect(eventBusCallback).not.toBeNull();
act(() => {
eventBusCallback!({ deals: [{ item_name: 'Test' }] });
});
const badge = screen.getByText('1');
expect(badge).toBeInTheDocument();
});
it('should increment count when multiple notifications arrive', () => {
renderWithProviders(<NotificationBell />);
// Simulate multiple notifications
act(() => {
eventBusCallback!({ deals: [{ item_name: 'Test 1' }] });
eventBusCallback!({ deals: [{ item_name: 'Test 2' }] });
eventBusCallback!({ deals: [{ item_name: 'Test 3' }] });
});
const badge = screen.getByText('3');
expect(badge).toBeInTheDocument();
});
it('should display 99+ when count exceeds 99', () => {
renderWithProviders(<NotificationBell />);
// Simulate 100 notifications
act(() => {
for (let i = 0; i < 100; i++) {
eventBusCallback!({ deals: [{ item_name: `Test ${i}` }] });
}
});
const badge = screen.getByText('99+');
expect(badge).toBeInTheDocument();
});
it('should not increment count when notification data is undefined', () => {
renderWithProviders(<NotificationBell />);
// Simulate a notification with undefined data
act(() => {
eventBusCallback!(undefined);
});
const badge = screen.queryByText(/^\d+$/);
expect(badge).not.toBeInTheDocument();
});
});
describe('click behavior', () => {
it('should reset unread count when clicked', () => {
renderWithProviders(<NotificationBell />);
// First, add some notifications
act(() => {
eventBusCallback!({ deals: [{ item_name: 'Test' }] });
});
expect(screen.getByText('1')).toBeInTheDocument();
// Click the bell
const button = screen.getByRole('button');
fireEvent.click(button);
// Badge should no longer show
expect(screen.queryByText('1')).not.toBeInTheDocument();
});
it('should call onClick callback when provided', () => {
const mockOnClick = vi.fn();
renderWithProviders(<NotificationBell onClick={mockOnClick} />);
const button = screen.getByRole('button');
fireEvent.click(button);
expect(mockOnClick).toHaveBeenCalledTimes(1);
});
it('should handle click without onClick callback', () => {
renderWithProviders(<NotificationBell />);
const button = screen.getByRole('button');
// Should not throw
expect(() => fireEvent.click(button)).not.toThrow();
});
});
describe('connection status', () => {
it('should show green indicator when connected', () => {
mockUseWebSocket.mockReturnValue({
isConnected: true,
error: null,
});
const { container } = renderWithProviders(<NotificationBell />);
const statusIndicator = container.querySelector('span[title="Connected"]');
expect(statusIndicator).toBeInTheDocument();
expect(statusIndicator).toHaveStyle({ backgroundColor: 'rgb(16, 185, 129)' });
});
it('should show red indicator when error occurs', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: 'Connection failed',
});
const { container } = renderWithProviders(<NotificationBell />);
const statusIndicator = container.querySelector('span[title="Disconnected"]');
expect(statusIndicator).toBeInTheDocument();
expect(statusIndicator).toHaveStyle({ backgroundColor: 'rgb(239, 68, 68)' });
});
it('should show amber indicator when connecting', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: null,
});
const { container } = renderWithProviders(<NotificationBell />);
const statusIndicator = container.querySelector('span[title="Connecting"]');
expect(statusIndicator).toBeInTheDocument();
expect(statusIndicator).toHaveStyle({ backgroundColor: 'rgb(245, 158, 11)' });
});
it('should show error tooltip when disconnected with error', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: 'Connection failed',
});
renderWithProviders(<NotificationBell />);
expect(screen.getByText('Live notifications unavailable')).toBeInTheDocument();
});
it('should not show error tooltip when connected', () => {
mockUseWebSocket.mockReturnValue({
isConnected: true,
error: null,
});
renderWithProviders(<NotificationBell />);
expect(screen.queryByText('Live notifications unavailable')).not.toBeInTheDocument();
});
});
describe('aria attributes', () => {
it('should have correct aria-label without unread notifications', () => {
renderWithProviders(<NotificationBell />);
const button = screen.getByRole('button');
expect(button).toHaveAttribute('aria-label', 'Notifications');
});
it('should have correct aria-label with unread notifications', () => {
renderWithProviders(<NotificationBell />);
act(() => {
eventBusCallback!({ deals: [{ item_name: 'Test' }] });
eventBusCallback!({ deals: [{ item_name: 'Test2' }] });
});
const button = screen.getByRole('button');
expect(button).toHaveAttribute('aria-label', 'Notifications (2 unread)');
});
it('should have correct title when connected', () => {
mockUseWebSocket.mockReturnValue({
isConnected: true,
error: null,
});
renderWithProviders(<NotificationBell />);
const button = screen.getByRole('button');
expect(button).toHaveAttribute('title', 'Connected to live notifications');
});
it('should have correct title when connecting', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: null,
});
renderWithProviders(<NotificationBell />);
const button = screen.getByRole('button');
expect(button).toHaveAttribute('title', 'Connecting...');
});
it('should have correct title when error occurs', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: 'Network error',
});
renderWithProviders(<NotificationBell />);
const button = screen.getByRole('button');
expect(button).toHaveAttribute('title', 'WebSocket error: Network error');
});
});
describe('bell icon styling', () => {
it('should have default color when no unread notifications', () => {
renderWithProviders(<NotificationBell />);
const button = screen.getByRole('button');
const svg = button.querySelector('svg');
expect(svg).toHaveClass('text-gray-600');
});
it('should have highlighted color when there are unread notifications', () => {
renderWithProviders(<NotificationBell />);
act(() => {
eventBusCallback!({ deals: [{ item_name: 'Test' }] });
});
const button = screen.getByRole('button');
const svg = button.querySelector('svg');
expect(svg).toHaveClass('text-blue-600');
});
});
describe('event bus subscription', () => {
it('should subscribe to notification:deal event', () => {
renderWithProviders(<NotificationBell />);
expect(mockUseEventBus).toHaveBeenCalledWith('notification:deal', expect.any(Function));
});
});
describe('useWebSocket configuration', () => {
it('should call useWebSocket with autoConnect: true', () => {
renderWithProviders(<NotificationBell />);
expect(mockUseWebSocket).toHaveBeenCalledWith({ autoConnect: true });
});
});
});
describe('ConnectionStatus', () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
it('should show "Live" text when connected', () => {
mockUseWebSocket.mockReturnValue({
isConnected: true,
error: null,
});
renderWithProviders(<ConnectionStatus />);
expect(screen.getByText('Live')).toBeInTheDocument();
});
it('should show "Offline" text when disconnected with error', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: 'Connection failed',
});
renderWithProviders(<ConnectionStatus />);
expect(screen.getByText('Offline')).toBeInTheDocument();
});
it('should show "Connecting..." text when connecting', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: null,
});
renderWithProviders(<ConnectionStatus />);
expect(screen.getByText('Connecting...')).toBeInTheDocument();
});
it('should call useWebSocket with autoConnect: true', () => {
mockUseWebSocket.mockReturnValue({
isConnected: true,
error: null,
});
renderWithProviders(<ConnectionStatus />);
expect(mockUseWebSocket).toHaveBeenCalledWith({ autoConnect: true });
});
it('should render Wifi icon when connected', () => {
mockUseWebSocket.mockReturnValue({
isConnected: true,
error: null,
});
renderWithProviders(<ConnectionStatus />);
const container = screen.getByText('Live').parentElement;
const svg = container?.querySelector('svg');
expect(svg).toBeInTheDocument();
expect(svg).toHaveClass('text-green-600');
});
it('should render WifiOff icon when disconnected', () => {
mockUseWebSocket.mockReturnValue({
isConnected: false,
error: 'Connection failed',
});
renderWithProviders(<ConnectionStatus />);
const container = screen.getByText('Offline').parentElement;
const svg = container?.querySelector('svg');
expect(svg).toBeInTheDocument();
expect(svg).toHaveClass('text-red-600');
});
});

View File

@@ -0,0 +1,776 @@
// src/components/NotificationToastHandler.test.tsx
import React from 'react';
import { render, act } from '@testing-library/react';
import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest';
import { NotificationToastHandler } from './NotificationToastHandler';
import type { DealNotificationData, SystemMessageData } from '../types/websocket';
// Use vi.hoisted to properly hoist mock functions
const { mockToastSuccess, mockToastError, mockToastDefault } = vi.hoisted(() => ({
mockToastSuccess: vi.fn(),
mockToastError: vi.fn(),
mockToastDefault: vi.fn(),
}));
// Mock react-hot-toast
vi.mock('react-hot-toast', () => {
const toastFn = (message: string, options?: unknown) => mockToastDefault(message, options);
toastFn.success = mockToastSuccess;
toastFn.error = mockToastError;
return {
default: toastFn,
};
});
// Mock useWebSocket hook
vi.mock('../hooks/useWebSocket', () => ({
useWebSocket: vi.fn(),
}));
// Mock useEventBus hook
vi.mock('../hooks/useEventBus', () => ({
useEventBus: vi.fn(),
}));
// Mock formatCurrency
vi.mock('../utils/formatUtils', () => ({
formatCurrency: vi.fn((cents: number) => `$${(cents / 100).toFixed(2)}`),
}));
// Import mocked modules
import { useWebSocket } from '../hooks/useWebSocket';
import { useEventBus } from '../hooks/useEventBus';
const mockUseWebSocket = useWebSocket as Mock;
const mockUseEventBus = useEventBus as Mock;
describe('NotificationToastHandler', () => {
let eventBusCallbacks: Map<string, (data?: unknown) => void>;
let onConnectCallback: (() => void) | undefined;
let onDisconnectCallback: (() => void) | undefined;
beforeEach(() => {
vi.clearAllMocks();
vi.useFakeTimers();
// Clear toast mocks
mockToastSuccess.mockClear();
mockToastError.mockClear();
mockToastDefault.mockClear();
eventBusCallbacks = new Map();
onConnectCallback = undefined;
onDisconnectCallback = undefined;
// Default mock implementation for useWebSocket
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: true,
error: null,
};
},
);
// Capture callbacks for different event types
mockUseEventBus.mockImplementation((event: string, callback: (data?: unknown) => void) => {
eventBusCallbacks.set(event, callback);
});
});
afterEach(() => {
vi.useRealTimers();
vi.restoreAllMocks();
});
describe('rendering', () => {
it('should render null (no visible output)', () => {
const { container } = render(<NotificationToastHandler />);
expect(container.firstChild).toBeNull();
});
it('should subscribe to event bus on mount', () => {
render(<NotificationToastHandler />);
expect(mockUseEventBus).toHaveBeenCalledWith('notification:deal', expect.any(Function));
expect(mockUseEventBus).toHaveBeenCalledWith('notification:system', expect.any(Function));
expect(mockUseEventBus).toHaveBeenCalledWith('notification:error', expect.any(Function));
});
});
describe('connection events', () => {
it('should show success toast on connect when enabled', () => {
render(<NotificationToastHandler enabled={true} />);
// Trigger onConnect callback
onConnectCallback?.();
expect(mockToastSuccess).toHaveBeenCalledWith(
'Connected to live notifications',
expect.objectContaining({
duration: 2000,
icon: expect.any(String),
}),
);
});
it('should not show success toast on connect when disabled', () => {
render(<NotificationToastHandler enabled={false} />);
onConnectCallback?.();
expect(mockToastSuccess).not.toHaveBeenCalled();
});
it('should show error toast on disconnect when error exists', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection lost',
};
},
);
render(<NotificationToastHandler enabled={true} />);
onDisconnectCallback?.();
expect(mockToastError).toHaveBeenCalledWith(
'Disconnected from live notifications',
expect.objectContaining({
duration: 3000,
icon: expect.any(String),
}),
);
});
it('should not show disconnect toast when disabled', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection lost',
};
},
);
render(<NotificationToastHandler enabled={false} />);
onDisconnectCallback?.();
expect(mockToastError).not.toHaveBeenCalled();
});
it('should not show disconnect toast when no error', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: null,
};
},
);
render(<NotificationToastHandler enabled={true} />);
onDisconnectCallback?.();
expect(mockToastError).not.toHaveBeenCalled();
});
});
describe('deal notifications', () => {
it('should show toast for single deal notification', () => {
render(<NotificationToastHandler />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal found',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(mockToastSuccess).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
duration: 5000,
icon: expect.any(String),
position: 'top-right',
}),
);
});
it('should show toast for multiple deals notification', () => {
render(<NotificationToastHandler />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Store A',
store_id: 1,
},
{
item_name: 'Bread',
best_price_in_cents: 299,
store_name: 'Store B',
store_id: 2,
},
{
item_name: 'Eggs',
best_price_in_cents: 499,
store_name: 'Store C',
store_id: 3,
},
],
user_id: 'user-123',
message: 'Multiple deals found',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(mockToastSuccess).toHaveBeenCalled();
});
it('should not show toast when disabled', () => {
render(<NotificationToastHandler enabled={false} />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal found',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(mockToastSuccess).not.toHaveBeenCalled();
});
it('should not show toast when data is undefined', () => {
render(<NotificationToastHandler />);
const callback = eventBusCallbacks.get('notification:deal');
callback?.(undefined);
expect(mockToastSuccess).not.toHaveBeenCalled();
});
});
describe('system messages', () => {
it('should show error toast for error severity', () => {
render(<NotificationToastHandler />);
const systemData: SystemMessageData = {
message: 'System error occurred',
severity: 'error',
};
const callback = eventBusCallbacks.get('notification:system');
callback?.(systemData);
expect(mockToastError).toHaveBeenCalledWith(
'System error occurred',
expect.objectContaining({
duration: 6000,
position: 'top-center',
icon: expect.any(String),
}),
);
});
it('should show warning toast for warning severity', () => {
render(<NotificationToastHandler />);
const systemData: SystemMessageData = {
message: 'System warning',
severity: 'warning',
};
// For warning, the default toast() is called
const callback = eventBusCallbacks.get('notification:system');
callback?.(systemData);
// Warning uses the regular toast function (mockToastDefault)
expect(mockToastDefault).toHaveBeenCalledWith(
'System warning',
expect.objectContaining({
duration: 4000,
position: 'top-center',
icon: expect.any(String),
}),
);
});
it('should show info toast for info severity', () => {
render(<NotificationToastHandler />);
const systemData: SystemMessageData = {
message: 'System info',
severity: 'info',
};
const callback = eventBusCallbacks.get('notification:system');
callback?.(systemData);
// Info uses the regular toast function (mockToastDefault)
expect(mockToastDefault).toHaveBeenCalledWith(
'System info',
expect.objectContaining({
duration: 4000,
position: 'top-center',
icon: expect.any(String),
}),
);
});
it('should not show toast when disabled', () => {
render(<NotificationToastHandler enabled={false} />);
const systemData: SystemMessageData = {
message: 'System error',
severity: 'error',
};
const callback = eventBusCallbacks.get('notification:system');
callback?.(systemData);
expect(mockToastError).not.toHaveBeenCalled();
});
it('should not show toast when data is undefined', () => {
render(<NotificationToastHandler />);
const callback = eventBusCallbacks.get('notification:system');
callback?.(undefined);
expect(mockToastError).not.toHaveBeenCalled();
});
});
describe('error notifications', () => {
it('should show error toast with message and code', () => {
render(<NotificationToastHandler />);
const errorData = {
message: 'Something went wrong',
code: 'ERR_001',
};
const callback = eventBusCallbacks.get('notification:error');
callback?.(errorData);
expect(mockToastError).toHaveBeenCalledWith(
'Error: Something went wrong',
expect.objectContaining({
duration: 5000,
icon: expect.any(String),
}),
);
});
it('should show error toast without code', () => {
render(<NotificationToastHandler />);
const errorData = {
message: 'Something went wrong',
};
const callback = eventBusCallbacks.get('notification:error');
callback?.(errorData);
expect(mockToastError).toHaveBeenCalledWith(
'Error: Something went wrong',
expect.objectContaining({
duration: 5000,
}),
);
});
it('should not show toast when disabled', () => {
render(<NotificationToastHandler enabled={false} />);
const errorData = {
message: 'Something went wrong',
};
const callback = eventBusCallbacks.get('notification:error');
callback?.(errorData);
expect(mockToastError).not.toHaveBeenCalled();
});
it('should not show toast when data is undefined', () => {
render(<NotificationToastHandler />);
const callback = eventBusCallbacks.get('notification:error');
callback?.(undefined);
expect(mockToastError).not.toHaveBeenCalled();
});
});
describe('sound playback', () => {
it('should not play sound by default', () => {
const audioPlayMock = vi.fn().mockResolvedValue(undefined);
const AudioMock = vi.fn().mockImplementation(() => ({
play: audioPlayMock,
volume: 0,
}));
vi.stubGlobal('Audio', AudioMock);
render(<NotificationToastHandler playSound={false} />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(AudioMock).not.toHaveBeenCalled();
});
it('should create Audio instance when playSound is true', () => {
const audioPlayMock = vi.fn().mockResolvedValue(undefined);
const AudioMock = vi.fn().mockImplementation(() => ({
play: audioPlayMock,
volume: 0,
}));
vi.stubGlobal('Audio', AudioMock);
render(<NotificationToastHandler playSound={true} />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
// Verify Audio constructor was called with correct URL
expect(AudioMock).toHaveBeenCalledWith('/notification-sound.mp3');
});
it('should use custom sound URL', () => {
const audioPlayMock = vi.fn().mockResolvedValue(undefined);
const AudioMock = vi.fn().mockImplementation(() => ({
play: audioPlayMock,
volume: 0,
}));
vi.stubGlobal('Audio', AudioMock);
render(<NotificationToastHandler playSound={true} soundUrl="/custom-sound.mp3" />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(AudioMock).toHaveBeenCalledWith('/custom-sound.mp3');
});
it('should handle audio play failure gracefully', () => {
vi.spyOn(console, 'warn').mockImplementation(() => {});
const audioPlayMock = vi.fn().mockRejectedValue(new Error('Autoplay blocked'));
const AudioMock = vi.fn().mockImplementation(() => ({
play: audioPlayMock,
volume: 0,
}));
vi.stubGlobal('Audio', AudioMock);
render(<NotificationToastHandler playSound={true} />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal',
};
const callback = eventBusCallbacks.get('notification:deal');
// Should not throw even if play() fails
expect(() => callback?.(dealData)).not.toThrow();
// Audio constructor should still be called
expect(AudioMock).toHaveBeenCalled();
});
it('should handle Audio constructor failure gracefully', () => {
vi.spyOn(console, 'warn').mockImplementation(() => {});
const AudioMock = vi.fn().mockImplementation(() => {
throw new Error('Audio not supported');
});
vi.stubGlobal('Audio', AudioMock);
render(<NotificationToastHandler playSound={true} />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal',
};
const callback = eventBusCallbacks.get('notification:deal');
// Should not throw
expect(() => callback?.(dealData)).not.toThrow();
});
});
describe('persistent connection error', () => {
it('should show error toast after delay when connection error persists', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection failed',
};
},
);
render(<NotificationToastHandler enabled={true} />);
// Fast-forward 5 seconds
act(() => {
vi.advanceTimersByTime(5000);
});
expect(mockToastError).toHaveBeenCalledWith(
'Unable to connect to live notifications. Some features may be limited.',
expect.objectContaining({
duration: 5000,
icon: expect.any(String),
}),
);
});
it('should not show error toast before delay', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection failed',
};
},
);
render(<NotificationToastHandler enabled={true} />);
// Advance only 4 seconds
act(() => {
vi.advanceTimersByTime(4000);
});
expect(mockToastError).not.toHaveBeenCalledWith(
expect.stringContaining('Unable to connect'),
expect.anything(),
);
});
it('should not show persistent error toast when disabled', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection failed',
};
},
);
render(<NotificationToastHandler enabled={false} />);
act(() => {
vi.advanceTimersByTime(5000);
});
expect(mockToastError).not.toHaveBeenCalled();
});
it('should clear timeout on unmount', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: 'Connection failed',
};
},
);
const { unmount } = render(<NotificationToastHandler enabled={true} />);
// Unmount before timer fires
unmount();
act(() => {
vi.advanceTimersByTime(5000);
});
// The toast should not be called because component unmounted
expect(mockToastError).not.toHaveBeenCalledWith(
expect.stringContaining('Unable to connect'),
expect.anything(),
);
});
it('should not show persistent error toast when there is no error', () => {
mockUseWebSocket.mockImplementation(
(options: { onConnect?: () => void; onDisconnect?: () => void }) => {
onConnectCallback = options?.onConnect;
onDisconnectCallback = options?.onDisconnect;
return {
isConnected: false,
error: null,
};
},
);
render(<NotificationToastHandler enabled={true} />);
act(() => {
vi.advanceTimersByTime(5000);
});
expect(mockToastError).not.toHaveBeenCalled();
});
});
describe('default props', () => {
it('should default enabled to true', () => {
render(<NotificationToastHandler />);
onConnectCallback?.();
expect(mockToastSuccess).toHaveBeenCalled();
});
it('should default playSound to false', () => {
const AudioMock = vi.fn();
vi.stubGlobal('Audio', AudioMock);
render(<NotificationToastHandler />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(AudioMock).not.toHaveBeenCalled();
});
it('should default soundUrl to /notification-sound.mp3', () => {
const audioPlayMock = vi.fn().mockResolvedValue(undefined);
const AudioMock = vi.fn().mockImplementation(() => ({
play: audioPlayMock,
volume: 0,
}));
vi.stubGlobal('Audio', AudioMock);
render(<NotificationToastHandler playSound={true} />);
const dealData: DealNotificationData = {
deals: [
{
item_name: 'Milk',
best_price_in_cents: 399,
store_name: 'Test Store',
store_id: 1,
},
],
user_id: 'user-123',
message: 'New deal',
};
const callback = eventBusCallbacks.get('notification:deal');
callback?.(dealData);
expect(AudioMock).toHaveBeenCalledWith('/notification-sound.mp3');
});
});
});

View File

@@ -24,6 +24,28 @@ const config = {
debug: import.meta.env.VITE_SENTRY_DEBUG === 'true',
enabled: import.meta.env.VITE_SENTRY_ENABLED !== 'false',
},
/**
* Feature flags for conditional feature rendering (ADR-024).
*
* All flags default to false (disabled) when the environment variable is not set
* or is set to any value other than 'true'. This opt-in model ensures features
* are explicitly enabled, preventing accidental exposure of incomplete features.
*
* Environment variables follow the naming convention: VITE_FEATURE_SNAKE_CASE
* Config properties use camelCase for consistency with JavaScript conventions.
*
* @see docs/adr/0024-feature-flagging-strategy.md
*/
featureFlags: {
/** Enable the redesigned dashboard UI (VITE_FEATURE_NEW_DASHBOARD) */
newDashboard: import.meta.env.VITE_FEATURE_NEW_DASHBOARD === 'true',
/** Enable beta recipe features (VITE_FEATURE_BETA_RECIPES) */
betaRecipes: import.meta.env.VITE_FEATURE_BETA_RECIPES === 'true',
/** Enable experimental AI features (VITE_FEATURE_EXPERIMENTAL_AI) */
experimentalAi: import.meta.env.VITE_FEATURE_EXPERIMENTAL_AI === 'true',
/** Enable debug mode UI elements (VITE_FEATURE_DEBUG_MODE) */
debugMode: import.meta.env.VITE_FEATURE_DEBUG_MODE === 'true',
},
};
export default config;

183
src/config/apiVersions.ts Normal file
View File

@@ -0,0 +1,183 @@
// src/config/apiVersions.ts
/**
* @file API version constants, types, and configuration.
* Implements ADR-008 Phase 2: API Versioning Infrastructure.
*
* This module provides centralized version definitions used by:
* - Version detection middleware (apiVersion.middleware.ts)
* - Deprecation headers middleware (deprecation.middleware.ts)
* - Versioned router factory (versioned.ts)
*
* @see docs/architecture/api-versioning-infrastructure.md
*
* @example
* ```typescript
* import {
* CURRENT_API_VERSION,
* VERSION_CONFIGS,
* isValidApiVersion,
* } from './apiVersions';
*
* // Check if a version is supported
* if (isValidApiVersion('v1')) {
* const config = VERSION_CONFIGS.v1;
* console.log(`v1 status: ${config.status}`);
* }
* ```
*/
// --- Type Definitions ---
/**
* All API versions as a const tuple for type derivation.
* Add new versions here when introducing them.
*/
export const API_VERSIONS = ['v1', 'v2'] as const;
/**
* Union type of supported API versions.
* Currently: 'v1' | 'v2'
*/
export type ApiVersion = (typeof API_VERSIONS)[number];
/**
* Version lifecycle status.
* - 'active': Version is fully supported and recommended
* - 'deprecated': Version works but clients should migrate (deprecation headers sent)
* - 'sunset': Version is scheduled for removal or already removed
*/
export type VersionStatus = 'active' | 'deprecated' | 'sunset';
/**
* Deprecation information for an API version.
* Follows RFC 8594 (Sunset Header) and draft-ietf-httpapi-deprecation-header.
*
* Used by deprecation middleware to set appropriate HTTP headers:
* - `Deprecation: true` (draft-ietf-httpapi-deprecation-header)
* - `Sunset: <date>` (RFC 8594)
* - `Link: <url>; rel="successor-version"` (RFC 8288)
*/
export interface VersionDeprecation {
/** Indicates if this version is deprecated (maps to Deprecation header) */
deprecated: boolean;
/** ISO 8601 date string when the version will be sunset (maps to Sunset header) */
sunsetDate?: string;
/** The version clients should migrate to (maps to Link rel="successor-version") */
successorVersion?: ApiVersion;
/** Human-readable message explaining the deprecation (for documentation/logs) */
message?: string;
}
/**
* Complete configuration for an API version.
* Combines version identifier, lifecycle status, and deprecation details.
*/
export interface VersionConfig {
/** The version identifier (e.g., 'v1') */
version: ApiVersion;
/** Current lifecycle status of this version */
status: VersionStatus;
/** ISO 8601 date when the version will be sunset (RFC 8594) - convenience field */
sunsetDate?: string;
/** The version clients should migrate to - convenience field */
successorVersion?: ApiVersion;
}
// --- Constants ---
/**
* The current/latest stable API version.
* New clients should use this version.
*/
export const CURRENT_API_VERSION: ApiVersion = 'v1';
/**
* The default API version for requests without explicit version.
* Used when version cannot be detected from the request path.
*/
export const DEFAULT_VERSION: ApiVersion = 'v1';
/**
* Array of all supported API versions.
* Used for validation and enumeration.
*/
export const SUPPORTED_VERSIONS: readonly ApiVersion[] = API_VERSIONS;
/**
* Configuration map for all API versions.
* Provides lifecycle status and deprecation information for each version.
*
* To mark v1 as deprecated (example for future use):
* @example
* ```typescript
* VERSION_CONFIGS.v1 = {
* version: 'v1',
* status: 'deprecated',
* sunsetDate: '2027-01-01T00:00:00Z',
* successorVersion: 'v2',
* };
* ```
*/
export const VERSION_CONFIGS: Record<ApiVersion, VersionConfig> = {
v1: {
version: 'v1',
status: 'active',
// No deprecation info - v1 is the current active version
},
v2: {
version: 'v2',
status: 'active',
// v2 is defined for infrastructure readiness but not yet implemented
},
};
// --- Utility Functions ---
/**
* Type guard to check if a string is a valid ApiVersion.
*
* @param value - The string to check
* @returns True if the string is a valid API version
*
* @example
* ```typescript
* const userInput = 'v1';
* if (isValidApiVersion(userInput)) {
* // userInput is now typed as ApiVersion
* const config = VERSION_CONFIGS[userInput];
* }
* ```
*/
export function isValidApiVersion(value: string): value is ApiVersion {
return API_VERSIONS.includes(value as ApiVersion);
}
/**
* Check if a version is deprecated.
*
* @param version - The API version to check
* @returns True if the version status is 'deprecated'
*/
export function isVersionDeprecated(version: ApiVersion): boolean {
return VERSION_CONFIGS[version].status === 'deprecated';
}
/**
* Get deprecation information for a version.
* Constructs a VersionDeprecation object from the version config.
*
* @param version - The API version to get deprecation info for
* @returns VersionDeprecation object with current deprecation state
*/
export function getVersionDeprecation(version: ApiVersion): VersionDeprecation {
const config = VERSION_CONFIGS[version];
return {
deprecated: config.status === 'deprecated',
sunsetDate: config.sunsetDate,
successorVersion: config.successorVersion,
message:
config.status === 'deprecated'
? `API ${version} is deprecated${config.sunsetDate ? ` and will be sunset on ${config.sunsetDate}` : ''}${config.successorVersion ? `. Please migrate to ${config.successorVersion}.` : '.'}`
: undefined,
};
}

View File

@@ -112,6 +112,15 @@ const googleSchema = z.object({
clientSecret: z.string().optional(),
});
/**
* GitHub OAuth configuration schema.
* Used for GitHub social login functionality.
*/
const githubSchema = z.object({
clientId: z.string().optional(),
clientSecret: z.string().optional(),
});
/**
* Worker concurrency configuration schema.
*/
@@ -146,6 +155,38 @@ const sentrySchema = z.object({
debug: booleanString(false),
});
/**
* Feature flags configuration schema (ADR-024).
*
* All flags default to `false` (disabled) for safety, following an opt-in model.
* Set the corresponding environment variable to 'true' to enable a feature.
*
* Environment variable naming convention: `FEATURE_SNAKE_CASE`
* Config property naming convention: `camelCase`
*
* @example
* // Enable via environment:
* FEATURE_BUGSINK_SYNC=true
*
* // Check in code:
* import { config } from './config/env';
* if (config.featureFlags.bugsinkSync) { ... }
*/
const featureFlagsSchema = z.object({
/** Enable Bugsink error sync integration (FEATURE_BUGSINK_SYNC) */
bugsinkSync: booleanString(false),
/** Enable advanced RBAC features (FEATURE_ADVANCED_RBAC) */
advancedRbac: booleanString(false),
/** Enable new dashboard experience (FEATURE_NEW_DASHBOARD) */
newDashboard: booleanString(false),
/** Enable beta recipe features (FEATURE_BETA_RECIPES) */
betaRecipes: booleanString(false),
/** Enable experimental AI features (FEATURE_EXPERIMENTAL_AI) */
experimentalAi: booleanString(false),
/** Enable debug mode for development (FEATURE_DEBUG_MODE) */
debugMode: booleanString(false),
});
/**
* Complete environment configuration schema.
*/
@@ -157,9 +198,11 @@ const envSchema = z.object({
ai: aiSchema,
upc: upcSchema,
google: googleSchema,
github: githubSchema,
worker: workerSchema,
server: serverSchema,
sentry: sentrySchema,
featureFlags: featureFlagsSchema,
});
export type EnvConfig = z.infer<typeof envSchema>;
@@ -209,6 +252,10 @@ function loadEnvVars(): unknown {
clientId: process.env.GOOGLE_CLIENT_ID,
clientSecret: process.env.GOOGLE_CLIENT_SECRET,
},
github: {
clientId: process.env.GITHUB_CLIENT_ID,
clientSecret: process.env.GITHUB_CLIENT_SECRET,
},
worker: {
concurrency: process.env.WORKER_CONCURRENCY,
lockDuration: process.env.WORKER_LOCK_DURATION,
@@ -230,6 +277,14 @@ function loadEnvVars(): unknown {
environment: process.env.SENTRY_ENVIRONMENT || process.env.NODE_ENV,
debug: process.env.SENTRY_DEBUG,
},
featureFlags: {
bugsinkSync: process.env.FEATURE_BUGSINK_SYNC,
advancedRbac: process.env.FEATURE_ADVANCED_RBAC,
newDashboard: process.env.FEATURE_NEW_DASHBOARD,
betaRecipes: process.env.FEATURE_BETA_RECIPES,
experimentalAi: process.env.FEATURE_EXPERIMENTAL_AI,
debugMode: process.env.FEATURE_DEBUG_MODE,
},
};
}
@@ -367,3 +422,43 @@ export const isUpcItemDbConfigured = !!config.upc.upcItemDbApiKey;
* Returns true if Barcode Lookup API is configured.
*/
export const isBarcodeLookupConfigured = !!config.upc.barcodeLookupApiKey;
/**
* Returns true if Google OAuth is configured (both client ID and secret present).
*/
export const isGoogleOAuthConfigured = !!config.google.clientId && !!config.google.clientSecret;
/**
* Returns true if GitHub OAuth is configured (both client ID and secret present).
*/
export const isGithubOAuthConfigured = !!config.github.clientId && !!config.github.clientSecret;
// --- Feature Flag Helpers (ADR-024) ---
/**
* Type representing valid feature flag names.
* Derived from the featureFlagsSchema for type safety.
*/
export type FeatureFlagName = keyof typeof config.featureFlags;
/**
* Check if a feature flag is enabled.
*
* This is a convenience function for checking feature flag state.
* For more advanced usage (logging, all flags), use the featureFlags service.
*
* @param flagName - The name of the feature flag to check
* @returns boolean indicating if the feature is enabled
*
* @example
* ```typescript
* import { isFeatureFlagEnabled } from './config/env';
*
* if (isFeatureFlagEnabled('newDashboard')) {
* // Use new dashboard
* }
* ```
*/
export function isFeatureFlagEnabled(flagName: FeatureFlagName): boolean {
return config.featureFlags[flagName];
}

View File

@@ -172,7 +172,7 @@ if (process.env.GOOGLE_CLIENT_ID && process.env.GOOGLE_CLIENT_SECRET) {
{
clientID: process.env.GOOGLE_CLIENT_ID,
clientSecret: process.env.GOOGLE_CLIENT_SECRET,
callbackURL: '/api/auth/google/callback',
callbackURL: '/api/v1/auth/google/callback',
scope: ['profile', 'email'],
},
async (
@@ -242,7 +242,7 @@ if (process.env.GITHUB_CLIENT_ID && process.env.GITHUB_CLIENT_SECRET) {
{
clientID: process.env.GITHUB_CLIENT_ID,
clientSecret: process.env.GITHUB_CLIENT_SECRET,
callbackURL: '/api/auth/github/callback',
callbackURL: '/api/v1/auth/github/callback',
scope: ['user:email'],
},
async (

View File

@@ -79,10 +79,10 @@ describe('swagger configuration', () => {
expect(spec.servers.length).toBeGreaterThan(0);
});
it('should have /api as the server URL', () => {
const apiServer = spec.servers.find((s) => s.url === '/api');
it('should have /api/v1 as the server URL (ADR-008)', () => {
const apiServer = spec.servers.find((s) => s.url === '/api/v1');
expect(apiServer).toBeDefined();
expect(apiServer?.description).toBe('API server');
expect(apiServer?.description).toBe('API server (v1)');
});
});

Some files were not shown because too many files have changed in this diff Show More