Compare commits
4 Commits
| Author | SHA1 | Date |
| --- | --- | --- |
| | 2379f3a878 | |
| | 0232b9de7a | |
| | 2e98bc3fc7 | |
| | ec2f143218 | |
87 CLAUDE.md
@@ -517,3 +517,90 @@ ssh root@projectium.com "tail -50 /var/www/flyer-crawler.projectium.com/logs/app
- Checking service status

**Important:** SSH access requires the host machine to have SSH keys configured for `root@projectium.com`.

---

## Logstash Configuration (ADR-050)

The production server uses **Logstash** to aggregate logs from multiple sources and forward errors to Bugsink for centralized error tracking.
**Log Sources:**

- **PostgreSQL function logs** - Structured JSON logs from the `fn_log()` helper function (see the sketch after this list)
- **PM2 worker logs** - Service logs from BullMQ job workers (stdout)
- **Redis logs** - Operational logs (INFO level) and errors
- **NGINX logs** - Access logs (all requests) and error logs
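A quick way to eyeball a sample `fn_log()` line on disk (a sketch, assuming the `log_line_prefix` of `'%t [%p] %u@%d '` configured below, and that at least one `fn_log()` WARNING has fired today):

```bash
# Show the most recent structured fn_log() line from today's PostgreSQL log.
grep -E 'WARNING: +\{' "/var/log/postgresql/postgresql-$(date +%Y-%m-%d).log" | tail -1
```
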
### Configuration Location

**Primary configuration file:**

- `/etc/logstash/conf.d/bugsink.conf` - Complete Logstash pipeline configuration

**Related files:**

- `/etc/postgresql/14/main/conf.d/observability.conf` - PostgreSQL logging configuration
- `/var/log/postgresql/*.log` - PostgreSQL log files
- `/home/gitea-runner/.pm2/logs/*.log` - PM2 worker logs
- `/var/log/redis/redis-server.log` - Redis logs
- `/var/log/nginx/access.log` - NGINX access logs
- `/var/log/nginx/error.log` - NGINX error logs
- `/var/log/logstash/*.log` - Logstash file outputs (operational logs)
- `/var/lib/logstash/sincedb_*` - Logstash position tracking files
### Key Features

1. **Multi-source aggregation**: Collects logs from PostgreSQL, PM2 workers, Redis, and NGINX
2. **Environment-based routing**: Automatically detects production vs test environments and routes errors to the correct Bugsink project
3. **Structured JSON parsing**: Extracts `fn_log()` function output from PostgreSQL logs and Pino JSON from PM2 workers
4. **Sentry-compatible format**: Transforms events to Sentry format with `event_id`, `timestamp`, `level`, `message`, and `extra` context
5. **Error filtering**: Only forwards WARNING and ERROR level messages to Bugsink
6. **Operational log storage**: Stores non-error logs (Redis INFO, NGINX access, PM2 operational) to `/var/log/logstash/` for analysis
7. **Request monitoring**: Categorizes NGINX requests by status code (2xx, 3xx, 4xx, 5xx) and identifies slow requests (see the sketch after this list)
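Once the NGINX access output is active, those status-code tags can be tallied straight from the `json_lines` files (a sketch; the `tags` field name comes from the output configuration later in this section, and `jq` is assumed to be installed):

```bash
# Count today's NGINX requests by status-code tag (http_2xx, http_3xx, ...).
jq -r '.tags[] | select(startswith("http_"))' \
  "/var/log/logstash/nginx-access-$(date +%Y-%m-%d).log" | sort | uniq -c
```
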
### Common Maintenance Commands

```bash
# Check Logstash status
systemctl status logstash

# Restart Logstash after configuration changes
systemctl restart logstash

# Test configuration syntax
/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/bugsink.conf

# View Logstash logs
journalctl -u logstash -f

# Check Logstash stats (events processed, failures)
curl -XGET 'localhost:9600/_node/stats/pipelines?pretty' | jq '.pipelines.main.plugins.filters'

# Monitor PostgreSQL logs being processed
tail -f /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log

# View operational log outputs
tail -f /var/log/logstash/pm2-workers-$(date +%Y-%m-%d).log
tail -f /var/log/logstash/redis-operational-$(date +%Y-%m-%d).log
tail -f /var/log/logstash/nginx-access-$(date +%Y-%m-%d).log

# Check disk usage of log files
du -sh /var/log/logstash/
```
### Troubleshooting

| Issue | Check | Solution |
| --- | --- | --- |
| Errors not appearing in Bugsink | Check Logstash is running | `systemctl status logstash` |
| Configuration syntax errors | Test config file | `/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/bugsink.conf` |
| Grok pattern failures | Check Logstash stats | `curl localhost:9600/_node/stats/pipelines?pretty \| jq '.pipelines.main.plugins.filters'` |
| Wrong Bugsink project | Verify environment detection | Check tags in logs match expected environment (production/test) |
| Permission denied reading logs | Check Logstash permissions | `groups logstash` should include `postgres`, `adm` groups |
| PM2 logs not captured | Check file paths exist | `ls /home/gitea-runner/.pm2/logs/flyer-crawler-worker-*.log` |
| NGINX access logs not showing | Check file output directory | `ls -lh /var/log/logstash/nginx-access-*.log` |
| High disk usage | Check log rotation | Verify `/etc/logrotate.d/logstash` is configured and running daily |

**Full setup guide**: See [docs/BARE-METAL-SETUP.md](docs/BARE-METAL-SETUP.md) section "PostgreSQL Function Observability (ADR-050)"

**Architecture details**: See [docs/adr/0050-postgresql-function-observability.md](docs/adr/0050-postgresql-function-observability.md)
@@ -225,14 +225,20 @@ filter {\n\
mutate { add_tag => ["error"] }\n\
}\n\
\n\
# Redis error detection\n\
# Redis log parsing\n\
if [type] == "redis" {\n\
grok {\n\
match => { "message" => "%%{POSINT:pid}:%%{WORD:role} %%{MONTHDAY} %%{MONTH} %%{TIME} %%{WORD:loglevel} %%{GREEDYDATA:redis_message}" }\n\
}\n\
\n\
# Tag errors (WARNING/ERROR) for Bugsink forwarding\n\
if [loglevel] in ["WARNING", "ERROR"] {\n\
mutate { add_tag => ["error"] }\n\
}\n\
# Tag INFO-level operational events (startup, config, persistence)\n\
else if [loglevel] == "INFO" {\n\
mutate { add_tag => ["redis_operational"] }\n\
}\n\
}\n\
\n\
# PostgreSQL function log parsing (ADR-050)\n\
@@ -265,6 +271,7 @@ filter {\n\
}\n\
\n\
output {\n\
# Forward errors to Bugsink\n\
if "error" in [tags] {\n\
http {\n\
url => "http://localhost:8000/api/store/"\n\
@@ -272,14 +279,23 @@ output {\n\
format => "json"\n\
}\n\
}\n\
\n\
# Store Redis operational logs (INFO level) to file\n\
if "redis_operational" in [tags] {\n\
file {\n\
path => "/var/log/logstash/redis-operational-%%{+YYYY-MM-dd}.log"\n\
codec => json_lines\n\
}\n\
}\n\
\n\
# Debug output (comment out in production)\n\
stdout { codec => rubydebug }\n\
}\n\
' > /etc/logstash/conf.d/bugsink.conf

# Create Logstash sincedb directory
# Create Logstash directories
RUN mkdir -p /var/lib/logstash && chown -R logstash:logstash /var/lib/logstash
RUN mkdir -p /var/log/logstash && chown -R logstash:logstash /var/log/logstash

# ============================================================================
# Set Working Directory
@@ -128,6 +128,25 @@ services:
      - ./docker/postgres/postgresql.conf.override:/etc/postgresql/postgresql.conf.d/custom.conf:ro
      # Create log volume for Logstash access (ADR-050)
      - postgres_logs:/var/log/postgresql
    # Override postgres command to include custom config (ADR-050)
    command: >
      postgres
      -c config_file=/var/lib/postgresql/data/postgresql.conf
      -c hba_file=/var/lib/postgresql/data/pg_hba.conf
      -c log_min_messages=notice
      -c client_min_messages=notice
      -c logging_collector=on
      -c log_destination=stderr
      -c log_directory=/var/log/postgresql
      -c log_filename=postgresql-%Y-%m-%d.log
      -c log_rotation_age=1d
      -c log_rotation_size=100MB
      -c log_truncate_on_rotation=on
      -c log_line_prefix='%t [%p] %u@%d '
      -c log_min_duration_statement=1000
      -c log_statement=none
      -c log_connections=on
      -c log_disconnections=on
    # Healthcheck ensures postgres is ready before app starts
    healthcheck:
      test: ['CMD-SHELL', 'pg_isready -U postgres -d flyer_crawler_dev']
@@ -24,6 +24,6 @@ log_min_duration_statement = 1000
# Log statement types (off for production, 'all' for debugging)
log_statement = 'none'

# Connection logging
# Connection logging (useful for dev, can be disabled in production)
log_connections = on
log_disconnections = on
@@ -1244,6 +1244,620 @@ If you only need application error tracking, the Sentry SDK integration is sufficient

---

## PostgreSQL Function Observability (ADR-050)

PostgreSQL function observability provides structured logging and error tracking for database functions, preventing silent failures. This setup forwards database errors to Bugsink for centralized monitoring. A hypothetical sketch of the `fn_log()` helper follows below for orientation.

See [ADR-050](adr/0050-postgresql-function-observability.md) for the full architecture decision.
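The real `fn_log()` definition lives in the project's migrations; for orientation only, a minimal sketch of what such a helper could look like (hypothetical — the actual signature and level handling may differ):

```bash
# Hypothetical fn_log() sketch, applied via psql. RAISE NOTICE is used here for
# simplicity; a real implementation would map p_level to RAISE NOTICE/WARNING
# so that log_min_messages filtering behaves as expected.
sudo -u postgres psql -d flyer-crawler-test <<'SQL'
CREATE OR REPLACE FUNCTION fn_log(
  p_level TEXT, p_function TEXT, p_message TEXT, p_context JSONB DEFAULT '{}'::jsonb
) RETURNS VOID LANGUAGE plpgsql AS $$
BEGIN
  -- Emit one JSON object per line; Logstash matches on the leading '{'.
  RAISE NOTICE '%', jsonb_build_object(
    'level', p_level,
    'function', p_function,
    'message', p_message,
    'context', p_context
  )::text;
END;
$$;
SQL
```
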
### Prerequisites

- PostgreSQL 14+ installed and running
- Logstash installed and configured (see [Logstash section](#logstash-log-aggregation) above)
- Bugsink running at `https://bugsink.projectium.com`

### Step 1: Configure PostgreSQL Logging

Create the observability configuration file:

```bash
sudo nano /etc/postgresql/14/main/conf.d/observability.conf
```
Add the following content:

```ini
# PostgreSQL Logging Configuration for Database Function Observability (ADR-050)

# Enable logging to files for Logstash pickup
logging_collector = on
log_destination = 'stderr'
log_directory = '/var/log/postgresql'
log_filename = 'postgresql-%Y-%m-%d.log'
log_rotation_age = 1d
log_rotation_size = 100MB
log_truncate_on_rotation = on

# Log level - capture NOTICE and above (includes fn_log WARNING/ERROR)
log_min_messages = notice
client_min_messages = notice

# Include useful context in log prefix
log_line_prefix = '%t [%p] %u@%d '

# Capture slow queries from functions (1 second threshold)
log_min_duration_statement = 1000

# Log statement types (off for production)
log_statement = 'none'

# Connection logging (off for production to reduce noise)
log_connections = off
log_disconnections = off
```

Set up the log directory:

```bash
# Create log directory
sudo mkdir -p /var/log/postgresql

# Set ownership to postgres user
sudo chown postgres:postgres /var/log/postgresql
sudo chmod 750 /var/log/postgresql
```

Restart PostgreSQL:

```bash
sudo systemctl restart postgresql
```

Verify logging is working:

```bash
# Check that log files are being created
ls -la /var/log/postgresql/

# Should see files like: postgresql-2026-01-20.log
```
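As an optional smoke test (a sketch, assuming the `flyer-crawler-test` database described in Step 3 already exists), raise a NOTICE and confirm it reaches the file:

```bash
# Raise a NOTICE and confirm it lands in today's log file.
sudo -u postgres psql -d flyer-crawler-test \
  -c "DO \$\$ BEGIN RAISE NOTICE 'observability smoke test'; END \$\$;"
grep 'observability smoke test' "/var/log/postgresql/postgresql-$(date +%Y-%m-%d).log"
```
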
### Step 2: Configure Logstash for PostgreSQL Logs

The Logstash configuration is located at `/etc/logstash/conf.d/bugsink.conf`.

**Key features:**

- Parses PostgreSQL log format with grok patterns
- Extracts JSON from `fn_log()` function calls
- Tags WARNING/ERROR level logs
- Routes production database errors to Bugsink project 1
- Routes test database errors to Bugsink project 3
- Transforms events to Sentry-compatible format

**Configuration file:** `/etc/logstash/conf.d/bugsink.conf`

See the [Logstash Configuration Reference](#logstash-configuration-reference) below for the complete configuration.

**Grant Logstash access to PostgreSQL logs:**

```bash
# Add logstash user to postgres group
sudo usermod -aG postgres logstash

# Verify group membership
groups logstash

# Restart Logstash to apply changes
sudo systemctl restart logstash
```

### Step 3: Test the Pipeline

Test structured logging from PostgreSQL:

```bash
# Production database (routes to Bugsink project 1)
sudo -u postgres psql -d flyer-crawler-prod -c "SELECT fn_log('WARNING', 'test_observability', 'Testing PostgreSQL observability pipeline', '{\"environment\": \"production\"}'::jsonb);"

# Test database (routes to Bugsink project 3)
sudo -u postgres psql -d flyer-crawler-test -c "SELECT fn_log('WARNING', 'test_observability', 'Testing PostgreSQL observability pipeline', '{\"environment\": \"test\"}'::jsonb);"
```
Check Bugsink UI:

- Production errors: <https://bugsink.projectium.com> → Project 1 (flyer-crawler-backend)
- Test errors: <https://bugsink.projectium.com> → Project 3 (flyer-crawler-backend-test)

### Step 4: Verify Database Functions

The following critical functions use `fn_log()` for observability:

| Function | What it logs |
| --- | --- |
| `award_achievement()` | Missing achievements, duplicate awards |
| `fork_recipe()` | Missing original recipes |
| `handle_new_user()` | User creation events |
| `approve_correction()` | Permission denied, corrections not found |
| `complete_shopping_list()` | Permission checks, list not found |

Test error logging with a database function:

```bash
# Try to award a non-existent achievement (should fail and log to Bugsink)
sudo -u postgres psql -d flyer-crawler-test -c "SELECT award_achievement('00000000-0000-0000-0000-000000000000'::uuid, 'NonexistentBadge');"

# Check Bugsink project 3 - should see an ERROR with full context
```

### Logstash Configuration Reference

Complete configuration for PostgreSQL observability (`/etc/logstash/conf.d/bugsink.conf`):
```conf
input {
  # PostgreSQL function logs (ADR-050)
  # Both production and test databases write to the same log files
  file {
    path => "/var/log/postgresql/*.log"
    type => "postgres"
    tags => ["postgres", "database"]
    start_position => "beginning"
    sincedb_path => "/var/lib/logstash/sincedb_postgres"
  }
}

filter {
  # PostgreSQL function log parsing (ADR-050)
  if [type] == "postgres" {

    # Extract timestamp, timezone, process ID, user, database, level, and message
    grok {
      match => { "message" => "%{TIMESTAMP_ISO8601:pg_timestamp} [+-]%{INT:pg_timezone} \[%{POSINT:pg_pid}\] %{DATA:pg_user}@%{DATA:pg_database} %{WORD:pg_level}: %{GREEDYDATA:pg_message}" }
    }

    # Try to parse pg_message as JSON (from fn_log())
    if [pg_message] =~ /^\{/ {
      json {
        source => "pg_message"
        target => "fn_log"
        skip_on_invalid_json => true
      }

      # Mark as error if level is WARNING or ERROR
      if [fn_log][level] in ["WARNING", "ERROR"] {
        mutate { add_tag => ["error", "db_function"] }
      }
    }

    # Also catch native PostgreSQL errors
    if [pg_level] in ["ERROR", "FATAL"] {
      mutate { add_tag => ["error", "postgres_native"] }
    }

    # Detect environment from database name
    if [pg_database] == "flyer-crawler-prod" {
      mutate {
        add_tag => ["production"]
      }
    } else if [pg_database] == "flyer-crawler-test" {
      mutate {
        add_tag => ["test"]
      }
    }

    # Generate event_id for Sentry
    if "error" in [tags] {
      uuid {
        target => "[@metadata][event_id]"
        overwrite => true
      }
    }
  }
}

output {
  # Production database errors -> project 1 (flyer-crawler-backend)
  if "error" in [tags] and "production" in [tags] {
    http {
      url => "https://bugsink.projectium.com/api/1/store/"
      http_method => "post"
      format => "json"
      headers => {
        "X-Sentry-Auth" => "Sentry sentry_version=7, sentry_client=logstash/1.0, sentry_key=911aef02b9a548fa8fabb8a3c81abfe5"
        "Content-Type" => "application/json"
      }
      mapping => {
        "event_id" => "%{[@metadata][event_id]}"
        "timestamp" => "%{@timestamp}"
        "platform" => "other"
        "level" => "error"
        "logger" => "postgresql"
        "message" => "%{[fn_log][message]}"
        "environment" => "production"
        "extra" => {
          "pg_user" => "%{[pg_user]}"
          "pg_database" => "%{[pg_database]}"
          "pg_function" => "%{[fn_log][function]}"
          "pg_level" => "%{[pg_level]}"
          "context" => "%{[fn_log][context]}"
        }
      }
    }
  }

  # Test database errors -> project 3 (flyer-crawler-backend-test)
  if "error" in [tags] and "test" in [tags] {
    http {
      url => "https://bugsink.projectium.com/api/3/store/"
      http_method => "post"
      format => "json"
      headers => {
        "X-Sentry-Auth" => "Sentry sentry_version=7, sentry_client=logstash/1.0, sentry_key=cdb99c314589431e83d4cc38a809449b"
        "Content-Type" => "application/json"
      }
      mapping => {
        "event_id" => "%{[@metadata][event_id]}"
        "timestamp" => "%{@timestamp}"
        "platform" => "other"
        "level" => "error"
        "logger" => "postgresql"
        "message" => "%{[fn_log][message]}"
        "environment" => "test"
        "extra" => {
          "pg_user" => "%{[pg_user]}"
          "pg_database" => "%{[pg_database]}"
          "pg_function" => "%{[fn_log][function]}"
          "pg_level" => "%{[pg_level]}"
          "context" => "%{[fn_log][context]}"
        }
      }
    }
  }
}
```
### Extended Logstash Configuration (PM2, Redis, NGINX)

The complete production Logstash configuration includes additional log sources beyond PostgreSQL:

**Input Sources:**
```conf
input {
  # PostgreSQL function logs (shown above)

  # PM2 Worker stdout logs (production)
  file {
    path => "/home/gitea-runner/.pm2/logs/flyer-crawler-worker-*.log"
    type => "pm2_stdout"
    tags => ["infra", "pm2", "worker", "production"]
    start_position => "end"
    sincedb_path => "/var/lib/logstash/sincedb_pm2_worker_prod"
    exclude => "*-test-*.log"
  }

  # PM2 Analytics Worker stdout (production)
  file {
    path => "/home/gitea-runner/.pm2/logs/flyer-crawler-analytics-worker-*.log"
    type => "pm2_stdout"
    tags => ["infra", "pm2", "analytics", "production"]
    start_position => "end"
    sincedb_path => "/var/lib/logstash/sincedb_pm2_analytics_prod"
    exclude => "*-test-*.log"
  }

  # PM2 Worker stdout (test environment)
  file {
    path => "/home/gitea-runner/.pm2/logs/flyer-crawler-worker-test-*.log"
    type => "pm2_stdout"
    tags => ["infra", "pm2", "worker", "test"]
    start_position => "end"
    sincedb_path => "/var/lib/logstash/sincedb_pm2_worker_test"
  }

  # PM2 Analytics Worker stdout (test environment)
  file {
    path => "/home/gitea-runner/.pm2/logs/flyer-crawler-analytics-worker-test-*.log"
    type => "pm2_stdout"
    tags => ["infra", "pm2", "analytics", "test"]
    start_position => "end"
    sincedb_path => "/var/lib/logstash/sincedb_pm2_analytics_test"
  }

  # Redis logs (already configured)
  file {
    path => "/var/log/redis/redis-server.log"
    type => "redis"
    tags => ["infra", "redis"]
    start_position => "end"
    sincedb_path => "/var/lib/logstash/sincedb_redis"
  }

  # NGINX access logs
  file {
    path => "/var/log/nginx/access.log"
    type => "nginx_access"
    tags => ["infra", "nginx", "access"]
    start_position => "end"
    sincedb_path => "/var/lib/logstash/sincedb_nginx_access"
  }

  # NGINX error logs
  file {
    path => "/var/log/nginx/error.log"
    type => "nginx_error"
    tags => ["infra", "nginx", "error"]
    start_position => "end"
    sincedb_path => "/var/lib/logstash/sincedb_nginx_error"
  }
}
```
**Filter Rules:**
```conf
filter {
  # PostgreSQL filters (shown above)

  # PM2 Worker log parsing
  if [type] == "pm2_stdout" {
    # Try to parse as JSON first (if worker uses Pino)
    json {
      source => "message"
      target => "pm2_json"
      skip_on_invalid_json => true
    }

    # If JSON parsing succeeded, extract level and tag errors
    # (Pino numeric levels: 50 = error, 60 = fatal)
    if [pm2_json][level] {
      if [pm2_json][level] >= 50 {
        mutate { add_tag => ["error"] }
      }
    }
    # If not JSON, check for error keywords in plain text
    else if [message] =~ /(Error|ERROR|Exception|EXCEPTION|Fatal|FATAL|failed|FAILED)/ {
      mutate { add_tag => ["error"] }
    }

    # Generate event_id for errors
    if "error" in [tags] {
      uuid {
        target => "[@metadata][event_id]"
        overwrite => true
      }
    }
  }

  # Redis log parsing
  if [type] == "redis" {
    grok {
      match => { "message" => "%{POSINT:pid}:%{WORD:role} %{MONTHDAY} %{MONTH} %{TIME} %{WORD:loglevel} %{GREEDYDATA:redis_message}" }
    }

    # Tag errors (WARNING/ERROR) for Bugsink forwarding
    if [loglevel] in ["WARNING", "ERROR"] {
      mutate { add_tag => ["error"] }
      uuid {
        target => "[@metadata][event_id]"
        overwrite => true
      }
    }
    # Tag INFO-level operational events (startup, config, persistence)
    else if [loglevel] == "INFO" {
      mutate { add_tag => ["redis_operational"] }
    }
  }

  # NGINX access log parsing
  if [type] == "nginx_access" {
    grok {
      match => { "message" => "%{COMBINEDAPACHELOG}" }
    }

    # Parse response time if available (requires NGINX log format with request_time)
    if [message] =~ /request_time:(\d+\.\d+)/ {
      grok {
        match => { "message" => "request_time:(?<request_time_seconds>\d+\.\d+)" }
      }
    }

    # Categorize by status code
    if [response] =~ /^5\d{2}$/ {
      mutate { add_tag => ["error", "http_5xx"] }
      uuid {
        target => "[@metadata][event_id]"
        overwrite => true
      }
    }
    else if [response] =~ /^4\d{2}$/ {
      mutate { add_tag => ["client_error", "http_4xx"] }
    }
    else if [response] =~ /^2\d{2}$/ {
      mutate { add_tag => ["success", "http_2xx"] }
    }
    else if [response] =~ /^3\d{2}$/ {
      mutate { add_tag => ["redirect", "http_3xx"] }
    }

    # Tag slow requests (>1 second response time)
    if [request_time_seconds] and [request_time_seconds] > 1.0 {
      mutate { add_tag => ["slow_request"] }
    }

    # Always tag for monitoring
    mutate { add_tag => ["access_log"] }
  }

  # NGINX error log parsing
  if [type] == "nginx_error" {
    mutate { add_tag => ["error"] }
    uuid {
      target => "[@metadata][event_id]"
      overwrite => true
    }
  }
}
```
**Output Rules:**

```conf
output {
  # Production errors -> Bugsink infrastructure project (5)
  # Includes: PM2 worker errors, Redis errors, NGINX 5xx, PostgreSQL errors
  if "error" in [tags] and "infra" in [tags] and "production" in [tags] {
    http {
      url => "https://bugsink.projectium.com/api/5/store/"
      http_method => "post"
      format => "json"
      headers => {
        "X-Sentry-Auth" => "Sentry sentry_version=7, sentry_client=logstash/1.0, sentry_key=b083076f94fb461b889d5dffcbef43bf"
        "Content-Type" => "application/json"
      }
      mapping => {
        "event_id" => "%{[@metadata][event_id]}"
        "timestamp" => "%{@timestamp}"
        "platform" => "other"
        "level" => "error"
        "logger" => "%{type}"
        "message" => "%{message}"
        "environment" => "production"
      }
    }
  }

  # Test errors -> Bugsink test infrastructure project (6)
  if "error" in [tags] and "infra" in [tags] and "test" in [tags] {
    http {
      url => "https://bugsink.projectium.com/api/6/store/"
      http_method => "post"
      format => "json"
      headers => {
        "X-Sentry-Auth" => "Sentry sentry_version=7, sentry_client=logstash/1.0, sentry_key=25020dd6c2b74ad78463ec90e90fadab"
        "Content-Type" => "application/json"
      }
      mapping => {
        "event_id" => "%{[@metadata][event_id]}"
        "timestamp" => "%{@timestamp}"
        "platform" => "other"
        "level" => "error"
        "logger" => "%{type}"
        "message" => "%{message}"
        "environment" => "test"
      }
    }
  }

  # PM2 worker operational logs (non-errors) -> file
  if [type] == "pm2_stdout" and "error" not in [tags] {
    file {
      path => "/var/log/logstash/pm2-workers-%{+YYYY-MM-dd}.log"
      codec => json_lines
    }
  }

  # Redis INFO logs (operational events) -> file
  if "redis_operational" in [tags] {
    file {
      path => "/var/log/logstash/redis-operational-%{+YYYY-MM-dd}.log"
      codec => json_lines
    }
  }

  # NGINX access logs (all requests) -> file
  if "access_log" in [tags] {
    file {
      path => "/var/log/logstash/nginx-access-%{+YYYY-MM-dd}.log"
      codec => json_lines
    }
  }
}
```
**Setup Instructions:**

1. Create log output directory:

```bash
sudo mkdir -p /var/log/logstash
sudo chown logstash:logstash /var/log/logstash
```

2. Configure logrotate for Logstash file outputs:

```bash
sudo tee /etc/logrotate.d/logstash <<EOF
/var/log/logstash/*.log {
    daily
    rotate 30
    compress
    delaycompress
    missingok
    notifempty
    create 0644 logstash logstash
}
EOF
```

3. Verify Logstash can read PM2 logs:

```bash
# Add logstash to required groups
sudo usermod -a -G postgres logstash
sudo usermod -a -G adm logstash

# Test permissions
sudo -u logstash cat /home/gitea-runner/.pm2/logs/flyer-crawler-worker-*.log | head -5
sudo -u logstash cat /var/log/redis/redis-server.log | head -5
sudo -u logstash cat /var/log/nginx/access.log | head -5
```

4. Restart Logstash:

```bash
sudo systemctl restart logstash
```
**Verification:**

```bash
# Check Logstash is processing new log sources
curl -s http://localhost:9600/_node/stats/pipelines?pretty | jq '.pipelines.main.events'

# Check file outputs
ls -lh /var/log/logstash/
tail -f /var/log/logstash/pm2-workers-$(date +%Y-%m-%d).log
tail -f /var/log/logstash/redis-operational-$(date +%Y-%m-%d).log
tail -f /var/log/logstash/nginx-access-$(date +%Y-%m-%d).log
```
### Troubleshooting

| Issue | Solution |
| --- | --- |
| No logs appearing in Bugsink | Check Logstash status: `sudo journalctl -u logstash -f` |
| Permission denied errors | Verify logstash is in postgres group: `groups logstash` |
| Grok parse failures | Check Logstash stats: `curl -s http://localhost:9600/_node/stats/pipelines?pretty \| grep failures` |
| Wrong Bugsink project | Verify database name detection in filter (flyer-crawler-prod vs flyer-crawler-test) |
| PostgreSQL logs not created | Check `logging_collector = on` and restart PostgreSQL |
| Events not formatted correctly | Check mapping in output section matches Sentry event schema |
| Test config before restarting | Run: `/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/bugsink.conf` |

### Maintenance Commands

| Task | Command |
| --- | --- |
| View Logstash status | `sudo systemctl status logstash` |
| View Logstash logs | `sudo journalctl -u logstash -f` |
| View PostgreSQL logs | `tail -f /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log` |
| Test Logstash config | `/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/bugsink.conf` |
| Restart Logstash | `sudo systemctl restart logstash` |
| Check Logstash pipeline stats | `curl -s http://localhost:9600/_node/stats/pipelines?pretty` |
| Clear sincedb (re-read logs) | `sudo rm /var/lib/logstash/sincedb_postgres && sudo systemctl restart logstash` |

---

## SSL/TLS with Let's Encrypt

### Install Certbot
460 docs/LOGSTASH-TROUBLESHOOTING.md Normal file
@@ -0,0 +1,460 @@
# Logstash Troubleshooting Runbook

This runbook provides step-by-step diagnostics and solutions for common Logstash issues in the PostgreSQL observability pipeline (ADR-050).

## Quick Reference

| Symptom | Most Likely Cause | Quick Check |
| --- | --- | --- |
| No errors in Bugsink | Logstash not running | `systemctl status logstash` |
| Events not processed | Grok pattern mismatch | Check filter failures in stats |
| Wrong Bugsink project | Environment detection failed | Verify `pg_database` field extraction |
| 403 authentication error | Missing/wrong DSN key | Check `X-Sentry-Auth` header |
| 500 error from Bugsink | Invalid event format | Verify `event_id` and required fields |

---

## Diagnostic Steps

### 1. Verify Logstash is Running

```bash
# Check service status
systemctl status logstash

# If stopped, start it
systemctl start logstash

# View recent logs
journalctl -u logstash -n 50 --no-pager
```

**Expected output:**

- Status: `active (running)`
- No error messages in recent logs

---

### 2. Check Configuration Syntax

```bash
# Test configuration file
/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/bugsink.conf
```

**Expected output:**

```
Configuration OK
```

**If syntax errors:**

1. Review the error message for a line number
2. Check for missing braces, quotes, or commas
3. Verify plugin names are correct (e.g., `json`, `grok`, `uuid`, `http`)
### 3. Verify PostgreSQL Logs Are Being Read

```bash
# Check if log file exists and has content
ls -lh /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log

# Check Logstash can read the file
sudo -u logstash cat /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log | head -10
```

**Expected output:**

- Log file exists and is not empty
- Logstash user can read the file without permission errors

**If permission denied:**

```bash
# Check Logstash is in postgres group
groups logstash

# Should show: logstash : logstash adm postgres

# If not, add to group
usermod -a -G postgres logstash
systemctl restart logstash
```

---

### 4. Check Logstash Pipeline Stats

```bash
# Get pipeline statistics
curl -XGET 'localhost:9600/_node/stats/pipelines?pretty' | jq '.pipelines.main.plugins.filters'
```

**Key metrics to check:**

1. **Grok filter events:**
   - `"events.in"` - Total events received
   - `"events.out"` - Events successfully parsed
   - `"failures"` - Events that failed to parse

   **If failures > 0:** The grok pattern doesn't match the log format. Check the PostgreSQL log format.

2. **JSON filter events:**
   - `"events.in"` - Events received by JSON parser
   - `"events.out"` - Successfully parsed JSON

   **If events.in = 0:** The regex check `pg_message =~ /^\{/` is not matching. Verify the fn_log() output format.

3. **UUID filter events:**
   - Should match the number of errors being forwarded

---
### 5. Test Grok Pattern Manually

```bash
# Get a sample log line
tail -1 /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log

# Example expected format:
# 2026-01-20 10:30:00 +05 [12345] flyer_crawler_prod@flyer-crawler-prod WARNING: {"level":"WARNING","source":"postgresql",...}
```

**Pattern breakdown:**

```
%{TIMESTAMP_ISO8601:pg_timestamp}    # 2026-01-20 10:30:00
[+-]%{INT:pg_timezone}               # +05
\[%{POSINT:pg_pid}\]                 # [12345]
%{DATA:pg_user}@%{DATA:pg_database}  # flyer_crawler_prod@flyer-crawler-prod
%{WORD:pg_level}:                    # WARNING:
%{GREEDYDATA:pg_message}             # (rest of line)
```
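To exercise the pattern outside the main pipeline, a throwaway stdin pipeline can help (a sketch: `-e` takes an inline config, and the JVM startup adds roughly 30-60 seconds):

```bash
# Feed one sample line through the grok pattern and print the parsed event.
echo '2026-01-20 10:30:00 +05 [12345] flyer_crawler_prod@flyer-crawler-prod WARNING: {"level":"WARNING"}' | \
/usr/share/logstash/bin/logstash -e '
  input { stdin {} }
  filter {
    grok { match => { "message" => "%{TIMESTAMP_ISO8601:pg_timestamp} [+-]%{INT:pg_timezone} \[%{POSINT:pg_pid}\] %{DATA:pg_user}@%{DATA:pg_database} %{WORD:pg_level}: %{GREEDYDATA:pg_message}" } }
  }
  output { stdout { codec => rubydebug } }'
```
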
**If the pattern doesn't match:**

1. Check the PostgreSQL `log_line_prefix` setting in `/etc/postgresql/14/main/conf.d/observability.conf`
2. It should be: `log_line_prefix = '%t [%p] %u@%d '`
3. Restart PostgreSQL if changed: `systemctl restart postgresql`

---

### 6. Verify Environment Detection

```bash
# Check recent PostgreSQL logs for the database field
tail -20 /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log | grep -E "flyer-crawler-(prod|test)"
```

**Expected:**

- Production database: `flyer_crawler_prod@flyer-crawler-prod`
- Test database: `flyer_crawler_test@flyer-crawler-test`

**If the database name doesn't match:**

- Check the database connection string in the application
- Verify the `DB_DATABASE_PROD` and `DB_DATABASE_TEST` Gitea secrets

---

### 7. Test Bugsink API Connection

```bash
# Test production endpoint
curl -X POST https://bugsink.projectium.com/api/1/store/ \
  -H "X-Sentry-Auth: Sentry sentry_version=7, sentry_client=test/1.0, sentry_key=911aef02b9a548fa8fabb8a3c81abfe5" \
  -H "Content-Type: application/json" \
  -d '{
    "event_id": "12345678901234567890123456789012",
    "timestamp": "2026-01-20T10:30:00Z",
    "platform": "other",
    "level": "error",
    "logger": "test",
    "message": "Test error from troubleshooting"
  }'
```

**Expected response:**

- HTTP 200 OK
- Response body: `{"id": "..."}`

**If 403 Forbidden:**

- The DSN key is wrong in `/etc/logstash/conf.d/bugsink.conf`
- Get the correct key from the Bugsink UI: Settings → Projects → DSN

**If 500 Internal Server Error:**

- Missing required fields (event_id, timestamp, level)
- Check the `mapping` section in the Logstash config
---

### 8. Monitor Logstash Output in Real-Time

```bash
# Watch Logstash processing logs
journalctl -u logstash -f
```

**What to look for:**

- `"response code => 200"` - Successful forwarding to Bugsink
- `"response code => 403"` - Authentication failure
- `"response code => 500"` - Invalid event format
- Grok parse failures

---

## Common Issues and Solutions

### Issue 1: Grok Pattern Parse Failures

**Symptoms:**

- Logstash stats show an increasing `"failures"` count
- No events reaching Bugsink

**Diagnosis:**

```bash
curl -XGET 'localhost:9600/_node/stats/pipelines?pretty' | jq '.pipelines.main.plugins.filters[] | select(.name == "grok") | .failures'
```

**Solution:**

1. Check that the PostgreSQL log format matches the expected pattern
2. Verify `log_line_prefix` in the PostgreSQL config
3. Test with a sample log line using the Grok Debugger (Kibana Dev Tools)

---

### Issue 2: JSON Filter Not Parsing fn_log() Output

**Symptoms:**

- Grok parses successfully but the JSON filter shows 0 events
- `[fn_log]` fields missing in Logstash output

**Diagnosis:**

```bash
# Check if the pg_message field contains JSON
tail -20 /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log | grep "WARNING:" | grep "{"
```

**Solution:**

1. Verify the `fn_log()` function exists in the database:

   ```sql
   \df fn_log
   ```

2. Test the `fn_log()` output format:

   ```sql
   SELECT fn_log('WARNING', 'test', 'Test message', '{"key":"value"}'::jsonb);
   ```

3. Check that the logs show JSON output starting with `{`
---

### Issue 3: Events Going to Wrong Bugsink Project

**Symptoms:**

- Production errors appear in the test project (or vice versa)

**Diagnosis:**

```bash
# Check database name detection in recent logs
tail -50 /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log | grep -E "(flyer-crawler-prod|flyer-crawler-test)"
```

**Solution:**

1. Verify the database names in the filter section match the actual database names
2. Check that the `pg_database` field is correctly extracted by the grok pattern:

   ```conf
   # Enable debug output in the Logstash config temporarily
   stdout { codec => rubydebug { metadata => true } }
   ```

3. Verify the environment tagging in the filter:
   - `pg_database == "flyer-crawler-prod"` → adds "production" tag → routes to project 1
   - `pg_database == "flyer-crawler-test"` → adds "test" tag → routes to project 3

---

### Issue 4: 403 Authentication Errors from Bugsink

**Symptoms:**

- Logstash logs show `response code => 403`
- Events not appearing in Bugsink

**Diagnosis:**

```bash
# Check Logstash output logs for authentication errors
journalctl -u logstash -n 100 | grep "403"
```

**Solution:**

1. Verify the DSN key in `/etc/logstash/conf.d/bugsink.conf` matches the Bugsink project
2. Get the correct DSN from the Bugsink UI:
   - Navigate to Settings → Projects → Click the project
   - Copy the "DSN" value
   - Extract the key: `http://KEY@host/PROJECT_ID` → use KEY
3. Update the `X-Sentry-Auth` header in the Logstash config:

   ```conf
   "X-Sentry-Auth" => "Sentry sentry_version=7, sentry_client=logstash/1.0, sentry_key=YOUR_KEY_HERE"
   ```

4. Restart Logstash: `systemctl restart logstash`
---

### Issue 5: 500 Errors from Bugsink

**Symptoms:**

- Logstash logs show `response code => 500`
- Bugsink logs show validation errors

**Diagnosis:**

```bash
# Check Bugsink logs for details
docker logs bugsink-web 2>&1 | tail -50
```

**Common causes:**

1. Missing `event_id` field
2. Invalid timestamp format
3. Missing required Sentry fields

**Solution:**

1. Verify the `uuid` filter is generating `event_id`:

   ```conf
   uuid {
     target => "[@metadata][event_id]"
     overwrite => true
   }
   ```

2. Check the `mapping` section includes all required fields:
   - `event_id` (UUID)
   - `timestamp` (ISO 8601)
   - `platform` (string)
   - `level` (error/warning/info)
   - `logger` (string)
   - `message` (string)

---

### Issue 6: High Memory Usage by Logstash

**Symptoms:**

- Server running out of memory
- Logstash OOM killed

**Diagnosis:**

```bash
# Check Logstash memory usage
ps aux | grep logstash
systemctl status logstash
```

**Solution:**

1. Limit the Logstash heap size in `/etc/logstash/jvm.options`:

   ```
   -Xms1g
   -Xmx1g
   ```

2. Restart Logstash: `systemctl restart logstash`
3. Monitor with: `top -p $(pgrep -f logstash)`
---

### Issue 7: Log File Rotation Issues

**Symptoms:**

- Logstash stops processing after the log file rotates
- Sincedb file pointing to an old inode

**Diagnosis:**

```bash
# Check the sincedb file
cat /var/lib/logstash/sincedb_postgres

# Check the current log file inode
ls -li /var/log/postgresql/postgresql-$(date +%Y-%m-%d).log
```

**Solution:**

1. Logstash should detect rotation automatically
2. If stuck, delete the sincedb file (recent logs will be reprocessed):

   ```bash
   systemctl stop logstash
   rm /var/lib/logstash/sincedb_postgres
   systemctl start logstash
   ```

---

## Verification Checklist

After making any changes, verify the pipeline is working (the sketch after this list runs the checks in one pass):

- [ ] Logstash is running: `systemctl status logstash`
- [ ] Configuration is valid: `/usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/bugsink.conf`
- [ ] No grok failures: `curl localhost:9600/_node/stats/pipelines?pretty | jq '.pipelines.main.plugins.filters[] | select(.name == "grok") | .failures'`
- [ ] Events being processed: `curl localhost:9600/_node/stats/pipelines?pretty | jq '.pipelines.main.events'`
- [ ] Test error appears in Bugsink: Trigger a database function error and check the Bugsink UI
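A sketch that combines the checklist into one script (assumes `jq` is installed):

```bash
#!/usr/bin/env bash
# Run the verification checklist above in one pass.
set -u
systemctl is-active --quiet logstash \
  && echo "OK: logstash running" || echo "FAIL: logstash not running"
/usr/share/logstash/bin/logstash --config.test_and_exit \
  -f /etc/logstash/conf.d/bugsink.conf >/dev/null 2>&1 \
  && echo "OK: config valid" || echo "FAIL: config invalid"
echo "grok failures:"
curl -s 'localhost:9600/_node/stats/pipelines?pretty' \
  | jq '.pipelines.main.plugins.filters[] | select(.name == "grok") | .failures'
echo "pipeline events:"
curl -s 'localhost:9600/_node/stats/pipelines?pretty' | jq '.pipelines.main.events'
```
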
---

## Test Database Function Error

To generate a test error for verification:

```bash
# Connect to the production database
sudo -u postgres psql -d flyer-crawler-prod

# Trigger an error (achievement not found)
SELECT award_achievement('00000000-0000-0000-0000-000000000001'::uuid, 'Nonexistent Badge');
\q
```

**Expected flow:**

1. PostgreSQL logs the error to `/var/log/postgresql/postgresql-YYYY-MM-DD.log`
2. Logstash reads and parses the log (within ~30 seconds)
3. The error appears in Bugsink project 1 (production)

**If the error doesn't appear:**

- Work through each diagnostic step above
- Review the Logstash logs: `journalctl -u logstash -f`

---

## Related Documentation

- **Setup Guide**: [docs/BARE-METAL-SETUP.md](BARE-METAL-SETUP.md) - PostgreSQL Function Observability section
- **Architecture**: [docs/adr/0050-postgresql-function-observability.md](adr/0050-postgresql-function-observability.md)
- **Configuration Reference**: [CLAUDE.md](../CLAUDE.md) - Logstash Configuration section
- **Bugsink MCP Server**: [CLAUDE.md](../CLAUDE.md) - Sentry/Bugsink MCP Server Setup section
4 package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "flyer-crawler",
  "version": "0.11.17",
  "version": "0.11.19",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "flyer-crawler",
      "version": "0.11.17",
      "version": "0.11.19",
      "dependencies": {
        "@bull-board/api": "^6.14.2",
        "@bull-board/express": "^6.14.2",

@@ -1,7 +1,7 @@
{
  "name": "flyer-crawler",
  "private": true,
  "version": "0.11.17",
  "version": "0.11.19",
  "type": "module",
  "scripts": {
    "dev": "concurrently \"npm:start:dev\" \"vite\"",
@@ -73,7 +73,25 @@ RETURNS TABLE (
LANGUAGE plpgsql
SECURITY INVOKER -- Runs with the privileges of the calling user.
AS $$
DECLARE
  v_watched_items_count INTEGER;
  v_result_count INTEGER;
  v_context JSONB;
BEGIN
  v_context := jsonb_build_object('user_id', p_user_id);

  -- Tier 2 logging: Check if user has any watched items
  SELECT COUNT(*) INTO v_watched_items_count
  FROM public.user_watched_items
  WHERE user_id = p_user_id;

  IF v_watched_items_count = 0 THEN
    PERFORM fn_log('NOTICE', 'get_best_sale_prices_for_user',
      'User has no watched items',
      v_context);
    RETURN; -- Return empty result set
  END IF;

  RETURN QUERY
  WITH UserWatchedSales AS (
    -- This CTE gathers all sales from active flyers that match the user's watched items.
@@ -104,6 +122,20 @@ BEGIN
  SELECT uws.master_item_id, uws.item_name, uws.price_in_cents, uws.store_name, uws.flyer_id, uws.flyer_icon_url, uws.flyer_image_url, uws.flyer_valid_from, uws.flyer_valid_to
  FROM UserWatchedSales uws
  WHERE uws.rn = 1;

  -- Tier 2 logging: Check if any sales were found
  GET DIAGNOSTICS v_result_count = ROW_COUNT;
  IF v_result_count = 0 THEN
    PERFORM fn_log('NOTICE', 'get_best_sale_prices_for_user',
      'No sales found for watched items',
      v_context || jsonb_build_object('watched_items_count', v_watched_items_count));
  END IF;
EXCEPTION
  WHEN OTHERS THEN
    PERFORM fn_log('ERROR', 'get_best_sale_prices_for_user',
      'Unexpected error getting best sale prices: ' || SQLERRM,
      v_context);
    RAISE;
END;
$$;
@@ -125,7 +157,42 @@ RETURNS TABLE (
LANGUAGE plpgsql
SECURITY INVOKER -- Runs with the privileges of the calling user.
AS $$
DECLARE
  v_menu_plan_exists BOOLEAN;
  v_planned_meals_count INTEGER;
  v_result_count INTEGER;
  v_context JSONB;
BEGIN
  v_context := jsonb_build_object(
    'menu_plan_id', p_menu_plan_id,
    'user_id', p_user_id
  );

  -- Tier 2 logging: Check if menu plan exists and belongs to user
  SELECT EXISTS(
    SELECT 1 FROM public.menu_plans
    WHERE menu_plan_id = p_menu_plan_id AND user_id = p_user_id
  ) INTO v_menu_plan_exists;

  IF NOT v_menu_plan_exists THEN
    PERFORM fn_log('NOTICE', 'generate_shopping_list_for_menu_plan',
      'Menu plan not found or does not belong to user',
      v_context);
    RETURN; -- Return empty result set
  END IF;

  -- Tier 2 logging: Check if menu plan has any recipes
  SELECT COUNT(*) INTO v_planned_meals_count
  FROM public.planned_meals
  WHERE menu_plan_id = p_menu_plan_id;

  IF v_planned_meals_count = 0 THEN
    PERFORM fn_log('NOTICE', 'generate_shopping_list_for_menu_plan',
      'Menu plan has no recipes',
      v_context);
    RETURN; -- Return empty result set
  END IF;

  RETURN QUERY
  WITH RequiredIngredients AS (
    -- This CTE calculates the total quantity of each ingredient needed for the menu plan.
@@ -163,6 +230,20 @@ BEGIN
  WHERE
    -- Only include items that actually need to be purchased.
    GREATEST(0, req.total_required - COALESCE(pi.quantity, 0)) > 0;

  -- Tier 2 logging: Check if any items need to be purchased
  GET DIAGNOSTICS v_result_count = ROW_COUNT;
  IF v_result_count = 0 THEN
    PERFORM fn_log('NOTICE', 'generate_shopping_list_for_menu_plan',
      'All ingredients already in pantry (no shopping needed)',
      v_context || jsonb_build_object('planned_meals_count', v_planned_meals_count));
  END IF;
EXCEPTION
  WHEN OTHERS THEN
    PERFORM fn_log('ERROR', 'generate_shopping_list_for_menu_plan',
      'Unexpected error generating shopping list: ' || SQLERRM,
      v_context);
    RAISE;
END;
$$;
@@ -458,10 +539,14 @@ STABLE -- This function does not modify the database.
AS $$
DECLARE
  suggested_id BIGINT;
  best_score REAL;
  -- A similarity score between 0 and 1. A higher value means a better match.
  -- This threshold can be adjusted based on observed performance. 0.4 is a reasonable starting point.
  similarity_threshold REAL := 0.4;
  v_context JSONB;
BEGIN
  v_context := jsonb_build_object('flyer_item_name', p_flyer_item_name, 'similarity_threshold', similarity_threshold);

  WITH candidates AS (
    -- Search for matches in the primary master_grocery_items table
    SELECT
@@ -480,7 +565,14 @@ BEGIN
    WHERE alias % p_flyer_item_name
  )
  -- Select the master_item_id with the highest similarity score, provided it's above our threshold.
  SELECT master_item_id INTO suggested_id FROM candidates WHERE score >= similarity_threshold ORDER BY score DESC, master_item_id LIMIT 1;
  SELECT master_item_id, score INTO suggested_id, best_score FROM candidates WHERE score >= similarity_threshold ORDER BY score DESC, master_item_id LIMIT 1;

  -- Tier 2 logging: Log when no match found (anomaly detection)
  IF suggested_id IS NULL THEN
    PERFORM fn_log('INFO', 'suggest_master_item_for_flyer_item',
      'No master item match found for flyer item',
      v_context || jsonb_build_object('best_score', best_score));
  END IF;

  RETURN suggested_id;
END;
@@ -500,10 +592,18 @@ RETURNS TABLE (
  recommendation_score NUMERIC,
  recommendation_reason TEXT
)
LANGUAGE sql
LANGUAGE plpgsql
STABLE
SECURITY INVOKER
AS $$
DECLARE
  v_count INTEGER;
  v_context JSONB;
BEGIN
  v_context := jsonb_build_object('user_id', p_user_id, 'limit', p_limit);

  -- Execute the recommendation query
  RETURN QUERY
  WITH UserHighRatedRecipes AS (
    -- CTE 1: Get recipes the user has rated 4 stars or higher.
    SELECT rr.recipe_id, rr.rating
@@ -581,6 +681,15 @@ ORDER BY
    r.rating_count DESC,
    r.name ASC
  LIMIT p_limit;

  -- Tier 2 logging: Log when no recommendations generated (anomaly detection)
  GET DIAGNOSTICS v_count = ROW_COUNT;
  IF v_count = 0 THEN
    PERFORM fn_log('INFO', 'recommend_recipes_for_user',
      'No recipe recommendations generated for user',
      v_context);
  END IF;
END;
$$;
-- Function to approve a suggested correction and apply it.
|
||||
@@ -743,49 +852,85 @@ RETURNS TABLE(
|
||||
avg_rating NUMERIC,
|
||||
missing_ingredients_count BIGINT
|
||||
)
|
||||
LANGUAGE sql
|
||||
LANGUAGE plpgsql
|
||||
STABLE
|
||||
SECURITY INVOKER
|
||||
AS $$
|
||||
WITH UserPantryItems AS (
|
||||
-- CTE 1: Get a distinct set of master item IDs from the user's pantry.
|
||||
SELECT master_item_id, quantity, unit
|
||||
DECLARE
|
||||
v_pantry_item_count INTEGER;
|
||||
v_result_count INTEGER;
|
||||
v_context JSONB;
|
||||
BEGIN
|
||||
v_context := jsonb_build_object('user_id', p_user_id);
|
||||
|
||||
-- Tier 2 logging: Check if user has any pantry items
|
||||
SELECT COUNT(*) INTO v_pantry_item_count
|
||||
FROM public.pantry_items
|
||||
WHERE user_id = p_user_id AND quantity > 0
|
||||
),
|
||||
RecipeIngredientStats AS (
|
||||
-- CTE 2: For each recipe, count its total ingredients and how many of those are in the user's pantry.
|
||||
WHERE user_id = p_user_id AND quantity > 0;
|
||||
|
||||
IF v_pantry_item_count = 0 THEN
|
||||
PERFORM fn_log('NOTICE', 'find_recipes_from_pantry',
|
||||
'User has empty pantry',
|
||||
v_context);
|
||||
RETURN; -- Return empty result set
|
||||
END IF;
|
||||
|
||||
-- Execute the main query and return results
|
||||
RETURN QUERY
|
||||
WITH UserPantryItems AS (
|
||||
        -- CTE 1: Get a distinct set of master item IDs from the user's pantry.
        SELECT pi.master_item_id, pi.quantity, pi.unit
        FROM public.pantry_items pi
        WHERE pi.user_id = p_user_id AND pi.quantity > 0
    ),
    RecipeIngredientStats AS (
        -- CTE 2: For each recipe, count its total ingredients and how many of those are in the user's pantry.
        SELECT
            ri.recipe_id,
            -- Count how many ingredients DO NOT meet the pantry requirements.
            -- An ingredient is missing if it's not in the pantry OR if the quantity is insufficient.
            -- The filter condition handles this logic.
            COUNT(*) FILTER (
                WHERE upi.master_item_id IS NULL -- The item is not in the pantry at all
                   OR upi.quantity < ri.quantity -- The user has the item, but not enough of it
            ) AS missing_ingredients_count
        FROM public.recipe_ingredients ri
        -- LEFT JOIN to the user's pantry on both item and unit.
        -- We only compare quantities if the units match (e.g., 'g' vs 'g').
        LEFT JOIN UserPantryItems upi
            ON ri.master_item_id = upi.master_item_id
            AND ri.unit = upi.unit
        GROUP BY ri.recipe_id
    )
    -- Final Step: Select recipes where the total ingredient count matches the pantry ingredient count.
    SELECT
        r.recipe_id,
        r.name,
        r.description,
        r.prep_time_minutes,
        r.cook_time_minutes,
        r.avg_rating,
        ris.missing_ingredients_count
    FROM public.recipes r
    JOIN RecipeIngredientStats ris ON r.recipe_id = ris.recipe_id
    -- Order by recipes with the fewest missing ingredients first, then by rating.
    -- Recipes with 0 missing ingredients are the ones that can be made.
    ORDER BY ris.missing_ingredients_count ASC, r.avg_rating DESC, r.name ASC;

    -- Tier 2 logging: Check if any recipes were found
    GET DIAGNOSTICS v_result_count = ROW_COUNT;
    IF v_result_count = 0 THEN
        PERFORM fn_log('NOTICE', 'find_recipes_from_pantry',
            'No recipes found matching pantry items',
            v_context || jsonb_build_object('pantry_item_count', v_pantry_item_count));
    END IF;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'find_recipes_from_pantry',
            'Unexpected error finding recipes from pantry: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$;
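
A minimal smoke test of the rewritten function; the uuid literal is a placeholder and the parameter's uuid type is an assumption, since the signature sits above this hunk:

```sql
-- Hypothetical invocation; the user id and its type are placeholders.
SELECT recipe_id, name, missing_ingredients_count
FROM public.find_recipes_from_pantry('11111111-1111-1111-1111-111111111111')
WHERE missing_ingredients_count = 0; -- recipes the pantry can cook right now
```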

-- Function to suggest alternative units for a given pantry item.

@@ -1409,7 +1554,15 @@ DECLARE
    flyer_valid_to DATE;
    current_summary_date DATE;
    flyer_location_id BIGINT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'flyer_item_id', NEW.flyer_item_id,
        'flyer_id', NEW.flyer_id,
        'master_item_id', NEW.master_item_id,
        'price_in_cents', NEW.price_in_cents
    );

    -- If the item could not be matched, add it to the unmatched queue for review.
    IF NEW.master_item_id IS NULL THEN
        INSERT INTO public.unmatched_flyer_items (flyer_item_id)
@@ -1427,6 +1580,14 @@ BEGIN
    FROM public.flyers
    WHERE flyer_id = NEW.flyer_id;

    -- Tier 3 logging: Log when flyer lookup fails
    IF flyer_valid_from IS NULL OR flyer_valid_to IS NULL THEN
        PERFORM fn_log('ERROR', 'update_price_history_on_flyer_item_insert',
            'Flyer not found or missing validity dates',
            v_context);
        RETURN NEW;
    END IF;

    -- This single, set-based query is much more performant than looping.
    -- It generates all date/location pairs and inserts/updates them in one operation.
    INSERT INTO public.item_price_history (master_item_id, summary_date, store_location_id, min_price_in_cents, max_price_in_cents, avg_price_in_cents, data_points_count)
@@ -1449,6 +1610,14 @@ BEGIN
        data_points_count = item_price_history.data_points_count + 1;

    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'update_price_history_on_flyer_item_insert',
            'Unexpected error in price history update: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;
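
The hunk elides the upsert's ON CONFLICT clause; it presumably looks something like the sketch below. The conflict target and the running-average formula are assumptions — only the `data_points_count` increment is visible above:

```sql
-- Sketch of the set-based upsert inside the trigger body (assumed shape).
-- Assumes a unique index on (master_item_id, summary_date, store_location_id).
INSERT INTO public.item_price_history
    (master_item_id, summary_date, store_location_id,
     min_price_in_cents, max_price_in_cents, avg_price_in_cents, data_points_count)
SELECT NEW.master_item_id, gs::date, fl.store_location_id,
       NEW.price_in_cents, NEW.price_in_cents, NEW.price_in_cents, 1
FROM generate_series(flyer_valid_from, flyer_valid_to, '1 day'::interval) AS gs
JOIN public.flyer_locations fl ON fl.flyer_id = NEW.flyer_id
ON CONFLICT (master_item_id, summary_date, store_location_id) DO UPDATE SET
    min_price_in_cents = LEAST(item_price_history.min_price_in_cents, EXCLUDED.min_price_in_cents),
    max_price_in_cents = GREATEST(item_price_history.max_price_in_cents, EXCLUDED.max_price_in_cents),
    -- Assumed running-average update; not shown in the diff.
    avg_price_in_cents = ROUND((item_price_history.avg_price_in_cents::numeric * item_price_history.data_points_count
                         + EXCLUDED.avg_price_in_cents) / (item_price_history.data_points_count + 1))::int,
    data_points_count  = item_price_history.data_points_count + 1;
```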

@@ -1511,6 +1680,30 @@ BEGIN
        AND iph.store_location_id = na.store_location_id;

    -- 4. Delete any history records that no longer have any data points.
    -- We need to recreate the CTE since CTEs are scoped to a single statement.
    WITH affected_days_and_locations AS (
        SELECT DISTINCT
            generate_series(f.valid_from, f.valid_to, '1 day'::interval)::date AS summary_date,
            fl.store_location_id
        FROM public.flyers f
        JOIN public.flyer_locations fl ON f.flyer_id = fl.flyer_id
        WHERE f.flyer_id = OLD.flyer_id
    ),
    new_aggregates AS (
        SELECT
            adl.summary_date,
            adl.store_location_id,
            MIN(fi.price_in_cents) AS min_price,
            MAX(fi.price_in_cents) AS max_price,
            ROUND(AVG(fi.price_in_cents))::int AS avg_price,
            COUNT(fi.flyer_item_id)::int AS data_points
        FROM affected_days_and_locations adl
        LEFT JOIN public.flyer_items fi ON fi.master_item_id = OLD.master_item_id AND fi.price_in_cents IS NOT NULL
        LEFT JOIN public.flyers f ON fi.flyer_id = f.flyer_id AND adl.summary_date BETWEEN f.valid_from AND f.valid_to
        LEFT JOIN public.flyer_locations fl ON fi.flyer_id = fl.flyer_id AND adl.store_location_id = fl.store_location_id
        WHERE fl.flyer_id IS NOT NULL
        GROUP BY adl.summary_date, adl.store_location_id
    )
    DELETE FROM public.item_price_history iph
    WHERE iph.master_item_id = OLD.master_item_id
    AND NOT EXISTS (
@@ -1533,22 +1726,45 @@ DROP FUNCTION IF EXISTS public.update_recipe_rating_aggregates();

CREATE OR REPLACE FUNCTION public.update_recipe_rating_aggregates()
RETURNS TRIGGER AS $$
DECLARE
    v_recipe_id BIGINT;
    v_rows_updated INTEGER;
    v_context JSONB;
BEGIN
    v_recipe_id := COALESCE(NEW.recipe_id, OLD.recipe_id);
    v_context := jsonb_build_object('recipe_id', v_recipe_id);

    UPDATE public.recipes
    SET
        avg_rating = (
            SELECT AVG(rating)
            FROM public.recipe_ratings
            WHERE recipe_id = COALESCE(NEW.recipe_id, OLD.recipe_id) -- This is correct, no change needed
            WHERE recipe_id = v_recipe_id
        ),
        rating_count = (
            SELECT COUNT(*)
            FROM public.recipe_ratings
            WHERE recipe_id = COALESCE(NEW.recipe_id, OLD.recipe_id) -- This is correct, no change needed
            WHERE recipe_id = v_recipe_id
        )
    WHERE recipe_id = COALESCE(NEW.recipe_id, OLD.recipe_id);
    WHERE recipe_id = v_recipe_id;

    -- Tier 3 logging: Log when recipe update fails
    GET DIAGNOSTICS v_rows_updated = ROW_COUNT;
    IF v_rows_updated = 0 THEN
        PERFORM fn_log('ERROR', 'update_recipe_rating_aggregates',
            'Recipe not found for rating aggregate update',
            v_context);
    END IF;

    RETURN NULL; -- The result is ignored since this is an AFTER trigger.
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'update_recipe_rating_aggregates',
            'Unexpected error in rating aggregate update: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;
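
The trigger attachment is outside this diff; the aggregates function is presumably wired up along these lines (the trigger name is illustrative):

```sql
-- Hypothetical wiring; the actual CREATE TRIGGER lives elsewhere in the schema.
CREATE TRIGGER trg_update_recipe_rating_aggregates
AFTER INSERT OR UPDATE OR DELETE ON public.recipe_ratings
FOR EACH ROW
EXECUTE FUNCTION public.update_recipe_rating_aggregates();
```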

@@ -1563,12 +1779,30 @@ DROP FUNCTION IF EXISTS public.log_new_recipe();

CREATE OR REPLACE FUNCTION public.log_new_recipe()
RETURNS TRIGGER AS $$
DECLARE
    v_full_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'user_id', NEW.user_id,
        'recipe_id', NEW.recipe_id,
        'recipe_name', NEW.name
    );

    -- Get user's full name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_full_name FROM public.profiles WHERE user_id = NEW.user_id;
    IF v_full_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_recipe',
            'Profile not found for user creating recipe',
            v_context);
        v_full_name := 'Unknown User';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.user_id,
        'recipe_created',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.user_id) || ' created a new recipe: ' || NEW.name,
        v_full_name || ' created a new recipe: ' || NEW.name,
        'chef-hat',
        jsonb_build_object('recipe_id', NEW.recipe_id, 'recipe_name', NEW.name)
    );
@@ -1577,6 +1811,14 @@ BEGIN
    PERFORM public.award_achievement(NEW.user_id, 'First Recipe');

    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'log_new_recipe',
            'Unexpected error in recipe activity logging: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -1593,13 +1835,39 @@ DROP FUNCTION IF EXISTS public.update_flyer_item_count();

CREATE OR REPLACE FUNCTION public.update_flyer_item_count()
RETURNS TRIGGER AS $$
DECLARE
    v_rows_updated INTEGER;
    v_context JSONB;
    v_flyer_id BIGINT;
BEGIN
    -- Determine which flyer_id to use based on operation
    IF (TG_OP = 'INSERT') THEN
        v_flyer_id := NEW.flyer_id;
        v_context := jsonb_build_object('flyer_id', NEW.flyer_id, 'operation', 'INSERT');

        UPDATE public.flyers SET item_count = item_count + 1 WHERE flyer_id = NEW.flyer_id;
    ELSIF (TG_OP = 'DELETE') THEN
        v_flyer_id := OLD.flyer_id;
        v_context := jsonb_build_object('flyer_id', OLD.flyer_id, 'operation', 'DELETE');

        UPDATE public.flyers SET item_count = item_count - 1 WHERE flyer_id = OLD.flyer_id;
    END IF;

    -- Tier 3 logging: Log if flyer not found
    GET DIAGNOSTICS v_rows_updated = ROW_COUNT;
    IF v_rows_updated = 0 THEN
        PERFORM fn_log('ERROR', 'update_flyer_item_count',
            'Flyer not found for item count update',
            v_context);
    END IF;

    RETURN NULL; -- The result is ignored since this is an AFTER trigger.
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'update_flyer_item_count',
            'Unexpected error updating flyer item count: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;
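
A quick way to exercise the counter by hand — the id and the column list are illustrative, not taken from this diff:

```sql
-- Assumed columns; roll back so the check leaves no trace.
BEGIN;
INSERT INTO public.flyer_items (flyer_id, price_in_cents) VALUES (1, 199);
SELECT item_count FROM public.flyers WHERE flyer_id = 1; -- expect the old count + 1
ROLLBACK;
```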

@@ -1615,27 +1883,55 @@ DROP FUNCTION IF EXISTS public.log_new_flyer();

CREATE OR REPLACE FUNCTION public.log_new_flyer()
RETURNS TRIGGER AS $$
DECLARE
    v_store_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'flyer_id', NEW.flyer_id,
        'store_id', NEW.store_id,
        'uploaded_by', NEW.uploaded_by,
        'valid_from', NEW.valid_from,
        'valid_to', NEW.valid_to
    );

    -- If the flyer was uploaded by a registered user, award the 'First-Upload' achievement.
    -- The award_achievement function handles checking if the user already has it.
    IF NEW.uploaded_by IS NOT NULL THEN
        PERFORM public.award_achievement(NEW.uploaded_by, 'First-Upload');
    END IF;

    -- Get store name (Tier 3 logging: Log if store lookup fails)
    SELECT name INTO v_store_name FROM public.stores WHERE store_id = NEW.store_id;
    IF v_store_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_flyer',
            'Store not found for flyer',
            v_context);
        v_store_name := 'Unknown Store';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.uploaded_by, -- Log the user who uploaded it
        'flyer_uploaded',
        'A new flyer for ' || (SELECT name FROM public.stores WHERE store_id = NEW.store_id) || ' has been uploaded.',
        'A new flyer for ' || v_store_name || ' has been uploaded.',
        'file-text',
        jsonb_build_object(
            'flyer_id', NEW.flyer_id,
            'store_name', (SELECT name FROM public.stores WHERE store_id = NEW.store_id),
            'store_name', v_store_name,
            'valid_from', to_char(NEW.valid_from, 'YYYY-MM-DD'),
            'valid_to', to_char(NEW.valid_to, 'YYYY-MM-DD')
        )
    );
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'log_new_flyer',
            'Unexpected error in flyer activity logging: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -1650,14 +1946,41 @@ DROP FUNCTION IF EXISTS public.log_new_favorite_recipe();

CREATE OR REPLACE FUNCTION public.log_new_favorite_recipe()
RETURNS TRIGGER AS $$
DECLARE
    v_user_name TEXT;
    v_recipe_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'user_id', NEW.user_id,
        'recipe_id', NEW.recipe_id
    );

    -- Get user name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_user_name FROM public.profiles WHERE user_id = NEW.user_id;
    IF v_user_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_favorite_recipe',
            'Profile not found for user',
            v_context);
        v_user_name := 'Unknown User';
    END IF;

    -- Get recipe name (Tier 3 logging: Log if recipe lookup fails)
    SELECT name INTO v_recipe_name FROM public.recipes WHERE recipe_id = NEW.recipe_id;
    IF v_recipe_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_favorite_recipe',
            'Recipe not found',
            v_context);
        v_recipe_name := 'Unknown Recipe';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.user_id,
        'recipe_favorited',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.user_id) || ' favorited the recipe: ' || (SELECT name FROM public.recipes WHERE recipe_id = NEW.recipe_id),
        v_user_name || ' favorited the recipe: ' || v_recipe_name,
        'heart',
        jsonb_build_object(
            'recipe_id', NEW.recipe_id
        )
    );
@@ -1665,6 +1988,12 @@ BEGIN
    -- Award 'First Favorite' achievement.
    PERFORM public.award_achievement(NEW.user_id, 'First Favorite');
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'log_new_favorite_recipe',
            'Unexpected error in favorite recipe activity logging: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;
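
fn_log() itself is defined elsewhere in the schema; based on how it is called here, a plausible shape is the sketch below — the signature and the log format are assumptions:

```sql
-- Assumed helper shape: one structured JSON line per event, written to the
-- PostgreSQL log where the log pipeline can pick it up.
CREATE OR REPLACE FUNCTION public.fn_log(
    p_level TEXT,
    p_function_name TEXT,
    p_message TEXT,
    p_context JSONB DEFAULT '{}'::jsonb
) RETURNS VOID LANGUAGE plpgsql AS $fn$
BEGIN
    RAISE LOG '%', jsonb_build_object(
        'level', p_level,
        'function', p_function_name,
        'message', p_message,
        'context', p_context,
        'logged_at', clock_timestamp()
    )::text;
END;
$fn$;
```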

@@ -1679,16 +2008,44 @@ DROP FUNCTION IF EXISTS public.log_new_list_share();

CREATE OR REPLACE FUNCTION public.log_new_list_share()
RETURNS TRIGGER AS $$
DECLARE
    v_user_name TEXT;
    v_list_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'shared_by_user_id', NEW.shared_by_user_id,
        'shopping_list_id', NEW.shopping_list_id,
        'shared_with_user_id', NEW.shared_with_user_id
    );

    -- Get user name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_user_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id;
    IF v_user_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_list_share',
            'Profile not found for sharing user',
            v_context);
        v_user_name := 'Unknown User';
    END IF;

    -- Get list name (Tier 3 logging: Log if list lookup fails)
    SELECT name INTO v_list_name FROM public.shopping_lists WHERE shopping_list_id = NEW.shopping_list_id;
    IF v_list_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_list_share',
            'Shopping list not found',
            v_context);
        v_list_name := 'Unknown List';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.shared_by_user_id,
        'list_shared',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id) || ' shared a shopping list.',
        v_user_name || ' shared a shopping list.',
        'share-2',
        jsonb_build_object(
            'shopping_list_id', NEW.shopping_list_id,
            'list_name', (SELECT name FROM public.shopping_lists WHERE shopping_list_id = NEW.shopping_list_id),
            'list_name', v_list_name,
            'shared_with_user_id', NEW.shared_with_user_id
        )
    );
@@ -1696,6 +2053,12 @@ BEGIN
    -- Award 'List Sharer' achievement.
    PERFORM public.award_achievement(NEW.shared_by_user_id, 'List Sharer');
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'log_new_list_share',
            'Unexpected error in list share activity logging: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -1710,12 +2073,30 @@ DROP FUNCTION IF EXISTS public.log_new_recipe_collection_share();

CREATE OR REPLACE FUNCTION public.log_new_recipe_collection_share()
RETURNS TRIGGER AS $$
DECLARE
    v_user_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'shared_by_user_id', NEW.shared_by_user_id,
        'recipe_collection_id', NEW.recipe_collection_id,
        'shared_with_user_id', NEW.shared_with_user_id
    );

    -- Get user name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_user_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id;
    IF v_user_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_recipe_collection_share',
            'Profile not found for sharing user',
            v_context);
        v_user_name := 'Unknown User';
    END IF;

    -- Log the activity
    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.shared_by_user_id, 'recipe_collection_shared',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id) || ' shared a recipe collection.',
        v_user_name || ' shared a recipe collection.',
        'book',
        jsonb_build_object('collection_id', NEW.recipe_collection_id, 'shared_with_user_id', NEW.shared_with_user_id)
    );
@@ -1723,6 +2104,12 @@ BEGIN
    -- Award 'Recipe Sharer' achievement.
    PERFORM public.award_achievement(NEW.shared_by_user_id, 'Recipe Sharer');
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'log_new_recipe_collection_share',
            'Unexpected error in recipe collection share activity logging: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -1775,14 +2162,38 @@ DROP FUNCTION IF EXISTS public.increment_recipe_fork_count();

CREATE OR REPLACE FUNCTION public.increment_recipe_fork_count()
RETURNS TRIGGER AS $$
DECLARE
    v_rows_updated INTEGER;
    v_context JSONB;
BEGIN
    -- Only run if the recipe is a fork (original_recipe_id is not null).
    IF NEW.original_recipe_id IS NOT NULL THEN
        v_context := jsonb_build_object(
            'recipe_id', NEW.recipe_id,
            'original_recipe_id', NEW.original_recipe_id,
            'user_id', NEW.user_id
        );

        -- Tier 3 logging: Log if original recipe not found
        UPDATE public.recipes SET fork_count = fork_count + 1 WHERE recipe_id = NEW.original_recipe_id;
        GET DIAGNOSTICS v_rows_updated = ROW_COUNT;

        IF v_rows_updated = 0 THEN
            PERFORM fn_log('ERROR', 'increment_recipe_fork_count',
                'Original recipe not found for fork count increment',
                v_context);
        END IF;

        -- Award 'First Fork' achievement.
        PERFORM public.award_achievement(NEW.user_id, 'First Fork');
    END IF;
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'increment_recipe_fork_count',
            'Unexpected error incrementing fork count: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -1624,7 +1624,25 @@ RETURNS TABLE (
LANGUAGE plpgsql
SECURITY INVOKER -- Runs with the privileges of the calling user.
AS $$
DECLARE
    v_watched_items_count INTEGER;
    v_result_count INTEGER;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object('user_id', p_user_id);

    -- Tier 2 logging: Check if user has any watched items
    SELECT COUNT(*) INTO v_watched_items_count
    FROM public.user_watched_items
    WHERE user_id = p_user_id;

    IF v_watched_items_count = 0 THEN
        PERFORM fn_log('NOTICE', 'get_best_sale_prices_for_user',
            'User has no watched items',
            v_context);
        RETURN; -- Return empty result set
    END IF;

    RETURN QUERY
    WITH UserWatchedSales AS (
        -- This CTE gathers all sales from active flyers that match the user's watched items.
@@ -1633,7 +1651,7 @@ BEGIN
        mgi.name AS item_name,
        fi.price_in_cents,
        s.name AS store_name,
        f.flyer_id AS flyer_id,
        f.image_url AS flyer_image_url,
        f.icon_url AS flyer_icon_url,
        f.valid_from AS flyer_valid_from,
@@ -1642,10 +1660,10 @@ BEGIN
        ROW_NUMBER() OVER (PARTITION BY uwi.master_item_id ORDER BY fi.price_in_cents ASC, f.valid_to DESC, s.name ASC) as rn
    FROM
        public.user_watched_items uwi
        JOIN public.master_grocery_items mgi ON uwi.master_item_id = mgi.master_grocery_item_id
        JOIN public.flyer_items fi ON uwi.master_item_id = fi.master_item_id
        JOIN public.flyers f ON fi.flyer_id = f.flyer_id
        JOIN public.stores s ON f.store_id = s.store_id
    WHERE uwi.user_id = p_user_id
        AND f.valid_from <= CURRENT_DATE
        AND f.valid_to >= CURRENT_DATE
@@ -1655,6 +1673,20 @@ BEGIN
    SELECT uws.master_item_id, uws.item_name, uws.price_in_cents, uws.store_name, uws.flyer_id, uws.flyer_icon_url, uws.flyer_image_url, uws.flyer_valid_from, uws.flyer_valid_to
    FROM UserWatchedSales uws
    WHERE uws.rn = 1;

    -- Tier 2 logging: Check if any sales were found
    GET DIAGNOSTICS v_result_count = ROW_COUNT;
    IF v_result_count = 0 THEN
        PERFORM fn_log('NOTICE', 'get_best_sale_prices_for_user',
            'No sales found for watched items',
            v_context || jsonb_build_object('watched_items_count', v_watched_items_count));
    END IF;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'get_best_sale_prices_for_user',
            'Unexpected error getting best sale prices: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$;
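
The `rn = 1` filter is the standard greatest-n-per-group idiom; in miniature, using only tables visible in this diff:

```sql
-- Cheapest price per item, same ROW_NUMBER() pattern as above.
WITH ranked AS (
    SELECT fi.master_item_id, fi.price_in_cents,
           ROW_NUMBER() OVER (PARTITION BY fi.master_item_id
                              ORDER BY fi.price_in_cents ASC) AS rn
    FROM public.flyer_items fi
    WHERE fi.price_in_cents IS NOT NULL
)
SELECT master_item_id, price_in_cents
FROM ranked
WHERE rn = 1;
```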

@@ -1676,7 +1708,42 @@ RETURNS TABLE (
LANGUAGE plpgsql
SECURITY INVOKER -- Runs with the privileges of the calling user.
AS $$
DECLARE
    v_menu_plan_exists BOOLEAN;
    v_planned_meals_count INTEGER;
    v_result_count INTEGER;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'menu_plan_id', p_menu_plan_id,
        'user_id', p_user_id
    );

    -- Tier 2 logging: Check if menu plan exists and belongs to user
    SELECT EXISTS(
        SELECT 1 FROM public.menu_plans
        WHERE menu_plan_id = p_menu_plan_id AND user_id = p_user_id
    ) INTO v_menu_plan_exists;

    IF NOT v_menu_plan_exists THEN
        PERFORM fn_log('NOTICE', 'generate_shopping_list_for_menu_plan',
            'Menu plan not found or does not belong to user',
            v_context);
        RETURN; -- Return empty result set
    END IF;

    -- Tier 2 logging: Check if menu plan has any recipes
    SELECT COUNT(*) INTO v_planned_meals_count
    FROM public.planned_meals
    WHERE menu_plan_id = p_menu_plan_id;

    IF v_planned_meals_count = 0 THEN
        PERFORM fn_log('NOTICE', 'generate_shopping_list_for_menu_plan',
            'Menu plan has no recipes',
            v_context);
        RETURN; -- Return empty result set
    END IF;

    RETURN QUERY
    WITH RequiredIngredients AS (
        -- This CTE calculates the total quantity of each ingredient needed for the menu plan.
@@ -1714,6 +1781,20 @@ BEGIN
    WHERE
        -- Only include items that actually need to be purchased.
        GREATEST(0, req.total_required - COALESCE(pi.quantity, 0)) > 0;

    -- Tier 2 logging: Check if any items need to be purchased
    GET DIAGNOSTICS v_result_count = ROW_COUNT;
    IF v_result_count = 0 THEN
        PERFORM fn_log('NOTICE', 'generate_shopping_list_for_menu_plan',
            'All ingredients already in pantry (no shopping needed)',
            v_context || jsonb_build_object('planned_meals_count', v_planned_meals_count));
    END IF;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'generate_shopping_list_for_menu_plan',
            'Unexpected error generating shopping list: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$;
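
An illustrative call — the ids are placeholders, and the uuid type for the user parameter is an assumption:

```sql
SELECT *
FROM public.generate_shopping_list_for_menu_plan(
    p_menu_plan_id := 7,
    p_user_id      := '11111111-1111-1111-1111-111111111111'
);
```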

@@ -2006,10 +2087,14 @@ STABLE -- This function does not modify the database.
AS $$
DECLARE
    suggested_id BIGINT;
    best_score REAL;
    -- A similarity score between 0 and 1. A higher value means a better match.
    -- This threshold can be adjusted based on observed performance. 0.4 is a reasonable starting point.
    similarity_threshold REAL := 0.4;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object('flyer_item_name', p_flyer_item_name, 'similarity_threshold', similarity_threshold);

    WITH candidates AS (
        -- Search for matches in the primary master_grocery_items table
        SELECT
@@ -2028,7 +2113,14 @@ BEGIN
        WHERE alias % p_flyer_item_name
    )
    -- Select the master_item_id with the highest similarity score, provided it's above our threshold.
    SELECT master_item_id INTO suggested_id FROM candidates WHERE score >= similarity_threshold ORDER BY score DESC, master_item_id LIMIT 1;
    SELECT master_item_id, score INTO suggested_id, best_score FROM candidates WHERE score >= similarity_threshold ORDER BY score DESC, master_item_id LIMIT 1;

    -- Tier 2 logging: Log when no match found (anomaly detection)
    IF suggested_id IS NULL THEN
        PERFORM fn_log('INFO', 'suggest_master_item_for_flyer_item',
            'No master item match found for flyer item',
            v_context || jsonb_build_object('best_score', best_score));
    END IF;

    RETURN suggested_id;
END;
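
The `%` operator and the similarity scores come from the pg_trgm extension; a quick feel for how it scores (the `%` threshold defaults to 0.3 unless reconfigured):

```sql
-- Requires: CREATE EXTENSION IF NOT EXISTS pg_trgm;
SELECT similarity('green seedless grapes', 'Grapes Green Seedless') AS score,
       'green seedless grapes' % 'Grapes Green Seedless'            AS above_threshold;
```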

@@ -2049,49 +2141,85 @@ RETURNS TABLE(
    avg_rating NUMERIC,
    missing_ingredients_count BIGINT
)
LANGUAGE sql
LANGUAGE plpgsql
STABLE
SECURITY INVOKER
AS $$
WITH UserPantryItems AS (
    -- CTE 1: Get a distinct set of master item IDs from the user's pantry.
    SELECT master_item_id, quantity, unit
DECLARE
    v_pantry_item_count INTEGER;
    v_result_count INTEGER;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object('user_id', p_user_id);

    -- Tier 2 logging: Check if user has any pantry items
    SELECT COUNT(*) INTO v_pantry_item_count
    FROM public.pantry_items
    WHERE user_id = p_user_id AND quantity > 0
),
RecipeIngredientStats AS (
    -- CTE 2: For each recipe, count its total ingredients and how many of those are in the user's pantry.
    WHERE user_id = p_user_id AND quantity > 0;

    IF v_pantry_item_count = 0 THEN
        PERFORM fn_log('NOTICE', 'find_recipes_from_pantry',
            'User has empty pantry',
            v_context);
        RETURN; -- Return empty result set
    END IF;

    -- Execute the main query and return results
    RETURN QUERY
    WITH UserPantryItems AS (
        -- CTE 1: Get a distinct set of master item IDs from the user's pantry.
        SELECT pi.master_item_id, pi.quantity, pi.unit
        FROM public.pantry_items pi
        WHERE pi.user_id = p_user_id AND pi.quantity > 0
    ),
    RecipeIngredientStats AS (
        -- CTE 2: For each recipe, count its total ingredients and how many of those are in the user's pantry.
        SELECT
            ri.recipe_id,
            -- Count how many ingredients DO NOT meet the pantry requirements.
            -- An ingredient is missing if it's not in the pantry OR if the quantity is insufficient.
            -- The filter condition handles this logic.
            COUNT(*) FILTER (
                WHERE upi.master_item_id IS NULL -- The item is not in the pantry at all
                   OR upi.quantity < ri.quantity -- The user has the item, but not enough of it
            ) AS missing_ingredients_count
        FROM public.recipe_ingredients ri
        -- LEFT JOIN to the user's pantry on both item and unit.
        -- We only compare quantities if the units match (e.g., 'g' vs 'g').
        LEFT JOIN UserPantryItems upi
            ON ri.master_item_id = upi.master_item_id
            AND ri.unit = upi.unit
        GROUP BY ri.recipe_id
    )
    -- Final Step: Select recipes where the total ingredient count matches the pantry ingredient count.
    SELECT
        r.recipe_id,
        r.name,
        r.description,
        r.prep_time_minutes,
        r.cook_time_minutes,
        r.avg_rating,
        ris.missing_ingredients_count
    FROM public.recipes r
    JOIN RecipeIngredientStats ris ON r.recipe_id = ris.recipe_id
    -- Order by recipes with the fewest missing ingredients first, then by rating.
    -- Recipes with 0 missing ingredients are the ones that can be made.
    ORDER BY ris.missing_ingredients_count ASC, r.avg_rating DESC, r.name ASC;

    -- Tier 2 logging: Check if any recipes were found
    GET DIAGNOSTICS v_result_count = ROW_COUNT;
    IF v_result_count = 0 THEN
        PERFORM fn_log('NOTICE', 'find_recipes_from_pantry',
            'No recipes found matching pantry items',
            v_context || jsonb_build_object('pantry_item_count', v_pantry_item_count));
    END IF;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'find_recipes_from_pantry',
            'Unexpected error finding recipes from pantry: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$;

-- Function to suggest alternative units for a given pantry item.

@@ -2137,10 +2265,18 @@ RETURNS TABLE (
    recommendation_score NUMERIC,
    recommendation_reason TEXT
)
LANGUAGE sql
LANGUAGE plpgsql
STABLE
SECURITY INVOKER
AS $$
DECLARE
    v_count INTEGER;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object('user_id', p_user_id, 'limit', p_limit);

    -- Execute the recommendation query
    RETURN QUERY
    WITH UserHighRatedRecipes AS (
        -- CTE 1: Get recipes the user has rated 4 stars or higher.
        SELECT rr.recipe_id, rr.rating
@@ -2218,6 +2354,15 @@ ORDER BY
    r.rating_count DESC,
    r.name ASC
    LIMIT p_limit;

    -- Tier 2 logging: Log when no recommendations generated (anomaly detection)
    GET DIAGNOSTICS v_count = ROW_COUNT;
    IF v_count = 0 THEN
        PERFORM fn_log('INFO', 'recommend_recipes_for_user',
            'No recipe recommendations generated for user',
            v_context);
    END IF;
END;
$$;
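
An illustrative call — the id is a placeholder, while the parameter names match the body above:

```sql
SELECT *
FROM public.recommend_recipes_for_user(
    p_user_id := '11111111-1111-1111-1111-111111111111',
    p_limit   := 10
);
```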

-- Function to get a user's favorite recipes.

@@ -2879,7 +3024,15 @@ DECLARE
    flyer_valid_to DATE;
    current_summary_date DATE;
    flyer_location_id BIGINT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'flyer_item_id', NEW.flyer_item_id,
        'flyer_id', NEW.flyer_id,
        'master_item_id', NEW.master_item_id,
        'price_in_cents', NEW.price_in_cents
    );

    -- If the item could not be matched, add it to the unmatched queue for review.
    IF NEW.master_item_id IS NULL THEN
        INSERT INTO public.unmatched_flyer_items (flyer_item_id)
@@ -2897,6 +3050,14 @@ BEGIN
    FROM public.flyers
    WHERE flyer_id = NEW.flyer_id;

    -- Tier 3 logging: Log when flyer lookup fails
    IF flyer_valid_from IS NULL OR flyer_valid_to IS NULL THEN
        PERFORM fn_log('ERROR', 'update_price_history_on_flyer_item_insert',
            'Flyer not found or missing validity dates',
            v_context);
        RETURN NEW;
    END IF;

    -- This single, set-based query is much more performant than looping.
    -- It generates all date/location pairs and inserts/updates them in one operation.
    INSERT INTO public.item_price_history (master_item_id, summary_date, store_location_id, min_price_in_cents, max_price_in_cents, avg_price_in_cents, data_points_count)
@@ -2919,6 +3080,14 @@ BEGIN
        data_points_count = item_price_history.data_points_count + 1;

    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'update_price_history_on_flyer_item_insert',
            'Unexpected error in price history update: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -2981,6 +3150,30 @@ BEGIN
        AND iph.store_location_id = na.store_location_id;

    -- 4. Delete any history records that no longer have any data points.
    -- We need to recreate the CTE since CTEs are scoped to a single statement.
    WITH affected_days_and_locations AS (
        SELECT DISTINCT
            generate_series(f.valid_from, f.valid_to, '1 day'::interval)::date AS summary_date,
            fl.store_location_id
        FROM public.flyers f
        JOIN public.flyer_locations fl ON f.flyer_id = fl.flyer_id
        WHERE f.flyer_id = OLD.flyer_id
    ),
    new_aggregates AS (
        SELECT
            adl.summary_date,
            adl.store_location_id,
            MIN(fi.price_in_cents) AS min_price,
            MAX(fi.price_in_cents) AS max_price,
            ROUND(AVG(fi.price_in_cents))::int AS avg_price,
            COUNT(fi.flyer_item_id)::int AS data_points
        FROM affected_days_and_locations adl
        LEFT JOIN public.flyer_items fi ON fi.master_item_id = OLD.master_item_id AND fi.price_in_cents IS NOT NULL
        LEFT JOIN public.flyers f ON fi.flyer_id = f.flyer_id AND adl.summary_date BETWEEN f.valid_from AND f.valid_to
        LEFT JOIN public.flyer_locations fl ON fi.flyer_id = fl.flyer_id AND adl.store_location_id = fl.store_location_id
        WHERE fl.flyer_id IS NOT NULL
        GROUP BY adl.summary_date, adl.store_location_id
    )
    DELETE FROM public.item_price_history iph
    WHERE iph.master_item_id = OLD.master_item_id
    AND NOT EXISTS (
@@ -3003,22 +3196,45 @@ DROP FUNCTION IF EXISTS public.update_recipe_rating_aggregates();

CREATE OR REPLACE FUNCTION public.update_recipe_rating_aggregates()
RETURNS TRIGGER AS $$
DECLARE
    v_recipe_id BIGINT;
    v_rows_updated INTEGER;
    v_context JSONB;
BEGIN
    v_recipe_id := COALESCE(NEW.recipe_id, OLD.recipe_id);
    v_context := jsonb_build_object('recipe_id', v_recipe_id);

    UPDATE public.recipes
    SET
        avg_rating = (
            SELECT AVG(rating)
            FROM public.recipe_ratings
            WHERE recipe_id = COALESCE(NEW.recipe_id, OLD.recipe_id) -- This is correct, no change needed
            WHERE recipe_id = v_recipe_id
        ),
        rating_count = (
            SELECT COUNT(*)
            FROM public.recipe_ratings
            WHERE recipe_id = COALESCE(NEW.recipe_id, OLD.recipe_id) -- This is correct, no change needed
            WHERE recipe_id = v_recipe_id
        )
    WHERE recipe_id = COALESCE(NEW.recipe_id, OLD.recipe_id);
    WHERE recipe_id = v_recipe_id;

    -- Tier 3 logging: Log when recipe update fails
    GET DIAGNOSTICS v_rows_updated = ROW_COUNT;
    IF v_rows_updated = 0 THEN
        PERFORM fn_log('ERROR', 'update_recipe_rating_aggregates',
            'Recipe not found for rating aggregate update',
            v_context);
    END IF;

    RETURN NULL; -- The result is ignored since this is an AFTER trigger.
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'update_recipe_rating_aggregates',
            'Unexpected error in rating aggregate update: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -3033,12 +3249,30 @@ DROP FUNCTION IF EXISTS public.log_new_recipe();

CREATE OR REPLACE FUNCTION public.log_new_recipe()
RETURNS TRIGGER AS $$
DECLARE
    v_full_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'user_id', NEW.user_id,
        'recipe_id', NEW.recipe_id,
        'recipe_name', NEW.name
    );

    -- Get user's full name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_full_name FROM public.profiles WHERE user_id = NEW.user_id;
    IF v_full_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_recipe',
            'Profile not found for user creating recipe',
            v_context);
        v_full_name := 'Unknown User';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.user_id,
        'recipe_created',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.user_id) || ' created a new recipe: ' || NEW.name,
        v_full_name || ' created a new recipe: ' || NEW.name,
        'chef-hat',
        jsonb_build_object('recipe_id', NEW.recipe_id, 'recipe_name', NEW.name)
    );
@@ -3047,6 +3281,14 @@ BEGIN
    PERFORM public.award_achievement(NEW.user_id, 'First Recipe');

    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'log_new_recipe',
            'Unexpected error in recipe activity logging: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -3063,13 +3305,39 @@ DROP FUNCTION IF EXISTS public.update_flyer_item_count();

CREATE OR REPLACE FUNCTION public.update_flyer_item_count()
RETURNS TRIGGER AS $$
DECLARE
    v_rows_updated INTEGER;
    v_context JSONB;
    v_flyer_id BIGINT;
BEGIN
    -- Determine which flyer_id to use based on operation
    IF (TG_OP = 'INSERT') THEN
        v_flyer_id := NEW.flyer_id;
        v_context := jsonb_build_object('flyer_id', NEW.flyer_id, 'operation', 'INSERT');

        UPDATE public.flyers SET item_count = item_count + 1 WHERE flyer_id = NEW.flyer_id;
    ELSIF (TG_OP = 'DELETE') THEN
        v_flyer_id := OLD.flyer_id;
        v_context := jsonb_build_object('flyer_id', OLD.flyer_id, 'operation', 'DELETE');

        UPDATE public.flyers SET item_count = item_count - 1 WHERE flyer_id = OLD.flyer_id;
    END IF;

    -- Tier 3 logging: Log if flyer not found
    GET DIAGNOSTICS v_rows_updated = ROW_COUNT;
    IF v_rows_updated = 0 THEN
        PERFORM fn_log('ERROR', 'update_flyer_item_count',
            'Flyer not found for item count update',
            v_context);
    END IF;

    RETURN NULL; -- The result is ignored since this is an AFTER trigger.
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'update_flyer_item_count',
            'Unexpected error updating flyer item count: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -3085,27 +3353,55 @@ DROP FUNCTION IF EXISTS public.log_new_flyer();

CREATE OR REPLACE FUNCTION public.log_new_flyer()
RETURNS TRIGGER AS $$
DECLARE
    v_store_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'flyer_id', NEW.flyer_id,
        'store_id', NEW.store_id,
        'uploaded_by', NEW.uploaded_by,
        'valid_from', NEW.valid_from,
        'valid_to', NEW.valid_to
    );

    -- If the flyer was uploaded by a registered user, award the 'First-Upload' achievement.
    -- The award_achievement function handles checking if the user already has it.
    IF NEW.uploaded_by IS NOT NULL THEN
        PERFORM public.award_achievement(NEW.uploaded_by, 'First-Upload');
    END IF;

    -- Get store name (Tier 3 logging: Log if store lookup fails)
    SELECT name INTO v_store_name FROM public.stores WHERE store_id = NEW.store_id;
    IF v_store_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_flyer',
            'Store not found for flyer',
            v_context);
        v_store_name := 'Unknown Store';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.uploaded_by, -- Log the user who uploaded it
        'flyer_uploaded',
        'A new flyer for ' || (SELECT name FROM public.stores WHERE store_id = NEW.store_id) || ' has been uploaded.',
        'A new flyer for ' || v_store_name || ' has been uploaded.',
        'file-text',
        jsonb_build_object(
            'flyer_id', NEW.flyer_id,
            'store_name', (SELECT name FROM public.stores WHERE store_id = NEW.store_id),
            'store_name', v_store_name,
            'valid_from', to_char(NEW.valid_from, 'YYYY-MM-DD'),
            'valid_to', to_char(NEW.valid_to, 'YYYY-MM-DD')
        )
    );
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        -- Tier 3 logging: Log unexpected errors in trigger
        PERFORM fn_log('ERROR', 'log_new_flyer',
            'Unexpected error in flyer activity logging: ' || SQLERRM,
            v_context);
        -- Re-raise the exception to ensure trigger failure is visible
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -3120,12 +3416,39 @@ DROP FUNCTION IF EXISTS public.log_new_favorite_recipe();

CREATE OR REPLACE FUNCTION public.log_new_favorite_recipe()
RETURNS TRIGGER AS $$
DECLARE
    v_user_name TEXT;
    v_recipe_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'user_id', NEW.user_id,
        'recipe_id', NEW.recipe_id
    );

    -- Get user name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_user_name FROM public.profiles WHERE user_id = NEW.user_id;
    IF v_user_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_favorite_recipe',
            'Profile not found for user',
            v_context);
        v_user_name := 'Unknown User';
    END IF;

    -- Get recipe name (Tier 3 logging: Log if recipe lookup fails)
    SELECT name INTO v_recipe_name FROM public.recipes WHERE recipe_id = NEW.recipe_id;
    IF v_recipe_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_favorite_recipe',
            'Recipe not found',
            v_context);
        v_recipe_name := 'Unknown Recipe';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.user_id,
        'recipe_favorited',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.user_id) || ' favorited the recipe: ' || (SELECT name FROM public.recipes WHERE recipe_id = NEW.recipe_id),
        v_user_name || ' favorited the recipe: ' || v_recipe_name,
        'heart',
        jsonb_build_object(
            'recipe_id', NEW.recipe_id
@@ -3135,6 +3458,12 @@ BEGIN
    -- Award 'First Favorite' achievement.
    PERFORM public.award_achievement(NEW.user_id, 'First Favorite');
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'log_new_favorite_recipe',
            'Unexpected error in favorite recipe activity logging: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -3144,16 +3473,44 @@ DROP FUNCTION IF EXISTS public.log_new_list_share();

CREATE OR REPLACE FUNCTION public.log_new_list_share()
RETURNS TRIGGER AS $$
DECLARE
    v_user_name TEXT;
    v_list_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'shared_by_user_id', NEW.shared_by_user_id,
        'shopping_list_id', NEW.shopping_list_id,
        'shared_with_user_id', NEW.shared_with_user_id
    );

    -- Get user name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_user_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id;
    IF v_user_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_list_share',
            'Profile not found for sharing user',
            v_context);
        v_user_name := 'Unknown User';
    END IF;

    -- Get list name (Tier 3 logging: Log if list lookup fails)
    SELECT name INTO v_list_name FROM public.shopping_lists WHERE shopping_list_id = NEW.shopping_list_id;
    IF v_list_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_list_share',
            'Shopping list not found',
            v_context);
        v_list_name := 'Unknown List';
    END IF;

    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.shared_by_user_id,
        'list_shared',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id) || ' shared a shopping list.',
        v_user_name || ' shared a shopping list.',
        'share-2',
        jsonb_build_object(
            'shopping_list_id', NEW.shopping_list_id,
            'list_name', (SELECT name FROM public.shopping_lists WHERE shopping_list_id = NEW.shopping_list_id),
            'list_name', v_list_name,
            'shared_with_user_id', NEW.shared_with_user_id
        )
    );
@@ -3161,6 +3518,12 @@ BEGIN
    -- Award 'List Sharer' achievement.
    PERFORM public.award_achievement(NEW.shared_by_user_id, 'List Sharer');
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'log_new_list_share',
            'Unexpected error in list share activity logging: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -3169,12 +3532,30 @@ DROP FUNCTION IF EXISTS public.log_new_recipe_collection_share();

CREATE OR REPLACE FUNCTION public.log_new_recipe_collection_share()
RETURNS TRIGGER AS $$
DECLARE
    v_user_name TEXT;
    v_context JSONB;
BEGIN
    v_context := jsonb_build_object(
        'shared_by_user_id', NEW.shared_by_user_id,
        'recipe_collection_id', NEW.recipe_collection_id,
        'shared_with_user_id', NEW.shared_with_user_id
    );

    -- Get user name (Tier 3 logging: Log if profile lookup fails)
    SELECT full_name INTO v_user_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id;
    IF v_user_name IS NULL THEN
        PERFORM fn_log('ERROR', 'log_new_recipe_collection_share',
            'Profile not found for sharing user',
            v_context);
        v_user_name := 'Unknown User';
    END IF;

    -- Log the activity
    INSERT INTO public.activity_log (user_id, action, display_text, icon, details)
    VALUES (
        NEW.shared_by_user_id, 'recipe_collection_shared',
        (SELECT full_name FROM public.profiles WHERE user_id = NEW.shared_by_user_id) || ' shared a recipe collection.',
        v_user_name || ' shared a recipe collection.',
        'book',
        jsonb_build_object('collection_id', NEW.recipe_collection_id, 'shared_with_user_id', NEW.shared_with_user_id)
    );
@@ -3182,6 +3563,12 @@ BEGIN
    -- Award 'Recipe Sharer' achievement.
    PERFORM public.award_achievement(NEW.shared_by_user_id, 'Recipe Sharer');
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'log_new_recipe_collection_share',
            'Unexpected error in recipe collection share activity logging: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -3244,14 +3631,38 @@ DROP FUNCTION IF EXISTS public.increment_recipe_fork_count();

CREATE OR REPLACE FUNCTION public.increment_recipe_fork_count()
RETURNS TRIGGER AS $$
DECLARE
    v_rows_updated INTEGER;
    v_context JSONB;
BEGIN
    -- Only run if the recipe is a fork (original_recipe_id is not null).
    IF NEW.original_recipe_id IS NOT NULL THEN
        v_context := jsonb_build_object(
            'recipe_id', NEW.recipe_id,
            'original_recipe_id', NEW.original_recipe_id,
            'user_id', NEW.user_id
        );

        -- Tier 3 logging: Log if original recipe not found
        UPDATE public.recipes SET fork_count = fork_count + 1 WHERE recipe_id = NEW.original_recipe_id;
        GET DIAGNOSTICS v_rows_updated = ROW_COUNT;

        IF v_rows_updated = 0 THEN
            PERFORM fn_log('ERROR', 'increment_recipe_fork_count',
                'Original recipe not found for fork count increment',
                v_context);
        END IF;

        -- Award 'First Fork' achievement.
        PERFORM public.award_achievement(NEW.user_id, 'First Fork');
    END IF;
    RETURN NEW;
EXCEPTION
    WHEN OTHERS THEN
        PERFORM fn_log('ERROR', 'increment_recipe_fork_count',
            'Unexpected error incrementing fork count: ' || SQLERRM,
            v_context);
        RAISE;
END;
$$ LANGUAGE plpgsql;

@@ -224,11 +224,11 @@ describe('AuthService', () => {
      expect(result).toEqual({
        newUserProfile: mockUserProfile,
        accessToken: 'access-token',
        refreshToken: 'mocked_random_id',
        refreshToken: expect.any(String),
      });
      expect(userRepo.saveRefreshToken).toHaveBeenCalledWith(
        'user-123',
        'mocked_random_id',
        expect.any(String),
        reqLog,
      );
    });
@@ -254,7 +254,7 @@ describe('AuthService', () => {
      );
      expect(result).toEqual({
        accessToken: 'access-token',
        refreshToken: 'mocked_random_id',
        refreshToken: expect.any(String),
      });
    });
  });
@@ -293,10 +293,10 @@ describe('AuthService', () => {
      );
      expect(sendPasswordResetEmail).toHaveBeenCalledWith(
        'test@example.com',
        expect.stringContaining('/reset-password/mocked_random_id'),
        expect.stringMatching(/\/reset-password\/[a-f0-9]+/),
        reqLog,
      );
      expect(result).toBe('mocked_random_id');
      expect(result).toEqual(expect.any(String));
    });

    it('should log warning and return undefined for non-existent user', async () => {
@@ -333,7 +333,7 @@ describe('AuthService', () => {
        { emailError },
        `Email send failure during password reset for user`,
      );
      expect(result).toBe('mocked_random_id');
      expect(result).toEqual(expect.any(String));
    });

    it('should re-throw RepositoryError', async () => {

@@ -326,7 +326,7 @@ describe('E2E Deals and Price Tracking Journey', () => {
      expect(watchedMilk.category_id).toBe(dairyEggsCategoryId);

      // Step 6: Get best prices for watched items
      const bestPricesResponse = await authedFetch('/users/deals/best-watched-prices', {
      const bestPricesResponse = await authedFetch('/deals/best-watched-prices', {
        method: 'GET',
        token: authToken,
      });
@@ -405,7 +405,7 @@ describe('E2E Deals and Price Tracking Journey', () => {
      expect(otherWatchedData.data.length).toBe(0);

      // Other user's deals should be empty
      const otherDealsResponse = await authedFetch('/users/deals/best-watched-prices', {
      const otherDealsResponse = await authedFetch('/deals/best-watched-prices', {
        method: 'GET',
        token: otherToken,
      });

@@ -164,38 +164,8 @@ vi.mock('jsonwebtoken', () => ({
// Mock 'bcrypt'. The service uses `import * as bcrypt from 'bcrypt'`.
vi.mock('bcrypt');

// Mock 'crypto'. Supports both default import and named imports.
// Default: import crypto from 'crypto'; crypto.randomUUID()
// Named: import { randomUUID } from 'crypto'; randomUUID()
vi.mock('crypto', async () => {
  const actual = await vi.importActual<typeof import('crypto')>('crypto');
  const mockRandomUUID = vi.fn(() => actual.randomUUID());
  const mockRandomBytes = vi.fn((size: number) => {
    const buffer = actual.randomBytes(size);
    // Add mocked toString for backward compatibility
    buffer.toString = vi.fn().mockImplementation((encoding) => {
      const id = 'mocked_random_id';
      console.log(
        `[DEBUG] tests-setup-unit.ts: crypto.randomBytes mock returning "${id}" for encoding "${encoding}"`,
      );
      return id;
    });
    return buffer;
  });

  return {
    ...actual,
    // Named exports for: import { randomUUID } from 'crypto'
    randomUUID: mockRandomUUID,
    randomBytes: mockRandomBytes,
    // Default export for: import crypto from 'crypto'
    default: {
      ...actual,
      randomUUID: mockRandomUUID,
      randomBytes: mockRandomBytes,
    },
  };
});
// NOTE: We do NOT mock the 'crypto' module anymore. It works correctly without mocking in tests.
// The previous attempt to mock it caused issues because vi.importActual returned an empty object.

// --- Global Mocks ---