Compare commits

...

5 Commits

| Author | SHA1 | Message | Checks | Date |
| --- | --- | --- | --- | --- |
| Gitea Actions | a1f52544d0 | ci: Bump version to 0.0.20 [skip ci] | | 2025-12-24 06:49:05 +05:00 |
| | 2334359756 | Merge branch 'main' of https://gitea.projectium.com/torbo/flyer-crawler.projectium.com | All checks were successful: Deploy to Test Environment / deploy-to-test (push) succeeded in 14m31s | 2025-12-23 17:48:07 -08:00 |
| | 406954ca06 | database fixes due to adding foreign keys and seed script improvement | | 2025-12-23 17:48:00 -08:00 |
| Gitea Actions | 95d441be98 | ci: Bump version to 0.0.19 [skip ci] | | 2025-12-24 06:22:03 +05:00 |
| | 186ed484b7 | last test fixes for upcoming V0.1 + pretty | Some checks failed: Deploy to Test Environment / deploy-to-test (push) failing after 2m40s | 2025-12-23 17:20:51 -08:00 |
31 changed files with 389 additions and 344 deletions

View File

@@ -60,4 +60,4 @@ jobs:
uses: actions/upload-artifact@v3
with:
name: database-backup
path: ${{ env.backup_filename }}
path: ${{ env.backup_filename }}

View File

@@ -144,4 +144,4 @@ jobs:
find "$APP_PATH/flyer-images" -type f -name '*-test-flyer-image.*' -delete
find "$APP_PATH/flyer-images/icons" -type f -name '*-test-flyer-image.*' -delete
find "$APP_PATH/flyer-images/archive" -mindepth 1 -maxdepth 1 -type f -delete || echo "Archive directory not found, skipping."
echo "✅ Flyer asset directories cleared."
echo "✅ Flyer asset directories cleared."

View File

@@ -130,4 +130,4 @@ jobs:
find "$APP_PATH/flyer-images" -mindepth 1 -type f -delete
find "$APP_PATH/flyer-images/icons" -mindepth 1 -type f -delete
find "$APP_PATH/flyer-images/archive" -mindepth 1 -type f -delete || echo "Archive directory not found, skipping."
echo "✅ Test flyer asset directories cleared."
echo "✅ Test flyer asset directories cleared."

View File

@@ -25,7 +25,7 @@ jobs:
DB_USER: ${{ secrets.DB_USER }}
DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
DB_NAME: ${{ secrets.DB_DATABASE_PROD }}
BACKUP_DIR: "/var/www/backups" # Define a dedicated directory for backups
BACKUP_DIR: '/var/www/backups' # Define a dedicated directory for backups
steps:
- name: Validate Secrets and Inputs
@@ -92,4 +92,4 @@ jobs:
echo "Restarting application server..."
cd /var/www/flyer-crawler.projectium.com
pm2 startOrReload ecosystem.config.cjs --env production && pm2 save
echo "✅ Application server restarted."
echo "✅ Application server restarted."

View File

@@ -34,7 +34,7 @@ We will adopt a strict, consistent error-handling contract for the service and r
**Robustness**: Eliminates an entire class of bugs where `undefined` is passed to `res.json()`, preventing incorrect `500` errors.
**Consistency & Predictability**: All data-fetching methods now have a predictable contract. They either return the expected data or throw a specific, typed error.
**Developer Experience**: Route handlers become simpler, cleaner, and easier to write correctly. The cognitive load on developers is reduced as they no longer need to remember to check for `undefined`.
**Improved Testability**: Tests become more reliable and realistic. Mocks can now throw the *exact* error type (`new NotFoundError()`) that the real implementation would, ensuring tests accurately reflect the application's behavior.
**Improved Testability**: Tests become more reliable and realistic. Mocks can now throw the _exact_ error type (`new NotFoundError()`) that the real implementation would, ensuring tests accurately reflect the application's behavior.
**Centralized Control**: Error-to-HTTP-status logic is centralized in the `errorHandler` middleware, making it easy to manage and modify error responses globally.
### Negative

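The hunk above only touches formatting, but the ADR it belongs to defines the error contract referenced throughout this PR. As a reading aid, here is a minimal sketch of that contract, assuming an Express app and the `NotFoundError`/`errorHandler` names used in the text; it is not the project's actual implementation.

```ts
// Sketch only: assumed shapes for the ADR-001 contract, not code from this PR.
import type { Request, Response, NextFunction } from 'express';

export class NotFoundError extends Error {
  readonly status = 404;
  constructor(message = 'Resource not found') {
    super(message);
    this.name = 'NotFoundError';
  }
}

// Error-to-HTTP-status mapping lives in one place, as the ADR describes.
export function errorHandler(err: Error, _req: Request, res: Response, _next: NextFunction) {
  if (err instanceof NotFoundError) {
    return res.status(err.status).json({ error: err.message });
  }
  return res.status(500).json({ error: 'Internal Server Error' });
}
```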
View File

@@ -10,21 +10,19 @@ Following the standardization of error handling in ADR-001, the next most common
This manual approach has several drawbacks:
**Repetitive Boilerplate**: The `try/catch/finally` block for transaction management is duplicated across multiple files.
**Error-Prone**: It is easy to forget to `client.release()` in all code paths, which can lead to connection pool exhaustion and bring down the application.
3. **Poor Composability**: It is difficult to compose multiple repository methods into a single, atomic "Unit of Work". For example, a service function that needs to update a user's points and create a budget in a single transaction cannot easily do so if both underlying repository methods create their own transactions.
**Error-Prone**: It is easy to forget to `client.release()` in all code paths, which can lead to connection pool exhaustion and bring down the application. 3. **Poor Composability**: It is difficult to compose multiple repository methods into a single, atomic "Unit of Work". For example, a service function that needs to update a user's points and create a budget in a single transaction cannot easily do so if both underlying repository methods create their own transactions.
## Decision
We will implement a standardized "Unit of Work" pattern through a high-level `withTransaction` helper function. This function will abstract away the complexity of transaction management.
1. **`withTransaction` Helper**: A new helper function, `withTransaction<T>(callback: (client: PoolClient) => Promise<T>): Promise<T>`, will be created. This function will be responsible for:
* Acquiring a client from the database pool.
* Starting a transaction (`BEGIN`).
* Executing the `callback` function, passing the transactional client to it.
* If the callback succeeds, it will `COMMIT` the transaction.
* If the callback throws an error, it will `ROLLBACK` the transaction and re-throw the error.
* In all cases, it will `RELEASE` the client back to the pool.
- Acquiring a client from the database pool.
- Starting a transaction (`BEGIN`).
- Executing the `callback` function, passing the transactional client to it.
- If the callback succeeds, it will `COMMIT` the transaction.
- If the callback throws an error, it will `ROLLBACK` the transaction and re-throw the error.
- In all cases, it will `RELEASE` the client back to the pool.
2. **Repository Method Signature**: Repository methods that need to be part of a transaction will be updated to optionally accept a `PoolClient` in their constructor or as a method parameter. By default, they will use the global pool. When called from within a `withTransaction` block, they will be passed the transactional client.
3. **Service Layer Orchestration**: Service-layer functions that orchestrate multi-step operations will use `withTransaction` to ensure atomicity. They will instantiate or call repository methods, providing them with the transactional client from the callback.
@@ -40,7 +38,7 @@ async function registerUserAndCreateDefaultList(userData) {
const shoppingRepo = new ShoppingRepository(client);
const newUser = await userRepo.createUser(userData);
await shoppingRepo.createShoppingList(newUser.user_id, "My First List");
await shoppingRepo.createShoppingList(newUser.user_id, 'My First List');
return newUser;
});

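The decision above specifies the helper's behaviour step by step, so a sketch is easy to reconstruct. Assuming `pg` and a shared `pool`, it could look like this (the project's real helper is not part of this diff):

```ts
// Sketch of the withTransaction helper described in the ADR (assumes a shared pg Pool).
import { Pool, PoolClient } from 'pg';

const pool = new Pool(); // connection settings taken from PG* environment variables here

export async function withTransaction<T>(
  callback: (client: PoolClient) => Promise<T>,
): Promise<T> {
  const client = await pool.connect(); // acquire a client from the pool
  try {
    await client.query('BEGIN');
    const result = await callback(client); // run the unit of work on the transactional client
    await client.query('COMMIT');
    return result;
  } catch (err) {
    await client.query('ROLLBACK'); // undo partial work before re-throwing
    throw err;
  } finally {
    client.release(); // always return the client to the pool
  }
}
```

The `registerUserAndCreateDefaultList` example in the hunk above is exactly the kind of caller this helper is meant to serve.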
View File

@@ -20,8 +20,8 @@ We will adopt a schema-based approach for input validation using the `zod` libra
1. **Adopt `zod` for Schema Definition**: We will use `zod` to define clear, type-safe schemas for the `params`, `query`, and `body` of each API request. `zod` provides powerful and declarative validation rules and automatically infers TypeScript types.
2. **Create a Reusable Validation Middleware**: A generic `validateRequest(schema)` middleware will be created. This middleware will take a `zod` schema, parse the incoming request against it, and handle success and error cases.
* On successful validation, the parsed and typed data will be attached to the `req` object (e.g., `req.body` will be replaced with the parsed body), and `next()` will be called.
* On validation failure, the middleware will call `next()` with a custom `ValidationError` containing a structured list of issues, which `ADR-001`'s `errorHandler` can then format into a user-friendly `400 Bad Request` response.
- On successful validation, the parsed and typed data will be attached to the `req` object (e.g., `req.body` will be replaced with the parsed body), and `next()` will be called.
- On validation failure, the middleware will call `next()` with a custom `ValidationError` containing a structured list of issues, which `ADR-001`'s `errorHandler` can then format into a user-friendly `400 Bad Request` response.
3. **Refactor Routes**: All route handlers will be refactored to use this new middleware, removing all manual validation logic.
@@ -46,18 +46,18 @@ const getFlyerSchema = z.object({
type GetFlyerRequest = z.infer<typeof getFlyerSchema>;
// 3. Apply the middleware and use an inline cast for the request
router.get('/:id', validateRequest(getFlyerSchema), (async (req, res, next) => {
// Cast 'req' to the inferred type.
// This provides full type safety for params, query, and body.
const { params } = req as unknown as GetFlyerRequest;
router.get('/:id', validateRequest(getFlyerSchema), async (req, res, next) => {
// Cast 'req' to the inferred type.
// This provides full type safety for params, query, and body.
const { params } = req as unknown as GetFlyerRequest;
try {
const flyer = await db.flyerRepo.getFlyerById(params.id); // params.id is 'number'
res.json(flyer);
} catch (error) {
next(error);
}
}));
try {
const flyer = await db.flyerRepo.getFlyerById(params.id); // params.id is 'number'
res.json(flyer);
} catch (error) {
next(error);
}
});
```
## Consequences

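The route-handler diff above uses `validateRequest(getFlyerSchema)` without showing the middleware itself. A hedged sketch of what the ADR describes, with a stand-in `ValidationError` (the project's real class is defined elsewhere and is not part of this diff):

```ts
// Sketch of the validateRequest middleware described in the ADR; shapes are assumptions.
import { z } from 'zod';
import type { Request, Response, NextFunction } from 'express';

// Stand-in for the project's ValidationError from ADR-001 (not shown in this diff).
class ValidationError extends Error {
  constructor(public issues: z.ZodIssue[]) {
    super('Request validation failed');
    this.name = 'ValidationError';
  }
}

export const validateRequest =
  (schema: z.ZodTypeAny) => (req: Request, _res: Response, next: NextFunction) => {
    const result = schema.safeParse({ params: req.params, query: req.query, body: req.body });
    if (!result.success) {
      // Let ADR-001's errorHandler turn this into a 400 with structured issues.
      return next(new ValidationError(result.error.issues));
    }
    // Replace raw values with their parsed, typed counterparts (query is left
    // untouched here because newer Express versions expose it as a getter).
    req.params = result.data.params;
    req.body = result.data.body;
    return next();
  };
```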
View File

@@ -20,9 +20,9 @@ We will adopt a standardized, application-wide structured logging policy. All lo
**Request-Scoped Logger with Context**: We will create a middleware that runs at the beginning of the request lifecycle. This middleware will:
* Generate a unique `request_id` for each incoming request.
* Create a request-scoped logger instance (a "child logger") that automatically includes the `request_id`, `user_id` (if authenticated), and `ip_address` in every log message it generates.
* Attach this child logger to the `req` object (e.g., `req.log`).
- Generate a unique `request_id` for each incoming request.
- Create a request-scoped logger instance (a "child logger") that automatically includes the `request_id`, `user_id` (if authenticated), and `ip_address` in every log message it generates.
- Attach this child logger to the `req` object (e.g., `req.log`).
**Mandatory Use of Request-Scoped Logger**: All route handlers and any service functions called by them **MUST** use the request-scoped logger (`req.log`) instead of the global logger instance. This ensures all logs for a given request are automatically correlated.
@@ -32,9 +32,9 @@ We will adopt a standardized, application-wide structured logging policy. All lo
**Standardized Logging Practices**:
**INFO**: Log key business events, such as `User logged in` or `Flyer processed`.
**WARN**: Log recoverable errors or unusual situations that do not break the request, such as `Client Error: 404 on GET /api/non-existent-route` or `Retrying failed database connection`.
**ERROR**: Log only unhandled or server-side errors that cause a request to fail (typically handled by the `errorHandler`). Avoid logging expected client errors (like 4xx) at this level.
**DEBUG**: Log detailed diagnostic information useful during development, such as function entry/exit points or variable states.
**WARN**: Log recoverable errors or unusual situations that do not break the request, such as `Client Error: 404 on GET /api/non-existent-route` or `Retrying failed database connection`.
**ERROR**: Log only unhandled or server-side errors that cause a request to fail (typically handled by the `errorHandler`). Avoid logging expected client errors (like 4xx) at this level.
**DEBUG**: Log detailed diagnostic information useful during development, such as function entry/exit points or variable states.
### Example Usage
@@ -59,15 +59,15 @@ export const requestLogger = (req, res, next) => {
// In a route handler:
router.get('/:id', async (req, res, next) => {
// Use the request-scoped logger
req.log.info({ flyerId: req.params.id }, 'Fetching flyer by ID');
try {
// ... business logic ...
res.json(flyer);
} catch (error) {
// The error itself will be logged with full context by the errorHandler
next(error);
}
// Use the request-scoped logger
req.log.info({ flyerId: req.params.id }, 'Fetching flyer by ID');
try {
// ... business logic ...
res.json(flyer);
} catch (error) {
// The error itself will be logged with full context by the errorHandler
next(error);
}
});
```

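The example hunk above shows `requestLogger` only from the route handler's side. For orientation, a minimal pino-based sketch of the middleware the ADR describes; the `req.user` shape is an assumption:

```ts
// Sketch of the request-scoped logger middleware; not the project's actual implementation.
import pino, { type Logger } from 'pino';
import { randomUUID } from 'node:crypto';
import type { Request, Response, NextFunction } from 'express';

declare module 'express-serve-static-core' {
  interface Request {
    log: Logger;
  }
}

const baseLogger = pino();

export function requestLogger(req: Request, _res: Response, next: NextFunction) {
  // Every line logged through req.log automatically carries these correlation fields.
  req.log = baseLogger.child({
    request_id: randomUUID(),
    user_id: (req as Request & { user?: { user_id?: string } }).user?.user_id,
    ip_address: req.ip,
  });
  next();
}
```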
View File

@@ -14,5 +14,5 @@ We will formalize a centralized Role-Based Access Control (RBAC) or Attribute-Ba
## Consequences
* **Positive**: Ensures authorization logic is consistent, easy to audit, and decoupled from business logic. Improves security by centralizing access control.
* **Negative**: Requires a significant refactoring effort to integrate the new authorization system across all protected routes and features. Introduces a new dependency if an external library is chosen.
- **Positive**: Ensures authorization logic is consistent, easy to audit, and decoupled from business logic. Improves security by centralizing access control.
- **Negative**: Requires a significant refactoring effort to integrate the new authorization system across all protected routes and features. Introduces a new dependency if an external library is chosen.

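This ADR is intentionally high-level, so purely as an illustration (the role names and `req.user` shape are invented for the sketch, not taken from this PR), centralized checks often end up as a small middleware factory:

```ts
// Illustrative only: one way to centralize role checks, per the RBAC ADR above.
import type { Request, Response, NextFunction } from 'express';

type Role = 'admin' | 'user';

export const requireRole =
  (...allowed: Role[]) =>
  (req: Request, res: Response, next: NextFunction) => {
    const role = (req as Request & { user?: { role?: Role } }).user?.role;
    if (!role || !allowed.includes(role)) {
      return res.status(403).json({ error: 'Forbidden' }); // authorization failure, not authentication
    }
    return next();
  };

// Hypothetical usage: router.delete('/api/flyers/:id', requireRole('admin'), deleteFlyerHandler);
```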
View File

@@ -14,5 +14,5 @@ We will establish a formal Design System and Component Library. This will involv
## Consequences
* **Positive**: Ensures a consistent and high-quality user interface. Accelerates frontend development by providing reusable, well-documented components. Improves maintainability and reduces technical debt.
* **Negative**: Requires an initial investment in setting up Storybook and migrating existing components. Adds a new dependency and a new workflow for frontend development.
- **Positive**: Ensures a consistent and high-quality user interface. Accelerates frontend development by providing reusable, well-documented components. Improves maintainability and reduces technical debt.
- **Negative**: Requires an initial investment in setting up Storybook and migrating existing components. Adds a new dependency and a new workflow for frontend development.

View File

@@ -14,5 +14,5 @@ We will adopt a dedicated database migration tool, such as **`node-pg-migrate`**
## Consequences
* **Positive**: Provides a safe, repeatable, and reversible way to evolve the database schema. Improves team collaboration on database changes. Reduces the risk of data loss or downtime during deployments.
* **Negative**: Requires an initial setup and learning curve for the chosen migration tool. All future schema changes must adhere to the migration workflow.
- **Positive**: Provides a safe, repeatable, and reversible way to evolve the database schema. Improves team collaboration on database changes. Reduces the risk of data loss or downtime during deployments.
- **Negative**: Requires an initial setup and learning curve for the chosen migration tool. All future schema changes must adhere to the migration workflow.

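As a sketch of the workflow this ADR proposes (and nothing more — the table and column below are hypothetical, not changes from this PR), a `node-pg-migrate` migration pairs an `up` with a reversible `down`:

```ts
// Hypothetical node-pg-migrate migration, illustrating the ADR's proposed workflow only.
import { MigrationBuilder } from 'node-pg-migrate';

export async function up(pgm: MigrationBuilder): Promise<void> {
  // Example change: add a nullable column; real migrations would live in the migrations directory.
  pgm.addColumn('flyers', {
    source_url: { type: 'text', notNull: false },
  });
}

export async function down(pgm: MigrationBuilder): Promise<void> {
  pgm.dropColumn('flyers', 'source_url');
}
```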
View File

@@ -14,5 +14,5 @@ We will standardize the deployment process by containerizing the application usi
## Consequences
* **Positive**: Ensures consistency between development and production environments. Simplifies the setup for new developers. Improves portability and scalability of the application.
* **Negative**: Requires learning Docker and containerization concepts. Adds `Dockerfile` and `docker-compose.yml` to the project's configuration.
- **Positive**: Ensures consistency between development and production environments. Simplifies the setup for new developers. Improves portability and scalability of the application.
- **Negative**: Requires learning Docker and containerization concepts. Adds `Dockerfile` and `docker-compose.yml` to the project's configuration.

View File

@@ -18,5 +18,5 @@ We will implement a multi-layered security approach for the API:
## Consequences
* **Positive**: Significantly improves the application's security posture against common web vulnerabilities like XSS, clickjacking, and brute-force attacks.
* **Negative**: Requires careful configuration of CORS and rate limits to avoid blocking legitimate traffic. Content-Security-Policy can be complex to configure correctly.
- **Positive**: Significantly improves the application's security posture against common web vulnerabilities like XSS, clickjacking, and brute-force attacks.
- **Negative**: Requires careful configuration of CORS and rate limits to avoid blocking legitimate traffic. Content-Security-Policy can be complex to configure correctly.

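A compact sketch of the layers this ADR lists, using the common `helmet`, `cors`, and `express-rate-limit` packages; the origin and limits below are placeholders, not values from this PR:

```ts
// Illustrative hardening sketch; values are placeholders, not project configuration.
import express from 'express';
import helmet from 'helmet';
import cors from 'cors';
import rateLimit from 'express-rate-limit';

const app = express();

app.use(helmet()); // security headers, including a default Content-Security-Policy
app.use(cors({ origin: ['https://flyer-crawler.projectium.com'] })); // allow only known frontends
app.use(rateLimit({ windowMs: 15 * 60 * 1000, max: 100 })); // per-IP request budget per window
```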
View File

@@ -14,5 +14,5 @@ We will formalize the end-to-end CI/CD process. This ADR will define the project
## Consequences
* **Positive**: Automates quality control and creates a safe, repeatable path to production. Increases development velocity and reduces deployment-related errors.
* **Negative**: Initial setup effort for the CI/CD pipeline. May slightly increase the time to merge code due to mandatory checks.
- **Positive**: Automates quality control and creates a safe, repeatable path to production. Increases development velocity and reduces deployment-related errors.
- **Negative**: Initial setup effort for the CI/CD pipeline. May slightly increase the time to merge code due to mandatory checks.

View File

@@ -14,5 +14,5 @@ We will adopt **OpenAPI (Swagger)** for API documentation. We will use tools (e.
## Consequences
* **Positive**: Creates a single source of truth for API documentation that stays in sync with the code. Enables auto-generation of client SDKs and simplifies testing.
* **Negative**: Requires developers to maintain JSDoc annotations on all routes. Adds a build step and new dependencies to the project.
- **Positive**: Creates a single source of truth for API documentation that stays in sync with the code. Enables auto-generation of client SDKs and simplifies testing.
- **Negative**: Requires developers to maintain JSDoc annotations on all routes. Adds a build step and new dependencies to the project.

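The ADR's tool name is truncated in this view; one common pairing that matches the "JSDoc annotations on all routes" approach is `swagger-jsdoc` with `swagger-ui-express`, sketched here purely as an assumption about the tooling:

```ts
// Assumed tooling (swagger-jsdoc + swagger-ui-express); the ADR's actual choice is elided above.
import express from 'express';
import swaggerJsdoc from 'swagger-jsdoc';
import swaggerUi from 'swagger-ui-express';

const app = express();

const spec = swaggerJsdoc({
  definition: {
    openapi: '3.0.0',
    info: { title: 'Flyer Crawler API', version: '0.0.20' },
  },
  apis: ['./src/routes/*.ts'], // route files carrying OpenAPI JSDoc blocks (path is a guess)
});

app.use('/api/docs', swaggerUi.serve, swaggerUi.setup(spec));
```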
View File

@@ -14,5 +14,5 @@ We will implement a formal data backup and recovery strategy. This will involve
## Consequences
* **Positive**: Protects against catastrophic data loss, ensuring business continuity. Provides a clear, tested plan for disaster recovery.
* **Negative**: Requires setup and maintenance of backup scripts and secure storage. Incurs storage costs for backup files.
- **Positive**: Protects against catastrophic data loss, ensuring business continuity. Provides a clear, tested plan for disaster recovery.
- **Negative**: Requires setup and maintenance of backup scripts and secure storage. Incurs storage costs for backup files.

View File

@@ -12,11 +12,11 @@ When the application is containerized (`ADR-014`), the container orchestrator (e
We will implement dedicated health check endpoints in the Express application.
* A **Liveness Probe** (`/api/health/live`) will return a `200 OK` to indicate the server is running. If it fails, the orchestrator should restart the container.
- A **Liveness Probe** (`/api/health/live`) will return a `200 OK` to indicate the server is running. If it fails, the orchestrator should restart the container.
* A **Readiness Probe** (`/api/health/ready`) will return a `200 OK` only if the application is ready to accept traffic (e.g., database connection is established). If it fails, the orchestrator will temporarily remove the container from the load balancer.
- A **Readiness Probe** (`/api/health/ready`) will return a `200 OK` only if the application is ready to accept traffic (e.g., database connection is established). If it fails, the orchestrator will temporarily remove the container from the load balancer.
## Consequences
* **Positive**: Enables robust, automated application lifecycle management in a containerized environment. Prevents traffic from being sent to unhealthy or uninitialized application instances.
* **Negative**: Adds a small amount of code for the health check endpoints. Requires configuration in the container orchestration layer.
- **Positive**: Enables robust, automated application lifecycle management in a containerized environment. Prevents traffic from being sent to unhealthy or uninitialized application instances.
- **Negative**: Adds a small amount of code for the health check endpoints. Requires configuration in the container orchestration layer.

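Since the two probes are specified precisely above, a sketch is straightforward; the `SELECT 1` readiness check is an assumption about how "database connection is established" would be verified:

```ts
// Sketch of the liveness/readiness endpoints described in the ADR above.
import { Router } from 'express';
import { Pool } from 'pg';

const pool = new Pool();
export const healthRouter = Router();

// Liveness: the process is up and serving HTTP; failure means "restart the container".
healthRouter.get('/api/health/live', (_req, res) => {
  res.status(200).json({ status: 'ok' });
});

// Readiness: only healthy once dependencies respond; failure means "stop routing traffic here".
healthRouter.get('/api/health/ready', async (_req, res) => {
  try {
    await pool.query('SELECT 1');
    res.status(200).json({ status: 'ready' });
  } catch {
    res.status(503).json({ status: 'unavailable' });
  }
});
```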
View File

@@ -24,8 +24,8 @@ We will adopt a standardized, application-wide structured logging policy for all
**2. Pino-like API for Structured Logging**: The client logger mimics the `pino` API, which is the standard on the backend. It supports two primary call signatures:
* `logger.info('A simple message');`
* `logger.info({ key: 'value' }, 'A message with a structured data payload');`
- `logger.info('A simple message');`
- `logger.info({ key: 'value' }, 'A message with a structured data payload');`
The second signature, which includes a data object as the first argument, is **strongly preferred**, especially for logging errors or complex state.
@@ -79,7 +79,7 @@ describe('MyComponent', () => {
// Assert that the logger was called with the expected structure
expect(logger.error).toHaveBeenCalledWith(
expect.objectContaining({ err: expect.any(Error) }), // Check for the error object
'Failed to fetch component data' // Check for the message
'Failed to fetch component data', // Check for the message
);
});
});

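To make the two call signatures concrete, here is a minimal sketch of a pino-like client logger; the project's real logger (its transport, levels, and buffering) is not part of this diff:

```ts
// Minimal pino-like client logger sketch supporting both call signatures shown above.
type LogFn = {
  (msg: string): void;
  (obj: Record<string, unknown>, msg?: string): void;
};

function makeLogFn(level: 'debug' | 'info' | 'warn' | 'error'): LogFn {
  return (first: string | Record<string, unknown>, msg?: string) => {
    // Normalize both signatures into one structured entry.
    const entry = typeof first === 'string' ? { level, msg: first } : { level, msg, ...first };
    const method = level === 'debug' ? 'log' : level; // console.debug is hidden in some browsers
    console[method](JSON.stringify(entry));
  };
}

export const logger = {
  debug: makeLogFn('debug'),
  info: makeLogFn('info'),
  warn: makeLogFn('warn'),
  error: makeLogFn('error'),
};

// logger.info('A simple message');
// logger.error({ err: new Error('boom') }, 'Failed to fetch component data');
```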
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "flyer-crawler",
"version": "0.0.18",
"version": "0.0.20",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "flyer-crawler",
"version": "0.0.18",
"version": "0.0.20",
"dependencies": {
"@bull-board/api": "^6.14.2",
"@bull-board/express": "^6.14.2",

View File

@@ -1,7 +1,7 @@
{
"name": "flyer-crawler",
"private": true,
"version": "0.0.18",
"version": "0.0.20",
"type": "module",
"scripts": {
"dev": "concurrently \"npm:start:dev\" \"vite\"",

View File

@@ -1030,11 +1030,61 @@ DROP FUNCTION IF EXISTS public.fork_recipe(UUID, BIGINT);
CREATE OR REPLACE FUNCTION public.fork_recipe(p_user_id UUID, p_original_recipe_id BIGINT)
RETURNS SETOF public.recipes
LANGUAGE sql
LANGUAGE plpgsql
SECURITY INVOKER
AS $$
-- The entire forking logic is now encapsulated in a single, atomic database function.
SELECT * FROM public.fork_recipe(p_user_id, p_original_recipe_id);
DECLARE
new_recipe_id BIGINT;
BEGIN
-- 1. Create a copy of the recipe, linking it to the new user and the original recipe.
INSERT INTO public.recipes (
user_id,
original_recipe_id,
name,
description,
instructions,
prep_time_minutes,
cook_time_minutes,
servings,
photo_url,
calories_per_serving,
protein_grams,
fat_grams,
carb_grams,
status -- Forked recipes should be private by default
)
SELECT
p_user_id,
p_original_recipe_id,
original.name || ' (Fork)', -- Append '(Fork)' to distinguish it
original.description,
original.instructions,
original.prep_time_minutes,
original.cook_time_minutes,
original.servings,
original.photo_url,
original.calories_per_serving,
original.protein_grams,
original.fat_grams,
original.carb_grams,
'private'
FROM public.recipes AS original
WHERE original.recipe_id = p_original_recipe_id
RETURNING recipe_id INTO new_recipe_id;
-- If the original recipe didn't exist, new_recipe_id will be null.
IF new_recipe_id IS NULL THEN
RETURN;
END IF;
-- 2. Copy all ingredients, tags, and appliances from the original recipe to the new one.
INSERT INTO public.recipe_ingredients (recipe_id, master_item_id, quantity, unit) SELECT new_recipe_id, master_item_id, quantity, unit FROM public.recipe_ingredients WHERE recipe_id = p_original_recipe_id;
INSERT INTO public.recipe_tags (recipe_id, tag_id) SELECT new_recipe_id, tag_id FROM public.recipe_tags WHERE recipe_id = p_original_recipe_id;
INSERT INTO public.recipe_appliances (recipe_id, appliance_id) SELECT new_recipe_id, appliance_id FROM public.recipe_appliances WHERE recipe_id = p_original_recipe_id;
-- 3. Return the newly created recipe record.
RETURN QUERY SELECT * FROM public.recipes WHERE recipe_id = new_recipe_id;
END;
$$;
@@ -1566,4 +1616,3 @@ BEGIN
bp.price_rank = 1;
END;
$$ LANGUAGE plpgsql;

View File

@@ -8,7 +8,23 @@
-- It is idempotent, meaning it can be run multiple times without causing errors.
-- 1. Pre-populate the master grocery items dictionary.
-- This block links generic items to their respective categories.
-- This MUST run after populating categories.
-- Renumbered to 2.
-- 2. Pre-populate the categories table from a predefined list.
-- Renumbered to 1. This MUST run before populating master_grocery_items.
DO $$
BEGIN
INSERT INTO public.categories (name) VALUES
('Fruits & Vegetables'), ('Meat & Seafood'), ('Dairy & Eggs'), ('Bakery & Bread'),
('Pantry & Dry Goods'), ('Beverages'), ('Frozen Foods'), ('Snacks'), ('Household & Cleaning'),
('Personal Care & Health'), ('Baby & Child'), ('Pet Supplies'), ('Deli & Prepared Foods'),
('Canned Goods'), ('Condiments & Spices'), ('Breakfast & Cereal'), ('Organic'),
('International Foods'), ('Other/Miscellaneous')
ON CONFLICT (name) DO NOTHING;
END $$;
-- 2. Pre-populate the master grocery items dictionary.
DO $$
DECLARE
fv_cat_id BIGINT; ms_cat_id BIGINT; de_cat_id BIGINT; bb_cat_id BIGINT; pdg_cat_id BIGINT;
@@ -53,18 +69,6 @@ BEGIN
ON CONFLICT (name) DO NOTHING;
END $$;
-- 2. Pre-populate the categories table from a predefined list.
DO $$
BEGIN
INSERT INTO public.categories (name) VALUES
('Fruits & Vegetables'), ('Meat & Seafood'), ('Dairy & Eggs'), ('Bakery & Bread'),
('Pantry & Dry Goods'), ('Beverages'), ('Frozen Foods'), ('Snacks'), ('Household & Cleaning'),
('Personal Care & Health'), ('Baby & Child'), ('Pet Supplies'), ('Deli & Prepared Foods'),
('Canned Goods'), ('Condiments & Spices'), ('Breakfast & Cereal'), ('Organic'),
('International Foods'), ('Other/Miscellaneous')
ON CONFLICT (name) DO NOTHING;
END $$;
-- 3. Pre-populate the brands and products tables.
-- This block adds common brands and links them to specific products.
DO $$

View File

@@ -110,8 +110,8 @@ CREATE TABLE IF NOT EXISTS public.flyers (
file_name TEXT NOT NULL,
image_url TEXT NOT NULL,
icon_url TEXT,
checksum TEXT UNIQUE,
store_id BIGINT REFERENCES public.stores(store_id),
checksum TEXT UNIQUE,
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE CASCADE,
valid_from DATE,
valid_to DATE,
store_address TEXT,
@@ -139,7 +139,7 @@ CREATE INDEX IF NOT EXISTS idx_flyers_valid_to_file_name ON public.flyers (valid
CREATE TABLE IF NOT EXISTS public.master_grocery_items (
master_grocery_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
name TEXT NOT NULL UNIQUE,
category_id BIGINT REFERENCES public.categories(category_id),
category_id BIGINT REFERENCES public.categories(category_id) ON DELETE SET NULL,
is_allergen BOOLEAN DEFAULT false,
allergy_info JSONB,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
@@ -162,6 +162,38 @@ CREATE TABLE IF NOT EXISTS public.user_watched_items (
COMMENT ON TABLE public.user_watched_items IS 'A linking table that represents a user''s personal watchlist of grocery items.';
CREATE INDEX IF NOT EXISTS idx_user_watched_items_master_item_id ON public.user_watched_items(master_item_id);
-- 23. Store brand information. (Moved up due to dependency in flyer_items)
CREATE TABLE IF NOT EXISTS public.brands (
brand_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
name TEXT NOT NULL UNIQUE,
logo_url TEXT,
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE SET NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.brands IS 'Stores brand names like "Coca-Cola", "Maple Leaf", or "Kraft".';
COMMENT ON COLUMN public.brands.store_id IS 'If this is a store-specific brand (e.g., President''s Choice), this links to the parent store.';
-- 24. For specific products, linking a master item with a brand and size. (Moved up due to dependency in flyer_items)
CREATE TABLE IF NOT EXISTS public.products (
product_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
brand_id BIGINT REFERENCES public.brands(brand_id) ON DELETE SET NULL,
name TEXT NOT NULL,
description TEXT,
size TEXT,
upc_code TEXT UNIQUE,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.products IS 'Represents a specific, sellable product, combining a generic item with a brand and size.';
COMMENT ON COLUMN public.products.upc_code IS 'Universal Product Code, if available, for exact product matching.';
COMMENT ON COLUMN public.products.brand_id IS 'Can be null for generic/store-brand items.';
COMMENT ON COLUMN public.products.name IS 'Prime Raised without Antibiotics Chicken Breast.';
COMMENT ON COLUMN public.products.size IS 'e.g., "4L", "500g".';
CREATE INDEX IF NOT EXISTS idx_products_master_item_id ON public.products(master_item_id);
CREATE INDEX IF NOT EXISTS idx_products_brand_id ON public.products(brand_id);
-- 9. The 'flyer_items' table. This stores individual items from flyers.
CREATE TABLE IF NOT EXISTS public.flyer_items (
flyer_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
@@ -171,13 +203,13 @@ CREATE TABLE IF NOT EXISTS public.flyer_items (
price_in_cents INTEGER,
quantity_num NUMERIC,
quantity TEXT NOT NULL,
category_id BIGINT REFERENCES public.categories(category_id),
category_id BIGINT REFERENCES public.categories(category_id) ON DELETE SET NULL,
category_name TEXT,
unit_price JSONB,
view_count INTEGER DEFAULT 0 NOT NULL,
click_count INTEGER DEFAULT 0 NOT NULL,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
product_id BIGINT,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE SET NULL,
product_id BIGINT REFERENCES public.products(product_id) ON DELETE SET NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
@@ -294,7 +326,7 @@ CREATE INDEX IF NOT EXISTS idx_shopping_lists_user_id ON public.shopping_lists(u
CREATE TABLE IF NOT EXISTS public.shopping_list_items (
shopping_list_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
shopping_list_id BIGINT NOT NULL REFERENCES public.shopping_lists(shopping_list_id) ON DELETE CASCADE,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
custom_item_name TEXT,
quantity NUMERIC DEFAULT 1 NOT NULL,
is_purchased BOOLEAN DEFAULT false NOT NULL,
@@ -359,7 +391,7 @@ CREATE INDEX IF NOT EXISTS idx_shared_menu_plans_shared_with_user_id ON public.s
CREATE TABLE IF NOT EXISTS public.suggested_corrections (
suggested_correction_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
flyer_item_id BIGINT NOT NULL REFERENCES public.flyer_items(flyer_item_id) ON DELETE CASCADE,
user_id UUID NOT NULL REFERENCES public.users(user_id),
user_id UUID NOT NULL REFERENCES public.users(user_id) ON DELETE CASCADE,
correction_type TEXT NOT NULL,
suggested_value TEXT NOT NULL,
status TEXT DEFAULT 'pending' NOT NULL,
@@ -379,9 +411,9 @@ CREATE INDEX IF NOT EXISTS idx_suggested_corrections_pending ON public.suggested
-- 21. For prices submitted directly by users from in-store.
CREATE TABLE IF NOT EXISTS public.user_submitted_prices (
user_submitted_price_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
user_id UUID NOT NULL REFERENCES public.users(user_id),
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id),
store_id BIGINT NOT NULL REFERENCES public.stores(store_id),
user_id UUID NOT NULL REFERENCES public.users(user_id) ON DELETE CASCADE,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
store_id BIGINT NOT NULL REFERENCES public.stores(store_id) ON DELETE CASCADE,
price_in_cents INTEGER NOT NULL,
photo_url TEXT,
upvotes INTEGER DEFAULT 0 NOT NULL,
@@ -409,38 +441,6 @@ COMMENT ON TABLE public.unmatched_flyer_items IS 'A queue for reviewing flyer it
CREATE INDEX IF NOT EXISTS idx_unmatched_flyer_items_flyer_item_id ON public.unmatched_flyer_items(flyer_item_id);
CREATE INDEX IF NOT EXISTS idx_unmatched_flyer_items_pending ON public.unmatched_flyer_items (created_at) WHERE status = 'pending';
-- 23. Store brand information.
CREATE TABLE IF NOT EXISTS public.brands (
brand_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
name TEXT NOT NULL UNIQUE,
logo_url TEXT,
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE SET NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.brands IS 'Stores brand names like "Coca-Cola", "Maple Leaf", or "Kraft".';
COMMENT ON COLUMN public.brands.store_id IS 'If this is a store-specific brand (e.g., President''s Choice), this links to the parent store.';
-- 24. For specific products, linking a master item with a brand and size.
CREATE TABLE IF NOT EXISTS public.products (
product_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id),
brand_id BIGINT REFERENCES public.brands(brand_id),
name TEXT NOT NULL,
description TEXT,
size TEXT,
upc_code TEXT UNIQUE,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.products IS 'Represents a specific, sellable product, combining a generic item with a brand and size.';
COMMENT ON COLUMN public.products.upc_code IS 'Universal Product Code, if available, for exact product matching.';
COMMENT ON COLUMN public.products.brand_id IS 'Can be null for generic/store-brand items.';
COMMENT ON COLUMN public.products.name IS 'Prime Raised without Antibiotics Chicken Breast.';
COMMENT ON COLUMN public.products.size IS 'e.g., "4L", "500g".';
CREATE INDEX IF NOT EXISTS idx_products_master_item_id ON public.products(master_item_id);
CREATE INDEX IF NOT EXISTS idx_products_brand_id ON public.products(brand_id);
-- 25. Linking table for when one flyer is valid for multiple locations.
CREATE TABLE IF NOT EXISTS public.flyer_locations (
flyer_id BIGINT NOT NULL REFERENCES public.flyers(flyer_id) ON DELETE CASCADE,
@@ -496,7 +496,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS idx_recipes_unique_system_recipe_name ON publi
CREATE TABLE IF NOT EXISTS public.recipe_ingredients (
recipe_ingredient_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
recipe_id BIGINT NOT NULL REFERENCES public.recipes(recipe_id) ON DELETE CASCADE,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id),
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
quantity NUMERIC NOT NULL,
unit TEXT NOT NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
@@ -780,7 +780,7 @@ CREATE INDEX IF NOT EXISTS idx_shopping_trips_shopping_list_id ON public.shoppin
CREATE TABLE IF NOT EXISTS public.shopping_trip_items (
shopping_trip_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
shopping_trip_id BIGINT NOT NULL REFERENCES public.shopping_trips(shopping_trip_id) ON DELETE CASCADE,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE SET NULL,
custom_item_name TEXT,
quantity NUMERIC NOT NULL,
price_paid_cents INTEGER,
@@ -844,7 +844,7 @@ CREATE INDEX IF NOT EXISTS idx_user_follows_following_id ON public.user_follows(
CREATE TABLE IF NOT EXISTS public.receipts (
receipt_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
user_id UUID NOT NULL REFERENCES public.users(user_id) ON DELETE CASCADE,
store_id BIGINT REFERENCES public.stores(store_id),
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE CASCADE,
receipt_image_url TEXT NOT NULL,
transaction_date TIMESTAMPTZ,
total_amount_cents INTEGER,
@@ -865,8 +865,8 @@ CREATE TABLE IF NOT EXISTS public.receipt_items (
raw_item_description TEXT NOT NULL,
quantity NUMERIC DEFAULT 1 NOT NULL,
price_paid_cents INTEGER NOT NULL,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
product_id BIGINT REFERENCES public.products(product_id),
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE SET NULL,
product_id BIGINT REFERENCES public.products(product_id) ON DELETE SET NULL,
status TEXT DEFAULT 'unmatched' NOT NULL CHECK (status IN ('unmatched', 'matched', 'needs_review', 'ignored')),
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL

View File

@@ -126,8 +126,8 @@ CREATE TABLE IF NOT EXISTS public.flyers (
file_name TEXT NOT NULL,
image_url TEXT NOT NULL,
icon_url TEXT,
checksum TEXT UNIQUE,
store_id BIGINT REFERENCES public.stores(store_id),
checksum TEXT UNIQUE,
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE CASCADE,
valid_from DATE,
valid_to DATE,
store_address TEXT,
@@ -155,7 +155,7 @@ CREATE INDEX IF NOT EXISTS idx_flyers_valid_to_file_name ON public.flyers (valid
CREATE TABLE IF NOT EXISTS public.master_grocery_items (
master_grocery_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
name TEXT NOT NULL UNIQUE,
category_id BIGINT REFERENCES public.categories(category_id),
category_id BIGINT REFERENCES public.categories(category_id) ON DELETE SET NULL,
is_allergen BOOLEAN DEFAULT false,
allergy_info JSONB,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
@@ -178,6 +178,38 @@ CREATE TABLE IF NOT EXISTS public.user_watched_items (
COMMENT ON TABLE public.user_watched_items IS 'A linking table that represents a user''s personal watchlist of grocery items.';
CREATE INDEX IF NOT EXISTS idx_user_watched_items_master_item_id ON public.user_watched_items(master_item_id);
-- 23. Store brand information. (Moved up due to dependency in flyer_items)
CREATE TABLE IF NOT EXISTS public.brands (
brand_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
name TEXT NOT NULL UNIQUE,
logo_url TEXT,
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE SET NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.brands IS 'Stores brand names like "Coca-Cola", "Maple Leaf", or "Kraft".';
COMMENT ON COLUMN public.brands.store_id IS 'If this is a store-specific brand (e.g., President''s Choice), this links to the parent store.';
-- 24. For specific products, linking a master item with a brand and size. (Moved up due to dependency in flyer_items)
CREATE TABLE IF NOT EXISTS public.products (
product_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
brand_id BIGINT REFERENCES public.brands(brand_id) ON DELETE SET NULL,
name TEXT NOT NULL,
description TEXT,
size TEXT,
upc_code TEXT UNIQUE,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.products IS 'Represents a specific, sellable product, combining a generic item with a brand and size.';
COMMENT ON COLUMN public.products.upc_code IS 'Universal Product Code, if available, for exact product matching.';
COMMENT ON COLUMN public.products.brand_id IS 'Can be null for generic/store-brand items.';
COMMENT ON COLUMN public.products.name IS 'Prime Raised without Antibiotics Chicken Breast.';
COMMENT ON COLUMN public.products.size IS 'e.g., "4L", "500g".';
CREATE INDEX IF NOT EXISTS idx_products_master_item_id ON public.products(master_item_id);
CREATE INDEX IF NOT EXISTS idx_products_brand_id ON public.products(brand_id);
-- 9. The 'flyer_items' table. This stores individual items from flyers.
CREATE TABLE IF NOT EXISTS public.flyer_items (
flyer_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
@@ -187,13 +219,13 @@ CREATE TABLE IF NOT EXISTS public.flyer_items (
price_in_cents INTEGER,
quantity_num NUMERIC,
quantity TEXT NOT NULL,
category_id BIGINT REFERENCES public.categories(category_id),
category_id BIGINT REFERENCES public.categories(category_id) ON DELETE SET NULL,
category_name TEXT,
unit_price JSONB,
view_count INTEGER DEFAULT 0 NOT NULL,
click_count INTEGER DEFAULT 0 NOT NULL,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
product_id BIGINT,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE SET NULL,
product_id BIGINT REFERENCES public.products(product_id) ON DELETE SET NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
@@ -310,7 +342,7 @@ CREATE INDEX IF NOT EXISTS idx_shopping_lists_user_id ON public.shopping_lists(u
CREATE TABLE IF NOT EXISTS public.shopping_list_items (
shopping_list_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
shopping_list_id BIGINT NOT NULL REFERENCES public.shopping_lists(shopping_list_id) ON DELETE CASCADE,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
custom_item_name TEXT,
quantity NUMERIC DEFAULT 1 NOT NULL,
is_purchased BOOLEAN DEFAULT false NOT NULL,
@@ -375,7 +407,7 @@ CREATE INDEX IF NOT EXISTS idx_shared_menu_plans_shared_with_user_id ON public.s
CREATE TABLE IF NOT EXISTS public.suggested_corrections (
suggested_correction_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
flyer_item_id BIGINT NOT NULL REFERENCES public.flyer_items(flyer_item_id) ON DELETE CASCADE,
user_id UUID NOT NULL REFERENCES public.users(user_id),
user_id UUID NOT NULL REFERENCES public.users(user_id) ON DELETE CASCADE,
correction_type TEXT NOT NULL,
suggested_value TEXT NOT NULL,
status TEXT DEFAULT 'pending' NOT NULL,
@@ -395,9 +427,9 @@ CREATE INDEX IF NOT EXISTS idx_suggested_corrections_pending ON public.suggested
-- 21. For prices submitted directly by users from in-store.
CREATE TABLE IF NOT EXISTS public.user_submitted_prices (
user_submitted_price_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
user_id UUID NOT NULL REFERENCES public.users(user_id),
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id),
store_id BIGINT NOT NULL REFERENCES public.stores(store_id),
user_id UUID NOT NULL REFERENCES public.users(user_id) ON DELETE CASCADE,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
store_id BIGINT NOT NULL REFERENCES public.stores(store_id) ON DELETE CASCADE,
price_in_cents INTEGER NOT NULL,
photo_url TEXT,
upvotes INTEGER DEFAULT 0 NOT NULL,
@@ -424,38 +456,6 @@ COMMENT ON TABLE public.unmatched_flyer_items IS 'A queue for reviewing flyer it
CREATE INDEX IF NOT EXISTS idx_unmatched_flyer_items_flyer_item_id ON public.unmatched_flyer_items(flyer_item_id);
CREATE INDEX IF NOT EXISTS idx_unmatched_flyer_items_pending ON public.unmatched_flyer_items (created_at) WHERE status = 'pending';
-- 23. Store brand information.
CREATE TABLE IF NOT EXISTS public.brands (
brand_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
name TEXT NOT NULL UNIQUE,
logo_url TEXT,
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE SET NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.brands IS 'Stores brand names like "Coca-Cola", "Maple Leaf", or "Kraft".';
COMMENT ON COLUMN public.brands.store_id IS 'If this is a store-specific brand (e.g., President''s Choice), this links to the parent store.';
-- 24. For specific products, linking a master item with a brand and size.
CREATE TABLE IF NOT EXISTS public.products (
product_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id),
brand_id BIGINT REFERENCES public.brands(brand_id),
name TEXT NOT NULL,
description TEXT,
size TEXT,
upc_code TEXT UNIQUE,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
);
COMMENT ON TABLE public.products IS 'Represents a specific, sellable product, combining a generic item with a brand and size.';
COMMENT ON COLUMN public.products.upc_code IS 'Universal Product Code, if available, for exact product matching.';
COMMENT ON COLUMN public.products.brand_id IS 'Can be null for generic/store-brand items.';
COMMENT ON COLUMN public.products.name IS 'Prime Raised without Antibiotics Chicken Breast.';
COMMENT ON COLUMN public.products.size IS 'e.g., "4L", "500g".';
CREATE INDEX IF NOT EXISTS idx_products_master_item_id ON public.products(master_item_id);
CREATE INDEX IF NOT EXISTS idx_products_brand_id ON public.products(brand_id);
-- 25. Linking table for when one flyer is valid for multiple locations.
CREATE TABLE IF NOT EXISTS public.flyer_locations (
flyer_id BIGINT NOT NULL REFERENCES public.flyers(flyer_id) ON DELETE CASCADE,
@@ -510,7 +510,7 @@ CREATE UNIQUE INDEX IF NOT EXISTS idx_recipes_unique_system_recipe_name ON publi
CREATE TABLE IF NOT EXISTS public.recipe_ingredients (
recipe_ingredient_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
recipe_id BIGINT NOT NULL REFERENCES public.recipes(recipe_id) ON DELETE CASCADE,
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id),
master_item_id BIGINT NOT NULL REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE CASCADE,
quantity NUMERIC NOT NULL,
unit TEXT NOT NULL,
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
@@ -796,7 +796,7 @@ CREATE INDEX IF NOT EXISTS idx_shopping_trips_shopping_list_id ON public.shoppin
CREATE TABLE IF NOT EXISTS public.shopping_trip_items (
shopping_trip_item_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
shopping_trip_id BIGINT NOT NULL REFERENCES public.shopping_trips(shopping_trip_id) ON DELETE CASCADE,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE SET NULL,
custom_item_name TEXT,
quantity NUMERIC NOT NULL,
price_paid_cents INTEGER,
@@ -862,7 +862,7 @@ CREATE INDEX IF NOT EXISTS idx_user_follows_following_id ON public.user_follows(
CREATE TABLE IF NOT EXISTS public.receipts (
receipt_id BIGINT PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
user_id UUID NOT NULL REFERENCES public.users(user_id) ON DELETE CASCADE,
store_id BIGINT REFERENCES public.stores(store_id),
store_id BIGINT REFERENCES public.stores(store_id) ON DELETE CASCADE,
receipt_image_url TEXT NOT NULL,
transaction_date TIMESTAMPTZ,
total_amount_cents INTEGER,
@@ -883,8 +883,8 @@ CREATE TABLE IF NOT EXISTS public.receipt_items (
raw_item_description TEXT NOT NULL,
quantity NUMERIC DEFAULT 1 NOT NULL,
price_paid_cents INTEGER NOT NULL,
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id),
product_id BIGINT REFERENCES public.products(product_id),
master_item_id BIGINT REFERENCES public.master_grocery_items(master_grocery_item_id) ON DELETE SET NULL,
product_id BIGINT REFERENCES public.products(product_id) ON DELETE SET NULL,
status TEXT DEFAULT 'unmatched' NOT NULL CHECK (status IN ('unmatched', 'matched', 'needs_review', 'ignored')),
created_at TIMESTAMPTZ DEFAULT now() NOT NULL,
updated_at TIMESTAMPTZ DEFAULT now() NOT NULL
@@ -2128,11 +2128,61 @@ DROP FUNCTION IF EXISTS public.fork_recipe(UUID, BIGINT);
CREATE OR REPLACE FUNCTION public.fork_recipe(p_user_id UUID, p_original_recipe_id BIGINT)
RETURNS SETOF public.recipes
LANGUAGE sql
LANGUAGE plpgsql
SECURITY INVOKER
AS $$
-- The entire forking logic is now encapsulated in a single, atomic database function.
SELECT * FROM public.fork_recipe(p_user_id, p_original_recipe_id);
DECLARE
new_recipe_id BIGINT;
BEGIN
-- 1. Create a copy of the recipe, linking it to the new user and the original recipe.
INSERT INTO public.recipes (
user_id,
original_recipe_id,
name,
description,
instructions,
prep_time_minutes,
cook_time_minutes,
servings,
photo_url,
calories_per_serving,
protein_grams,
fat_grams,
carb_grams,
status -- Forked recipes should be private by default
)
SELECT
p_user_id,
p_original_recipe_id,
original.name || ' (Fork)', -- Append '(Fork)' to distinguish it
original.description,
original.instructions,
original.prep_time_minutes,
original.cook_time_minutes,
original.servings,
original.photo_url,
original.calories_per_serving,
original.protein_grams,
original.fat_grams,
original.carb_grams,
'private'
FROM public.recipes AS original
WHERE original.recipe_id = p_original_recipe_id
RETURNING recipe_id INTO new_recipe_id;
-- If the original recipe didn't exist, new_recipe_id will be null.
IF new_recipe_id IS NULL THEN
RETURN;
END IF;
-- 2. Copy all ingredients, tags, and appliances from the original recipe to the new one.
INSERT INTO public.recipe_ingredients (recipe_id, master_item_id, quantity, unit) SELECT new_recipe_id, master_item_id, quantity, unit FROM public.recipe_ingredients WHERE recipe_id = p_original_recipe_id;
INSERT INTO public.recipe_tags (recipe_id, tag_id) SELECT new_recipe_id, tag_id FROM public.recipe_tags WHERE recipe_id = p_original_recipe_id;
INSERT INTO public.recipe_appliances (recipe_id, appliance_id) SELECT new_recipe_id, appliance_id FROM public.recipe_appliances WHERE recipe_id = p_original_recipe_id;
-- 3. Return the newly created recipe record.
RETURN QUERY SELECT * FROM public.recipes WHERE recipe_id = new_recipe_id;
END;
$$;

View File

@@ -6,10 +6,11 @@
* DO NOT run this on a production database.
*/
import { Pool } from 'pg';
import { Pool, PoolClient } from 'pg';
import fs from 'node:fs/promises';
import path from 'node:path';
import bcrypt from 'bcrypt';
import { logger } from '../services/logger.server';
import { CATEGORIES } from '../types';
const pool = new Pool({
user: process.env.DB_USER,
@@ -20,81 +21,55 @@ const pool = new Pool({
});
async function main() {
// Declare client outside the try block so it's accessible in the finally block.
let client;
let client: PoolClient | undefined;
try {
client = await pool.connect();
logger.info('Connected to the database for seeding.');
await client.query('BEGIN');
// 1. Clean the database
logger.info('--- Wiping existing data... ---');
// Using TRUNCATE ... RESTART IDENTITY CASCADE is a powerful way to clean all tables
// and reset auto-incrementing keys, while respecting foreign key relationships.
const tablesRes = await client.query(`
SELECT tablename
FROM pg_tables
WHERE schemaname = 'public'
-- Exclude PostGIS system tables from truncation to avoid permission errors.
AND tablename NOT IN ('spatial_ref_sys', 'geometry_columns')
`);
const tables = tablesRes.rows.map((row) => `"${row.tablename}"`).join(', ');
if (tables) {
await client.query(`TRUNCATE ${tables} RESTART IDENTITY CASCADE`);
logger.info('All tables in public schema have been truncated.');
}
// 1. Clean the database by dropping and recreating the schema
logger.info('--- Wiping and rebuilding schema... ---');
const dropScriptPath = path.resolve(process.cwd(), 'sql/drop_tables.sql');
const dropSql = await fs.readFile(dropScriptPath, 'utf-8');
await client.query(dropSql);
logger.info('All tables dropped successfully.');
// 2. Seed Categories
logger.info('--- Seeding Categories... ---');
const categoryQuery = `INSERT INTO public.categories (name) VALUES ${CATEGORIES.map((_, i) => `($${i + 1})`).join(', ')} RETURNING category_id, name`;
const seededCategories = (
await client.query<{ category_id: number; name: string }>(categoryQuery, CATEGORIES)
).rows;
const categoryMap = new Map(seededCategories.map((c) => [c.name, c.category_id]));
logger.info(`Seeded ${seededCategories.length} categories.`);
const schemaScriptPath = path.resolve(process.cwd(), 'sql/master_schema_rollup.sql');
const schemaSql = await fs.readFile(schemaScriptPath, 'utf-8');
await client.query(schemaSql);
logger.info(
'Schema rebuilt and static data seeded successfully from master_schema_rollup.sql.',
);
// 3. Seed Stores
// 2. Seed Additional Stores (if any beyond what's in the rollup)
logger.info('--- Seeding Stores... ---');
const stores = ['Safeway', 'No Frills', 'Costco', 'Superstore'];
const storeQuery = `INSERT INTO public.stores (name) VALUES ${stores.map((_, i) => `($${i + 1})`).join(', ')} RETURNING store_id, name`;
const seededStores = (
await client.query<{ store_id: number; name: string }>(storeQuery, stores)
const storeQuery = `INSERT INTO public.stores (name) VALUES ${stores.map((_, i) => `($${i + 1})`).join(', ')} ON CONFLICT (name) DO NOTHING RETURNING store_id, name`;
await client.query<{ store_id: number; name: string }>(storeQuery, stores);
const allStores = (
await client.query<{ store_id: number; name: string }>(
'SELECT store_id, name FROM public.stores',
)
).rows;
const storeMap = new Map(seededStores.map((s) => [s.name, s.store_id]));
logger.info(`Seeded ${seededStores.length} stores.`);
// 4. Seed Master Grocery Items
logger.info('--- Seeding Master Grocery Items... ---');
const masterItems = [
{ name: 'Chicken Breast, Boneless Skinless', category: 'Meat & Seafood' },
{ name: 'Ground Beef, Lean', category: 'Meat & Seafood' },
{ name: 'Avocado', category: 'Fruits & Vegetables' },
{ name: 'Bananas', category: 'Fruits & Vegetables' },
{ name: 'Broccoli', category: 'Fruits & Vegetables' },
{ name: 'Cheddar Cheese, Block', category: 'Dairy & Eggs' },
{ name: 'Milk, 2%', category: 'Dairy & Eggs' },
{ name: 'Eggs, Large', category: 'Dairy & Eggs' },
{ name: 'Whole Wheat Bread', category: 'Bakery & Bread' },
{ name: 'Pasta, Spaghetti', category: 'Pantry & Dry Goods' },
{ name: 'Canned Tomatoes, Diced', category: 'Canned Goods' },
{ name: 'Coca-Cola, 12-pack', category: 'Beverages' },
{ name: 'Frozen Pizza', category: 'Frozen Foods' },
{ name: 'Paper Towels', category: 'Household & Cleaning' },
];
const masterItemValues = masterItems
.map((item) => `('${item.name.replace(/'/g, "''")}', ${categoryMap.get(item.category)})`)
.join(', ');
const masterItemQuery = `INSERT INTO public.master_grocery_items (name, category_id) VALUES ${masterItemValues} RETURNING master_grocery_item_id, name`;
const seededMasterItems = (
await client.query<{ master_grocery_item_id: number; name: string }>(masterItemQuery)
).rows;
const masterItemMap = new Map(
seededMasterItems.map((item) => [item.name, item.master_grocery_item_id]),
const storeMap = new Map(
allStores.map((s: { name: string; store_id: number }) => [s.name, s.store_id]),
);
logger.info(`Seeded ${seededMasterItems.length} master grocery items.`);
logger.info(`Seeded/verified ${allStores.length} total stores.`);
// 5. Seed Users & Profiles
// Fetch maps for items seeded by the master rollup script
const masterItemMap = new Map(
(
await client.query<{ master_grocery_item_id: number; name: string }>(
'SELECT master_grocery_item_id, name FROM public.master_grocery_items',
)
).rows.map((item: { name: string; master_grocery_item_id: number }) => [
item.name,
item.master_grocery_item_id,
]),
);
// 3. Seed Users & Profiles
logger.info('--- Seeding Users & Profiles... ---');
const saltRounds = 10;
const adminPassHash = await bcrypt.hash('adminpass', saltRounds);
@@ -126,7 +101,7 @@ async function main() {
const userId = userRes.rows[0].user_id;
logger.info('Seeded regular user (user@example.com / userpass)');
// 6. Seed a Flyer
// 4. Seed a Flyer
logger.info('--- Seeding a Sample Flyer... ---');
const today = new Date();
const validFrom = new Date(today);
@@ -146,29 +121,29 @@ async function main() {
const flyerId = flyerRes.rows[0].flyer_id;
logger.info(`Seeded flyer for Safeway (ID: ${flyerId}).`);
// 7. Seed Flyer Items
// 5. Seed Flyer Items
logger.info('--- Seeding Flyer Items... ---');
const flyerItems = [
{
name: 'Chicken Breast, Boneless Skinless',
name: 'chicken breast',
price_display: '$3.99 /lb',
price_in_cents: 399,
quantity: 'per lb',
master_item_id: masterItemMap.get('Chicken Breast, Boneless Skinless'),
master_item_id: masterItemMap.get('chicken breast'),
},
{
name: 'Avocado',
name: 'avocados',
price_display: '2 for $5.00',
price_in_cents: 250,
quantity: 'each',
master_item_id: masterItemMap.get('Avocado'),
master_item_id: masterItemMap.get('avocados'),
},
{
name: 'Coca-Cola 12-pack',
name: 'soda',
price_display: '$6.99',
price_in_cents: 699,
quantity: '12x355ml',
master_item_id: masterItemMap.get('Coca-Cola, 12-pack'),
master_item_id: masterItemMap.get('soda'),
},
{
name: 'Unmatched Sample Item',
@@ -194,12 +169,12 @@ async function main() {
}
logger.info(`Seeded ${flyerItems.length} items for the Safeway flyer.`);
// 8. Seed Watched Items for the user
// 6. Seed Watched Items for the user
logger.info('--- Seeding Watched Items... ---');
const watchedItemIds = [
masterItemMap.get('Chicken Breast, Boneless Skinless'),
masterItemMap.get('Avocado'),
masterItemMap.get('Ground Beef, Lean'),
masterItemMap.get('chicken breast'),
masterItemMap.get('avocados'),
masterItemMap.get('ground beef'),
];
for (const itemId of watchedItemIds) {
if (itemId) {
@@ -211,7 +186,7 @@ async function main() {
}
logger.info(`Seeded ${watchedItemIds.length} watched items for Test User.`);
// 9. Seed a Shopping List
// 7. Seed a Shopping List
logger.info('--- Seeding a Shopping List... ---');
const listRes = await client.query<{ shopping_list_id: number }>(
'INSERT INTO public.shopping_lists (user_id, name) VALUES ($1, $2) RETURNING shopping_list_id',
@@ -220,8 +195,8 @@ async function main() {
const listId = listRes.rows[0].shopping_list_id;
const shoppingListItems = [
{ master_item_id: masterItemMap.get('Milk, 2%'), quantity: 1 },
{ master_item_id: masterItemMap.get('Eggs, Large'), quantity: 1 },
{ master_item_id: masterItemMap.get('milk'), quantity: 1 },
{ master_item_id: masterItemMap.get('eggs'), quantity: 1 },
{ custom_item_name: 'Specialty Hot Sauce', quantity: 1 },
];
@@ -235,75 +210,6 @@ async function main() {
`Seeded shopping list "Weekly Groceries" with ${shoppingListItems.length} items for Test User.`,
);
// 10. Seed Brands
logger.info('--- Seeding Brands... ---');
const brands = [
'Coca-Cola',
'Kraft',
'Maple Leaf',
"Dempster's",
'No Name',
"President's Choice",
];
const brandQuery = `INSERT INTO public.brands (name) VALUES ${brands.map((_, i) => `($${i + 1})`).join(', ')} ON CONFLICT (name) DO NOTHING`;
await client.query(brandQuery, brands);
logger.info(`Seeded ${brands.length} brands.`);
// Link store-specific brands
const loblawsId = storeMap.get('Loblaws');
if (loblawsId) {
await client.query('UPDATE public.brands SET store_id = $1 WHERE name = $2 OR name = $3', [
loblawsId,
'No Name',
"President's Choice",
]);
logger.info('Linked store brands to Loblaws.');
}
// 11. Seed Recipes
logger.info('--- Seeding Recipes... ---');
const recipes = [
{
name: 'Simple Chicken and Rice',
description: 'A quick and healthy weeknight meal.',
instructions: '1. Cook rice. 2. Cook chicken. 3. Combine.',
prep: 10,
cook: 20,
servings: 4,
},
{
name: 'Classic Spaghetti Bolognese',
description: 'A rich and hearty meat sauce.',
instructions: '1. Brown beef. 2. Add sauce. 3. Simmer.',
prep: 15,
cook: 45,
servings: 6,
},
{
name: 'Vegetable Stir-fry',
description: 'A fast and flavorful vegetarian meal.',
instructions: '1. Chop veggies. 2. Stir-fry. 3. Add sauce.',
prep: 10,
cook: 10,
servings: 3,
},
];
for (const recipe of recipes) {
await client.query(
`INSERT INTO public.recipes (name, description, instructions, prep_time_minutes, cook_time_minutes, servings, status)
VALUES ($1, $2, $3, $4, $5, $6, 'public') ON CONFLICT (name) WHERE user_id IS NULL DO NOTHING`,
[
recipe.name,
recipe.description,
recipe.instructions,
recipe.prep,
recipe.cook,
recipe.servings,
],
);
}
logger.info(`Seeded ${recipes.length} recipes.`);
// --- SEED SCRIPT DEBUG LOGGING ---
// Corrected the query to be unambiguous by specifying the table alias for each column.
// `id` and `email` come from the `users` table (u), and `role` comes from the `profiles` table (p).

View File

@@ -620,6 +620,14 @@ describe('AI Routes (/api/ai)', () => {
expect(response.body.text).toContain('server-generated quick insight');
});
it('POST /quick-insights should accept items with "item" property instead of "name"', async () => {
const response = await supertest(app)
.post('/api/ai/quick-insights')
.send({ items: [{ item: 'test item' }] });
expect(response.status).toBe(200);
});
it('POST /quick-insights should return 500 on a generic error', async () => {
// To hit the catch block, we can simulate an error by making the logger throw.
vi.mocked(mockLogger.info).mockImplementationOnce(() => {

View File

@@ -88,10 +88,17 @@ const rescanAreaSchema = z.object({
const flyerItemForAnalysisSchema = z
.object({
name: requiredString('Item name is required.'),
// Allow other properties to pass through without validation
item: z.string().nullish(),
name: z.string().nullish(),
})
.passthrough();
.passthrough()
.refine(
(data) =>
(data.item && data.item.trim().length > 0) || (data.name && data.name.trim().length > 0),
{
message: "Item identifier is required (either 'item' or 'name').",
},
);
const insightsSchema = z.object({
body: z.object({

View File

@@ -617,7 +617,9 @@ describe('Auth Routes (/api/auth)', () => {
const setCookieHeader = response.headers['set-cookie'];
expect(setCookieHeader).toBeDefined();
expect(setCookieHeader[0]).toContain('refreshToken=;');
expect(setCookieHeader[0]).toContain('Expires=Thu, 01 Jan 1970');
// Check for Max-Age=0, which is the modern way to expire a cookie.
// The 'Expires' attribute is a fallback and its exact value can be inconsistent.
expect(setCookieHeader[0]).toContain('Max-Age=0');
});
it('should still return 200 OK even if deleting the refresh token from DB fails', async () => {

View File

@@ -98,11 +98,27 @@ describe('AI API Routes Integration Tests', () => {
altitudeAccuracy: null,
heading: null,
speed: null,
toJSON: () => ({}),
toJSON: function () {
return {
latitude: this.latitude,
longitude: this.longitude,
accuracy: this.accuracy,
altitude: this.altitude,
altitudeAccuracy: this.altitudeAccuracy,
heading: this.heading,
speed: this.speed,
};
},
};
const mockStore = {
name: 'Test Store for Trip',
store_id: 1,
created_at: new Date().toISOString(),
updated_at: new Date().toISOString(),
};
const response = await aiApiClient.planTripWithMaps(
[],
undefined,
mockStore,
mockLocation,
undefined,
authToken,

View File

@@ -60,7 +60,11 @@ describe('Flyer Processing Background Job Integration Test', () => {
// Arrange: Load a mock flyer PDF.
const imagePath = path.resolve(__dirname, '../assets/test-flyer-image.jpg');
const imageBuffer = await fs.readFile(imagePath);
const mockImageFile = new File([imageBuffer], 'test-flyer-image.jpg', { type: 'image/jpeg' });
// Create a unique buffer and filename for each test run to ensure a unique checksum.
// This prevents a 409 Conflict error when the second test runs.
const uniqueContent = Buffer.concat([imageBuffer, Buffer.from(Date.now().toString())]);
const uniqueFileName = `test-flyer-image-${Date.now()}.jpg`;
const mockImageFile = new File([uniqueContent], uniqueFileName, { type: 'image/jpeg' });
const checksum = await generateFileChecksum(mockImageFile);
// Act 1: Upload the file to start the background job.

View File

@@ -54,9 +54,10 @@ describe('Public API Routes Integration Tests', () => {
testFlyer = flyerRes.rows[0];
// Add an item to the flyer
await pool.query(`INSERT INTO public.flyer_items (flyer_id, item) VALUES ($1, 'Test Item')`, [
testFlyer.flyer_id,
]);
await pool.query(
`INSERT INTO public.flyer_items (flyer_id, item, price_display, quantity) VALUES ($1, 'Test Item', '$0.00', 'each')`,
[testFlyer.flyer_id],
);
});
afterAll(async () => {