// src/services/aiApiClient.ts
|
|
/**
|
|
* @file This file acts as a client-side API wrapper for all AI-related functionalities.
|
|
* It communicates with the application's own backend endpoints, which then securely
|
|
* call the Google AI services. This ensures no API keys are exposed on the client.
|
|
*/
|
|
import type {
|
|
FlyerItem,
|
|
Store,
|
|
MasterGroceryItem,
|
|
ProcessingStage,
|
|
GroundedResponse,
|
|
} from '../types';
|
|
import { logger } from './logger.client';
|
|
import { apiFetch } from './apiClient';
|
|
|
|
/**
|
|
* Uploads a flyer file to the backend to be processed asynchronously.
|
|
* This is the first step in the new background processing flow.
|
|
* @param file The flyer file (PDF or image).
|
|
* @param checksum The SHA-256 checksum of the file.
|
|
* @param tokenOverride Optional token for testing.
|
|
* @returns A promise that resolves to the API response, which should contain a `jobId`.
|
|
*/
|
|
export const uploadAndProcessFlyer = async (
|
|
file: File,
|
|
checksum: string,
|
|
tokenOverride?: string,
|
|
): Promise<{ jobId: string }> => {
|
|
const formData = new FormData();
|
|
formData.append('flyerFile', file);
|
|
formData.append('checksum', checksum);
|
|
|
|
logger.info(`[aiApiClient] Starting background processing for file: ${file.name}`);
|
|
|
|
const response = await apiFetch(
|
|
'/ai/upload-and-process',
|
|
{
|
|
method: 'POST',
|
|
body: formData,
|
|
},
|
|
{ tokenOverride },
|
|
);
|
|
|
|
if (!response.ok) {
|
|
let errorBody;
|
|
try {
|
|
errorBody = await response.json();
|
|
} catch (e) {
|
|
errorBody = { message: await response.text() };
|
|
}
|
|
// Throw a structured error so the component can inspect the status and body
|
|
throw { status: response.status, body: errorBody };
|
|
}
|
|
|
|
return response.json();
|
|
};
|
|
|
|
// Define the expected shape of the job status response
export interface JobStatus {
  // Identifier of the background job, as returned by the upload endpoint.
  id: string;
  // Lifecycle state of the job as reported by the backend queue.
  state: 'completed' | 'failed' | 'active' | 'waiting' | 'delayed' | 'paused';
  // Incremental progress payload; null when the worker has not reported yet.
  progress: {
    stages?: ProcessingStage[];
    estimatedTimeRemaining?: number;
    message?: string;
  } | null;
  // Result payload once processing finishes; null until then.
  returnValue: {
    flyerId?: number;
  } | null;
  // Failure description — presumably set when state === 'failed'; confirm with backend.
  failedReason: string | null;
}
|
|
|
|
/**
|
|
* Fetches the status of a background processing job.
|
|
* This is the second step in the new background processing flow.
|
|
* @param jobId The ID of the job to check.
|
|
* @param tokenOverride Optional token for testing.
|
|
* @returns A promise that resolves to the parsed job status object.
|
|
* @throws An error if the network request fails or if the response is not valid JSON.
|
|
*/
|
|
export const getJobStatus = async (
|
|
jobId: string,
|
|
tokenOverride?: string,
|
|
): Promise<JobStatus> => {
|
|
const response = await apiFetch(`/ai/jobs/${jobId}/status`, {}, { tokenOverride });
|
|
|
|
if (!response.ok) {
|
|
let errorText = `API Error: ${response.status} ${response.statusText}`;
|
|
try {
|
|
const errorBody = await response.text();
|
|
if (errorBody) errorText = `API Error ${response.status}: ${errorBody}`;
|
|
} catch (e) {
|
|
// ignore if reading body fails
|
|
}
|
|
throw new Error(errorText);
|
|
}
|
|
|
|
try {
|
|
return await response.json();
|
|
} catch (error) {
|
|
const rawText = await response.text();
|
|
throw new Error(`Failed to parse JSON response from server. Body: ${rawText}`);
|
|
}
|
|
};
|
|
|
|
export const isImageAFlyer = (
|
|
imageFile: File,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
const formData = new FormData();
|
|
formData.append('image', imageFile);
|
|
|
|
// Use apiFetchWithAuth for FormData to let the browser set the correct Content-Type.
|
|
// The URL must be relative, as the helper constructs the full path.
|
|
return apiFetch(
|
|
'/ai/check-flyer',
|
|
{
|
|
method: 'POST',
|
|
body: formData,
|
|
},
|
|
{ tokenOverride },
|
|
);
|
|
};
|
|
|
|
export const extractAddressFromImage = (
|
|
imageFile: File,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
const formData = new FormData();
|
|
formData.append('image', imageFile);
|
|
|
|
return apiFetch(
|
|
'/ai/extract-address',
|
|
{
|
|
method: 'POST',
|
|
body: formData,
|
|
},
|
|
{ tokenOverride },
|
|
);
|
|
};
|
|
|
|
export const extractLogoFromImage = (
|
|
imageFiles: File[],
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
const formData = new FormData();
|
|
imageFiles.forEach((file) => {
|
|
formData.append('images', file);
|
|
});
|
|
|
|
return apiFetch(
|
|
'/ai/extract-logo',
|
|
{
|
|
method: 'POST',
|
|
body: formData,
|
|
},
|
|
{ tokenOverride },
|
|
);
|
|
};
|
|
|
|
export const getQuickInsights = (
|
|
items: Partial<FlyerItem>[],
|
|
signal?: AbortSignal,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
return apiFetch(
|
|
'/ai/quick-insights',
|
|
{
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ items }),
|
|
signal,
|
|
},
|
|
{ tokenOverride, signal },
|
|
);
|
|
};
|
|
|
|
export const getDeepDiveAnalysis = (
|
|
items: Partial<FlyerItem>[],
|
|
signal?: AbortSignal,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
return apiFetch(
|
|
'/ai/deep-dive',
|
|
{
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ items }),
|
|
signal,
|
|
},
|
|
{ tokenOverride, signal },
|
|
);
|
|
};
|
|
|
|
export const searchWeb = (
|
|
query: string,
|
|
signal?: AbortSignal,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
return apiFetch(
|
|
'/ai/search-web',
|
|
{
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ query }),
|
|
signal,
|
|
},
|
|
{ tokenOverride, signal },
|
|
);
|
|
};
|
|
|
|
// ============================================================================
|
|
// STUBS FOR FUTURE AI FEATURES
|
|
// ============================================================================
|
|
|
|
export const planTripWithMaps = async (
|
|
items: FlyerItem[],
|
|
store: Store | undefined,
|
|
userLocation: GeolocationCoordinates,
|
|
signal?: AbortSignal,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
logger.debug('Stub: planTripWithMaps called with location:', { userLocation });
|
|
return apiFetch(
|
|
'/ai/plan-trip',
|
|
{
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ items, store, userLocation }),
|
|
},
|
|
{ signal, tokenOverride },
|
|
);
|
|
};
|
|
|
|
/**
|
|
* [STUB] Generates an image based on a text prompt using the Imagen model.
|
|
* @param prompt A description of the image to generate (e.g., a meal plan).
|
|
* @returns A base64-encoded string of the generated PNG image.
|
|
*/
|
|
export const generateImageFromText = (
|
|
prompt: string,
|
|
signal?: AbortSignal,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
logger.debug('Stub: generateImageFromText called with prompt:', { prompt });
|
|
return apiFetch(
|
|
'/ai/generate-image',
|
|
{
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ prompt }),
|
|
signal,
|
|
},
|
|
{ tokenOverride, signal },
|
|
);
|
|
};
|
|
|
|
/**
|
|
* [STUB] Converts a string of text into speech audio data.
|
|
* @param text The text to be spoken.
|
|
* @returns A base64-encoded string of the raw audio data.
|
|
*/
|
|
export const generateSpeechFromText = (
|
|
text: string,
|
|
signal?: AbortSignal,
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
logger.debug('Stub: generateSpeechFromText called with text:', { text });
|
|
return apiFetch(
|
|
'/ai/generate-speech',
|
|
{
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ text }),
|
|
signal,
|
|
},
|
|
{ tokenOverride, signal },
|
|
);
|
|
};
|
|
|
|
/**
|
|
* [STUB] Initiates a real-time voice conversation session using the Live API.
|
|
* This function is more complex and would require a WebSocket connection proxied
|
|
* through the backend. For now, this remains a conceptual client-side function.
|
|
* A full implementation would involve a separate WebSocket client.
|
|
* @param callbacks An object containing onopen, onmessage, onerror, and onclose handlers.
|
|
* @returns A promise that resolves to the live session object.
|
|
*/
|
|
export const startVoiceSession = (callbacks: {
|
|
onopen?: () => void;
|
|
onmessage: (message: import('@google/genai').LiveServerMessage) => void;
|
|
onerror?: (error: ErrorEvent) => void;
|
|
onclose?: () => void;
|
|
}): Promise<unknown> => {
|
|
logger.debug('Stub: startVoiceSession called.', { callbacks });
|
|
// In a real implementation, this would connect to a WebSocket endpoint on your server,
|
|
// which would then proxy the connection to the Google AI Live API.
|
|
// This is a placeholder and will not function.
|
|
throw new Error(
|
|
'Voice session feature is not fully implemented and requires a backend WebSocket proxy.',
|
|
);
|
|
};
|
|
|
|
/*
|
|
The following functions are server-side only and have been moved to `aiService.server.ts`.
|
|
This file should not contain any server-side logic or direct use of `fs` or `process.env`.
|
|
|
|
- extractItemsFromReceiptImage
|
|
- extractCoreDataFromFlyerImage
|
|
*/
|
|
|
|
/**
|
|
* Sends a cropped area of an image to the backend for targeted text extraction.
|
|
* @param imageFile The original image file.
|
|
* @param cropArea The { x, y, width, height } of the area to scan.
|
|
* @param extractionType The type of data to look for ('store_name', 'dates', etc.).
|
|
* @param tokenOverride Optional token for testing.
|
|
* @returns A promise that resolves to the API response containing the extracted text.
|
|
*/
|
|
export const rescanImageArea = (
|
|
imageFile: File,
|
|
cropArea: { x: number; y: number; width: number; height: number },
|
|
extractionType: 'store_name' | 'dates' | 'item_details',
|
|
tokenOverride?: string,
|
|
): Promise<Response> => {
|
|
const formData = new FormData();
|
|
formData.append('image', imageFile);
|
|
formData.append('cropArea', JSON.stringify(cropArea));
|
|
formData.append('extractionType', extractionType);
|
|
|
|
return apiFetch(
|
|
'/ai/rescan-area',
|
|
{ method: 'POST', body: formData },
|
|
{ tokenOverride },
|
|
);
|
|
};
|
|
|
|
/**
|
|
* Sends a user's watched items to the AI backend for price comparison.
|
|
* @param watchedItems An array of the user's watched master grocery items.
|
|
* @returns A promise that resolves to the raw `Response` object from the API.
|
|
*/
|
|
export const compareWatchedItemPrices = (
|
|
watchedItems: MasterGroceryItem[],
|
|
signal?: AbortSignal,
|
|
): Promise<Response> => {
|
|
// Use the apiFetch wrapper for consistency with other API calls in this file.
|
|
// This centralizes token handling and base URL logic.
|
|
return apiFetch(
|
|
'/ai/compare-prices',
|
|
{
|
|
method: 'POST',
|
|
headers: { 'Content-Type': 'application/json' },
|
|
body: JSON.stringify({ items: watchedItems }),
|
|
},
|
|
{ signal },
|
|
)};
|