// src/services/aiApiClient.ts
/**
 * @file Client-side API wrapper for all AI-related functionality.
 * It communicates with the application's own backend endpoints, which in turn
 * securely call the Google AI services, so no API keys are exposed on the client.
 */
import type { GroundingChunk } from '@google/genai';
import type { FlyerItem, MasterGroceryItem, Store, ExtractedCoreData, ExtractedLogoData } from '../types';
import { logger } from './logger';
import { apiFetchWithAuth } from './apiClient';
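
/**
 * Checks whether an uploaded image looks like a grocery flyer.
 * @param imageFile The image to classify.
 * @returns True if the backend's AI check identifies the image as a flyer.
 * @example
 * // Hypothetical usage in an upload handler (assumes `file` is a File from an <input>):
 * if (!(await isImageAFlyer(file))) {
 *   alert('That image does not look like a flyer.');
 * }
 */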
export const isImageAFlyer = async (imageFile: File): Promise<boolean> => {
  const formData = new FormData();
  formData.append('image', imageFile);
  // Use apiFetchWithAuth for FormData to let the browser set the correct Content-Type.
  // The URL must be relative, as the helper constructs the full path.
  const response = await apiFetchWithAuth('/ai/check-flyer', {
    method: 'POST',
    body: formData,
  });
  const result = await response.json();
  return result.is_flyer;
};
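
/**
 * Extracts a store address from a single flyer image via the backend.
 * @param imageFile The image to scan.
 * @returns The extracted address, or null if none was found.
 */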
export const extractAddressFromImage = async (imageFile: File): Promise<string | null> => {
  const formData = new FormData();
  formData.append('image', imageFile);
  const response = await apiFetchWithAuth('/ai/extract-address', {
    method: 'POST',
    body: formData,
  });
  const result = await response.json();
  return result.address;
};
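
/**
 * Sends flyer images and the master grocery item list to the backend, which runs
 * the full AI extraction and returns the flyer's core data.
 * @param imageFiles The flyer page images to process.
 * @param masterItems Master grocery items, serialized into the request to help
 *   the backend match extracted items.
 * @returns The extracted core data, or null if the backend returned no data.
 * @throws If the backend responds with a non-OK status.
 */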
export const extractCoreDataFromImage = async (imageFiles: File[], masterItems: MasterGroceryItem[]): Promise<ExtractedCoreData | null> => {
  const formData = new FormData();
  imageFiles.forEach(file => {
    formData.append('flyerImages', file);
  });
  formData.append('masterItems', JSON.stringify(masterItems));

  // --- DEBUG LOGGING for flyer processing ---
  logger.debug('[aiApiClient] Calling /api/ai/process-flyer with FormData.');
  logger.debug(`[aiApiClient] Number of image files: ${imageFiles.length}`);
  logger.debug(`[aiApiClient] Master items count: ${masterItems.length}`);
  // --- END DEBUG LOGGING ---

  // This now calls the real backend endpoint.
  const response = await apiFetchWithAuth('/ai/process-flyer', {
    method: 'POST',
    body: formData,
  });
  const responseData = await response.json();
  if (!response.ok) {
    throw new Error(responseData.message || 'Failed to process flyer with AI.');
  }
  if (!responseData.data) {
    return null;
  }
  // The backend now returns the fully processed data in the correct format.
  return responseData.data as ExtractedCoreData;
};
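
/**
 * Asks the backend to extract store logo data from the given images.
 * @param imageFiles The images to scan for a logo.
 * @returns The extracted logo data.
 */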
export const extractLogoFromImage = async (imageFiles: File[]): Promise<ExtractedLogoData> => {
  const formData = new FormData();
  imageFiles.forEach(file => {
    formData.append('images', file);
  });
  const response = await apiFetchWithAuth('/ai/extract-logo', {
    method: 'POST',
    body: formData,
  });
  return response.json();
};
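
/**
 * Requests a short AI-generated summary ("quick insights") of the given flyer items.
 * @param items The flyer items to analyze.
 * @returns The generated insight text.
 */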
export const getQuickInsights = async (items: FlyerItem[]): Promise<string> => {
  const response = await apiFetchWithAuth('/ai/quick-insights', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ items }),
  });
  const result = await response.json();
  return result.text;
};
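
/**
 * Requests a more detailed "deep dive" AI analysis of the given flyer items.
 * @param items The flyer items to analyze.
 * @returns The generated analysis text.
 */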
export const getDeepDiveAnalysis = async (items: FlyerItem[]): Promise<string> => {
  const response = await apiFetchWithAuth('/ai/deep-dive', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ items }),
  });
  const result = await response.json();
  return result.text;
};
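
/**
 * Runs an AI web search grounded on the given flyer items via the backend.
 * @param items The flyer items to search against.
 * @returns The generated text along with the grounding sources the model cited.
 * @example
 * // Hypothetical usage: render the answer and collect its cited source URLs.
 * const { text, sources } = await searchWeb(items);
 * const urls = sources.map((s) => s.web?.uri).filter(Boolean);
 */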
export const searchWeb = async (items: FlyerItem[]): Promise<{ text: string; sources: GroundingChunk[] }> => {
  const response = await apiFetchWithAuth('/ai/search-web', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ items }),
  });
  return response.json();
};

// ============================================================================
// STUBS FOR FUTURE AI FEATURES
// ============================================================================
/**
 * [STUB] Uses Google Maps grounding to find nearby stores and plan a shopping trip.
 * @param items The items from the flyer.
 * @param store The store associated with the flyer.
 * @param userLocation The user's current geographic coordinates.
 * @returns A text response with trip-planning advice and a list of map sources.
 */
export const planTripWithMaps = async (items: FlyerItem[], store: Store | undefined, userLocation: GeolocationCoordinates): Promise<{ text: string; sources: { uri: string; title: string }[] }> => {
  logger.debug('Stub: planTripWithMaps called with location:', { userLocation });
  // Note: JSON.stringify(userLocation) relies on GeolocationCoordinates#toJSON,
  // which older browsers lack (they serialize the object to {}).
  const response = await apiFetchWithAuth('/ai/plan-trip', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ items, store, userLocation }),
  });
  return response.json();
};
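
// Hypothetical usage: feed the browser's geolocation straight into the planner
// (assumes `items` and `store` are in scope):
// navigator.geolocation.getCurrentPosition(async (pos) => {
//   const { text, sources } = await planTripWithMaps(items, store, pos.coords);
//   console.log(text, sources);
// });
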
/**
 * [STUB] Generates an image based on a text prompt using the Imagen model.
 * @param prompt A description of the image to generate (e.g., a meal plan).
 * @returns A base64-encoded string of the generated PNG image.
 */
export const generateImageFromText = async (prompt: string): Promise<string> => {
  logger.debug('Stub: generateImageFromText called with prompt:', { prompt });
  const response = await apiFetchWithAuth('/ai/generate-image', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt }),
  });
  const result = await response.json();
  return result.base64Image;
};
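
// Hypothetical usage: a base64 PNG can be rendered directly via a data URI
// (assumes `imgEl` is an HTMLImageElement):
// const b64 = await generateImageFromText('a week of budget-friendly dinners');
// imgEl.src = `data:image/png;base64,${b64}`;
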
/**
 * [STUB] Converts a string of text into speech audio data.
 * @param text The text to be spoken.
 * @returns A base64-encoded string of the raw audio data.
 */
export const generateSpeechFromText = async (text: string): Promise<string> => {
  logger.debug('Stub: generateSpeechFromText called with text:', { text });
  const response = await apiFetchWithAuth('/ai/generate-speech', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text }),
  });
  const result = await response.json();
  return result.base64Audio;
};
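
// Hypothetical playback sketch, assuming the backend returns 16-bit little-endian
// PCM at 24 kHz (the format Gemini TTS models emit). Raw PCM cannot be played via
// an <audio> src, so it goes through the Web Audio API:
// const bytes = Uint8Array.from(atob(base64Audio), (c) => c.charCodeAt(0));
// const pcm = new Int16Array(bytes.buffer);
// const ctx = new AudioContext({ sampleRate: 24000 });
// const buf = ctx.createBuffer(1, pcm.length, 24000);
// buf.getChannelData(0).set(Float32Array.from(pcm, (s) => s / 32768));
// const src = ctx.createBufferSource();
// src.buffer = buf;
// src.connect(ctx.destination);
// src.start();
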
/**
 * [STUB] Initiates a real-time voice conversation session using the Live API.
 * This is more complex than the other stubs: it requires a WebSocket connection
 * proxied through the backend, so for now it remains a conceptual client-side
 * function. A full implementation would involve a separate WebSocket client.
 * @param callbacks An object containing onopen, onmessage, onerror, and onclose handlers.
 * @returns A promise that resolves to the live session object (once implemented;
 *   the stub currently throws).
 */
export const startVoiceSession = (callbacks: {
  onopen?: () => void;
  onmessage: (message: import('@google/genai').LiveServerMessage) => void;
  onerror?: (error: ErrorEvent) => void;
  onclose?: () => void;
}) => {
  logger.debug('Stub: startVoiceSession called.', { callbacks });
  // In a real implementation, this would connect to a WebSocket endpoint on your server,
  // which would then proxy the connection to the Google AI Live API.
  // This is a placeholder and will not function.
  throw new Error('Voice session feature is not fully implemented and requires a backend WebSocket proxy.');
};
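
/*
 * A minimal sketch of what the real startVoiceSession could look like, assuming
 * the backend exposes a WebSocket proxy at `/api/ai/voice-session` (a hypothetical
 * endpoint) that forwards frames to the Google AI Live API:
 *
 *   const ws = new WebSocket(`wss://${window.location.host}/api/ai/voice-session`);
 *   ws.onopen = () => callbacks.onopen?.();
 *   ws.onmessage = (event) => callbacks.onmessage(JSON.parse(event.data));
 *   ws.onerror = (event) => callbacks.onerror?.(event as ErrorEvent);
 *   ws.onclose = () => callbacks.onclose?.();
 */
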
/*
The following functions are server-side only and have been moved to `aiService.server.ts`.
This file should not contain any server-side logic or direct use of `fs` or `process.env`.
- extractItemsFromReceiptImage
- extractCoreDataFromFlyerImage
*/