Add Google Gemini AI support (v1.0.6)

- Add Gemini as new AI provider option alongside Anthropic, OpenAI, Ollama
- Support models: Gemini 2.5 Flash Lite (default), Gemini 2.5 Flash, Gemini 2.5 Pro, and Gemini 3 Flash Preview
- Add test API key endpoint to verify connection before saving
- Full support for extraction, verification, stock status, and arbitration
- Update all version files to 1.0.6

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
clucraft 2026-01-26 21:02:14 -05:00
parent fb00f5b5b2
commit 617953dcf5
12 changed files with 341 additions and 17 deletions

View file

@ -1,14 +1,15 @@
{
"name": "priceghost-backend",
"version": "1.0.2",
"version": "1.0.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "priceghost-backend",
"version": "1.0.2",
"version": "1.0.5",
"dependencies": {
"@anthropic-ai/sdk": "^0.24.0",
"@google/generative-ai": "^0.24.1",
"axios": "^1.6.0",
"bcrypt": "^5.1.1",
"cheerio": "^1.0.0-rc.12",
@ -531,6 +532,15 @@
"node": ">=18"
}
},
"node_modules/@google/generative-ai": {
"version": "0.24.1",
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.24.1.tgz",
"integrity": "sha512-MqO+MLfM6kjxcKoy0p1wRzG3b4ZZXtPI+z2IE26UogS2Cm/XHO+7gGRBh6gcJsOiIVoH93UwKvW4HdgiOZCy9Q==",
"license": "Apache-2.0",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/@mapbox/node-pre-gyp": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/@mapbox/node-pre-gyp/-/node-pre-gyp-1.0.11.tgz",

View file

@ -1,6 +1,6 @@
{
"name": "priceghost-backend",
"version": "1.0.5",
"version": "1.0.6",
"description": "PriceGhost price tracking API",
"main": "dist/index.js",
"scripts": {
@ -11,6 +11,7 @@
},
"dependencies": {
"@anthropic-ai/sdk": "^0.24.0",
"@google/generative-ai": "^0.24.1",
"axios": "^1.6.0",
"bcrypt": "^5.1.1",
"cheerio": "^1.0.0-rc.12",

View file

@ -137,6 +137,12 @@ async function runMigrations() {
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'openai_model') THEN
ALTER TABLE users ADD COLUMN openai_model TEXT;
END IF;
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'gemini_api_key') THEN
ALTER TABLE users ADD COLUMN gemini_api_key TEXT;
END IF;
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'gemini_model') THEN
ALTER TABLE users ADD COLUMN gemini_model TEXT;
END IF;
IF NOT EXISTS (SELECT 1 FROM information_schema.columns WHERE table_name = 'users' AND column_name = 'notifications_cleared_at') THEN
ALTER TABLE users ADD COLUMN notifications_cleared_at TIMESTAMP;
END IF;

View file

@ -40,13 +40,15 @@ export interface NotificationSettings {
export interface AISettings {
ai_enabled: boolean;
ai_verification_enabled: boolean;
ai_provider: 'anthropic' | 'openai' | 'ollama' | null;
ai_provider: 'anthropic' | 'openai' | 'ollama' | 'gemini' | null;
anthropic_api_key: string | null;
anthropic_model: string | null;
openai_api_key: string | null;
openai_model: string | null;
ollama_base_url: string | null;
ollama_model: string | null;
gemini_api_key: string | null;
gemini_model: string | null;
}
export const userQueries = {
@ -231,7 +233,7 @@ export const userQueries = {
const result = await pool.query(
`SELECT ai_enabled, COALESCE(ai_verification_enabled, false) as ai_verification_enabled,
ai_provider, anthropic_api_key, anthropic_model, openai_api_key, openai_model,
ollama_base_url, ollama_model
ollama_base_url, ollama_model, gemini_api_key, gemini_model
FROM users WHERE id = $1`,
[id]
);
@ -282,6 +284,14 @@ export const userQueries = {
fields.push(`ollama_model = $${paramIndex++}`);
values.push(settings.ollama_model);
}
if (settings.gemini_api_key !== undefined) {
fields.push(`gemini_api_key = $${paramIndex++}`);
values.push(settings.gemini_api_key);
}
if (settings.gemini_model !== undefined) {
fields.push(`gemini_model = $${paramIndex++}`);
values.push(settings.gemini_model);
}
if (fields.length === 0) return null;
@ -290,7 +300,7 @@ export const userQueries = {
`UPDATE users SET ${fields.join(', ')} WHERE id = $${paramIndex}
RETURNING ai_enabled, COALESCE(ai_verification_enabled, false) as ai_verification_enabled,
ai_provider, anthropic_api_key, anthropic_model, openai_api_key, openai_model,
ollama_base_url, ollama_model`,
ollama_base_url, ollama_model, gemini_api_key, gemini_model`,
values
);
return result.rows[0] || null;

View file

@ -314,9 +314,13 @@ router.get('/ai', async (req: AuthRequest, res: Response) => {
ai_verification_enabled: settings.ai_verification_enabled || false,
ai_provider: settings.ai_provider || null,
anthropic_api_key: settings.anthropic_api_key || null,
anthropic_model: settings.anthropic_model || null,
openai_api_key: settings.openai_api_key || null,
openai_model: settings.openai_model || null,
ollama_base_url: settings.ollama_base_url || null,
ollama_model: settings.ollama_model || null,
gemini_api_key: settings.gemini_api_key || null,
gemini_model: settings.gemini_model || null,
});
} catch (error) {
console.error('Error fetching AI settings:', error);
@ -328,16 +332,32 @@ router.get('/ai', async (req: AuthRequest, res: Response) => {
router.put('/ai', async (req: AuthRequest, res: Response) => {
try {
const userId = req.userId!;
const { ai_enabled, ai_verification_enabled, ai_provider, anthropic_api_key, openai_api_key, ollama_base_url, ollama_model } = req.body;
const {
ai_enabled,
ai_verification_enabled,
ai_provider,
anthropic_api_key,
anthropic_model,
openai_api_key,
openai_model,
ollama_base_url,
ollama_model,
gemini_api_key,
gemini_model,
} = req.body;
const settings = await userQueries.updateAISettings(userId, {
ai_enabled,
ai_verification_enabled,
ai_provider,
anthropic_api_key,
anthropic_model,
openai_api_key,
openai_model,
ollama_base_url,
ollama_model,
gemini_api_key,
gemini_model,
});
if (!settings) {
@ -350,9 +370,13 @@ router.put('/ai', async (req: AuthRequest, res: Response) => {
ai_verification_enabled: settings.ai_verification_enabled || false,
ai_provider: settings.ai_provider || null,
anthropic_api_key: settings.anthropic_api_key || null,
anthropic_model: settings.anthropic_model || null,
openai_api_key: settings.openai_api_key || null,
openai_model: settings.openai_model || null,
ollama_base_url: settings.ollama_base_url || null,
ollama_model: settings.ollama_model || null,
gemini_api_key: settings.gemini_api_key || null,
gemini_model: settings.gemini_model || null,
message: 'AI settings updated successfully',
});
} catch (error) {
@ -438,4 +462,44 @@ router.post('/ai/test-ollama', async (req: AuthRequest, res: Response) => {
}
});
// Test Gemini API key: verifies a user-supplied key before it is saved.
router.post('/ai/test-gemini', async (req: AuthRequest, res: Response) => {
  try {
    const { api_key } = req.body;
    if (!api_key) {
      res.status(400).json({ error: 'API key is required' });
      return;
    }

    // Lazy-load the SDK so it is only pulled in when this endpoint is hit.
    const { GoogleGenerativeAI } = await import('@google/generative-ai');
    const client = new GoogleGenerativeAI(api_key);

    // A tiny generation request is the cheapest way to prove the key works.
    const probe = client.getGenerativeModel({ model: 'gemini-2.5-flash-lite' });
    await probe.generateContent('Say "API key valid" in 3 words or less');

    res.json({
      success: true,
      message: 'Successfully connected to Gemini API',
    });
  } catch (error) {
    console.error('Error testing Gemini connection:', error);
    const errorMessage = error instanceof Error ? error.message : 'Unknown error';
    // Gemini auth failures surface as API_KEY_INVALID (or mention "API key");
    // report those as a client error, everything else as a server error.
    const isAuthFailure =
      errorMessage.includes('API_KEY_INVALID') || errorMessage.includes('API key');
    if (isAuthFailure) {
      res.status(400).json({
        error: 'Invalid API key. Please check your Gemini API key.',
        success: false,
      });
    } else {
      res.status(500).json({
        error: `Failed to connect to Gemini: ${errorMessage}`,
        success: false,
      });
    }
  }
});
export default router;

View file

@ -1,5 +1,6 @@
import Anthropic from '@anthropic-ai/sdk';
import OpenAI from 'openai';
import { GoogleGenerativeAI } from '@google/generative-ai';
import axios from 'axios';
import { load } from 'cheerio';
import { AISettings } from '../models';
@ -218,6 +219,7 @@ function prepareHtmlForAI(html: string): string {
// Default models to use if user hasn't selected one
const DEFAULT_ANTHROPIC_MODEL = 'claude-haiku-4-5-20251001';
const DEFAULT_OPENAI_MODEL = 'gpt-4.1-nano-2025-04-14';
const DEFAULT_GEMINI_MODEL = 'gemini-2.5-flash-lite';
async function extractWithAnthropic(
html: string,
@ -324,6 +326,28 @@ async function extractWithOllama(
return parseAIResponse(content);
}
async function extractWithGemini(
html: string,
apiKey: string,
model?: string | null
): Promise<AIExtractionResult> {
const genAI = new GoogleGenerativeAI(apiKey);
const modelToUse = model || DEFAULT_GEMINI_MODEL;
const geminiModel = genAI.getGenerativeModel({ model: modelToUse });
const preparedHtml = prepareHtmlForAI(html);
const result = await geminiModel.generateContent(EXTRACTION_PROMPT + preparedHtml);
const response = result.response;
const content = response.text();
if (!content) {
throw new Error('No response from Gemini');
}
return parseAIResponse(content);
}
// Verification functions for each provider
async function verifyWithAnthropic(
html: string,
@ -423,6 +447,33 @@ async function verifyWithOllama(
return parseVerificationResponse(content, scrapedPrice, currency);
}
/**
 * Ask Gemini to verify a scraped price against the page HTML.
 *
 * Fills the VERIFICATION_PROMPT placeholders with the scraped price and
 * currency, appends the condensed HTML, and parses the model's reply with
 * parseVerificationResponse.
 *
 * @param html         Raw page HTML; condensed via prepareHtmlForAI.
 * @param scrapedPrice Price found by the scraper, to be confirmed by the model.
 * @param currency     Currency code of the scraped price.
 * @param apiKey       Gemini API key.
 * @param model        Optional model id; empty/null falls back to the default.
 * @throws Error when Gemini returns an empty response.
 */
async function verifyWithGemini(
  html: string,
  scrapedPrice: number,
  currency: string,
  apiKey: string,
  model?: string | null
): Promise<AIVerificationResult> {
  const genAI = new GoogleGenerativeAI(apiKey);
  const modelToUse = model || DEFAULT_GEMINI_MODEL;
  const geminiModel = genAI.getGenerativeModel({ model: modelToUse });

  const preparedHtml = prepareHtmlForAI(html);

  // Global regex replace so EVERY occurrence of each placeholder is
  // substituted, matching the substitution style used by
  // verifyStockStatusWithGemini (plain string replace only hits the first).
  const prompt = VERIFICATION_PROMPT
    .replace(/\$SCRAPED_PRICE\$/g, scrapedPrice.toString())
    .replace(/\$CURRENCY\$/g, currency) + preparedHtml;

  const result = await geminiModel.generateContent(prompt);
  const content = result.response.text();
  if (!content) {
    throw new Error('No response from Gemini');
  }
  return parseVerificationResponse(content, scrapedPrice, currency);
}
// Stock status verification functions (for variant products with anchor price)
async function verifyStockStatusWithAnthropic(
html: string,
@ -522,6 +573,33 @@ async function verifyStockStatusWithOllama(
return parseStockStatusResponse(content);
}
async function verifyStockStatusWithGemini(
html: string,
variantPrice: number,
currency: string,
apiKey: string,
model?: string | null
): Promise<AIStockStatusResult> {
const genAI = new GoogleGenerativeAI(apiKey);
const modelToUse = model || DEFAULT_GEMINI_MODEL;
const geminiModel = genAI.getGenerativeModel({ model: modelToUse });
const preparedHtml = prepareHtmlForAI(html);
const prompt = STOCK_STATUS_PROMPT
.replace(/\$VARIANT_PRICE\$/g, variantPrice.toString())
.replace(/\$CURRENCY\$/g, currency) + preparedHtml;
const result = await geminiModel.generateContent(prompt);
const response = result.response;
const content = response.text();
if (!content) {
throw new Error('No response from Gemini');
}
return parseStockStatusResponse(content);
}
function parseStockStatusResponse(responseText: string): AIStockStatusResult {
console.log(`[AI Stock] Raw response: ${responseText.substring(0, 500)}...`);
@ -733,6 +811,8 @@ export async function extractWithAI(
return extractWithOpenAI(html, settings.openai_api_key, settings.openai_model);
} else if (settings.ai_provider === 'ollama' && settings.ollama_base_url && settings.ollama_model) {
return extractWithOllama(html, settings.ollama_base_url, settings.ollama_model);
} else if (settings.ai_provider === 'gemini' && settings.gemini_api_key) {
return extractWithGemini(html, settings.gemini_api_key, settings.gemini_model);
}
throw new Error('No valid AI provider configured');
@ -765,6 +845,10 @@ export async function tryAIExtraction(
} else if (settings.ai_provider === 'ollama' && settings.ollama_base_url && settings.ollama_model) {
console.log(`[AI] Using Ollama (${settings.ollama_model}) for ${url}`);
return await extractWithOllama(html, settings.ollama_base_url, settings.ollama_model);
} else if (settings.ai_provider === 'gemini' && settings.gemini_api_key) {
const modelToUse = settings.gemini_model || DEFAULT_GEMINI_MODEL;
console.log(`[AI] Using Gemini (${modelToUse}) for ${url}`);
return await extractWithGemini(html, settings.gemini_api_key, settings.gemini_model);
}
return null;
@ -803,6 +887,10 @@ export async function tryAIVerification(
} else if (settings.ai_provider === 'ollama' && settings.ollama_base_url && settings.ollama_model) {
console.log(`[AI Verify] Using Ollama (${settings.ollama_model}) to verify $${scrapedPrice} for ${url}`);
return await verifyWithOllama(html, scrapedPrice, currency, settings.ollama_base_url, settings.ollama_model);
} else if (settings.ai_provider === 'gemini' && settings.gemini_api_key) {
const modelToUse = settings.gemini_model || DEFAULT_GEMINI_MODEL;
console.log(`[AI Verify] Using Gemini (${modelToUse}) to verify $${scrapedPrice} for ${url}`);
return await verifyWithGemini(html, scrapedPrice, currency, settings.gemini_api_key, settings.gemini_model);
}
console.log(`[AI Verify] Verification enabled but no provider configured`);
@ -842,6 +930,10 @@ export async function tryAIStockStatusVerification(
} else if (settings.ai_provider === 'ollama' && settings.ollama_base_url && settings.ollama_model) {
console.log(`[AI Stock] Using Ollama (${settings.ollama_model}) to verify stock status for $${variantPrice} variant at ${url}`);
return await verifyStockStatusWithOllama(html, variantPrice, currency, settings.ollama_base_url, settings.ollama_model);
} else if (settings.ai_provider === 'gemini' && settings.gemini_api_key) {
const modelToUse = settings.gemini_model || DEFAULT_GEMINI_MODEL;
console.log(`[AI Stock] Using Gemini (${modelToUse}) to verify stock status for $${variantPrice} variant at ${url}`);
return await verifyStockStatusWithGemini(html, variantPrice, currency, settings.gemini_api_key, settings.gemini_model);
}
console.log(`[AI Stock] No AI provider configured for stock status verification`);
@ -984,6 +1076,34 @@ async function arbitrateWithOllama(
return parseArbitrationResponse(content, candidates);
}
async function arbitrateWithGemini(
html: string,
candidates: PriceCandidate[],
apiKey: string,
model?: string | null
): Promise<AIArbitrationResult> {
const genAI = new GoogleGenerativeAI(apiKey);
const modelToUse = model || DEFAULT_GEMINI_MODEL;
const geminiModel = genAI.getGenerativeModel({ model: modelToUse });
const candidatesList = candidates.map((c, i) =>
`${i}. ${c.price} ${c.currency} (method: ${c.method}, context: ${c.context || 'none'})`
).join('\n');
const preparedHtml = prepareHtmlForAI(html);
const prompt = ARBITRATION_PROMPT.replace('$CANDIDATES$', candidatesList) + preparedHtml;
const result = await geminiModel.generateContent(prompt);
const response = result.response;
const content = response.text();
if (!content) {
throw new Error('No response from Gemini');
}
return parseArbitrationResponse(content, candidates);
}
function parseArbitrationResponse(
responseText: string,
candidates: PriceCandidate[]
@ -1064,6 +1184,10 @@ export async function tryAIArbitration(
} else if (settings.ai_provider === 'ollama' && settings.ollama_base_url && settings.ollama_model) {
console.log(`[AI Arbitrate] Using Ollama (${settings.ollama_model}) to arbitrate ${candidates.length} prices for ${url}`);
return await arbitrateWithOllama(html, candidates, settings.ollama_base_url, settings.ollama_model);
} else if (settings.ai_provider === 'gemini' && settings.gemini_api_key) {
const modelToUse = settings.gemini_model || DEFAULT_GEMINI_MODEL;
console.log(`[AI Arbitrate] Using Gemini (${modelToUse}) to arbitrate ${candidates.length} prices for ${url}`);
return await arbitrateWithGemini(html, candidates, settings.gemini_api_key, settings.gemini_model);
}
console.log(`[AI Arbitrate] No provider configured`);