From 4229f4ed08689616000a588196381b520aa21536 Mon Sep 17 00:00:00 2001 From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com> Date: Wed, 25 Jun 2025 14:48:07 +0000 Subject: [PATCH] feat: Implement Ollama support for AI models This change allows the application to use locally hosted AI models via Ollama for data enrichment and field generation tasks. - Modified `OpenAIService` to accept Ollama API base URL and model name. - Updated environment variable handling to include `OLLAMA_API_BASE` and `OLLAMA_MODEL`. - Adjusted API routes and dependent services to pass Ollama configuration. - Ensured fallback to OpenAI if Ollama is not configured. --- .env.example | 4 +++ app/api/enrich/route.ts | 25 ++++++++++++------ app/api/generate-fields/route.ts | 23 ++++++++++++----- lib/agent-architecture/orchestrator.ts | 6 +++-- lib/services/openai.ts | 28 ++++++++++++++++----- lib/strategies/agent-enrichment-strategy.ts | 4 ++- 6 files changed, 67 insertions(+), 23 deletions(-) diff --git a/.env.example b/.env.example index 47a23ccc..7e6ca5c6 100644 --- a/.env.example +++ b/.env.example @@ -6,6 +6,10 @@ FIRECRAWL_API_KEY=fc-your-api-key-here # Get yours at https://platform.openai.com OPENAI_API_KEY=sk-your-api-key-here +# Optional: Ollama configuration +# OLLAMA_API_BASE="http://localhost:11434/v1" # Example for local Ollama +# OLLAMA_MODEL="deepseek-coder" # Example model, replace with your desired Ollama model + # Enable unlimited mode (optional) # When true, removes all limits on rows, columns, and file size # Automatically enabled in development mode diff --git a/app/api/enrich/route.ts b/app/api/enrich/route.ts index 20af7557..28890391 100644 --- a/app/api/enrich/route.ts +++ b/app/api/enrich/route.ts @@ -52,14 +52,21 @@ export async function POST(request: NextRequest) { // Check environment variables and headers for API keys const openaiApiKey = process.env.OPENAI_API_KEY || request.headers.get('X-OpenAI-API-Key'); const firecrawlApiKey = 
process.env.FIRECRAWL_API_KEY || request.headers.get('X-Firecrawl-API-Key'); + const ollamaApiBase = process.env.OLLAMA_API_BASE || request.headers.get('X-Ollama-API-Base'); + const ollamaModel = process.env.OLLAMA_MODEL || request.headers.get('X-Ollama-Model'); - if (!openaiApiKey || !firecrawlApiKey) { - console.error('Missing API keys:', { - hasOpenAI: !!openaiApiKey, - hasFirecrawl: !!firecrawlApiKey - }); + if (!firecrawlApiKey) { + console.error('Missing Firecrawl API key'); return NextResponse.json( - { error: 'Server configuration error: Missing API keys' }, + { error: 'Server configuration error: Missing Firecrawl API key' }, + { status: 500 } + ); + } + + if (!openaiApiKey && (!ollamaApiBase || !ollamaModel)) { + console.error('Missing OpenAI API key or Ollama configuration'); + return NextResponse.json( + { error: 'Server configuration error: Missing OpenAI API key or Ollama configuration' }, { status: 500 } ); } @@ -69,8 +76,10 @@ export async function POST(request: NextRequest) { console.log(`[STRATEGY] Using ${strategyName} - Advanced multi-agent architecture with specialized agents`); const enrichmentStrategy = new AgentEnrichmentStrategy( - openaiApiKey, - firecrawlApiKey + openaiApiKey || 'ollama', // Pass 'ollama' if using Ollama, actual key not needed by OpenAIService then + firecrawlApiKey, + ollamaApiBase, + ollamaModel ); // Load skip list diff --git a/app/api/generate-fields/route.ts b/app/api/generate-fields/route.ts index 89ceed41..dee2c6cc 100644 --- a/app/api/generate-fields/route.ts +++ b/app/api/generate-fields/route.ts @@ -15,19 +15,30 @@ export async function POST(request: NextRequest) { ); } - if (!process.env.OPENAI_API_KEY) { + const openaiApiKey = process.env.OPENAI_API_KEY; + const ollamaApiBase = process.env.OLLAMA_API_BASE; + const ollamaModel = process.env.OLLAMA_MODEL; + + if (!openaiApiKey && (!ollamaApiBase || !ollamaModel)) { return NextResponse.json( - { error: 'OpenAI API key not configured' }, + { error: 'OpenAI API key 
or Ollama configuration missing' }, { status: 500 } ); } - const openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY, - }); + const openai = new OpenAI( + (ollamaApiBase && ollamaModel) ? { + apiKey: 'ollama', // Ollama doesn't require an API key + baseURL: ollamaApiBase, + } : { + apiKey: openaiApiKey, + } + ); + + const modelToUse = (ollamaApiBase && ollamaModel) ? ollamaModel : 'gpt-4o'; const completion = await openai.chat.completions.create({ - model: 'gpt-4o', + model: modelToUse, messages: [ { role: 'system', diff --git a/lib/agent-architecture/orchestrator.ts b/lib/agent-architecture/orchestrator.ts index 9619c081..64af2d42 100644 --- a/lib/agent-architecture/orchestrator.ts +++ b/lib/agent-architecture/orchestrator.ts @@ -10,10 +10,12 @@ export class AgentOrchestrator { constructor( private firecrawlApiKey: string, - private openaiApiKey: string + private openaiApiKey: string, + private ollamaApiBase?: string, + private ollamaModel?: string, ) { this.firecrawl = new FirecrawlService(firecrawlApiKey); - this.openai = new OpenAIService(openaiApiKey); + this.openai = new OpenAIService(openaiApiKey, ollamaApiBase, ollamaModel); } async enrichRow( diff --git a/lib/services/openai.ts b/lib/services/openai.ts index c7663b0a..fe688d41 100644 --- a/lib/services/openai.ts +++ b/lib/services/openai.ts @@ -5,9 +5,25 @@ import type { EnrichmentField, EnrichmentResult } from '../types'; export class OpenAIService { private client: OpenAI; + private ollamaBaseUrl?: string; + private ollamaModel?: string; - constructor(apiKey: string) { - this.client = new OpenAI({ apiKey }); + constructor(apiKey: string, ollamaBaseUrl?: string, ollamaModel?: string) { + this.ollamaBaseUrl = ollamaBaseUrl; + this.ollamaModel = ollamaModel; + + if (this.ollamaBaseUrl && this.ollamaModel) { + this.client = new OpenAI({ + apiKey: 'ollama', // Ollama doesn't require an API key + baseURL: this.ollamaBaseUrl, + }); + } else { + this.client = new OpenAI({ apiKey }); + } + } + + 
private getModel(fallback = 'gpt-4o'): string { + return this.ollamaModel || fallback; } createEnrichmentSchema(fields: EnrichmentField[]) { @@ -139,7 +155,7 @@ export class OpenAIService { } const response = await this.client.chat.completions.create({ - model: 'gpt-4o', + model: this.getModel(), messages: [ { role: 'system', @@ -374,7 +390,7 @@ DOMAIN PARKING/SALE PAGES: } const response = await this.client.chat.completions.create({ - model: 'gpt-4o', + model: this.getModel(), messages: [ { role: 'system', @@ -754,7 +770,7 @@ REMEMBER: Extract exact_text from the "=== ACTUAL CONTENT BELOW ===" section, NO .join('\n'); const response = await this.client.chat.completions.create({ - model: 'gpt-4o-mini', + model: this.getModel('gpt-4o-mini'), messages: [ { role: 'system', @@ -824,7 +840,7 @@ ${schemaDescription} ): Promise { try { const response = await this.client.chat.completions.create({ - model: 'gpt-4o', + model: this.getModel(), messages: [ { role: 'system', diff --git a/lib/strategies/agent-enrichment-strategy.ts b/lib/strategies/agent-enrichment-strategy.ts index 3b95894e..06cc253a 100644 --- a/lib/strategies/agent-enrichment-strategy.ts +++ b/lib/strategies/agent-enrichment-strategy.ts @@ -8,8 +8,10 @@ export class AgentEnrichmentStrategy { constructor( openaiApiKey: string, firecrawlApiKey: string, + ollamaApiBase?: string, + ollamaModel?: string, ) { - this.orchestrator = new AgentOrchestrator(firecrawlApiKey, openaiApiKey); + this.orchestrator = new AgentOrchestrator(firecrawlApiKey, openaiApiKey, ollamaApiBase, ollamaModel); } async enrichRow(