Keep/keep-notes/lib/ai/factory.ts
sepehr 8617117dec fix: remove Ollama default fallbacks in factory and Docker
ROOT CAUSE: The factory was defaulting to 'ollama' when no provider
was configured, and docker-compose.yml was always setting OLLAMA_BASE_URL
even when using OpenAI. This caused the app to try connecting to Ollama
even when OpenAI was configured in the admin interface.

CRITICAL CHANGES:
1. lib/ai/factory.ts - Removed 'ollama' default fallback
   - getTagsProvider() now throws error if AI_PROVIDER_TAGS not set
   - getEmbeddingsProvider() now throws error if AI_PROVIDER_EMBEDDING not set
   - Forces explicit configuration instead of silent fallback to Ollama

2. docker-compose.yml - Removed default OLLAMA_BASE_URL
   - Changed: OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://ollama:11434}
   - To: OLLAMA_BASE_URL=${OLLAMA_BASE_URL}
   - Only set if explicitly defined in .env.docker

3. Application name: Mento → Memento (correct spelling)
   - Updated in: sidebar, README, deploy.sh, DOCKER_DEPLOYMENT.md

4. app/api/ai/config/route.ts - Return 'not set' instead of 'ollama'
   - Makes it clear when provider is not configured

IMPACT: The app will now properly use OpenAI when configured in the
admin interface, instead of silently falling back to Ollama.
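
A minimal sketch of what this means for callers, assuming a hypothetical
loadAIConfig() helper that reads the admin settings from the database
(neither loadAIConfig() nor tagNote() is part of this commit):

    import { getTagsProvider } from '@/lib/ai/factory';

    // Assumed helper: loads the admin-configured key/value settings.
    declare function loadAIConfig(): Promise<Record<string, string>>;

    async function tagNote(text: string): Promise<void> {
      try {
        const provider = getTagsProvider(await loadAIConfig());
        // ... generate tags for `text` with the provider ...
      } catch (err) {
        // With this change, a missing AI_PROVIDER_TAGS fails here with a
        // clear message instead of silently instantiating an Ollama provider.
        console.error('AI provider not configured:', (err as Error).message);
        throw err;
      }
    }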

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-01-12 23:08:20 +01:00


import { OpenAIProvider } from './providers/openai';
import { OllamaProvider } from './providers/ollama';
import { CustomOpenAIProvider } from './providers/custom-openai';
import { AIProvider } from './types';

type ProviderType = 'ollama' | 'openai' | 'custom';

function createOllamaProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OllamaProvider {
  let baseUrl = config?.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL;
  // Only use localhost as a fallback for local development (not in Docker)
  if (!baseUrl && process.env.NODE_ENV !== 'production') {
    baseUrl = 'http://localhost:11434';
  }
  if (!baseUrl) {
    throw new Error('OLLAMA_BASE_URL is required when using Ollama provider');
  }
  // Ensure baseUrl doesn't end with /api; we'll add it in OllamaProvider
  if (baseUrl.endsWith('/api')) {
    baseUrl = baseUrl.slice(0, -4); // Remove the trailing /api
  }
  return new OllamaProvider(baseUrl, modelName, embeddingModelName);
}
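
// Illustrative normalization examples (comments only, not executed):
//   'http://ollama:11434/api'  -> 'http://ollama:11434'
//   'http://ollama:11434'      -> unchanged
// OllamaProvider appends '/api' itself, so leaving a trailing '/api' here
// would otherwise produce requests against '.../api/api'.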

function createOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OpenAIProvider {
  const apiKey = config?.OPENAI_API_KEY || process.env.OPENAI_API_KEY || '';
  if (!apiKey) {
    throw new Error('OPENAI_API_KEY is required when using OpenAI provider');
  }
  return new OpenAIProvider(apiKey, modelName, embeddingModelName);
}

function createCustomOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): CustomOpenAIProvider {
  const apiKey = config?.CUSTOM_OPENAI_API_KEY || process.env.CUSTOM_OPENAI_API_KEY || '';
  const baseUrl = config?.CUSTOM_OPENAI_BASE_URL || process.env.CUSTOM_OPENAI_BASE_URL || '';
  if (!apiKey) {
    throw new Error('CUSTOM_OPENAI_API_KEY is required when using Custom OpenAI provider');
  }
  if (!baseUrl) {
    throw new Error('CUSTOM_OPENAI_BASE_URL is required when using Custom OpenAI provider');
  }
  return new CustomOpenAIProvider(apiKey, baseUrl, modelName, embeddingModelName);
}

function getProviderInstance(providerType: ProviderType, config: Record<string, string>, modelName: string, embeddingModelName: string): AIProvider {
  switch (providerType) {
    case 'ollama':
      return createOllamaProvider(config, modelName, embeddingModelName);
    case 'openai':
      return createOpenAIProvider(config, modelName, embeddingModelName);
    case 'custom':
      return createCustomOpenAIProvider(config, modelName, embeddingModelName);
    default:
      // Consistent with removing the silent Ollama fallback: reject
      // unrecognized provider values instead of quietly using Ollama.
      throw new Error(`Unknown AI provider type: ${providerType}`);
  }
}

export function getTagsProvider(config?: Record<string, string>): AIProvider {
  // Check database config first, then environment variables
  const providerType = config?.AI_PROVIDER_TAGS || process.env.AI_PROVIDER_TAGS;
  // If no provider is configured, throw a clear error
  if (!providerType) {
    throw new Error(
      'AI_PROVIDER_TAGS is not configured. Please set it in the admin settings or environment variables. ' +
      'Options: ollama, openai, custom'
    );
  }
  const provider = providerType.toLowerCase() as ProviderType;
  const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
  const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
  return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
}

export function getEmbeddingsProvider(config?: Record<string, string>): AIProvider {
  // Check database config first, then environment variables
  const providerType = config?.AI_PROVIDER_EMBEDDING || process.env.AI_PROVIDER_EMBEDDING;
  // If no provider is configured, throw a clear error
  if (!providerType) {
    throw new Error(
      'AI_PROVIDER_EMBEDDING is not configured. Please set it in the admin settings or environment variables. ' +
      'Options: ollama, openai, custom'
    );
  }
  const provider = providerType.toLowerCase() as ProviderType;
  const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
  const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
  return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
}

// Legacy function for backward compatibility
export function getAIProvider(config?: Record<string, string>): AIProvider {
  return getTagsProvider(config);
}
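
For reference, a hedged example of driving the factory with an explicit
config record for the custom OpenAI-compatible provider; the key names match
the ones read above, while the key and URL values are placeholders:

    import { getEmbeddingsProvider } from './factory';

    // Key names mirror those read by the factory; values are placeholders.
    const config: Record<string, string> = {
      AI_PROVIDER_EMBEDDING: 'custom',
      CUSTOM_OPENAI_API_KEY: 'sk-placeholder',
      CUSTOM_OPENAI_BASE_URL: 'https://llm.example.internal/v1',
      AI_MODEL_EMBEDDING: 'embeddinggemma:latest',
    };

    const embeddings = getEmbeddingsProvider(config);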