Keep/keep-notes/lib/ai/factory.ts
sepehr e6bcdea641 fix: remove hardcoded localhost fallbacks, require explicit config
Critical fix for Docker deployment where AI features were trying to connect
to localhost:11434 instead of using the configured provider (Ollama Docker
service or OpenAI).

Problems fixed:
1. Reformulation (clarify/shorten/improve) failing with ECONNREFUSED 127.0.0.1:11434
2. Auto-labels failing with same error
3. Notebook summaries failing
4. Could not switch from Ollama to OpenAI in admin

Root cause:
- Code had hardcoded fallback to 'http://localhost:11434' in multiple places
- .env.docker file not created on server (gitignore'd)
- No validation that required environment variables were set
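
For context, the removed pattern was roughly the following (a representative
sketch; the exact pre-fix lines are not reproduced in this commit view):

    // Old behaviour (sketch): an unconditional fallback that silently targets
    // 127.0.0.1 inside Docker, producing the ECONNREFUSED 127.0.0.1:11434
    // errors listed above.
    const baseUrl = process.env.OLLAMA_BASE_URL || 'http://localhost:11434';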

Changes:

1. lib/ai/factory.ts:
   - Remove hardcoded 'http://localhost:11434' fallback
   - Only use localhost for local development (NODE_ENV !== 'production')
   - Throw error if OLLAMA_BASE_URL not set in production

2. lib/ai/providers/ollama.ts:
   - Remove default parameter 'http://localhost:11434' from constructor
   - Require baseUrl to be explicitly passed
   - Throw error if baseUrl is missing (see the sketch after this list)

3. lib/ai/services/paragraph-refactor.service.ts:
   - Remove 'http://localhost:11434' fallback (2 locations)
   - Require OLLAMA_BASE_URL to be set
   - Throw clear error if not configured

4. app/(main)/admin/settings/admin-settings-form.tsx:
   - Add debug info showing current provider state
   - Display database config value for transparency
   - Help troubleshoot provider selection issues

5. DOCKER-SETUP.md:
   - Complete guide for Docker configuration
   - Instructions for .env.docker setup
   - Examples for Ollama Docker, OpenAI, and external Ollama
   - Troubleshooting common issues
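
As referenced in item 2: a minimal sketch of the require-or-throw pattern now
expected in lib/ai/providers/ollama.ts (paragraph-refactor.service.ts applies
the same pattern to OLLAMA_BASE_URL). The constructor signature is taken from
the factory call in the file below; everything else here is an assumption,
not the actual provider implementation:

    // Sketch only: constructor parameters match the factory call
    // new OllamaProvider(baseUrl, modelName, embeddingModelName); internals assumed.
    export class OllamaProvider {
      private readonly baseUrl: string;

      constructor(
        baseUrl: string,
        private modelName: string,
        private embeddingModelName: string
      ) {
        if (!baseUrl) {
          // No silent localhost default any more -- fail loudly instead.
          throw new Error('OllamaProvider requires an explicit baseUrl (set OLLAMA_BASE_URL)');
        }
        this.baseUrl = baseUrl;
      }
    }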

Usage:
On the server, create .env.docker with the proper provider configuration:
- Ollama in Docker: OLLAMA_BASE_URL="http://ollama:11434"
- OpenAI: OPENAI_API_KEY="sk-..."
- External Ollama: OLLAMA_BASE_URL="http://SERVER_IP:11434"

Then, in the admin interface, users can independently configure:
- Tags Provider (for auto-labels, AI features)
- Embeddings Provider (for semantic search)
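
These two settings map directly onto the factory functions in the file below.
A hedged example of how a caller might wire them up (the config keys come from
factory.ts; the '@/lib/ai/factory' import path and how the record is loaded
from the admin settings are assumptions):

    import { getTagsProvider, getEmbeddingsProvider } from '@/lib/ai/factory';

    const config: Record<string, string> = {
      AI_PROVIDER_TAGS: 'openai',              // auto-labels, reformulation, summaries
      OPENAI_API_KEY: process.env.OPENAI_API_KEY ?? '',
      AI_PROVIDER_EMBEDDING: 'ollama',         // semantic search
      OLLAMA_BASE_URL: 'http://ollama:11434',  // Ollama running as a Docker service
    };

    const tagsProvider = getTagsProvider(config);             // -> OpenAIProvider
    const embeddingsProvider = getEmbeddingsProvider(config); // -> OllamaProvider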

Result:
✓ Clear errors if Ollama not configured
✓ Can switch to OpenAI freely in admin
✓ No more hardcoded localhost in production
✓ Proper separation between local dev and Docker production

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-01-12 22:28:39 +01:00


import { OpenAIProvider } from './providers/openai';
import { OllamaProvider } from './providers/ollama';
import { CustomOpenAIProvider } from './providers/custom-openai';
import { AIProvider } from './types';

type ProviderType = 'ollama' | 'openai' | 'custom';

function createOllamaProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OllamaProvider {
  let baseUrl = config?.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL;

  // Only use localhost as fallback for local development (not in Docker)
  if (!baseUrl && process.env.NODE_ENV !== 'production') {
    baseUrl = 'http://localhost:11434';
  }

  if (!baseUrl) {
    throw new Error('OLLAMA_BASE_URL is required when using Ollama provider');
  }

  // Ensure baseUrl doesn't end with /api, we'll add it in OllamaProvider
  if (baseUrl.endsWith('/api')) {
    baseUrl = baseUrl.slice(0, -4); // Remove /api
  }

  return new OllamaProvider(baseUrl, modelName, embeddingModelName);
}

function createOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OpenAIProvider {
  const apiKey = config?.OPENAI_API_KEY || process.env.OPENAI_API_KEY || '';

  if (!apiKey) {
    throw new Error('OPENAI_API_KEY is required when using OpenAI provider');
  }

  return new OpenAIProvider(apiKey, modelName, embeddingModelName);
}

function createCustomOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): CustomOpenAIProvider {
  const apiKey = config?.CUSTOM_OPENAI_API_KEY || process.env.CUSTOM_OPENAI_API_KEY || '';
  const baseUrl = config?.CUSTOM_OPENAI_BASE_URL || process.env.CUSTOM_OPENAI_BASE_URL || '';

  if (!apiKey) {
    throw new Error('CUSTOM_OPENAI_API_KEY is required when using the custom OpenAI-compatible provider');
  }

  if (!baseUrl) {
    throw new Error('CUSTOM_OPENAI_BASE_URL is required when using the custom OpenAI-compatible provider');
  }

  return new CustomOpenAIProvider(apiKey, baseUrl, modelName, embeddingModelName);
}

function getProviderInstance(providerType: ProviderType, config: Record<string, string>, modelName: string, embeddingModelName: string): AIProvider {
  switch (providerType) {
    case 'ollama':
      return createOllamaProvider(config, modelName, embeddingModelName);
    case 'openai':
      return createOpenAIProvider(config, modelName, embeddingModelName);
    case 'custom':
      return createCustomOpenAIProvider(config, modelName, embeddingModelName);
    default:
      return createOllamaProvider(config, modelName, embeddingModelName);
  }
}

export function getTagsProvider(config?: Record<string, string>): AIProvider {
  const providerType = (config?.AI_PROVIDER_TAGS || process.env.AI_PROVIDER_TAGS || 'ollama').toLowerCase() as ProviderType;
  const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
  const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
  return getProviderInstance(providerType, config || {}, modelName, embeddingModelName);
}

export function getEmbeddingsProvider(config?: Record<string, string>): AIProvider {
  const providerType = (config?.AI_PROVIDER_EMBEDDING || process.env.AI_PROVIDER_EMBEDDING || 'ollama').toLowerCase() as ProviderType;
  const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
  const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
  return getProviderInstance(providerType, config || {}, modelName, embeddingModelName);
}

// Legacy function for backward compatibility
export function getAIProvider(config?: Record<string, string>): AIProvider {
  return getTagsProvider(config);
}