159 lines
6.0 KiB
TypeScript
159 lines
6.0 KiB
TypeScript
import { OpenAIProvider } from './providers/openai';
|
|
import { OllamaProvider } from './providers/ollama';
|
|
import { CustomOpenAIProvider } from './providers/custom-openai';
|
|
import { AIProvider } from './types';
|
|
|
|
// Supported AI backend identifiers. Values reach this type via an
// unvalidated `toLowerCase() as ProviderType` cast in the getter
// functions below, so unknown strings can slip through at runtime.
type ProviderType = 'ollama' | 'openai' | 'custom';
|
|
|
|
function createOllamaProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OllamaProvider {
|
|
let baseUrl = config?.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL
|
|
|
|
// Only use localhost as fallback for local development (not in Docker)
|
|
if (!baseUrl && process.env.NODE_ENV !== 'production') {
|
|
baseUrl = 'http://localhost:11434'
|
|
}
|
|
|
|
if (!baseUrl) {
|
|
throw new Error('OLLAMA_BASE_URL is required when using Ollama provider')
|
|
}
|
|
|
|
// Ensure baseUrl doesn't end with /api, we'll add it in OllamaProvider
|
|
if (baseUrl.endsWith('/api')) {
|
|
baseUrl = baseUrl.slice(0, -4); // Remove /api
|
|
}
|
|
|
|
return new OllamaProvider(baseUrl, modelName, embeddingModelName);
|
|
}
|
|
|
|
function createOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OpenAIProvider {
|
|
const apiKey = config?.OPENAI_API_KEY || process.env.OPENAI_API_KEY || '';
|
|
|
|
if (!apiKey) {
|
|
throw new Error('OPENAI_API_KEY is required when using OpenAI provider');
|
|
}
|
|
|
|
return new OpenAIProvider(apiKey, modelName, embeddingModelName);
|
|
}
|
|
|
|
function createCustomOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): CustomOpenAIProvider {
|
|
const apiKey = config?.CUSTOM_OPENAI_API_KEY || process.env.CUSTOM_OPENAI_API_KEY || '';
|
|
const baseUrl = config?.CUSTOM_OPENAI_BASE_URL || process.env.CUSTOM_OPENAI_BASE_URL || '';
|
|
|
|
if (!apiKey) {
|
|
throw new Error('CUSTOM_OPENAI_API_KEY is required when using Custom OpenAI provider');
|
|
}
|
|
|
|
if (!baseUrl) {
|
|
throw new Error('CUSTOM_OPENAI_BASE_URL is required when using Custom OpenAI provider');
|
|
}
|
|
|
|
return new CustomOpenAIProvider(apiKey, baseUrl, modelName, embeddingModelName);
|
|
}
|
|
|
|
function getProviderInstance(providerType: ProviderType, config: Record<string, string>, modelName: string, embeddingModelName: string): AIProvider {
|
|
switch (providerType) {
|
|
case 'ollama':
|
|
return createOllamaProvider(config, modelName, embeddingModelName);
|
|
case 'openai':
|
|
return createOpenAIProvider(config, modelName, embeddingModelName);
|
|
case 'custom':
|
|
return createCustomOpenAIProvider(config, modelName, embeddingModelName);
|
|
default:
|
|
return createOllamaProvider(config, modelName, embeddingModelName);
|
|
}
|
|
}
|
|
|
|
export function getTagsProvider(config?: Record<string, string>): AIProvider {
|
|
// Check database config first, then environment variables
|
|
const providerType = (
|
|
config?.AI_PROVIDER_TAGS ||
|
|
config?.AI_PROVIDER_EMBEDDING ||
|
|
config?.AI_PROVIDER ||
|
|
process.env.AI_PROVIDER_TAGS ||
|
|
process.env.AI_PROVIDER_EMBEDDING ||
|
|
process.env.AI_PROVIDER
|
|
);
|
|
|
|
// If no provider is configured, throw a clear error
|
|
if (!providerType) {
|
|
console.error('[getTagsProvider] FATAL: No provider configured. Config received:', config);
|
|
throw new Error(
|
|
'AI_PROVIDER_TAGS is not configured. Please set it in the admin settings or environment variables. ' +
|
|
'Options: ollama, openai, custom'
|
|
);
|
|
}
|
|
|
|
const provider = providerType.toLowerCase() as ProviderType;
|
|
const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
|
|
const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
|
|
|
|
return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
|
|
}
|
|
|
|
export function getEmbeddingsProvider(config?: Record<string, string>): AIProvider {
|
|
// Check database config first, then environment variables
|
|
const providerType = (
|
|
config?.AI_PROVIDER_EMBEDDING ||
|
|
config?.AI_PROVIDER_TAGS ||
|
|
config?.AI_PROVIDER ||
|
|
process.env.AI_PROVIDER_EMBEDDING ||
|
|
process.env.AI_PROVIDER_TAGS ||
|
|
process.env.AI_PROVIDER
|
|
);
|
|
|
|
// If no provider is configured, throw a clear error
|
|
if (!providerType) {
|
|
console.error('[getEmbeddingsProvider] FATAL: No provider configured. Config received:', config);
|
|
throw new Error(
|
|
'AI_PROVIDER_EMBEDDING is not configured. Please set it in the admin settings or environment variables. ' +
|
|
'Options: ollama, openai, custom'
|
|
);
|
|
}
|
|
|
|
const provider = providerType.toLowerCase() as ProviderType;
|
|
const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
|
|
const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
|
|
|
|
return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
|
|
}
|
|
|
|
export function getAIProvider(config?: Record<string, string>): AIProvider {
|
|
return getEmbeddingsProvider(config);
|
|
}
|
|
|
|
export function getChatProvider(config?: Record<string, string>): AIProvider {
|
|
// Check database config first, then environment variables
|
|
// Fallback cascade: chat -> tags -> embeddings
|
|
const providerType = (
|
|
config?.AI_PROVIDER_CHAT ||
|
|
config?.AI_PROVIDER_TAGS ||
|
|
config?.AI_PROVIDER_EMBEDDING ||
|
|
config?.AI_PROVIDER ||
|
|
process.env.AI_PROVIDER_CHAT ||
|
|
process.env.AI_PROVIDER_TAGS ||
|
|
process.env.AI_PROVIDER_EMBEDDING ||
|
|
process.env.AI_PROVIDER
|
|
);
|
|
|
|
// If no provider is configured, throw a clear error
|
|
if (!providerType) {
|
|
console.error('[getChatProvider] FATAL: No provider configured. Config received:', config);
|
|
throw new Error(
|
|
'AI_PROVIDER_CHAT is not configured. Please set it in the admin settings or environment variables. ' +
|
|
'Options: ollama, openai, custom'
|
|
);
|
|
}
|
|
|
|
const provider = providerType.toLowerCase() as ProviderType;
|
|
const modelName = (
|
|
config?.AI_MODEL_CHAT ||
|
|
process.env.AI_MODEL_CHAT ||
|
|
config?.AI_MODEL_TAGS ||
|
|
process.env.AI_MODEL_TAGS ||
|
|
'granite4:latest'
|
|
);
|
|
const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
|
|
|
|
return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
|
|
}
|