fix: remove Ollama default fallbacks in factory and Docker
ROOT CAUSE: The factory was defaulting to 'ollama' when no provider
was configured, and docker-compose.yml was always setting OLLAMA_BASE_URL
even when using OpenAI. This caused the app to try connecting to Ollama
even when OpenAI was configured in the admin interface.
CRITICAL CHANGES:
1. lib/ai/factory.ts - Removed 'ollama' default fallback
- getTagsProvider() now throws an error if AI_PROVIDER_TAGS is not set
- getEmbeddingsProvider() now throws an error if AI_PROVIDER_EMBEDDING is not set
- Forces explicit configuration instead of silent fallback to Ollama
2. docker-compose.yml - Removed default OLLAMA_BASE_URL
- Changed: OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://ollama:11434}
- To: OLLAMA_BASE_URL=${OLLAMA_BASE_URL}
- Only set if explicitly defined in .env.docker
3. Application name: Mento → Memento (correct spelling)
- Updated in: sidebar, README, deploy.sh, DOCKER_DEPLOYMENT.md
4. app/api/ai/config/route.ts - Return 'not set' instead of 'ollama'
- Makes it clear when the provider is not configured (see the sketch after this list)
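Illustrative sketch of the config route change. The handler shape and response field names below are assumptions, not the actual implementation; the only behavior taken from this commit is reporting 'not set' instead of defaulting to 'ollama'.

// app/api/ai/config/route.ts (sketch only)
import { NextResponse } from 'next/server';

export async function GET() {
  return NextResponse.json({
    // Previously these fell back to 'ollama'; an unset provider is now reported as such.
    tagsProvider: process.env.AI_PROVIDER_TAGS || 'not set',
    embeddingProvider: process.env.AI_PROVIDER_EMBEDDING || 'not set',
  });
}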
IMPACT: The app will now properly use OpenAI when configured in the
admin interface, instead of silently falling back to Ollama.
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
@@ -64,19 +64,41 @@ function getProviderInstance(providerType: ProviderType, config: Record<string,
 }
 
 export function getTagsProvider(config?: Record<string, string>): AIProvider {
-  const providerType = (config?.AI_PROVIDER_TAGS || process.env.AI_PROVIDER_TAGS || 'ollama').toLowerCase() as ProviderType;
+  // Check database config first, then environment variables
+  const providerType = (config?.AI_PROVIDER_TAGS || process.env.AI_PROVIDER_TAGS);
+
+  // If no provider is configured, throw a clear error
+  if (!providerType) {
+    throw new Error(
+      'AI_PROVIDER_TAGS is not configured. Please set it in the admin settings or environment variables. ' +
+      'Options: ollama, openai, custom'
+    );
+  }
+
+  const provider = providerType.toLowerCase() as ProviderType;
 
   const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
   const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
 
-  return getProviderInstance(providerType, config || {}, modelName, embeddingModelName);
+  return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
 }
 
 export function getEmbeddingsProvider(config?: Record<string, string>): AIProvider {
-  const providerType = (config?.AI_PROVIDER_EMBEDDING || process.env.AI_PROVIDER_EMBEDDING || 'ollama').toLowerCase() as ProviderType;
+  // Check database config first, then environment variables
+  const providerType = (config?.AI_PROVIDER_EMBEDDING || process.env.AI_PROVIDER_EMBEDDING);
+
+  // If no provider is configured, throw a clear error
+  if (!providerType) {
+    throw new Error(
+      'AI_PROVIDER_EMBEDDING is not configured. Please set it in the admin settings or environment variables. ' +
+      'Options: ollama, openai, custom'
+    );
+  }
+
+  const provider = providerType.toLowerCase() as ProviderType;
 
   const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
   const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
 
-  return getProviderInstance(providerType, config || {}, modelName, embeddingModelName);
+  return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
 }
 
 // Legacy function for backward compatibility
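For callers, the practical difference is that an unconfigured provider now throws instead of silently returning an Ollama-backed provider. A minimal caller-side sketch (the import path is assumed):

import { getTagsProvider } from '@/lib/ai/factory'; // path assumed

try {
  // Config may come from the admin settings stored in the database.
  const provider = getTagsProvider({ AI_PROVIDER_TAGS: 'openai' });
  // ... use provider for tag generation
} catch (err) {
  // Thrown when neither the passed config nor AI_PROVIDER_TAGS is set.
  console.error('AI tags provider is not configured:', err);
}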