fix: remove Ollama default fallbacks in factory and Docker

ROOT CAUSE: The factory was defaulting to 'ollama' when no provider
was configured, and docker-compose.yml was always setting OLLAMA_BASE_URL
even when using OpenAI. As a result, the app tried to connect to Ollama
even when OpenAI was configured in the admin interface.

CRITICAL CHANGES:
1. lib/ai/factory.ts - Removed 'ollama' default fallback
   - getTagsProvider() now throws error if AI_PROVIDER_TAGS not set
   - getEmbeddingsProvider() now throws error if AI_PROVIDER_EMBEDDING not set
   - Forces explicit configuration instead of silent fallback to Ollama

2. docker-compose.yml - Removed default OLLAMA_BASE_URL
   - Changed: OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://ollama:11434}
   - To: OLLAMA_BASE_URL=${OLLAMA_BASE_URL}
   - Only set if explicitly defined in .env.docker

3. Application name: Mento → Memento (correct spelling)
   - Updated in: sidebar, README, deploy.sh, DOCKER_DEPLOYMENT.md

4. app/api/ai/config/route.ts - Return 'not set' instead of 'ollama'
   - Makes it clear when a provider is not configured

IMPACT: The app will now properly use OpenAI when configured in the
admin interface, instead of silently falling back to Ollama.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
sepehr 2026-01-12 23:08:20 +01:00
parent 00335a1383
commit 8617117dec
7 changed files with 42 additions and 19 deletions

docker-compose.yml

@@ -25,9 +25,10 @@ services:
       - SMTP_FROM=${SMTP_FROM:-noreply@memento.app}
       # AI Providers
+      # Only define these if you're using the corresponding provider
       - OPENAI_API_KEY=${OPENAI_API_KEY}
-      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL:-http://ollama:11434}
-      - OLLAMA_MODEL=${OLLAMA_MODEL:-granite4:latest}
+      - OLLAMA_BASE_URL=${OLLAMA_BASE_URL}
+      - OLLAMA_MODEL=${OLLAMA_MODEL}
     volumes:
       - db-data:/app/prisma
       - uploads-data:/app/public/uploads
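A note on the interpolation semantics behind this change: Compose substitutes `${VAR:-default}` with the default whenever the variable is unset, so the old form always handed the container an Ollama URL, while plain `${VAR}` resolves to an empty string when unset. A hypothetical `.env.docker` for an OpenAI-only deployment might therefore look like this (the key value is a placeholder):

```
# .env.docker — hypothetical OpenAI-only setup; the key is a placeholder.
OPENAI_API_KEY=sk-your-key-here

# Leave the Ollama variables unset unless you actually run Ollama.
# With plain ${OLLAMA_BASE_URL} in docker-compose.yml, unset here means
# the container gets an empty value instead of http://ollama:11434.
# OLLAMA_BASE_URL=http://ollama:11434
# OLLAMA_MODEL=granite4:latest
```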

DOCKER_DEPLOYMENT.md

@@ -1,6 +1,6 @@
 # 🐳 Docker Deployment Guide for Proxmox
-Complete guide to deploy Mento on Proxmox using Docker Compose.
+Complete guide to deploy Memento on Proxmox using Docker Compose.
 ## 📋 Prerequisites
@@ -375,7 +375,7 @@ usermod -aG docker ubuntu
 # Add: features: nesting=1,keyctl=1
 ```
-Then deploy Mento as described above.
+Then deploy Memento as described above.
 ## 📚 Additional Resources

README.md

@@ -1,8 +1,8 @@
-# Mento - Google Keep Clone
+# Memento - Google Keep Clone
 A beautiful and feature-rich Google Keep clone built with modern web technologies.
-![Mento](https://img.shields.io/badge/Next.js-16-black)
+![Memento](https://img.shields.io/badge/Next.js-16-black)
 ![TypeScript](https://img.shields.io/badge/TypeScript-5.0-blue)
 ![Tailwind CSS](https://img.shields.io/badge/Tailwind-4.0-38bdf8)
 ![Prisma](https://img.shields.io/badge/Prisma-7.0-2d3748)

app/api/ai/config/route.ts

@@ -6,14 +6,14 @@ export async function GET(request: NextRequest) {
     const config = await getSystemConfig()
     return NextResponse.json({
-      AI_PROVIDER_TAGS: config.AI_PROVIDER_TAGS || 'ollama',
-      AI_MODEL_TAGS: config.AI_MODEL_TAGS || 'granite4:latest',
-      AI_PROVIDER_EMBEDDING: config.AI_PROVIDER_EMBEDDING || 'ollama',
-      AI_MODEL_EMBEDDING: config.AI_MODEL_EMBEDDING || 'embeddinggemma:latest',
+      AI_PROVIDER_TAGS: config.AI_PROVIDER_TAGS || 'not set',
+      AI_MODEL_TAGS: config.AI_MODEL_TAGS || 'not set',
+      AI_PROVIDER_EMBEDDING: config.AI_PROVIDER_EMBEDDING || 'not set',
+      AI_MODEL_EMBEDDING: config.AI_MODEL_EMBEDDING || 'not set',
       OPENAI_API_KEY: config.OPENAI_API_KEY ? '***configured***' : '',
       CUSTOM_OPENAI_API_KEY: config.CUSTOM_OPENAI_API_KEY ? '***configured***' : '',
       CUSTOM_OPENAI_BASE_URL: config.CUSTOM_OPENAI_BASE_URL || '',
-      OLLAMA_BASE_URL: config.OLLAMA_BASE_URL || 'http://localhost:11434'
+      OLLAMA_BASE_URL: config.OLLAMA_BASE_URL || 'not set'
     })
   } catch (error: any) {
     return NextResponse.json(
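As a usage sketch (not part of this commit): a client can now detect an unconfigured provider by checking for the literal string 'not set'. The endpoint path and response keys follow the route above; the helper itself is hypothetical:

```typescript
// Hypothetical admin-UI helper against the config route above.
async function warnIfAiUnconfigured(): Promise<void> {
  const res = await fetch('/api/ai/config');
  const cfg = await res.json();

  // After this commit, unconfigured values come back as the literal
  // string 'not set' rather than a silent 'ollama' default.
  if (cfg.AI_PROVIDER_TAGS === 'not set' || cfg.AI_PROVIDER_EMBEDDING === 'not set') {
    console.warn('AI provider not configured; set one in admin settings.');
  }
}
```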

sidebar component

@@ -79,7 +79,7 @@ export function Sidebar({ className, user }: { className?: string, user?: any })
         <StickyNote className="h-5 w-5" />
       </div>
       <div className="flex flex-col">
-        <span className="text-lg font-bold tracking-tight text-slate-900 dark:text-white leading-none">Mento</span>
+        <span className="text-lg font-bold tracking-tight text-slate-900 dark:text-white leading-none">Memento</span>
         <span className="text-[10px] font-medium text-slate-400 dark:text-slate-500 uppercase tracking-widest mt-1">{t('nav.workspace')}</span>
       </div>
     </div>

deploy.sh

@@ -1,11 +1,11 @@
 #!/bin/bash
-# Mento Docker Deployment Script
-# This script helps you build and deploy Mento on Proxmox/Docker
+# Memento Docker Deployment Script
+# This script helps you build and deploy Memento on Proxmox/Docker
 set -e
-echo "🚀 Mento Docker Deployment"
+echo "🚀 Memento Docker Deployment"
 echo "================================"
 echo ""

lib/ai/factory.ts

@@ -64,19 +64,41 @@ function getProviderInstance(providerType: ProviderType, config: Record<string,
 }
 export function getTagsProvider(config?: Record<string, string>): AIProvider {
-  const providerType = (config?.AI_PROVIDER_TAGS || process.env.AI_PROVIDER_TAGS || 'ollama').toLowerCase() as ProviderType;
+  // Check database config first, then environment variables
+  const providerType = (config?.AI_PROVIDER_TAGS || process.env.AI_PROVIDER_TAGS);
+
+  // If no provider is configured, throw a clear error
+  if (!providerType) {
+    throw new Error(
+      'AI_PROVIDER_TAGS is not configured. Please set it in the admin settings or environment variables. ' +
+      'Options: ollama, openai, custom'
+    );
+  }
+
+  const provider = providerType.toLowerCase() as ProviderType;
   const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
   const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
-  return getProviderInstance(providerType, config || {}, modelName, embeddingModelName);
+  return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
 }
 export function getEmbeddingsProvider(config?: Record<string, string>): AIProvider {
-  const providerType = (config?.AI_PROVIDER_EMBEDDING || process.env.AI_PROVIDER_EMBEDDING || 'ollama').toLowerCase() as ProviderType;
+  // Check database config first, then environment variables
+  const providerType = (config?.AI_PROVIDER_EMBEDDING || process.env.AI_PROVIDER_EMBEDDING);
+
+  // If no provider is configured, throw a clear error
+  if (!providerType) {
+    throw new Error(
+      'AI_PROVIDER_EMBEDDING is not configured. Please set it in the admin settings or environment variables. ' +
+      'Options: ollama, openai, custom'
+    );
+  }
+
+  const provider = providerType.toLowerCase() as ProviderType;
   const modelName = config?.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
   const embeddingModelName = config?.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
-  return getProviderInstance(providerType, config || {}, modelName, embeddingModelName);
+  return getProviderInstance(provider, config || {}, modelName, embeddingModelName);
 }
 // Legacy function for backward compatibility
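Because getTagsProvider() and getEmbeddingsProvider() now throw instead of silently falling back, call sites have to handle the failure. A minimal sketch of a caller, assuming a '@/lib/ai/factory' import alias; the route and error mapping are illustrative, not part of this commit:

```typescript
import { NextResponse } from 'next/server';
import { getTagsProvider } from '@/lib/ai/factory'; // assumed path alias

// Hypothetical API route: map the new configuration error to a 400
// response instead of letting it surface as an unhandled 500.
export async function POST() {
  try {
    const provider = getTagsProvider(); // throws if AI_PROVIDER_TAGS is unset
    // ...use provider to generate tags...
    return NextResponse.json({ ok: true });
  } catch (error: any) {
    if (String(error?.message).includes('is not configured')) {
      return NextResponse.json({ error: error.message }, { status: 400 });
    }
    throw error; // unrelated failures still propagate
  }
}
```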