import { NextRequest, NextResponse } from 'next/server'
import { getSystemConfig } from '@/lib/config'

// Popular models for each provider (2025)
const PROVIDER_MODELS = {
  ollama: {
    tags: [
      'llama3:latest',
      'llama3.2:latest',
      'granite4:latest',
      'mistral:latest',
      'mixtral:latest',
      'phi3:latest',
      'gemma2:latest',
      'qwen2:latest'
    ],
    embeddings: [
      'embeddinggemma:latest',
      'mxbai-embed-large:latest',
      'nomic-embed-text:latest'
    ]
  },
  openai: {
    tags: [
      'gpt-4o',
      'gpt-4o-mini',
      'gpt-4-turbo',
      'gpt-4',
      'gpt-3.5-turbo'
    ],
    embeddings: [
      'text-embedding-3-small',
      'text-embedding-3-large',
      'text-embedding-ada-002'
    ]
  },
  custom: {
    tags: [], // Will be loaded dynamically
    embeddings: [] // Will be loaded dynamically
  }
}

export async function GET(request: NextRequest) {
  try {
    const config = await getSystemConfig()
    const provider = (config.AI_PROVIDER || 'ollama').toLowerCase()

    // Start from the static defaults; unknown providers get empty lists
    let models = PROVIDER_MODELS[provider as keyof typeof PROVIDER_MODELS] || {
      tags: [],
      embeddings: []
    }

    // For Ollama, try to fetch the actual model list from the local API
    if (provider === 'ollama') {
      try {
        const ollamaBaseUrl =
          config.OLLAMA_BASE_URL ||
          process.env.OLLAMA_BASE_URL ||
          'http://localhost:11434'

        const response = await fetch(`${ollamaBaseUrl}/api/tags`, {
          method: 'GET',
          headers: { 'Content-Type': 'application/json' }
        })

        if (response.ok) {
          const data = await response.json()
          const allModels = data.models || []

          // Split installed models into chat models and embedding models.
          // Matching is case-insensitive so names like "Embedding" are caught too.
          const tagModels = allModels
            .filter((m: any) => !m.name.toLowerCase().includes('embed'))
            .map((m: any) => m.name)
            .slice(0, 20) // Limit to 20 models

          const embeddingModels = allModels
            .filter((m: any) => m.name.toLowerCase().includes('embed'))
            .map((m: any) => m.name)

          // Fall back to the static defaults for any category that comes back empty
          models = {
            tags: tagModels.length > 0 ? tagModels : models.tags,
            embeddings: embeddingModels.length > 0 ? embeddingModels : models.embeddings
          }
        }
      } catch (error) {
        console.warn('Could not fetch Ollama models, using defaults:', error)
        // Keep the default models
      }
    }

    return NextResponse.json({ provider, models })
  } catch (error: any) {
    console.error('Error fetching models:', error)
    return NextResponse.json(
      { error: error.message || 'Failed to fetch models', models: { tags: [], embeddings: [] } },
      { status: 500 }
    )
  }
}
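
// Example usage (a sketch, not part of the route itself): the endpoint is
// consumed from the client with a plain GET. The '/api/models' path is an
// assumption based on a typical Next.js app-router layout; adjust it to
// wherever this file actually lives in the project.
//
//   const res = await fetch('/api/models')
//   const { provider, models } = await res.json()
//   // provider: e.g. "ollama"
//   // models.tags: chat/completion model names
//   // models.embeddings: embedding model names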