sepehr fc2c40249e feat: AI provider testing page + multi-provider support + UX design spec
- Add AI Provider Testing page (/admin/ai-test) with Tags and Embeddings tests
- Add new AI providers: CustomOpenAI, DeepSeek, OpenRouter
- Add API routes for AI config, models listing, and testing endpoints
- Add UX Design Specification document for Phase 1 MVP AI
- Add PRD Phase 1 MVP AI planning document
- Update admin settings and sidebar navigation
- Fix AI factory for multi-provider support
2026-01-10 11:23:22 +01:00

99 lines
2.8 KiB
TypeScript

import { NextRequest, NextResponse } from 'next/server'
import { getSystemConfig } from '@/lib/config'
// Popular models for each provider (2025 snapshot).
// "tags" are chat/completion models used for tag generation; "embeddings" are
// vector-embedding models. The `satisfies` clause validates the shape of every
// entry at compile time without widening the key set (the `keyof typeof`
// lookup in GET still sees the literal keys).
const PROVIDER_MODELS = {
  ollama: {
    tags: [
      'llama3:latest',
      'llama3.2:latest',
      'granite4:latest',
      'mistral:latest',
      'mixtral:latest',
      'phi3:latest',
      'gemma2:latest',
      'qwen2:latest'
    ],
    embeddings: [
      'embeddinggemma:latest',
      'mxbai-embed-large:latest',
      'nomic-embed-text:latest'
    ]
  },
  openai: {
    tags: [
      'gpt-4o',
      'gpt-4o-mini',
      'gpt-4-turbo',
      'gpt-4',
      'gpt-3.5-turbo'
    ],
    embeddings: [
      'text-embedding-3-small',
      'text-embedding-3-large',
      'text-embedding-ada-002'
    ]
  },
  custom: {
    tags: [], // Will be loaded dynamically
    embeddings: [] // Will be loaded dynamically
  }
} satisfies Record<string, { tags: string[]; embeddings: string[] }>
/**
 * GET handler: returns the suggested model lists (chat "tags" models and
 * embedding models) for the currently configured AI provider.
 *
 * Response shape: `{ provider, models: { tags: string[], embeddings: string[] } }`.
 * On failure, responds 500 with `{ error, models: { tags: [], embeddings: [] } }`
 * so the client always receives a usable (if empty) models object.
 */
export async function GET(request: NextRequest) {
  try {
    const config = await getSystemConfig()
    const provider = (config.AI_PROVIDER || 'ollama').toLowerCase()

    // Static defaults; an unknown provider falls back to empty lists.
    let models = PROVIDER_MODELS[provider as keyof typeof PROVIDER_MODELS] || { tags: [], embeddings: [] }

    // For Ollama, try to fetch the real model list from the local API.
    if (provider === 'ollama') {
      try {
        const ollamaBaseUrl = config.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434'
        const response = await fetch(`${ollamaBaseUrl}/api/tags`, {
          method: 'GET',
          headers: { 'Content-Type': 'application/json' },
          // Don't hang the whole request when the local daemon is unreachable.
          signal: AbortSignal.timeout(5000)
        })
        if (response.ok) {
          const data = await response.json()
          const allModels: Array<{ name: string }> = data.models || []
          // Case-insensitive split between chat models and embedding models
          // (covers 'embed', 'Embedding', 'Embed', ... in one check).
          const isEmbeddingModel = (name: string) => name.toLowerCase().includes('embed')
          const tagModels = allModels
            .filter((m) => !isEmbeddingModel(m.name))
            .map((m) => m.name)
            .slice(0, 20) // cap the list at 20 models
          const embeddingModels = allModels
            .filter((m) => isEmbeddingModel(m.name))
            .map((m) => m.name)
          // Only override the defaults when the live lookup returned something.
          models = {
            tags: tagModels.length > 0 ? tagModels : models.tags,
            embeddings: embeddingModels.length > 0 ? embeddingModels : models.embeddings
          }
        }
      } catch (error) {
        // Best effort: keep the static defaults when Ollama can't be reached.
        console.warn('Could not fetch Ollama models, using defaults:', error)
      }
    }

    return NextResponse.json({
      provider,
      models
    })
  } catch (error: unknown) {
    console.error('Error fetching models:', error)
    // Narrow before reading .message; never assume the thrown value's shape.
    const message =
      error instanceof Error && error.message ? error.message : 'Failed to fetch models'
    return NextResponse.json(
      {
        error: message,
        models: { tags: [], embeddings: [] }
      },
      { status: 500 }
    )
  }
}