feat: AI provider testing page + multi-provider support + UX design spec

- Add AI Provider Testing page (/admin/ai-test) with Tags and Embeddings tests
- Add new AI providers: CustomOpenAI, DeepSeek, OpenRouter
- Add API routes for AI config, models listing, and testing endpoints
- Add UX Design Specification document for Phase 1 MVP AI
- Add PRD Phase 1 MVP AI planning document
- Update admin settings and sidebar navigation
- Fix AI factory for multi-provider support
This commit is contained in:
2026-01-10 11:23:22 +01:00
parent 640fcb26f7
commit fc2c40249e
21 changed files with 5971 additions and 138 deletions

View File

@@ -0,0 +1,27 @@
import { NextRequest, NextResponse } from 'next/server'
import { getSystemConfig } from '@/lib/config'
/**
 * GET /api/admin/ai-config — returns the current AI configuration for the
 * admin UI. Secrets are masked: API keys are reported only as
 * '***configured***' or '' and are never echoed back to the client.
 */
export async function GET(request: NextRequest) {
  try {
    const config = await getSystemConfig()
    return NextResponse.json({
      // Defaults mirror the server-side fallbacks used by the AI factory.
      AI_PROVIDER_TAGS: config.AI_PROVIDER_TAGS || 'ollama',
      AI_MODEL_TAGS: config.AI_MODEL_TAGS || 'granite4:latest',
      AI_PROVIDER_EMBEDDING: config.AI_PROVIDER_EMBEDDING || 'ollama',
      AI_MODEL_EMBEDDING: config.AI_MODEL_EMBEDDING || 'embeddinggemma:latest',
      OPENAI_API_KEY: config.OPENAI_API_KEY ? '***configured***' : '',
      CUSTOM_OPENAI_API_KEY: config.CUSTOM_OPENAI_API_KEY ? '***configured***' : '',
      CUSTOM_OPENAI_BASE_URL: config.CUSTOM_OPENAI_BASE_URL || '',
      OLLAMA_BASE_URL: config.OLLAMA_BASE_URL || 'http://localhost:11434'
    })
  } catch (error: unknown) {
    console.error('Error fetching AI config:', error)
    // Narrow before touching .message — `any` would hide non-Error throws.
    const message = error instanceof Error ? error.message : 'Failed to fetch config'
    return NextResponse.json(
      {
        error: message
      },
      { status: 500 }
    )
  }
}

View File

@@ -0,0 +1,98 @@
import { NextRequest, NextResponse } from 'next/server'
import { getSystemConfig } from '@/lib/config'
// Popular models for each provider (2025).
// Static fallback catalog: used when a live model listing cannot be fetched
// (the GET handler below overrides the `ollama` entries with the daemon's
// real list when it is reachable). Each provider exposes two lists:
// `tags` = chat/generation models used for tag generation, `embeddings` =
// embedding models.
const PROVIDER_MODELS = {
  ollama: {
    tags: [
      'llama3:latest',
      'llama3.2:latest',
      'granite4:latest',
      'mistral:latest',
      'mixtral:latest',
      'phi3:latest',
      'gemma2:latest',
      'qwen2:latest'
    ],
    embeddings: [
      'embeddinggemma:latest',
      'mxbai-embed-large:latest',
      'nomic-embed-text:latest'
    ]
  },
  openai: {
    tags: [
      'gpt-4o',
      'gpt-4o-mini',
      'gpt-4-turbo',
      'gpt-4',
      'gpt-3.5-turbo'
    ],
    embeddings: [
      'text-embedding-3-small',
      'text-embedding-3-large',
      'text-embedding-ada-002'
    ]
  },
  custom: {
    tags: [], // Will be loaded dynamically
    embeddings: [] // Will be loaded dynamically
  }
}
/**
 * GET /api/admin/ai-models — lists the chat ("tags") and embedding models
 * available for the active provider. For Ollama the local daemon is queried
 * for its real model list; on any failure the static catalog is returned.
 */
export async function GET(request: NextRequest) {
  try {
    const config = await getSystemConfig()
    const provider = (config.AI_PROVIDER || 'ollama').toLowerCase()
    let models = PROVIDER_MODELS[provider as keyof typeof PROVIDER_MODELS] || { tags: [], embeddings: [] }
    // For Ollama, try to fetch the real model list from the local API.
    if (provider === 'ollama') {
      try {
        const ollamaBaseUrl = config.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434'
        const response = await fetch(`${ollamaBaseUrl}/api/tags`, {
          method: 'GET',
          headers: { 'Content-Type': 'application/json' },
          // Don't hang the admin UI indefinitely if the daemon is unreachable.
          signal: AbortSignal.timeout(5000)
        })
        if (response.ok) {
          const data = await response.json()
          const allModels: Array<{ name: string }> = data.models || []
          // Case-insensitive split between embedding and chat models —
          // matches 'embed', 'Embedding', 'EMBED', etc.
          const isEmbeddingModel = (name: string) => name.toLowerCase().includes('embed')
          const tagModels = allModels
            .filter((m) => !isEmbeddingModel(m.name))
            .map((m) => m.name)
            .slice(0, 20) // Cap the list so the dropdown stays usable
          const embeddingModels = allModels
            .filter((m) => isEmbeddingModel(m.name))
            .map((m) => m.name)
          // Only replace a category when the daemon actually returned entries.
          models = {
            tags: tagModels.length > 0 ? tagModels : models.tags,
            embeddings: embeddingModels.length > 0 ? embeddingModels : models.embeddings
          }
        }
      } catch (error) {
        // Keep the static defaults if the local Ollama API is unavailable.
        console.warn('Could not fetch Ollama models, using defaults:', error)
      }
    }
    return NextResponse.json({
      provider,
      models
    })
  } catch (error: unknown) {
    console.error('Error fetching models:', error)
    const message = error instanceof Error ? error.message : 'Failed to fetch models'
    return NextResponse.json(
      {
        error: message,
        models: { tags: [], embeddings: [] }
      },
      { status: 500 }
    )
  }
}

View File

@@ -0,0 +1,91 @@
import { NextRequest, NextResponse } from 'next/server'
import { getEmbeddingsProvider } from '@/lib/ai/factory'
import { getSystemConfig } from '@/lib/config'
/**
 * Builds human-readable connection details (display name, endpoint, embedding
 * model) for the embeddings provider named by `providerType`, shown on the
 * admin AI test page. Unknown providers fall through to a generic entry.
 */
function getProviderDetails(config: Record<string, string>, providerType: string) {
  const normalized = providerType.toLowerCase()
  // Shared fallback logic: config value wins, otherwise the per-provider default.
  const embeddingModel = (fallback: string) => config.AI_MODEL_EMBEDDING || fallback
  const known: Record<string, { provider: string; baseUrl: string; model: string }> = {
    ollama: {
      provider: 'Ollama',
      baseUrl: config.OLLAMA_BASE_URL || 'http://localhost:11434',
      model: embeddingModel('embeddinggemma:latest')
    },
    openai: {
      provider: 'OpenAI',
      baseUrl: 'https://api.openai.com/v1',
      model: embeddingModel('text-embedding-3-small')
    },
    custom: {
      provider: 'Custom OpenAI',
      baseUrl: config.CUSTOM_OPENAI_BASE_URL || 'Not configured',
      model: embeddingModel('text-embedding-3-small')
    }
  }
  return (
    known[normalized] ?? {
      provider: normalized,
      baseUrl: 'unknown',
      model: embeddingModel('unknown')
    }
  )
}
/**
 * POST /api/admin/ai-test/embeddings — live smoke test of the configured
 * embeddings provider: embeds a short text and reports the vector length,
 * a preview of the first values, timing, and connection details.
 */
export async function POST(request: NextRequest) {
  // Resolve config/defaults once, outside the try block, so the catch path
  // never re-fetches config (the original second getSystemConfig() call in
  // catch could itself throw and mask the real provider error).
  const config = await getSystemConfig()
  const providerType = config.AI_PROVIDER_EMBEDDING || 'ollama'
  const model = config.AI_MODEL_EMBEDDING || 'embeddinggemma:latest'
  const details = getProviderDetails(config, providerType)
  try {
    const provider = getEmbeddingsProvider(config)
    const testText = 'test'
    const startTime = Date.now()
    const embeddings = await provider.getEmbeddings(testText)
    const responseTime = Date.now() - startTime
    if (!embeddings || embeddings.length === 0) {
      return NextResponse.json(
        {
          success: false,
          error: 'No embeddings returned',
          provider: providerType,
          model,
          details
        },
        { status: 500 }
      )
    }
    return NextResponse.json({
      success: true,
      provider: providerType,
      model,
      embeddingLength: embeddings.length,
      firstValues: embeddings.slice(0, 5), // preview only — full vector is large
      responseTime,
      details
    })
  } catch (error: unknown) {
    console.error('AI embeddings test error:', error)
    const err = error instanceof Error ? error : undefined
    return NextResponse.json(
      {
        success: false,
        error: err?.message || 'Unknown error',
        provider: providerType,
        model,
        details,
        // Stack traces are only exposed outside production builds.
        stack: process.env.NODE_ENV === 'development' ? err?.stack : undefined
      },
      { status: 500 }
    )
  }
}

View File

@@ -0,0 +1,50 @@
import { NextRequest, NextResponse } from 'next/server'
import { getTagsProvider } from '@/lib/ai/factory'
import { getSystemConfig } from '@/lib/config'
/**
 * POST /api/admin/ai-test/tags — live smoke test of the configured tags
 * provider: generates tags for a fixed sample note and reports the tags
 * plus the round-trip time.
 */
export async function POST(request: NextRequest) {
  // Resolve config/defaults once, before the try block, so the catch path
  // never re-fetches config (a second getSystemConfig() call in catch could
  // itself throw and mask the real provider error).
  const config = await getSystemConfig()
  const providerName = config.AI_PROVIDER_TAGS || 'ollama'
  const model = config.AI_MODEL_TAGS || 'granite4:latest'
  try {
    const provider = getTagsProvider(config)
    const testContent = "This is a test note about artificial intelligence and machine learning. It contains keywords like AI, ML, neural networks, and deep learning."
    const startTime = Date.now()
    const tags = await provider.generateTags(testContent)
    const responseTime = Date.now() - startTime
    if (!tags || tags.length === 0) {
      return NextResponse.json(
        {
          success: false,
          error: 'No tags generated',
          provider: providerName,
          model
        },
        { status: 500 }
      )
    }
    return NextResponse.json({
      success: true,
      provider: providerName,
      model,
      tags,
      responseTime
    })
  } catch (error: unknown) {
    console.error('AI tags test error:', error)
    const err = error instanceof Error ? error : undefined
    return NextResponse.json(
      {
        success: false,
        error: err?.message || 'Unknown error',
        provider: providerName,
        model,
        // Stack traces are only exposed outside production builds.
        stack: process.env.NODE_ENV === 'development' ? err?.stack : undefined
      },
      { status: 500 }
    )
  }
}

View File

@@ -1,55 +1,88 @@
import { NextRequest, NextResponse } from 'next/server'
import { getAIProvider } from '@/lib/ai/factory'
import { getTagsProvider, getEmbeddingsProvider } from '@/lib/ai/factory'
import { getSystemConfig } from '@/lib/config'
/**
 * Resolves display name, endpoint, and embedding model for the embeddings
 * provider named by `providerType`. Config values take precedence over
 * environment variables, which take precedence over built-in defaults.
 * Unknown providers yield a generic placeholder entry.
 */
function getProviderDetails(config: Record<string, string>, providerType: string) {
  const name = providerType.toLowerCase()
  if (name === 'ollama') {
    return {
      provider: 'Ollama',
      baseUrl: config.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434',
      model: config.AI_MODEL_EMBEDDING || 'embeddinggemma:latest'
    }
  }
  if (name === 'openai') {
    return {
      provider: 'OpenAI',
      baseUrl: 'https://api.openai.com/v1', // fixed public endpoint
      model: config.AI_MODEL_EMBEDDING || 'text-embedding-3-small'
    }
  }
  if (name === 'custom') {
    return {
      provider: 'Custom OpenAI',
      baseUrl: config.CUSTOM_OPENAI_BASE_URL || process.env.CUSTOM_OPENAI_BASE_URL || 'Not configured',
      model: config.AI_MODEL_EMBEDDING || 'text-embedding-3-small'
    }
  }
  return {
    provider: name,
    baseUrl: 'unknown',
    model: config.AI_MODEL_EMBEDDING || 'unknown'
  }
}
export async function GET(request: NextRequest) {
try {
const config = await getSystemConfig()
const provider = getAIProvider(config)
const tagsProvider = getTagsProvider(config)
const embeddingsProvider = getEmbeddingsProvider(config)
// Test with a simple embedding request
const testText = 'test'
const embeddings = await provider.getEmbeddings(testText)
// Test embeddings provider
const embeddings = await embeddingsProvider.getEmbeddings(testText)
if (!embeddings || embeddings.length === 0) {
const providerType = config.AI_PROVIDER_EMBEDDING || 'ollama'
const details = getProviderDetails(config, providerType)
return NextResponse.json(
{
success: false,
provider: config.AI_PROVIDER || 'ollama',
tagsProvider: config.AI_PROVIDER_TAGS || 'ollama',
embeddingsProvider: providerType,
error: 'No embeddings returned',
details: {
provider: config.AI_PROVIDER || 'ollama',
baseUrl: config.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434',
model: config.AI_MODEL_EMBEDDING || process.env.OLLAMA_EMBEDDING_MODEL || 'embeddinggemma:latest'
}
details
},
{ status: 500 }
)
}
const tagsProviderType = config.AI_PROVIDER_TAGS || 'ollama'
const embeddingsProviderType = config.AI_PROVIDER_EMBEDDING || 'ollama'
const details = getProviderDetails(config, embeddingsProviderType)
return NextResponse.json({
success: true,
provider: config.AI_PROVIDER || 'ollama',
tagsProvider: tagsProviderType,
embeddingsProvider: embeddingsProviderType,
embeddingLength: embeddings.length,
firstValues: embeddings.slice(0, 5),
details: {
provider: config.AI_PROVIDER || 'ollama',
baseUrl: config.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434',
model: config.AI_MODEL_EMBEDDING || process.env.OLLAMA_EMBEDDING_MODEL || 'embeddinggemma:latest'
}
details
})
} catch (error: any) {
console.error('AI test error:', error)
const config = await getSystemConfig()
const providerType = config.AI_PROVIDER_EMBEDDING || 'ollama'
const details = getProviderDetails(config, providerType)
return NextResponse.json(
{
success: false,
error: error.message || 'Unknown error',
stack: process.env.NODE_ENV === 'development' ? error.stack : undefined,
details: {
provider: process.env.AI_PROVIDER || 'ollama',
baseUrl: process.env.OLLAMA_BASE_URL || 'http://localhost:11434',
model: process.env.OLLAMA_EMBEDDING_MODEL || 'embeddinggemma:latest'
}
details
},
{ status: 500 }
)