feat: AI provider testing page + multi-provider support + UX design spec

- Add AI Provider Testing page (/admin/ai-test) with Tags and Embeddings tests
- Add new AI providers: CustomOpenAI, DeepSeek, OpenRouter
- Add API routes for AI config, models listing, and testing endpoints
- Add UX Design Specification document for Phase 1 MVP AI
- Add PRD Phase 1 MVP AI planning document
- Update admin settings and sidebar navigation
- Fix AI factory for multi-provider support
This commit is contained in:
2026-01-10 11:23:22 +01:00
parent 640fcb26f7
commit fc2c40249e
21 changed files with 5971 additions and 138 deletions

View File

@@ -1,30 +1,77 @@
import { OpenAIProvider } from './providers/openai';
import { OllamaProvider } from './providers/ollama';
import { CustomOpenAIProvider } from './providers/custom-openai';
import { AIProvider } from './types';
export function getAIProvider(config?: Record<string, string>): AIProvider {
const providerType = config?.AI_PROVIDER || process.env.AI_PROVIDER || 'ollama';
type ProviderType = 'ollama' | 'openai' | 'custom';
switch (providerType.toLowerCase()) {
/**
 * Builds an OllamaProvider, resolving the base URL from explicit config,
 * then the environment, then a localhost default.
 *
 * @param config              runtime configuration key/value map
 * @param modelName           chat/tags model identifier to use
 * @param embeddingModelName  embedding model identifier to use
 * @returns a configured OllamaProvider instance
 */
function createOllamaProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OllamaProvider {
  const rawUrl = config?.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
  // OllamaProvider appends /api itself, so a trailing /api here would double up.
  const baseUrl = rawUrl.endsWith('/api') ? rawUrl.slice(0, -'/api'.length) : rawUrl;
  return new OllamaProvider(baseUrl, modelName, embeddingModelName);
}
/**
 * Builds an OpenAIProvider from config or environment credentials.
 * Warns when no API key is found but still constructs the provider
 * (failures surface later, at call time).
 *
 * @param config              runtime configuration key/value map
 * @param modelName           chat/tags model identifier to use
 * @param embeddingModelName  embedding model identifier to use
 * @returns a configured OpenAIProvider instance
 */
function createOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): OpenAIProvider {
  const apiKey = config?.OPENAI_API_KEY || process.env.OPENAI_API_KEY || '';
  if (apiKey === '') {
    console.warn('OPENAI_API_KEY non configurée.');
  }
  return new OpenAIProvider(apiKey, modelName, embeddingModelName);
}
/**
 * Builds a CustomOpenAIProvider (OpenAI-compatible endpoint) from config or
 * environment. Emits a warning for each missing credential/URL but still
 * constructs the provider.
 *
 * @param config              runtime configuration key/value map
 * @param modelName           chat/tags model identifier to use
 * @param embeddingModelName  embedding model identifier to use
 * @returns a configured CustomOpenAIProvider instance
 */
function createCustomOpenAIProvider(config: Record<string, string>, modelName: string, embeddingModelName: string): CustomOpenAIProvider {
  const apiKey = config?.CUSTOM_OPENAI_API_KEY || process.env.CUSTOM_OPENAI_API_KEY || '';
  const baseUrl = config?.CUSTOM_OPENAI_BASE_URL || process.env.CUSTOM_OPENAI_BASE_URL || '';
  if (apiKey === '') {
    console.warn('CUSTOM_OPENAI_API_KEY non configurée.');
  }
  if (baseUrl === '') {
    console.warn('CUSTOM_OPENAI_BASE_URL non configurée.');
  }
  return new CustomOpenAIProvider(apiKey, baseUrl, modelName, embeddingModelName);
}
/**
 * Dispatches to the matching provider factory for a given provider type.
 * Unknown types fall back to Ollama with a console warning.
 *
 * NOTE(review): the rendered diff had leftover *removed* lines interleaved in
 * this function (the old inline ollama/openai bodies), producing unreachable
 * returns and a bare `let` inside a case; this is the clean switch made of
 * the added lines only.
 *
 * @param providerType        one of 'ollama' | 'openai' | 'custom'
 * @param config              runtime configuration key/value map
 * @param modelName           chat/tags model identifier to use
 * @param embeddingModelName  embedding model identifier to use
 * @returns an AIProvider built by the matching create* helper
 */
function getProviderInstance(providerType: ProviderType, config: Record<string, string>, modelName: string, embeddingModelName: string): AIProvider {
  switch (providerType) {
    case 'ollama':
      return createOllamaProvider(config, modelName, embeddingModelName);
    case 'openai':
      return createOpenAIProvider(config, modelName, embeddingModelName);
    case 'custom':
      return createCustomOpenAIProvider(config, modelName, embeddingModelName);
    default:
      console.warn(`Provider AI inconnu: ${providerType}, utilisation de Ollama par défaut`);
      return createOllamaProvider(config, modelName, embeddingModelName);
  }
}
/**
 * Resolves the provider used for tag generation. Each setting falls back
 * from explicit config to the environment to a hard-coded default.
 *
 * @param config optional runtime configuration key/value map
 * @returns the AIProvider configured for tagging
 */
export function getTagsProvider(config?: Record<string, string>): AIProvider {
  const cfg = config ?? {};
  const rawType = cfg.AI_PROVIDER_TAGS || process.env.AI_PROVIDER_TAGS || 'ollama';
  const providerType = rawType.toLowerCase() as ProviderType;
  const modelName = cfg.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
  const embeddingModelName = cfg.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
  return getProviderInstance(providerType, cfg, modelName, embeddingModelName);
}
/**
 * Resolves the provider used for embeddings. Settings fall back from
 * explicit config to the environment to hard-coded defaults.
 *
 * NOTE(review): `modelName` is sourced from AI_MODEL_TAGS even here — this
 * mirrors getTagsProvider exactly; confirm whether an AI-model-for-embeddings
 * chat model was intended instead.
 *
 * @param config optional runtime configuration key/value map
 * @returns the AIProvider configured for embeddings
 */
export function getEmbeddingsProvider(config?: Record<string, string>): AIProvider {
  const cfg = config ?? {};
  const rawType = cfg.AI_PROVIDER_EMBEDDING || process.env.AI_PROVIDER_EMBEDDING || 'ollama';
  const providerType = rawType.toLowerCase() as ProviderType;
  const modelName = cfg.AI_MODEL_TAGS || process.env.AI_MODEL_TAGS || 'granite4:latest';
  const embeddingModelName = cfg.AI_MODEL_EMBEDDING || process.env.AI_MODEL_EMBEDDING || 'embeddinggemma:latest';
  return getProviderInstance(providerType, cfg, modelName, embeddingModelName);
}
/**
 * Legacy entry point kept for backward compatibility with pre-multi-provider
 * callers. Delegates to the tags provider.
 *
 * @deprecated Prefer getTagsProvider or getEmbeddingsProvider.
 * @param config optional runtime configuration key/value map
 * @returns the AIProvider configured for tagging
 */
export function getAIProvider(config?: Record<string, string>): AIProvider {
  const provider = getTagsProvider(config);
  return provider;
}