import { NextRequest, NextResponse } from 'next/server'
import { getAIProvider } from '@/lib/ai/factory'
import { getSystemConfig } from '@/lib/config'

export async function GET(request: NextRequest) {
  try {
    const config = await getSystemConfig()
    const provider = getAIProvider(config)

    // Resolve the effective provider settings once, so the success and
    // failure responses report identical values.
    const details = {
      provider: config.AI_PROVIDER || 'ollama',
      baseUrl:
        config.OLLAMA_BASE_URL ||
        process.env.OLLAMA_BASE_URL ||
        'http://localhost:11434',
      model:
        config.AI_MODEL_EMBEDDING ||
        process.env.OLLAMA_EMBEDDING_MODEL ||
        'embeddinggemma:latest'
    }

    // Exercise the provider with a minimal embedding request.
    const testText = 'test'
    const embeddings = await provider.getEmbeddings(testText)

    if (!embeddings || embeddings.length === 0) {
      return NextResponse.json(
        {
          success: false,
          provider: details.provider,
          error: 'No embeddings returned',
          details
        },
        { status: 500 }
      )
    }

    return NextResponse.json({
      success: true,
      provider: details.provider,
      embeddingLength: embeddings.length,
      firstValues: embeddings.slice(0, 5),
      details
    })
  } catch (error: any) {
    console.error('AI test error:', error)
    return NextResponse.json(
      {
        success: false,
        error: error.message || 'Unknown error',
        // Only expose stack traces outside production builds.
        stack: process.env.NODE_ENV === 'development' ? error.stack : undefined,
        // getSystemConfig() may be what threw, so fall back to raw env vars here.
        details: {
          provider: process.env.AI_PROVIDER || 'ollama',
          baseUrl: process.env.OLLAMA_BASE_URL || 'http://localhost:11434',
          model: process.env.OLLAMA_EMBEDDING_MODEL || 'embeddinggemma:latest'
        }
      },
      { status: 500 }
    )
  }
}
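
/*
 * Usage sketch (not part of the route): one way to exercise this endpoint
 * from a script or health check. The URL below is an assumption -- it
 * presumes this file lives at app/api/ai-test/route.ts on a dev server at
 * localhost:3000; adjust both to wherever the route is actually mounted.
 *
 *   const res = await fetch('http://localhost:3000/api/ai-test')
 *   const body = await res.json()
 *   if (body.success) {
 *     console.log(`Embeddings OK (${body.details.provider}), length ${body.embeddingLength}`)
 *   } else {
 *     console.error(`AI provider check failed (${body.details.provider}):`, body.error)
 *   }
 */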