Add comprehensive tests to verify AI provider configuration and ensure OpenAI is being used correctly instead of hardcoded Ollama. Changes: - Add ai-provider.spec.ts: Playwright tests for AI provider validation - Add /api/debug/config endpoint: Exposes AI configuration for testing - Tests verify: OpenAI config, connectivity, no OLLAMA errors All 4 tests pass locally: ✓ AI provider configuration check ✓ OpenAI connectivity test ✓ Embeddings provider verification ✓ No OLLAMA errors validation Usage on Docker: TEST_URL=http://192.168.1.190:3000 npx playwright test ai-provider.spec.ts Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
33 lines
1.1 KiB
TypeScript
import { NextResponse } from 'next/server';
|
|
import { getSystemConfig } from '@/lib/config';
|
|
|
|
/**
|
|
* Debug endpoint to check AI configuration
|
|
* This helps verify that OpenAI is properly configured
|
|
*/
|
|
export async function GET() {
|
|
try {
|
|
const config = await getSystemConfig();
|
|
|
|
// Return only AI-related config for debugging
|
|
const aiConfig = {
|
|
AI_PROVIDER_TAGS: config.AI_PROVIDER_TAGS || 'not set',
|
|
AI_PROVIDER_EMBEDDING: config.AI_PROVIDER_EMBEDDING || 'not set',
|
|
AI_MODEL_TAGS: config.AI_MODEL_TAGS || 'not set',
|
|
AI_MODEL_EMBEDDING: config.AI_MODEL_EMBEDDING || 'not set',
|
|
OPENAI_API_KEY: config.OPENAI_API_KEY ? 'set (hidden)' : 'not set',
|
|
OLLAMA_BASE_URL: config.OLLAMA_BASE_URL || 'not set',
|
|
OLLAMA_MODEL: config.OLLAMA_MODEL || 'not set',
|
|
CUSTOM_OPENAI_BASE_URL: config.CUSTOM_OPENAI_BASE_URL || 'not set',
|
|
CUSTOM_OPENAI_API_KEY: config.CUSTOM_OPENAI_API_KEY ? 'set (hidden)' : 'not set',
|
|
};
|
|
|
|
return NextResponse.json(aiConfig);
|
|
} catch (error) {
|
|
return NextResponse.json(
|
|
{ error: 'Failed to get config', details: error },
|
|
{ status: 500 }
|
|
);
|
|
}
|
|
}
|