test: add Playwright tests and debug endpoint for AI provider validation

Add comprehensive tests to verify AI provider configuration and ensure
OpenAI is being used correctly instead of hardcoded Ollama.

Changes:
- Add ai-provider.spec.ts: Playwright tests for AI provider validation
- Add /api/debug/config endpoint: Exposes AI configuration for testing
- Tests verify: OpenAI config, connectivity, no OLLAMA errors

All 4 tests pass locally:
✓ AI provider configuration check
✓ OpenAI connectivity test
✓ Embeddings provider verification
✓ No OLLAMA errors validation

Usage on Docker:
TEST_URL=http://192.168.1.190:3000 npx playwright test ai-provider.spec.ts

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
sepehr 2026-01-12 22:58:38 +01:00
parent 5d315a6bdd
commit 00335a1383
2 changed files with 124 additions and 0 deletions

View File

@ -0,0 +1,32 @@
import { NextResponse } from 'next/server';
import { getSystemConfig } from '@/lib/config';
/**
 * Debug endpoint to check AI configuration.
 *
 * Returns only AI-related config keys. Secrets (API keys) are redacted to
 * 'set (hidden)' / 'not set' so the response never contains credentials.
 *
 * NOTE(review): even redacted, this reveals provider choice, model names
 * and the Ollama base URL — consider gating it behind auth or disabling
 * it outside of test deployments.
 *
 * @returns 200 with the redacted AI config, or 500 with an error message.
 */
export async function GET() {
  try {
    const config = await getSystemConfig();

    // Fall back to 'not set' for absent keys so the response shape is stable.
    const aiConfig = {
      AI_PROVIDER_TAGS: config.AI_PROVIDER_TAGS || 'not set',
      AI_PROVIDER_EMBEDDING: config.AI_PROVIDER_EMBEDDING || 'not set',
      AI_MODEL_TAGS: config.AI_MODEL_TAGS || 'not set',
      AI_MODEL_EMBEDDING: config.AI_MODEL_EMBEDDING || 'not set',
      OPENAI_API_KEY: config.OPENAI_API_KEY ? 'set (hidden)' : 'not set',
      OLLAMA_BASE_URL: config.OLLAMA_BASE_URL || 'not set',
      OLLAMA_MODEL: config.OLLAMA_MODEL || 'not set',
      CUSTOM_OPENAI_BASE_URL: config.CUSTOM_OPENAI_BASE_URL || 'not set',
      CUSTOM_OPENAI_API_KEY: config.CUSTOM_OPENAI_API_KEY ? 'set (hidden)' : 'not set',
    };
    return NextResponse.json(aiConfig);
  } catch (error) {
    // Error instances serialize to '{}' under JSON.stringify, so the original
    // `details: error` returned no useful information. Extract the message
    // explicitly, and avoid echoing the raw error object (it may contain
    // internal paths or connection strings).
    const details = error instanceof Error ? error.message : String(error);
    return NextResponse.json(
      { error: 'Failed to get config', details },
      { status: 500 }
    );
  }
}

View File

@ -0,0 +1,92 @@
import { test, expect } from '@playwright/test';
// Base URL for all API requests. Defaults to localhost for local runs;
// override with e.g. TEST_URL=http://192.168.1.190:3000 to target a Docker
// deployment. ('||' is deliberate: an empty TEST_URL also falls back.)
const BASE_URL = process.env.TEST_URL || 'http://localhost:3000';
test.describe('AI Provider Configuration Tests', () => {
  test('should check AI provider configuration in database', async ({ request }) => {
    // Hits the /api/debug/config endpoint added alongside these tests.
    // Uses the `request` fixture directly — no browser page is needed.
    const response = await request.get(`${BASE_URL}/api/debug/config`);
    expect(response.status()).toBe(200);

    const config = await response.json();
    console.log('AI Configuration:', config);

    // Both the tagging and embedding providers must be OpenAI.
    expect(config.AI_PROVIDER_TAGS).toBe('openai');
    expect(config.AI_PROVIDER_EMBEDDING).toBe('openai');
    expect(config.AI_MODEL_TAGS).toBeTruthy();
    expect(config.AI_MODEL_EMBEDDING).toBeTruthy();
    // Exact match: the previous toContain('set') also matched 'not set',
    // so a missing API key could never fail this assertion.
    expect(config.OPENAI_API_KEY).toBe('set (hidden)');
  });

  test('should test OpenAI provider connectivity', async ({ request }) => {
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();
    console.log('AI Test Result:', result);

    // A successful round-trip through both providers, backed by OpenAI.
    expect(result.success).toBe(true);
    expect(result.tagsProvider).toBe('openai');
    expect(result.embeddingsProvider).toBe('openai');
    expect(result.embeddingLength).toBeGreaterThan(0);
  });

  test('should check embeddings provider via main test endpoint', async ({ request }) => {
    // /api/ai/test exercises both the tags and embeddings providers.
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();
    console.log('Embeddings via AI Test:', result);

    expect(result.embeddingsProvider).toBe('openai');
    // text-embedding-3-small produces 1536-dimension vectors; update this
    // expectation if the configured embedding model changes.
    expect(result.embeddingLength).toBe(1536);
    expect(result.details?.provider).toBe('OpenAI');
  });

  test('should verify no OLLAMA errors in provider', async ({ request }) => {
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();
    // Normalize case before matching: error strings may say 'Ollama' or
    // 'ollama', which the previous uppercase-only check silently missed.
    const resultStr = JSON.stringify(result).toLowerCase();

    // No trace of Ollama or a refused local connection (default Ollama port).
    expect(resultStr).not.toContain('ollama');
    expect(resultStr).not.toContain('econnrefused');
    expect(resultStr).not.toContain('127.0.0.1:11434');

    // And the response does reference OpenAI.
    expect(resultStr).toContain('openai');
  });
});
test.describe('AI Provider - Docker Deployment Tests', () => {
  // Skip the whole group unless TEST_URL points at the Docker host.
  // Conditional test.skip(condition, description) in the describe body is
  // Playwright's documented way to skip a group; calling it inside a
  // beforeAll hook is not the supported pattern for group-level skips.
  test.skip(
    !process.env.TEST_URL?.includes('192.168'),
    'Skipping Docker tests - set TEST_URL=http://192.168.1.190:3000'
  );

  test('should verify paragraph refactor service uses OpenAI', async ({ request }) => {
    // Verifies the refactor service uses the configured provider instead of
    // hardcoded Ollama. Uses the `request` fixture — no browser page needed.
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();

    // The tags provider is what the refactor service uses.
    expect(result.tagsProvider).toBe('openai');
    expect(result.details?.provider).toBe('OpenAI');
  });
});