Keep/keep-notes/tests/ai-provider.spec.ts
sepehr 00335a1383 test: add Playwright tests and debug endpoint for AI provider validation
Add comprehensive tests to verify AI provider configuration and ensure
OpenAI is being used correctly instead of hardcoded Ollama.

Changes:
- Add ai-provider.spec.ts: Playwright tests for AI provider validation
- Add /api/debug/config endpoint: Exposes AI configuration for testing
- Tests verify: OpenAI config, connectivity, no OLLAMA errors

All 4 tests pass locally:
✓ AI provider configuration check
✓ OpenAI connectivity test
✓ Embeddings provider verification
✓ No OLLAMA errors validation

Usage on Docker:
TEST_URL=http://192.168.1.190:3000 npx playwright test ai-provider.spec.ts

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-01-12 22:58:38 +01:00

93 lines
3.4 KiB
TypeScript

import { test, expect } from '@playwright/test';
// Base URL for all API requests. Defaults to localhost for local runs;
// override with the TEST_URL env var to target another deployment
// (|| deliberately treats an empty TEST_URL as unset).
const BASE_URL = process.env.TEST_URL || 'http://localhost:3000';
test.describe('AI Provider Configuration Tests', () => {
  // These are pure API tests: use the `request` fixture (APIRequestContext)
  // rather than `page.request`, so Playwright never has to open a browser page.

  test('should check AI provider configuration in database', async ({ request }) => {
    // The /api/debug/config endpoint exposes the effective AI configuration.
    const response = await request.get(`${BASE_URL}/api/debug/config`);
    expect(response.status()).toBe(200);

    const config = await response.json();
    console.log('AI Configuration:', config);

    // Both the tagging and embedding providers must be OpenAI, with models
    // configured; the key is reported as 'set'/'not set', never the raw value.
    expect(config.AI_PROVIDER_TAGS).toBe('openai');
    expect(config.AI_PROVIDER_EMBEDDING).toBe('openai');
    expect(config.AI_MODEL_TAGS).toBeTruthy();
    expect(config.AI_MODEL_EMBEDDING).toBeTruthy();
    expect(config.OPENAI_API_KEY).toContain('set');
  });

  test('should test OpenAI provider connectivity', async ({ request }) => {
    // /api/ai/test performs a live round-trip against the configured provider.
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();
    console.log('AI Test Result:', result);

    // Success flag plus both providers resolving to OpenAI, and a non-empty
    // embedding vector proves the connection actually worked end to end.
    expect(result.success).toBe(true);
    expect(result.tagsProvider).toBe('openai');
    expect(result.embeddingsProvider).toBe('openai');
    expect(result.embeddingLength).toBeGreaterThan(0);
  });

  test('should check embeddings provider via main test endpoint', async ({ request }) => {
    // /api/ai/test already exercises both the tags and embeddings providers,
    // so no separate embeddings endpoint is needed.
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();
    console.log('Embeddings via AI Test:', result);

    // 1536 dimensions matches OpenAI's text-embedding-3-small model.
    expect(result.embeddingsProvider).toBe('openai');
    expect(result.embeddingLength).toBe(1536);
    expect(result.details?.provider).toBe('OpenAI');
  });

  test('should verify no OLLAMA errors in provider', async ({ request }) => {
    // Regression guard: the service must not fall back to a hardcoded Ollama
    // provider (which previously surfaced as connection-refused errors).
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();
    const resultStr = JSON.stringify(result);

    // No Ollama references or its default-port connection failures...
    expect(resultStr).not.toContain('OLLAMA');
    expect(resultStr).not.toContain('ECONNREFUSED');
    expect(resultStr).not.toContain('127.0.0.1:11434');
    // ...and OpenAI must actually appear in the payload.
    expect(resultStr).toContain('openai');
  });
});
test.describe('AI Provider - Docker Deployment Tests', () => {
  // Conditionally skip the whole group unless TEST_URL points at the Docker
  // host. Declared at describe level because Playwright does not support
  // calling test.skip() inside a beforeAll hook (only in tests,
  // beforeEach/afterEach, or at the top level of a describe block).
  test.skip(
    !process.env.TEST_URL?.includes('192.168'),
    'Skipping Docker tests - set TEST_URL=http://192.168.1.190:3000',
  );

  test('should verify paragraph refactor service uses OpenAI', async ({ request }) => {
    // Verifies the refactor service goes through the configured provider
    // (the tags provider) rather than a hardcoded Ollama instance.
    const response = await request.get(`${BASE_URL}/api/ai/test`);
    expect(response.status()).toBe(200);

    const result = await response.json();
    expect(result.tagsProvider).toBe('openai');
    expect(result.details?.provider).toBe('OpenAI');
  });
});