Keep/keep-notes/scripts/debug-config.ts
sepehr 5d315a6bdd fix: make paragraph refactor service use configured AI provider
The paragraph-refactor service was using OLLAMA_BASE_URL directly from
environment variables instead of using the configured AI provider from
the database. This caused "OLLAMA error" even when OpenAI was configured
in the admin interface.

Changes:
- paragraph-refactor.service.ts: Now uses getSystemConfig() and
  getTagsProvider() from factory instead of direct Ollama calls
- factory.ts: Added proper error messages when API keys are missing
- .env.docker.example: Updated with new provider configuration
  variables (AI_PROVIDER_TAGS, AI_PROVIDER_EMBEDDING)

This fixes the issue where AI reformulation features (Clarify, Shorten,
Improve Style) would fail with OLLAMA errors even when OpenAI was
properly configured in the admin settings.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-01-12 22:51:24 +01:00

42 lines
1.0 KiB
TypeScript

import prisma from '../lib/prisma'
/**
 * Dumps every row of the SystemConfig table to stdout, grouped into
 * AI-provider, Ollama, and OpenAI sections, followed by the full list.
 * Purely a read-only diagnostic; prints via console.log only.
 */
async function debugConfig() {
  console.log('=== System Configuration Debug ===\n')

  const allConfigs = await prisma.systemConfig.findMany()
  console.log(`Total configs in DB: ${allConfigs.length}\n`)

  // Print a section header followed by one `key: "value"` line per entry.
  const printSection = (header, entries) => {
    console.log(header)
    for (const entry of entries) {
      console.log(`${entry.key}: "${entry.value}"`)
    }
  }

  // Grouped views over the same result set (groups may overlap).
  printSection('=== AI Provider Configs ===', allConfigs.filter(c => c.key.startsWith('AI_')))
  printSection('\n=== Ollama Configs ===', allConfigs.filter(c => c.key.includes('OLLAMA')))
  printSection('\n=== OpenAI Configs ===', allConfigs.filter(c => c.key.includes('OPENAI')))
  printSection('\n=== All Configs ===', allConfigs)
}
// Entry point: run the dump, then map success/failure onto process exit codes.
debugConfig().then(
  () => process.exit(0),
  (err) => {
    console.error(err)
    process.exit(1)
  }
)