fix: make paragraph refactor service use configured AI provider

The paragraph-refactor service was reading OLLAMA_BASE_URL directly from
environment variables instead of using the AI provider configured in
the database. This caused an "OLLAMA error" even when OpenAI was
configured in the admin interface.

Changes:
- paragraph-refactor.service.ts: Now uses getSystemConfig() and
  getTagsProvider() from factory instead of direct Ollama calls
- factory.ts: Added proper error messages when API keys are missing
- .env.docker.example: Updated with new provider configuration
  variables (AI_PROVIDER_TAGS, AI_PROVIDER_EMBEDDING)

This fixes the issue where the AI reformulation features (Clarify,
Shorten, Improve Style) would fail with the "OLLAMA error" even when
OpenAI was properly configured in the admin settings.

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-12 22:51:24 +01:00
parent 58e486c68e
commit 5d315a6bdd
10 changed files with 3025 additions and 3072 deletions

View File

@@ -7,6 +7,8 @@
*/
import { LanguageDetectionService } from './language-detection.service'
import { getTagsProvider } from '../factory'
import { getSystemConfig } from '@/lib/config'
export type RefactorMode = 'clarify' | 'shorten' | 'improveStyle'
@@ -83,37 +85,13 @@ export class ParagraphRefactorService {
const systemPrompt = this.getSystemPrompt(mode)
const userPrompt = this.getUserPrompt(mode, content, language)
// Get AI provider response using fetch
let baseUrl = process.env.OLLAMA_BASE_URL
// Get AI provider from factory
const config = await getSystemConfig()
const provider = getTagsProvider(config)
if (!baseUrl) {
throw new Error('OLLAMA_BASE_URL environment variable is required')
}
// Remove /api suffix if present to avoid double /api/api/...
if (baseUrl.endsWith('/api')) {
baseUrl = baseUrl.slice(0, -4)
}
const modelName = process.env.OLLAMA_MODEL || 'granite4:latest'
const response = await fetch(`${baseUrl}/api/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: modelName,
system: systemPrompt,
prompt: userPrompt,
stream: false,
}),
})
if (!response.ok) {
throw new Error(`Provider error: ${response.statusText}`)
}
const data = await response.json()
const refactored = this.extractRefactoredText(data.response)
// Use provider's generateText method
const fullPrompt = `${systemPrompt}\n\n${userPrompt}`
const refactored = await provider.generateText(fullPrompt)
// Calculate word count change
const refactoredWordCount = refactored.split(/\s+/).length
@@ -189,38 +167,16 @@ ${content}
Original language: ${language}
IMPORTANT: Provide all 3 versions in ${language}. No English, no explanations.`
// Get AI provider response using fetch
let baseUrl = process.env.OLLAMA_BASE_URL
// Get AI provider from factory
const config = await getSystemConfig()
const provider = getTagsProvider(config)
if (!baseUrl) {
throw new Error('OLLAMA_BASE_URL environment variable is required')
}
// Remove /api suffix if present to avoid double /api/api/...
if (baseUrl.endsWith('/api')) {
baseUrl = baseUrl.slice(0, -4)
}
const modelName = process.env.OLLAMA_MODEL || 'granite4:latest'
const response = await fetch(`${baseUrl}/api/generate`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: modelName,
system: systemPrompt,
prompt: userPrompt,
stream: false,
}),
})
if (!response.ok) {
throw new Error(`Provider error: ${response.statusText}`)
}
const data = await response.json()
// Use provider's generateText method
const fullPrompt = `${systemPrompt}\n\n${userPrompt}`
const response = await provider.generateText(fullPrompt)
// Parse JSON response
const jsonResponse = JSON.parse(data.response)
const jsonResponse = JSON.parse(response)
const modes: RefactorMode[] = ['clarify', 'shorten', 'improveStyle']
const results: RefactorResult[] = []