Add Next.js frontend with WebLLM and OpenAI support

- Add complete Next.js frontend with Tailwind CSS and shadcn/ui
- Integrate WebLLM for client-side, browser-based translations
- Add OpenAI provider support with gpt-4o-mini as the default model
- Add Context & Glossary page for LLM customization
- Reorganize settings: Translation Services now includes all providers
- Add system prompt and glossary support for all LLMs
- Remove test files and requirements-test.txt

This commit is contained in:
2025-11-30 19:02:41 +01:00
parent a4ecd3e0ec
commit 8c7716bf4d
44 changed files with 11885 additions and 15 deletions

View File

@@ -54,15 +54,19 @@ class DeepLTranslationProvider(TranslationProvider):
class LibreTranslationProvider(TranslationProvider):
    """LibreTranslate implementation.

    Talks to a LibreTranslate instance (public service or self-hosted)
    via deep-translator's ``LibreTranslator``.
    """

    def __init__(self, custom_url: str = "https://libretranslate.com"):
        # Base URL of the LibreTranslate instance; defaults to the public
        # service but can point at a self-hosted deployment.
        self.custom_url = custom_url

    def translate(self, text: str, target_language: str, source_language: str = 'auto') -> str:
        """Translate ``text`` to ``target_language``.

        Returns the original text unchanged for empty/whitespace input or
        on any provider error (best-effort behavior: never raises).
        """
        if not text or not text.strip():
            return text
        try:
            # LibreTranslator supports a custom URL for self-hosted or public
            # instances; always use the configured one (a stale hard-coded
            # http://localhost:5000 assignment was removed here).
            translator = LibreTranslator(
                source=source_language,
                target=target_language,
                custom_url=self.custom_url,
            )
            return translator.translate(text)
        except Exception as e:
            print(f"LibreTranslate error: {e}")
            # Fail silently and return original text
            return text
@@ -188,6 +192,97 @@ class WebLLMTranslationProvider(TranslationProvider):
return text
class OpenAITranslationProvider(TranslationProvider):
    """OpenAI GPT translation implementation with vision support.

    Uses the Chat Completions API for text translation and, for models
    with vision capability (gpt-4o / gpt-4o-mini), for in-image text
    translation via base64 data URLs.
    """

    # Extension -> MIME type for the data URL sent to the vision API.
    # NOTE: 'jpg' must map to 'image/jpeg' — 'image/jpg' is not a valid MIME type.
    _MIME_BY_EXT = {
        'png': 'image/png',
        'jpg': 'image/jpeg',
        'jpeg': 'image/jpeg',
        'gif': 'image/gif',
        'webp': 'image/webp',
    }

    def __init__(self, api_key: str, model: str = "gpt-4o-mini", system_prompt: str = ""):
        self.api_key = api_key
        self.model = model
        # Optional user-supplied context/glossary appended to the base prompt.
        self.custom_system_prompt = system_prompt

    def translate(self, text: str, target_language: str, source_language: str = 'auto') -> str:
        """Translate ``text`` to ``target_language`` via Chat Completions.

        Returns the input unchanged for empty/very short/numeric-only text
        and on any API error (best-effort behavior: never raises).
        """
        if not text or not text.strip():
            return text
        # Skip very short text or numbers only
        if len(text.strip()) < 2 or text.strip().isdigit():
            return text
        try:
            import openai
            client = openai.OpenAI(api_key=self.api_key)
            # Build system prompt with custom context if provided
            base_prompt = f"You are a translator. Translate the user's text to {target_language}. Return ONLY the translation, nothing else."
            if self.custom_system_prompt:
                system_content = f"""{base_prompt}

ADDITIONAL CONTEXT AND INSTRUCTIONS:
{self.custom_system_prompt}"""
            else:
                system_content = base_prompt
            response = client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_content},
                    {"role": "user", "content": text}
                ],
                temperature=0.3,
                max_tokens=500
            )
            # message.content may be None (e.g. refusals) — guard before strip()
            translated = (response.choices[0].message.content or "").strip()
            return translated if translated else text
        except Exception as e:
            print(f"OpenAI translation error: {e}")
            return text

    def translate_image(self, image_path: str, target_language: str) -> str:
        """Translate text within an image using an OpenAI vision model.

        The image is base64-encoded into a data URL. Returns the translated
        text, or an empty string on any error (best-effort: never raises).
        """
        import base64
        try:
            import openai
            client = openai.OpenAI(api_key=self.api_key)
            # Read and encode image
            with open(image_path, 'rb') as img_file:
                image_data = base64.b64encode(img_file.read()).decode('utf-8')
            # Determine MIME type from the file extension; default to PNG.
            ext = image_path.lower().rsplit('.', 1)[-1]
            media_type = self._MIME_BY_EXT.get(ext, "image/png")
            response = client.chat.completions.create(
                model=self.model,  # gpt-4o and gpt-4o-mini support vision
                messages=[
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": f"Extract all text from this image and translate it to {target_language}. Return ONLY the translated text, preserving the structure and formatting."
                            },
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:{media_type};base64,{image_data}"
                                }
                            }
                        ]
                    }
                ],
                max_tokens=1000
            )
            # Guard against a None content before strip()
            return (response.choices[0].message.content or "").strip()
        except Exception as e:
            print(f"OpenAI vision translation error: {e}")
            return ""
class TranslationService:
"""Main translation service that delegates to the configured provider"""
@@ -224,7 +319,7 @@ class TranslationService:
def translate_image(self, image_path: str, target_language: str) -> str:
"""
Translate text in an image using vision model (Ollama only)
Translate text in an image using vision model (Ollama or OpenAI)
Args:
image_path: Path to image file
@@ -236,9 +331,11 @@ class TranslationService:
if not self.translate_images:
return ""
# Only Ollama supports image translation
# Ollama and OpenAI support image translation
if isinstance(self.provider, OllamaTranslationProvider):
return self.provider.translate_image(image_path, target_language)
elif isinstance(self.provider, OpenAITranslationProvider):
return self.provider.translate_image(image_path, target_language)
return ""