diff --git a/main.py b/main.py
index f7c59e5..071c822 100644
--- a/main.py
+++ b/main.py
@@ -154,7 +154,7 @@ async def translate_document(
logger.info(f"Saved input file to: {input_path}")
# Configure translation provider
- from services.translation_service import GoogleTranslationProvider, DeepLTranslationProvider, LibreTranslationProvider, OllamaTranslationProvider, translation_service
+ from services.translation_service import GoogleTranslationProvider, DeepLTranslationProvider, LibreTranslationProvider, OllamaTranslationProvider, WebLLMTranslationProvider, translation_service
if provider.lower() == "deepl":
if not config.DEEPL_API_KEY:
@@ -165,6 +165,8 @@ async def translate_document(
elif provider.lower() == "ollama":
vision_model = getattr(config, 'OLLAMA_VISION_MODEL', 'llava')
translation_provider = OllamaTranslationProvider(config.OLLAMA_BASE_URL, config.OLLAMA_MODEL, vision_model)
+ elif provider.lower() == "webllm":
+ translation_provider = WebLLMTranslationProvider()
else:
translation_provider = GoogleTranslationProvider()
diff --git a/services/translation_service.py b/services/translation_service.py
index fed1ba2..a491a87 100644
--- a/services/translation_service.py
+++ b/services/translation_service.py
@@ -139,6 +139,16 @@ class OllamaTranslationProvider(TranslationProvider):
return []
+class WebLLMTranslationProvider(TranslationProvider):
+ """WebLLM browser-based translation (client-side processing)"""
+
+ def translate(self, text: str, target_language: str, source_language: str = 'auto') -> str:
+ # WebLLM translation happens client-side in the browser
+ # Placeholder: the actual translation runs client-side in JavaScript,
+ # so the server simply returns the input text unchanged.
+ return text
+
+
class TranslationService:
"""Main translation service that delegates to the configured provider"""
diff --git a/static/index.html b/static/index.html
index 6f3cafc..0fedcfa 100644
--- a/static/index.html
+++ b/static/index.html
@@ -352,7 +352,8 @@
@@ -366,6 +367,12 @@
+
+
@@ -396,11 +403,18 @@
function toggleImageTranslation() {
const provider = document.getElementById('provider').value;
const imageOption = document.getElementById('image-translation-option');
+ const webllmInfo = document.getElementById('webllm-info');
if (provider === 'ollama') {
imageOption.style.display = 'block';
+ webllmInfo.style.display = 'none';
+ } else if (provider === 'webllm') {
+ imageOption.style.display = 'none';
+ webllmInfo.style.display = 'block';
+ document.getElementById('translate-images').checked = false;
} else {
imageOption.style.display = 'none';
+ webllmInfo.style.display = 'none';
document.getElementById('translate-images').checked = false;
}
}
@@ -503,14 +517,48 @@
progressContainer.classList.add('active');
resultDiv.innerHTML = '';
- // Simulate progress (since we don't have real progress from backend)
+ // Better progress simulation with timeout protection
let progress = 0;
+ let progressSpeed = 8; // Start at 8% increments
const progressInterval = setInterval(() => {
- progress += Math.random() * 15;
- if (progress > 90) progress = 90;
- progressBar.style.width = progress + '%';
- progressText.textContent = `Processing: ${Math.round(progress)}%`;
- }, 500);
+ if (progress < 30) {
+ progress += progressSpeed;
+ } else if (progress < 60) {
+ progressSpeed = 4; // Slower
+ progress += progressSpeed;
+ } else if (progress < 85) {
+ progressSpeed = 2; // Even slower
+ progress += progressSpeed;
+ } else if (progress < 95) {
+ progressSpeed = 0.5; // Very slow near the end
+ progress += progressSpeed;
+ }
+
+ progressBar.style.width = Math.min(progress, 98) + '%';
+ progressText.textContent = `Processing: ${Math.round(Math.min(progress, 98))}%`;
+ }, 800);
+
+ // Safety timeout: if the request takes more than 5 minutes, show an error
+ const safetyTimeout = setTimeout(() => {
+ clearInterval(progressInterval);
+ loadingDiv.classList.remove('active');
+ progressContainer.classList.remove('active');
+ progressBar.style.width = '0%';
+ progressText.textContent = '';
+
+ resultDiv.innerHTML = `
+
+
Request timeout
+
Translation is taking longer than expected. This might be due to:
+
+ - Large file size
+ - Ollama model not responding (check if Ollama is running)
+ - Network issues with translation service
+
+
Please try again or use a different provider.
+
+ `;
+ }, 300000); // 5 minutes
try {
const response = await fetch(`${API_BASE}/translate`, {
@@ -519,6 +567,7 @@
});
clearInterval(progressInterval);
+ clearTimeout(safetyTimeout);
progressBar.style.width = '100%';
progressText.textContent = 'Complete: 100%';
@@ -557,6 +606,7 @@
}
} catch (error) {
clearInterval(progressInterval);
+ clearTimeout(safetyTimeout);
loadingDiv.classList.remove('active');
progressContainer.classList.remove('active');
progressBar.style.width = '0%';
diff --git a/static/webllm.html b/static/webllm.html
new file mode 100644
index 0000000..fb9d562
--- /dev/null
+++ b/static/webllm.html
@@ -0,0 +1,177 @@
+
+
+
+
+
+
WebLLM Translation Demo
+
+
+
+
+
+
WebLLM Translation Demo
+
+ Info: This demo runs entirely in your browser using WebGPU. First load will download ~2GB model.
+
+
+
+
+
+
+
+
+
+
+
Initializing...
+
+
+
+