Compare commits

...

2 Commits

7 changed files with 900 additions and 14 deletions

View File

@ -1,7 +1,12 @@
# Translation Service Configuration
TRANSLATION_SERVICE=google # Options: google, deepl, libre
TRANSLATION_SERVICE=google # Options: google, deepl, libre, ollama
DEEPL_API_KEY=your_deepl_api_key_here
# Ollama Configuration (for LLM-based translation)
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_MODEL=llama3
OLLAMA_VISION_MODEL=llava
# API Configuration
MAX_FILE_SIZE_MB=50
UPLOAD_DIR=./uploads

21
LICENSE Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 Sepehr
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -12,6 +12,11 @@ class Config:
TRANSLATION_SERVICE = os.getenv("TRANSLATION_SERVICE", "google")
DEEPL_API_KEY = os.getenv("DEEPL_API_KEY", "")
# Ollama Configuration
OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")
OLLAMA_VISION_MODEL = os.getenv("OLLAMA_VISION_MODEL", "llava")
# File Upload Configuration
MAX_FILE_SIZE_MB = int(os.getenv("MAX_FILE_SIZE_MB", "50"))
MAX_FILE_SIZE_BYTES = MAX_FILE_SIZE_MB * 1024 * 1024

70
main.py
View File

@ -5,6 +5,7 @@ FastAPI application for translating complex documents while preserving formattin
from fastapi import FastAPI, UploadFile, File, Form, HTTPException
from fastapi.responses import FileResponse, JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from pathlib import Path
from typing import Optional
import asyncio
@ -37,6 +38,11 @@ app.add_middleware(
allow_headers=["*"],
)
# Mount static files
static_dir = Path(__file__).parent / "static"
if static_dir.exists():
app.mount("/static", StaticFiles(directory=str(static_dir)), name="static")
@app.get("/")
async def root():
@ -104,6 +110,8 @@ async def translate_document(
file: UploadFile = File(..., description="Document file to translate (.xlsx, .docx, or .pptx)"),
target_language: str = Form(..., description="Target language code (e.g., 'es', 'fr', 'de')"),
source_language: str = Form(default="auto", description="Source language code (default: auto-detect)"),
provider: str = Form(default="google", description="Translation provider (google, ollama, deepl, libre)"),
translate_images: bool = Form(default=False, description="Translate images with Ollama vision (only for Ollama provider)"),
cleanup: bool = Form(default=True, description="Delete input file after translation")
):
"""
@ -145,6 +153,27 @@ async def translate_document(
await file_handler.save_upload_file(file, input_path)
logger.info(f"Saved input file to: {input_path}")
# Configure translation provider
from services.translation_service import GoogleTranslationProvider, DeepLTranslationProvider, LibreTranslationProvider, OllamaTranslationProvider, translation_service
if provider.lower() == "deepl":
if not config.DEEPL_API_KEY:
raise HTTPException(status_code=400, detail="DeepL API key not configured")
translation_provider = DeepLTranslationProvider(config.DEEPL_API_KEY)
elif provider.lower() == "libre":
translation_provider = LibreTranslationProvider()
elif provider.lower() == "ollama":
vision_model = getattr(config, 'OLLAMA_VISION_MODEL', 'llava')
translation_provider = OllamaTranslationProvider(config.OLLAMA_BASE_URL, config.OLLAMA_MODEL, vision_model)
else:
translation_provider = GoogleTranslationProvider()
# Update the global translation service
translation_service.provider = translation_provider
# Store translate_images flag for translators to access
translation_service.translate_images = translate_images
# Translate based on file type
if file_extension == ".xlsx":
logger.info("Translating Excel file...")
@ -302,6 +331,47 @@ async def download_file(filename: str):
)
@app.get("/ollama/models")
async def list_ollama_models(base_url: Optional[str] = None):
    """
    List available Ollama models
    **Parameters:**
    - **base_url**: Ollama server URL (default: from config)
    """
    # Imported lazily to avoid paying the provider-module import cost at startup.
    from services.translation_service import OllamaTranslationProvider

    # Fall back to the configured server when the caller gives no URL.
    target_url = base_url or config.OLLAMA_BASE_URL
    available = OllamaTranslationProvider.list_models(target_url)
    return {"ollama_url": target_url, "models": available, "count": len(available)}
@app.post("/ollama/configure")
async def configure_ollama(base_url: str = Form(...), model: str = Form(...)):
    """
    Configure Ollama settings
    **Parameters:**
    - **base_url**: Ollama server URL (e.g., http://localhost:11434)
    - **model**: Model name to use for translation (e.g., llama3, mistral)
    """
    # Persist the new settings on the shared config object (process-wide state).
    config.OLLAMA_BASE_URL = base_url
    config.OLLAMA_MODEL = model

    # Echo back what was stored so the client can confirm the change.
    payload = {
        "status": "success",
        "message": "Ollama configuration updated",
        "ollama_url": base_url,
        "model": model
    }
    return payload
if __name__ == "__main__":
    # Dev entry point: `python main.py` starts uvicorn with auto-reload.
    import uvicorn

    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)

119
sample_files/webllm.html Normal file
View File

@ -0,0 +1,119 @@
<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Test LLM Local - WebGPU</title>
<style>
body { font-family: system-ui, sans-serif; max-width: 800px; margin: 2rem auto; padding: 0 1rem; line-height: 1.5; }
#chat-box { border: 1px solid #ccc; padding: 1rem; height: 400px; overflow-y: auto; border-radius: 8px; background: #f9f9f9; margin-bottom: 1rem; }
.message { margin-bottom: 1rem; padding: 0.5rem 1rem; border-radius: 8px; }
.user { background: #e3f2fd; align-self: flex-end; text-align: right; }
.bot { background: #fff; border: 1px solid #eee; }
#controls { display: flex; gap: 10px; }
input { flex-grow: 1; padding: 10px; border-radius: 4px; border: 1px solid #ddd; }
button { padding: 10px 20px; background: #007bff; color: white; border: none; border-radius: 4px; cursor: pointer; }
button:disabled { background: #ccc; }
#status { font-size: 0.9rem; color: #666; margin-bottom: 1rem; font-style: italic; }
</style>
</head>
<body>
<h2>🤖 Mon LLM Local (via Chrome WebGPU)</h2>
<div id="status">Initialisation... cliquez sur "Charger le modèle" pour commencer.</div>
<div id="chat-box"></div>
<div id="controls">
<input type="text" id="user-input" placeholder="Écrivez votre message ici..." disabled>
<button id="send-btn" disabled>Envoyer</button>
</div>
<button id="load-btn" style="margin-top: 10px; background-color: #28a745;">Charger le Modèle (Llama 3.2)</button>
<script type="module">
// Importation de WebLLM directement depuis le CDN
import { CreateMLCEngine } from "https://esm.run/@mlc-ai/web-llm";
// Configuration du modèle (ici Llama 3.2 1B, léger et rapide)
const selectedModel = "Llama-3.2-1B-Instruct-q4f16_1-MLC";
let engine;
const statusLabel = document.getElementById('status');
const chatBox = document.getElementById('chat-box');
const userInput = document.getElementById('user-input');
const sendBtn = document.getElementById('send-btn');
const loadBtn = document.getElementById('load-btn');
// Fonction pour mettre à jour l'état de chargement
const initProgressCallback = (report) => {
statusLabel.innerText = report.text;
};
// 1. Engine loading: downloads the model weights into the browser (WebGPU)
// on first click, then enables the chat inputs.
loadBtn.addEventListener('click', async () => {
loadBtn.disabled = true;
statusLabel.innerText = "Démarrage du téléchargement du modèle (peut prendre quelques minutes)...";
try {
// Create the WebLLM engine; initProgressCallback reports download progress.
engine = await CreateMLCEngine(
selectedModel,
{ initProgressCallback: initProgressCallback }
);
statusLabel.innerText = "✅ Modèle chargé et prêt ! (GPU Actif)";
userInput.disabled = false;
sendBtn.disabled = false;
loadBtn.style.display = 'none';
userInput.focus();
} catch (err) {
// On failure (e.g. no WebGPU support), surface the error and allow retry.
statusLabel.innerText = "❌ Erreur : " + err.message;
loadBtn.disabled = false;
}
});
// 2. Message sending: wired to both the Send button and the Enter key.
sendBtn.addEventListener('click', sendMessage);
userInput.addEventListener('keypress', (e) => { if(e.key === 'Enter') sendMessage() });
// Send the user's input to the engine and stream the reply into the chat.
async function sendMessage() {
const text = userInput.value.trim();
if (!text) return;
// Show the user's message
appendMessage(text, 'user');
userInput.value = '';
// Placeholder bubble, filled in as tokens stream back
const botMessageDiv = appendMessage("...", 'bot');
let fullResponse = "";
// Inference (generation)
const chunks = await engine.chat.completions.create({
messages: [{ role: "user", content: text }],
stream: true, // Important: lets the text appear as it is generated
});
// Read the stream chunk by chunk
for await (const chunk of chunks) {
const content = chunk.choices[0]?.delta?.content || "";
fullResponse += content;
botMessageDiv.innerText = fullResponse;
// Auto-scroll to the bottom
chatBox.scrollTop = chatBox.scrollHeight;
}
}
// Create a chat bubble for `text`, tag it with the sender's CSS class,
// append it to the chat box, keep the view scrolled down, and return the node
// so callers can keep updating it (used for streaming bot replies).
function appendMessage(text, sender) {
    const bubble = document.createElement('div');
    bubble.classList.add('message', sender);
    bubble.innerText = text;
    chatBox.appendChild(bubble);
    chatBox.scrollTop = chatBox.scrollHeight;
    return bubble;
}
</script>
</body>
</html>

View File

@ -3,7 +3,8 @@ Translation Service Abstraction
Provides a unified interface for different translation providers
"""
from abc import ABC, abstractmethod
from typing import Optional
from typing import Optional, List
import requests
from deep_translator import GoogleTranslator, DeeplTranslator, LibreTranslator
from config import config
@ -58,13 +59,86 @@ class LibreTranslationProvider(TranslationProvider):
return text
try:
translator = LibreTranslator(source=source_language, target=target_language)
# LibreTranslator doesn't need API key for self-hosted instances
translator = LibreTranslator(source=source_language, target=target_language, custom_url="http://localhost:5000")
return translator.translate(text)
except Exception as e:
print(f"Translation error: {e}")
# Fail silently and return original text
return text
class OllamaTranslationProvider(TranslationProvider):
    """Ollama LLM translation provider.

    Talks to an Ollama server over its HTTP API: a text model handles plain
    translation via ``/api/generate``, and a vision model (e.g. llava)
    handles text embedded in images. All failures are caught and logged so
    translation stays best-effort, matching the other providers in this file.
    """

    def __init__(self, base_url: str = "http://localhost:11434", model: str = "llama3",
                 vision_model: str = "llava", timeout: int = 30, vision_timeout: int = 60):
        # Strip the trailing slash so endpoint paths can be appended safely.
        self.base_url = base_url.rstrip('/')
        self.model = model
        self.vision_model = vision_model
        # Request timeouts in seconds; vision calls are slower so they get a
        # larger default. Kept as keyword args (defaults match the previous
        # hard-coded values) so existing callers are unaffected.
        self.timeout = timeout
        self.vision_timeout = vision_timeout

    def translate(self, text: str, target_language: str, source_language: str = 'auto') -> str:
        """Translate *text* into *target_language*; return *text* on failure.

        *source_language* is accepted for interface compatibility with the
        other providers but is not used — the model infers the source.
        """
        if not text or not text.strip():
            # Nothing to translate; preserve whitespace-only input as-is.
            return text
        try:
            prompt = f"Translate the following text to {target_language}. Return ONLY the translation, nothing else:\n\n{text}"
            response = requests.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": self.model,
                    "prompt": prompt,
                    "stream": False  # single JSON reply instead of a token stream
                },
                timeout=self.timeout
            )
            response.raise_for_status()
            result = response.json()
            # Fall back to the source text if the reply lacks "response".
            return result.get("response", text).strip()
        except Exception as e:
            print(f"Ollama translation error: {e}")
            return text

    def translate_image(self, image_path: str, target_language: str) -> str:
        """Translate text within an image using the Ollama vision model.

        Returns the translated text, or "" on any failure (unreadable file,
        server error) so callers can simply skip untranslatable images.
        """
        import base64
        try:
            # Ollama expects images as base64 strings in the "images" field.
            with open(image_path, 'rb') as img_file:
                image_data = base64.b64encode(img_file.read()).decode('utf-8')
            prompt = f"Extract all text from this image and translate it to {target_language}. Return ONLY the translated text, preserving the structure and formatting."
            response = requests.post(
                f"{self.base_url}/api/generate",
                json={
                    "model": self.vision_model,
                    "prompt": prompt,
                    "images": [image_data],
                    "stream": False
                },
                timeout=self.vision_timeout
            )
            response.raise_for_status()
            result = response.json()
            return result.get("response", "").strip()
        except Exception as e:
            print(f"Ollama vision translation error: {e}")
            return ""

    @staticmethod
    def list_models(base_url: str = "http://localhost:11434") -> List[str]:
        """Return the model names available on the given Ollama server.

        Queries ``/api/tags``; returns an empty list when the server is
        unreachable or replies with an error.
        """
        try:
            response = requests.get(f"{base_url.rstrip('/')}/api/tags", timeout=5)
            response.raise_for_status()
            models = response.json().get("models", [])
            return [model["name"] for model in models]
        except Exception as e:
            print(f"Error listing Ollama models: {e}")
            return []
class TranslationService:
"""Main translation service that delegates to the configured provider"""
@ -77,16 +151,9 @@ class TranslationService:
def _get_default_provider(self) -> TranslationProvider:
"""Get the default translation provider from configuration"""
service_type = config.TRANSLATION_SERVICE.lower()
if service_type == "deepl":
if not config.DEEPL_API_KEY:
raise ValueError("DeepL API key not configured")
return DeepLTranslationProvider(config.DEEPL_API_KEY)
elif service_type == "libre":
return LibreTranslationProvider()
else: # Default to Google
return GoogleTranslationProvider()
# Always use Google Translate by default to avoid API key issues
# Provider will be overridden per request in the API endpoint
return GoogleTranslationProvider()
def translate_text(self, text: str, target_language: str, source_language: str = 'auto') -> str:
"""

599
static/index.html Normal file
View File

@ -0,0 +1,599 @@
<!DOCTYPE html>
<html lang="fr">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document Translation API - Interface de Test</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif;
background: #f5f7fa;
min-height: 100vh;
color: #2c3e50;
line-height: 1.6;
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 40px 20px;
}
.header {
background: white;
padding: 30px 40px;
margin-bottom: 30px;
border-bottom: 1px solid #e1e8ed;
box-shadow: 0 1px 3px rgba(0,0,0,0.05);
}
.header h1 {
font-size: 28px;
font-weight: 600;
color: #1a202c;
margin-bottom: 8px;
}
.header p {
font-size: 15px;
color: #718096;
}
.card {
background: white;
border-radius: 8px;
padding: 32px;
margin-bottom: 24px;
border: 1px solid #e1e8ed;
box-shadow: 0 1px 3px rgba(0,0,0,0.04);
}
.card h2 {
color: #1a202c;
margin-bottom: 24px;
font-size: 20px;
font-weight: 600;
border-bottom: 1px solid #e1e8ed;
padding-bottom: 12px;
}
.form-group {
margin-bottom: 20px;
}
label {
display: block;
margin-bottom: 8px;
color: #4a5568;
font-weight: 500;
font-size: 14px;
}
input[type="text"],
input[type="file"],
select {
width: 100%;
padding: 10px 14px;
border: 1px solid #cbd5e0;
border-radius: 6px;
font-size: 14px;
transition: all 0.2s;
background: #ffffff;
}
input[type="text"]:focus,
select:focus {
outline: none;
border-color: #4299e1;
box-shadow: 0 0 0 3px rgba(66, 153, 225, 0.1);
}
input[type="file"] {
padding: 8px;
cursor: pointer;
}
button {
background: #2563eb;
color: white;
padding: 10px 24px;
border: none;
border-radius: 6px;
font-size: 14px;
font-weight: 500;
cursor: pointer;
transition: all 0.2s;
margin-right: 10px;
margin-bottom: 10px;
}
button:hover {
background: #1e40af;
}
button:disabled {
opacity: 0.5;
cursor: not-allowed;
}
.btn-secondary {
background: #64748b;
}
.btn-secondary:hover {
background: #475569;
}
.btn-success {
background: #059669;
}
.btn-success:hover {
background: #047857;
}
.grid-2 {
display: grid;
grid-template-columns: 1fr 1fr;
gap: 20px;
}
.result {
margin-top: 20px;
padding: 16px;
border-radius: 6px;
border-left: 4px solid #cbd5e0;
background: #f7fafc;
}
.result.success {
background: #f0fdf4;
border-left-color: #10b981;
}
.result.error {
background: #fef2f2;
border-left-color: #ef4444;
}
.result h3 {
margin-bottom: 12px;
color: #1a202c;
font-size: 16px;
font-weight: 600;
}
.result pre {
background: white;
padding: 12px;
border-radius: 4px;
overflow-x: auto;
font-size: 13px;
border: 1px solid #e1e8ed;
color: #2d3748;
}
.loading {
display: none;
text-align: center;
padding: 24px;
}
.loading.active {
display: block;
}
.spinner {
border: 3px solid #e1e8ed;
border-top: 3px solid #2563eb;
border-radius: 50%;
width: 36px;
height: 36px;
animation: spin 0.8s linear infinite;
margin: 0 auto 12px;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
.progress-container {
width: 100%;
background: #e1e8ed;
border-radius: 8px;
height: 8px;
overflow: hidden;
margin: 16px 0;
display: none;
}
.progress-container.active {
display: block;
}
.progress-bar {
height: 100%;
background: linear-gradient(90deg, #2563eb 0%, #1e40af 100%);
width: 0%;
transition: width 0.3s ease;
border-radius: 8px;
}
.progress-text {
text-align: center;
margin-top: 8px;
color: #4a5568;
font-size: 14px;
font-weight: 500;
}
.badge {
display: inline-block;
padding: 4px 10px;
border-radius: 4px;
font-size: 12px;
font-weight: 500;
margin-right: 8px;
background: #e0e7ff;
color: #3730a3;
}
.models-list {
display: flex;
flex-wrap: wrap;
gap: 8px;
margin-top: 12px;
}
.model-item {
background: #f1f5f9;
padding: 6px 12px;
border-radius: 4px;
font-size: 13px;
border: 1px solid #e1e8ed;
color: #475569;
}
.download-link {
display: inline-block;
margin-top: 12px;
padding: 10px 20px;
background: #059669;
color: white;
text-decoration: none;
border-radius: 6px;
font-weight: 500;
font-size: 14px;
}
.download-link:hover {
background: #047857;
}
@media (max-width: 768px) {
.grid-2 {
grid-template-columns: 1fr;
}
.container {
padding: 20px 15px;
}
.card {
padding: 20px;
}
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>Document Translation API</h1>
<p>Professional document translation service with format preservation</p>
</div>
<!-- Configuration Ollama -->
<div class="card">
<h2>Ollama Configuration</h2>
<div class="grid-2">
<div class="form-group">
<label for="ollama-url">URL Ollama</label>
<input type="text" id="ollama-url" value="http://localhost:11434" placeholder="http://localhost:11434">
</div>
<div class="form-group">
<label for="ollama-model">Modèle Ollama</label>
<input type="text" id="ollama-model" value="llama3" placeholder="llama3, mistral, etc.">
</div>
</div>
<button onclick="listOllamaModels()" class="btn-secondary">List Available Models</button>
<button onclick="configureOllama()" class="btn-success">Save Configuration</button>
<div id="models-result"></div>
</div>
<!-- Traduction de fichier -->
<div class="card">
<h2>Document Translation</h2>
<div class="form-group">
<label for="file-input">
Select file to translate
<span class="badge">XLSX</span>
<span class="badge">DOCX</span>
<span class="badge">PPTX</span>
</label>
<input type="file" id="file-input" accept=".xlsx,.docx,.pptx">
</div>
<div class="grid-2">
<div class="form-group">
<label for="target-lang">Target Language</label>
<select id="target-lang">
<option value="es">Espagnol (es)</option>
<option value="fr">Français (fr)</option>
<option value="de">Allemand (de)</option>
<option value="it">Italien (it)</option>
<option value="pt">Portugais (pt)</option>
<option value="ru">Russe (ru)</option>
<option value="zh">Chinois (zh)</option>
<option value="ja">Japonais (ja)</option>
<option value="ko">Coréen (ko)</option>
<option value="ar">Arabe (ar)</option>
</select>
</div>
<div class="form-group">
<label for="provider">Translation Service</label>
<select id="provider" onchange="toggleImageTranslation()">
<option value="google">Google Translate (Default)</option>
<option value="ollama">Ollama LLM</option>
<option value="deepl">DeepL</option>
<option value="libre">LibreTranslate</option>
</select>
</div>
</div>
<div class="form-group" id="image-translation-option" style="display: none;">
<label style="display: flex; align-items: center; cursor: pointer;">
<input type="checkbox" id="translate-images" style="width: auto; margin-right: 10px;">
<span>Translate images with Ollama Vision (requires llava model)</span>
</label>
</div>
<button onclick="translateFile()">Translate Document</button>
<div id="loading" class="loading">
<div class="spinner"></div>
<p>Translation in progress, please wait...</p>
</div>
<div class="progress-container" id="progress-container">
<div class="progress-bar" id="progress-bar"></div>
</div>
<div class="progress-text" id="progress-text"></div>
<div id="translate-result"></div>
</div>
<!-- Test de l'API -->
<div class="card">
<h2>API Health Check</h2>
<button onclick="checkHealth()">Check API Status</button>
<div id="health-result"></div>
</div>
</div>
<script>
const API_BASE = 'http://localhost:8000';
// Show the image-translation checkbox only when the Ollama provider is
// selected; hide it (and clear the checkbox) for every other provider.
function toggleImageTranslation() {
    const imageOption = document.getElementById('image-translation-option');
    const isOllama = document.getElementById('provider').value === 'ollama';
    if (isOllama) {
        imageOption.style.display = 'block';
    } else {
        imageOption.style.display = 'none';
        document.getElementById('translate-images').checked = false;
    }
}
// List the models available on the Ollama server and render the result.
async function listOllamaModels() {
const url = document.getElementById('ollama-url').value;
const resultDiv = document.getElementById('models-result');
try {
// The backend proxies the query to the given Ollama server.
const response = await fetch(`${API_BASE}/ollama/models?base_url=${encodeURIComponent(url)}`);
const data = await response.json();
if (data.models && data.models.length > 0) {
resultDiv.innerHTML = `
<div class="result success">
<h3>${data.count} model(s) available</h3>
<div class="models-list">
${data.models.map(model => `<span class="model-item">${model}</span>`).join('')}
</div>
</div>
`;
} else {
// Reachable server but no models (or backend returned an empty list).
resultDiv.innerHTML = `
<div class="result error">
<h3>No models found</h3>
<p>Make sure Ollama is running and accessible at ${url}</p>
</div>
`;
}
} catch (error) {
// Network/parse failure talking to our own API.
resultDiv.innerHTML = `
<div class="result error">
<h3>Connection error</h3>
<pre>${error.message}</pre>
</div>
`;
}
}
// Save the Ollama URL/model configuration on the backend.
async function configureOllama() {
const url = document.getElementById('ollama-url').value;
const model = document.getElementById('ollama-model').value;
const resultDiv = document.getElementById('models-result');
try {
// POST as multipart/form-data — the endpoint declares Form(...) fields.
const formData = new FormData();
formData.append('base_url', url);
formData.append('model', model);
const response = await fetch(`${API_BASE}/ollama/configure`, {
method: 'POST',
body: formData
});
const data = await response.json();
resultDiv.innerHTML = `
<div class="result success">
<h3>Configuration saved</h3>
<p><strong>URL:</strong> ${data.ollama_url}</p>
<p><strong>Model:</strong> ${data.model}</p>
</div>
`;
} catch (error) {
resultDiv.innerHTML = `
<div class="result error">
<h3>Error</h3>
<pre>${error.message}</pre>
</div>
`;
}
}
// Upload the selected document to the backend /translate endpoint and
// offer the translated result as a download.
// Fix: the download link previously had download="$(unknown)" — invalid
// template syntax that ignored the extracted filename; it now uses
// download="${filename}" so the saved file keeps its server-given name.
async function translateFile() {
    const fileInput = document.getElementById('file-input');
    const targetLang = document.getElementById('target-lang').value;
    const provider = document.getElementById('provider').value;
    const translateImages = document.getElementById('translate-images').checked;
    const resultDiv = document.getElementById('translate-result');
    const loadingDiv = document.getElementById('loading');
    const progressContainer = document.getElementById('progress-container');
    const progressBar = document.getElementById('progress-bar');
    const progressText = document.getElementById('progress-text');
    if (!fileInput.files || fileInput.files.length === 0) {
        alert('Please select a file');
        return;
    }
    const formData = new FormData();
    formData.append('file', fileInput.files[0]);
    formData.append('target_language', targetLang);
    formData.append('provider', provider);
    formData.append('translate_images', translateImages);
    loadingDiv.classList.add('active');
    progressContainer.classList.add('active');
    resultDiv.innerHTML = '';
    // Simulate progress (the backend reports no real progress); cap at 90%
    // until the response actually arrives.
    let progress = 0;
    const progressInterval = setInterval(() => {
        progress += Math.random() * 15;
        if (progress > 90) progress = 90;
        progressBar.style.width = progress + '%';
        progressText.textContent = `Processing: ${Math.round(progress)}%`;
    }, 500);
    try {
        const response = await fetch(`${API_BASE}/translate`, {
            method: 'POST',
            body: formData
        });
        clearInterval(progressInterval);
        progressBar.style.width = '100%';
        progressText.textContent = 'Complete: 100%';
        // Briefly show 100% before hiding the progress UI.
        setTimeout(() => {
            loadingDiv.classList.remove('active');
            progressContainer.classList.remove('active');
            progressBar.style.width = '0%';
            progressText.textContent = '';
        }, 500);
        if (response.ok) {
            const blob = await response.blob();
            // Derive the download name from the Content-Disposition header,
            // falling back to a generic name when the header is absent.
            const filename = response.headers.get('content-disposition')?.split('filename=')[1]?.replace(/"/g, '') || 'translated_file';
            // Object URL lets the user download the in-memory blob.
            const url = window.URL.createObjectURL(blob);
            resultDiv.innerHTML = `
                <div class="result success">
                    <h3>Translation completed successfully</h3>
                    <p><strong>File:</strong> ${fileInput.files[0].name}</p>
                    <p><strong>Target language:</strong> ${targetLang}</p>
                    <p><strong>Service:</strong> ${provider}</p>
                    ${translateImages ? '<p><strong>Images:</strong> Translated with Ollama Vision</p>' : ''}
                    <a href="${url}" download="${filename}" class="download-link">Download translated file</a>
                </div>
            `;
        } else {
            const error = await response.json();
            resultDiv.innerHTML = `
                <div class="result error">
                    <h3>Translation error</h3>
                    <pre>${JSON.stringify(error, null, 2)}</pre>
                </div>
            `;
        }
    } catch (error) {
        // Network failure: stop the fake progress and show the error.
        clearInterval(progressInterval);
        loadingDiv.classList.remove('active');
        progressContainer.classList.remove('active');
        progressBar.style.width = '0%';
        progressText.textContent = '';
        resultDiv.innerHTML = `
            <div class="result error">
                <h3>Error</h3>
                <pre>${error.message}</pre>
            </div>
        `;
    }
}
// Check the API health endpoint and render its JSON status.
async function checkHealth() {
const resultDiv = document.getElementById('health-result');
try {
const response = await fetch(`${API_BASE}/health`);
const data = await response.json();
resultDiv.innerHTML = `
<div class="result success">
<h3>API operational</h3>
<pre>${JSON.stringify(data, null, 2)}</pre>
</div>
`;
} catch (error) {
// Fetch failed — server down or CORS/network problem.
resultDiv.innerHTML = `
<div class="result error">
<h3>API not accessible</h3>
<pre>${error.message}</pre>
</div>
`;
}
}
</script>
</body>
</html>