Add WebLLM model selection and cache management
This commit is contained in:
@@ -368,9 +368,24 @@
|
||||
</div>
|
||||
|
||||
<!-- WebLLM info panel: hidden by default, shown when the WebLLM provider is selected.
     Holds the in-browser model picker and a cache-clearing action. -->
<div class="form-group" id="webllm-info" style="display: none; padding: 12px; background: #e0f2ff; border-radius: 6px; border-left: 4px solid #2563eb;">
    <p style="margin: 0 0 10px 0; font-size: 13px; color: #1e40af;">
        <strong>WebLLM Mode:</strong> Translation runs entirely in your browser using WebGPU. First use downloads the model.
    </p>
    <div style="display: grid; grid-template-columns: 1fr auto; gap: 10px; align-items: end;">
        <div>
            <label for="webllm-model" style="font-size: 12px; color: #4a5568; margin-bottom: 4px;">Select Model:</label>
            <select id="webllm-model" style="width: 100%; padding: 6px; font-size: 13px; border: 1px solid #cbd5e0; border-radius: 4px;">
                <option value="Llama-3.1-8B-Instruct-q4f32_1-MLC">Llama 3.1 8B (~4.5GB)</option>
                <option value="Llama-3.2-3B-Instruct-q4f32_1-MLC">Llama 3.2 3B (~2GB)</option>
                <option value="Phi-3.5-mini-instruct-q4f16_1-MLC">Phi 3.5 Mini (~2.5GB)</option>
                <option value="Mistral-7B-Instruct-v0.3-q4f16_1-MLC">Mistral 7B (~4.5GB)</option>
                <option value="gemma-2-2b-it-q4f16_1-MLC">Gemma 2 2B (~1.5GB)</option>
            </select>
        </div>
        <!-- Deletes downloaded model weights from the browser (see clearWebLLMCache) -->
        <button onclick="clearWebLLMCache()" style="background: #dc2626; padding: 6px 12px; font-size: 13px; white-space: nowrap;">
            Clear Cache
        </button>
    </div>
</div>
<button onclick="translateFile()">Translate Document</button>
|
||||
@@ -399,6 +414,37 @@
|
||||
<script>
|
||||
// Base URL of the backend translation API (local development server).
const API_BASE = 'http://localhost:8000';
|
||||
|
||||
// Clear WebLLM cache: deletes downloaded model data (IndexedDB databases and
// Cache Storage entries whose names contain "webllm" or "mlc") so the next
// WebLLM session re-downloads the model. Prompts the user before deleting and
// reports the outcome via alert().
async function clearWebLLMCache() {
  if (!confirm('This will delete all downloaded WebLLM models from your browser cache. Continue?')) {
    return;
  }

  try {
    // Clear IndexedDB databases used by WebLLM.
    // indexedDB.databases() is not implemented in every browser (e.g. older
    // Firefox); guard so we don't throw before reaching the Cache API cleanup.
    if (typeof indexedDB.databases === 'function') {
      const databases = await indexedDB.databases();
      for (const db of databases) {
        if (db.name && (db.name.includes('webllm') || db.name.includes('mlc'))) {
          // deleteDatabase() is asynchronous — await its request so we don't
          // report success before deletion has actually completed. "blocked"
          // (another tab holds a connection) resolves too: deletion proceeds
          // once the other connection closes.
          await new Promise((resolve, reject) => {
            const request = indexedDB.deleteDatabase(db.name);
            request.onsuccess = resolve;
            request.onblocked = resolve;
            request.onerror = () =>
              reject(request.error ?? new Error('Failed to delete database ' + db.name));
          });
        }
      }
    }

    // Clear matching Cache Storage (Cache API) entries.
    if ('caches' in window) {
      const cacheNames = await caches.keys();
      for (const name of cacheNames) {
        if (name.includes('webllm') || name.includes('mlc')) {
          await caches.delete(name);
        }
      }
    }

    alert('✅ WebLLM cache cleared successfully! Refresh the page.');
  } catch (error) {
    alert('❌ Error clearing cache: ' + error.message);
  }
}
|
||||
// Toggle image translation option based on provider
|
||||
function toggleImageTranslation() {
|
||||
const provider = document.getElementById('provider').value;
|
||||
|
||||
Reference in New Issue
Block a user