Production-ready improvements: security hardening, Redis sessions, retry logic, updated pricing
Changes: - Removed hardcoded admin credentials (now requires env vars) - Added Redis session storage with in-memory fallback - Improved CORS configuration with warnings for development mode - Added retry_with_backoff decorator for translation API calls - Updated pricing for the Starter, Pro, and Business tiers (exact amounts were lost in rendering) - Stripe price IDs now loaded from environment variables - Added redis to requirements.txt - Updated .env.example with all new configuration options - Created COMPREHENSIVE_REVIEW_AND_PLAN.md with deployment roadmap - Frontend: Updated pricing page, new UI components
This commit is contained in:
@@ -11,16 +11,45 @@ from config import config
|
||||
import concurrent.futures
|
||||
import threading
|
||||
import asyncio
|
||||
from functools import lru_cache
|
||||
from functools import lru_cache, wraps
|
||||
import time
|
||||
import hashlib
|
||||
import random
|
||||
import logging
|
||||
from collections import OrderedDict
|
||||
|
||||
# Module-wide logger; handlers and levels are configured by the application.
logger = logging.getLogger(__name__)

# Shared worker pool that fans translation requests out in parallel.
# NOTE(review): 8 workers appears to be a deliberate concurrency cap —
# TODO confirm against the translation provider's rate limits.
_executor = concurrent.futures.ThreadPoolExecutor(max_workers=8)
||||
def retry_with_backoff(max_retries: int = 3, base_delay: float = 1.0, max_delay: float = 30.0):
    """
    Decorator adding retry logic with exponential backoff and jitter.

    Used for API calls that may fail due to rate limiting or transient
    errors.

    Args:
        max_retries: Total number of attempts, including the first call.
            Must be >= 1.
        base_delay: Initial backoff delay in seconds; doubled each attempt.
        max_delay: Upper bound on any single backoff delay, in seconds.

    Raises:
        ValueError: If ``max_retries < 1``. (The original fell through the
            attempt loop and executed ``raise None``, producing a confusing
            ``TypeError: exceptions must derive from BaseException`` at the
            first call site.)

    The wrapped function re-raises the last caught exception once all
    attempts are exhausted.
    """
    if max_retries < 1:
        # Fail fast at decoration time instead of raising `None` at call time.
        raise ValueError("max_retries must be >= 1")

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            last_exception = None
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except Exception as e:  # broad on purpose: any transient failure is retryable
                    last_exception = e
                    if attempt < max_retries - 1:
                        # Exponential backoff with jitter to avoid a thundering herd,
                        # capped at max_delay.
                        delay = min(base_delay * (2 ** attempt) + random.uniform(0, 1), max_delay)
                        # Lazy %-args: the message is only formatted if the level is enabled.
                        logger.warning(
                            "Retry %d/%d for %s after %.2fs: %s",
                            attempt + 1, max_retries, func.__name__, delay, e,
                        )
                        time.sleep(delay)
            # All retries exhausted — surface the final failure to the caller.
            logger.error(
                "All %d retries failed for %s: %s",
                max_retries, func.__name__, last_exception,
            )
            raise last_exception
        return wrapper
    return decorator
|
||||
|
||||
|
||||
class TranslationCache:
|
||||
"""Thread-safe LRU cache for translations to avoid redundant API calls"""
|
||||
|
||||
@@ -143,6 +172,11 @@ class GoogleTranslationProvider(TranslationProvider):
|
||||
self._local.translators[key] = GoogleTranslator(source=source_language, target=target_language)
|
||||
return self._local.translators[key]
|
||||
|
||||
@retry_with_backoff(max_retries=3, base_delay=1.0)
def _do_translate(self, translator: GoogleTranslator, text: str) -> str:
    """Run a single translation call; transient failures are retried by the decorator."""
    translated = translator.translate(text)
    return translated
|
||||
|
||||
def translate(self, text: str, target_language: str, source_language: str = 'auto') -> str:
|
||||
if not text or not text.strip():
|
||||
return text
|
||||
@@ -154,12 +188,12 @@ class GoogleTranslationProvider(TranslationProvider):
|
||||
|
||||
try:
|
||||
translator = self._get_translator(source_language, target_language)
|
||||
result = translator.translate(text)
|
||||
result = self._do_translate(translator, text)
|
||||
# Cache the result
|
||||
_translation_cache.set(text, target_language, source_language, self.provider_name, result)
|
||||
return result
|
||||
except Exception as e:
|
||||
print(f"Translation error: {e}")
|
||||
logger.error(f"Translation error: {e}")
|
||||
return text
|
||||
|
||||
def translate_batch(self, texts: List[str], target_language: str, source_language: str = 'auto', batch_size: int = 50) -> List[str]:
|
||||
|
||||
Reference in New Issue
Block a user