Enhance chatbot UI by increasing height, adding copy button, and refining image gallery display

sepehr 2025-03-09 15:30:54 +01:00
parent 819d3a0956
commit 9fd056baaf
2 changed files with 101 additions and 115 deletions

View File

@@ -9,6 +9,9 @@ from translations.lang_mappings import LANGUAGE_MAPPING
from utils.image_utils import base64_to_image
from langchain.callbacks.base import BaseCallbackHandler
import re
+ from typing import List, Union, Dict, Any
+ # For Gradio 4.x
+ # from gradio.types.message import ImageMessage, HtmlMessage, TextMessage
def clean_llm_response(text):
"""Nettoie la réponse du LLM en enlevant les balises de pensée et autres éléments non désirés."""
@@ -53,7 +56,9 @@ def display_images(images_list=None):
for img_data in images_to_use:
image = img_data["image"]
if image:
- caption = f"{img_data['caption']} (Source: {img_data['source']}, Page: {img_data['page']})"
+ # Strip "(Texte N)"-style markers from the caption
+ caption = re.sub(pattern_texte, '', img_data["caption"])
+ caption = f"{caption} (Source: {img_data['source']}, Page: {img_data['page']})"
gallery.append((image, caption))
return gallery if gallery else None
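Note: gr.Gallery accepts a list of (image, caption) tuples, so this return value can be bound directly to the gallery component, and returning None simply leaves the gallery empty.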
@@ -160,7 +165,7 @@ def convert_to_messages_format(history):
messages = []
# Check whether we already have the messages format
if history and isinstance(history[0], dict) and "role" in history[0]:
return history
# Format tuples [(user_msg, assistant_msg), ...]
@@ -171,71 +176,75 @@ def convert_to_messages_format(history):
messages.append({"role": "user", "content": user_msg})
if assistant_msg:  # Avoid empty messages
messages.append({"role": "assistant", "content": assistant_msg})
- except ValueError:
+ except Exception as e:
+ # Log the error for debugging
print(f"Unrecognized history format: {history}")
+ print(f"Error: {str(e)}")
+ # Return an empty history on error
return []
return messages
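For example, a tuple-format history converts as follows:
convert_to_messages_format([("Hi", "Hello!")])
# -> [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]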
# Define the regex pattern outside the f-string
pattern_texte = r'\(Texte \d+\)'
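# e.g. re.sub(pattern_texte, '', "Légende (Texte 5)") returns "Légende "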
def process_query(message, history, streaming, show_sources, max_images, language):
global current_images, current_tables
# Clearer debug output
- print(f"Langue sélectionnée pour la réponse: {language} -> {LANGUAGE_MAPPING.get(language, 'français')}")
+ print(f"Language selected for response: {language} -> {LANGUAGE_MAPPING.get(language, 'français')}")
if not message.strip():
return history, "", None, None
current_images = []
current_tables = []
print(f"Traitement du message: {message}")
print(f"Streaming: {streaming}")
try:
+ # Convert history to messages format
+ messages_history = convert_to_messages_format(history)
if streaming:
- # Convertir history en format messages pour l'affichage
- messages_history = convert_to_messages_format(history)
# Add user message to history
messages_history.append({"role": "user", "content": message})
# Add empty message for assistant response
messages_history.append({"role": "assistant", "content": ""})
- # 1. Récupérer les documents pertinents
+ # Get relevant documents
docs = rag_bot._retrieve_relevant_documents(message)
- # 2. Préparer le contexte et l'historique
+ # Process context and history
context = rag_bot._format_documents(docs)
history_text = rag_bot._format_chat_history()
- # 3. Préparer le prompt
+ # Create prompt
prompt_template = ChatPromptTemplate.from_template("""
- Tu es un assistant documentaire spécialisé qui utilise le contexte fourni.
+ You are a specialized document assistant that uses the provided context.
- ===== INSTRUCTION CRUCIALE SUR LA LANGUE =====
- RÉPONDS UNIQUEMENT EN {language}. C'est une exigence ABSOLUE.
- NE RÉPONDS JAMAIS dans une autre langue que {language}, quelle que soit la langue de la question.
+ ===== CRITICAL LANGUAGE INSTRUCTION =====
+ RESPOND ONLY IN {language}. This is an ABSOLUTE requirement.
+ NEVER RESPOND in any language other than {language}, regardless of question language.
==============================================
- Instructions spécifiques:
- 1. Pour chaque image mentionnée: inclure la légende, source, page et description
- 2. Pour chaque tableau: inclure titre, source, page et signification
- 3. Pour les équations: utiliser la syntaxe LaTeX exacte
- 4. Ne pas inventer d'informations hors du contexte fourni
- 5. Citer précisément les sources
+ Specific instructions:
+ 1. For each image mentioned: include caption, source, page and description
+ 2. For each table: include title, source, page and significance
+ 3. For equations: use exact LaTeX syntax
+ 4. Don't invent information outside the provided context
+ 5. Cite sources precisely
- Historique de conversation:
+ Conversation history:
{chat_history}
- Contexte:
+ Context:
{context}
Question: {question}
- Réponds de façon structurée en intégrant les images, tableaux et équations disponibles.
- TA RÉPONSE DOIT ÊTRE UNIQUEMENT ET ENTIÈREMENT EN {language}. CETTE RÈGLE EST ABSOLUE.
+ Respond in a structured way incorporating available images, tables and equations.
+ YOUR RESPONSE MUST BE SOLELY AND ENTIRELY IN {language}. THIS RULE IS ABSOLUTE.
""")
- # Assurer que la langue est bien passée dans le format du prompt
+ # Set language for the response
selected_language = LANGUAGE_MAPPING.get(language, "français")
messages = prompt_template.format_messages(
chat_history=history_text,
@@ -244,10 +253,10 @@ def process_query(message, history, streaming, show_sources, max_images, language
language=selected_language
)
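# format_messages fills the {chat_history}, {context}, {question} and {language} placeholders declared in the template above.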
- # 5. Créer un handler de streaming personnalisé
+ # Create streaming handler
handler = GradioStreamingHandler()
- # 6. Créer un modèle LLM avec notre handler
+ # Create LLM model with our handler
streaming_llm = ChatOllama(
model=rag_bot.llm.model,
base_url=rag_bot.llm.base_url,
@@ -255,87 +264,81 @@ def process_query(message, history, streaming, show_sources, max_images, language
callbacks=[handler]
)
- # 7. Lancer la génération dans un thread pour ne pas bloquer l'UI
+ # Generate response in a separate thread
def generate_response():
streaming_llm.invoke(messages)
thread = threading.Thread(target=generate_response)
thread.start()
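# The background thread produces tokens through the callback into handler.tokens_queue; the loop below drains that queue (50 ms timeout) and yields partial updates so the UI never blocks.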
- # 8. Récupérer les tokens et mettre à jour l'interface
+ # Process tokens and update interface
partial_response = ""
- # Attendre les tokens avec un timeout
+ # Wait for tokens with timeout
while thread.is_alive() or not handler.tokens_queue.empty():
try:
token = handler.tokens_queue.get(timeout=0.05)
partial_response += token
- # Nettoyer la réponse uniquement pour l'affichage (pas pour l'historique interne)
+ # Clean response for display
clean_response = clean_llm_response(partial_response)
- # Mettre à jour le dernier message (assistant)
+ # Update assistant message - JUST TEXT, not multimodal
messages_history[-1]["content"] = clean_response
yield messages_history, "", None, None
except queue.Empty:
continue
- # Après la boucle, nettoyer la réponse complète pour l'historique interne
+ # After loop, clean the complete response for internal history
partial_response = clean_llm_response(partial_response)
rag_bot.chat_history.append({"role": "user", "content": message})
rag_bot.chat_history.append({"role": "assistant", "content": partial_response})
- # 10. Récupérer les sources, images, tableaux
+ # Get sources, images, tables
texts, images, tables = rag_bot._process_documents(docs)
- # Préparer les informations sur les sources
+ # Process sources
source_info = ""
- if texts:
- source_info += f"📚 {len(texts)} textes • "
- if images:
- source_info += f"🖼️ {len(images)} images • "
- if tables:
- source_info += f"📊 {len(tables)} tableaux"
+ clean_texts = [re.sub(pattern_texte, '', t.get("source", "")) for t in texts]
+ # Remove duplicates and empty items
+ clean_texts = [t for t in clean_texts if t.strip()]
+ clean_texts = list(set(clean_texts))
+ if clean_texts:
+ source_info += f"📚 Sources: {', '.join(clean_texts)}"
if source_info:
source_info = "Sources trouvées: " + source_info
- # 11. Traiter les images
- if show_sources and images:
- images = images[:max_images]
- for img in images:
+ # Process images and tables for SEPARATE display only
+ if show_sources and images and max_images > 0:
+ for img in images[:max_images]:
img_data = img.get("image_data")
if img_data:
image = base64_to_image(img_data)
if image:
+ caption = re.sub(pattern_texte, '', img.get("caption", ""))
+ # Only add to gallery, not to chat messages
current_images.append({
"image": image,
- "caption": img.get("caption", ""),
+ "caption": caption,
"source": img.get("source", ""),
- "page": img.get("page", ""),
- "description": img.get("description", "")
+ "page": img.get("page", "")
})
- # 12. Traiter les tableaux
if show_sources and tables:
for table in tables:
current_tables.append({
"data": rag_bot.format_table(table.get("table_data", "")),
"caption": table.get("caption", ""),
"source": table.get("source", ""),
"page": table.get("page", ""),
"description": table.get("description", "")
})
- # 13. Retourner les résultats finaux
- images_display = display_images()
- tables_display = display_tables()
- yield messages_history, source_info, images_display, tables_display
+ # Final yield with separate image gallery
+ yield messages_history, source_info, display_images(), display_tables()
else:
- # Version sans streaming
+ # Version non-streaming
print("Mode non-streaming activé")
source_info = ""
- history_tuples = history if isinstance(history, list) else []
# Add the user message to the history in messages format
messages_history.append({"role": "user", "content": message})
- # Initialize multimodal_content first
- multimodal_content = [result["response"]]  # Start with text response
# After obtaining the result
result = rag_bot.chat(
message,
stream=False,
@@ -344,12 +347,10 @@ def process_query(message, history, streaming, show_sources, max_images, language
# Strip <think> tags from the response
result["response"] = clean_llm_response(result["response"])
- # Convertir l'historique au format messages
- messages_history = convert_to_messages_format(history)
- messages_history.append({"role": "user", "content": message})
# Append the assistant response in messages format
messages_history.append({"role": "assistant", "content": result["response"]})
- # Mise à jour de l'historique interne
+ # Update the chatbot's internal history
rag_bot.chat_history.append({"role": "user", "content": message})
rag_bot.chat_history.append({"role": "assistant", "content": result["response"]})
@@ -364,33 +365,23 @@ def process_query(message, history, streaming, show_sources, max_images, language
if source_info:
source_info = "Sources trouvées: " + source_info
- # Traiter les images et tableaux
+ # Process images for SEPARATE gallery
if show_sources and "images" in result and result["images"]:
- images = result["images"][:max_images]
- for img in images:
+ for img in result["images"][:max_images]:
img_data = img.get("image_data")
if img_data:
image = base64_to_image(img_data)
if image:
+ caption = re.sub(pattern_texte, '', img.get("caption", ""))
+ # Only add to gallery
current_images.append({
"image": image,
- "caption": img.get("caption", ""),
+ "caption": caption,
"source": img.get("source", ""),
- "page": img.get("page", ""),
- "description": img.get("description", "")
+ "page": img.get("page", "")
})
if show_sources and "tables" in result and result["tables"]:
tables = result["tables"]
for table in tables:
current_tables.append({
"data": rag_bot.format_table(table.get("table_data", "")),
"caption": table.get("caption", ""),
"source": table.get("source", ""),
"page": table.get("page", ""),
"description": table.get("description", "")
})
# Final yield with separate displays
yield messages_history, source_info, display_images(), display_tables()
except Exception as e:
@@ -398,8 +389,13 @@ def process_query(message, history, streaming, show_sources, max_images, language
traceback_text = traceback.format_exc()
print(error_msg)
print(traceback_text)
- history = history + [(message, error_msg)]
- yield history, "Erreur lors du traitement de la requête", None, None
+ # Format the error in messages format
+ error_history = convert_to_messages_format(history)
+ error_history.append({"role": "user", "content": message})
+ error_history.append({"role": "assistant", "content": error_msg})
+ yield error_history, "Erreur lors du traitement de la requête", None, None
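# Surfacing the error as an assistant message keeps the yield shape identical to the success path, so the Gradio output bindings stay unchanged.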
# Function to reset the conversation
def reset_conversation():
@@ -410,4 +406,4 @@ def reset_conversation():
rag_bot.clear_history()
# Return an empty list in messages format
- return [], "", None, None
+ return [], "", None, None  # Empty list = no messages

View File

@@ -73,11 +73,11 @@ def build_interface(
with gr.Row():
with gr.Column(scale=2):
chat_interface = gr.Chatbot(
- height=600,
- show_label=False,
- layout="bubble",
- elem_id="chatbot",
- type="messages"  # Ajoutez cette ligne
+ height=800,
+ bubble_full_width=False,
+ show_copy_button=True,
+ type="messages"
# likeable=False,
)
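# Note: show_copy_button=True adds a per-message copy icon and bubble_full_width=False keeps bubbles sized to their content; since elem_id="chatbot" appears to have been dropped, the #chatbot CSS height rule at the bottom of this file may no longer target this component.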
with gr.Row():
@@ -144,17 +144,9 @@ def build_interface(
label=ui_elements['max_images_label']
)
gr.Markdown("---")
- # Ne pas supprimer ces lignes dans ui.py
- images_title = gr.Markdown(f"### {ui_elements['images_title']}")
- image_gallery = gr.Gallery(
- label=ui_elements['images_title'],
- show_label=False,
- columns=2,
- height=300,
- object_fit="contain"
- )
+ image_gallery = gr.Gallery(label="Images")
tables_title = gr.Markdown(f"### {ui_elements['tables_title']}")
tables_display = gr.HTML()
@@ -190,9 +182,7 @@ def build_interface(
apply_collection_btn,
streaming,
show_sources,
- max_images,
- images_title,
- tables_title
+ max_images
]
)
@@ -215,7 +205,7 @@ def build_interface(
clear_btn.click(
reset_conversation_fn,
- outputs=[chat_interface, source_info, image_gallery, tables_display]
+ outputs=[chat_interface, source_info]  # Remove image_gallery and tables_display
)
# Connect the model-change handler
@@ -236,7 +226,7 @@ def build_interface(
gr.Markdown("""
<style>
.gradio-container {max-width: 1200px !important}
- #chatbot {height: 600px; overflow-y: auto;}
+ #chatbot {height: 800px; overflow-y: auto;}
#sources_info {margin-top: 10px; color: #666;}
/* Improved styles for equations */