# cli.py
from rag_chatbot import MultimodalRAGChatbot


def main():
    # Initialize the chatbot
    chatbot = MultimodalRAGChatbot(
        qdrant_url="http://localhost:6333",
        qdrant_collection_name="my_documents",
        ollama_model="llama3.2"
    )

    print("Multimodal RAG Chatbot")
    print("Type 'exit' to quit or 'clear' to reset the conversation history")

    while True:
        # Read the user's question
        query = input("\nYour question: ")

        # Quit if requested
        if query.lower() in ["exit", "quit", "q"]:
            break

        # Clear the conversation history if requested
        if query.lower() == "clear":
            chatbot.clear_history()
            print("History cleared")
            continue

        # Ask whether to stream the response
        stream_mode = input("Streaming mode? (y/n): ").lower() == "y"

        # Process the query
        result = chatbot.chat(query, stream=stream_mode)

        # Without streaming, print the full text response at once
        if not stream_mode:
            print("\n" + "=" * 50)
            print("Answer:")
            print(result["response"])
            print("=" * 50)

        # Show information about the retrieved sources
        print("\nSources found:")
        print(f"- {len(result['texts'])} text chunks")
        print(f"- {len(result['images'])} images")
        print(f"- {len(result['tables'])} tables")

        # Display the images if requested
        if result["images"]:
            show_images = input("\nDisplay the images? (y/n): ").lower() == "y"
            if show_images:
                for i, img in enumerate(result["images"]):
                    print(f"\nImage {i + 1}: {img['caption']} (Source: {img['source']}, Page: {img['page']})")
                    print(f"Description: {img['description']}")
                    chatbot.display_image(img["image_data"], img["caption"])

        # Display the tables if requested
        if result["tables"]:
            show_tables = input("\nDisplay the tables? (y/n): ").lower() == "y"
            if show_tables:
                for i, table in enumerate(result["tables"]):
                    print(f"\nTable {i + 1}: {table['caption']} (Source: {table['source']}, Page: {table['page']})")
                    print(f"Description: {table['description']}")
                    print("\nContents:")
                    print("```")
                    print(chatbot.format_table(table["table_data"]))
                    print("```")


if __name__ == "__main__":
    main()
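

# --- Optional: a minimal stand-in for smoke-testing the CLI ---
# This is only a sketch, not the real rag_chatbot implementation. It mirrors
# the interface the CLI above relies on (chat / clear_history / display_image /
# format_table, and a chat() result with the keys "response", "texts",
# "images", "tables"), so the interaction loop can be exercised without a
# running Qdrant or Ollama instance. The class name FakeChatbot and its
# echo behavior are assumptions for illustration; swap it in for
# MultimodalRAGChatbot inside main() to try the loop end to end.

class FakeChatbot:
    """Hypothetical stub matching the interface cli.py expects."""

    def __init__(self, **kwargs):
        # Accept and ignore the constructor arguments used in main()
        self.history = []

    def chat(self, query, stream=False):
        # Record the query and return an empty-but-well-formed result dict
        self.history.append(query)
        if stream:
            print(f"(streamed) Echo: {query}")
        return {
            "response": f"Echo: {query}",
            "texts": [],
            "images": [],
            "tables": [],
        }

    def clear_history(self):
        self.history.clear()

    def display_image(self, image_data, caption):
        # The real chatbot would render the image; here we just note it
        print(f"[image placeholder] {caption}")

    def format_table(self, table_data):
        # The real chatbot would pretty-print the table structure
        return str(table_data)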