# AI assistant with a RAG system to query information from
# the gwIAS search pipeline
# using LangChain and deployed with Gradio

from rag import RAG, load_docs
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from langchain_openai import ChatOpenAI
import gradio as gr

# Load the documentation
docs = load_docs()
print("Pages loaded:", len(docs))

# LLM model
llm = ChatOpenAI(model="gpt-4o-mini")

# Embeddings
embed_model = "sentence-transformers/multi-qa-distilbert-cos-v1"
# embed_model = "nvidia/NV-Embed-v2"
embeddings = HuggingFaceInstructEmbeddings(model_name=embed_model)

# RAG chain
rag_chain = RAG(llm, docs, embeddings)


def handle_prompt(message, history):
    """Query the RAG chain and stream the answer back to the chat UI."""
    try:
        # Accumulate streamed chunks so Gradio re-renders the growing answer
        out = ""
        for chunk in rag_chain.stream(message):
            out += chunk
            yield out
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")


if __name__ == "__main__":
    # Predefined messages and examples
    description = "AI-powered assistant to help with the [gwIAS](https://github.com/JayWadekar/gwIAS-HM) gravitational-wave search pipeline."
    greetingsmessage = "Hi, I'm the gwIAS Bot. I'm here to assist you with the search pipeline."
    example_questions = [
        "Can you give me the code for calculating the coherent score?",
        "Which module in the code is used for collecting coincident triggers?",
        "How are template banks constructed?"
    ]

    # Customized Gradio chatbot with a greeting message and avatars
    chatbot = gr.Chatbot([{"role": "assistant", "content": greetingsmessage}],
                         type="messages",
                         avatar_images=["ims/userpic.png", "ims/gwIASlogo.jpg"],
                         height="60vh")

    # Gradio chat interface wrapping the streaming handler
    demo = gr.ChatInterface(handle_prompt,
                            type="messages",
                            title="gwIAS DocBot",
                            fill_height=True,
                            examples=example_questions,
                            theme=gr.themes.Soft(),
                            description=description,
                            # cache_examples=False,
                            chatbot=chatbot)

    demo.launch()

# https://arxiv.org/html/2405.17400v2
# https://arxiv.org/html/2312.06631v1
# https://arxiv.org/html/2310.15233v2
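
# ----------------------------------------------------------------------
# Illustrative sketch (not used by the app above): one plausible way the
# `rag.py` helpers imported at the top could be implemented with LangChain.
# The names `_example_load_docs` / `_example_rag_chain`, the FAISS store,
# the chunk sizes, the source URL, and the prompt text are all assumptions
# for illustration, not the actual gwIAS implementation.
# Usage would mirror the app above:
#   docs = _example_load_docs()
#   rag_chain = _example_rag_chain(llm, docs, embeddings)
# ----------------------------------------------------------------------
def _example_load_docs():
    from langchain_community.document_loaders import WebBaseLoader

    # Placeholder source: scrape a single documentation page
    return WebBaseLoader("https://github.com/JayWadekar/gwIAS-HM").load()


def _example_rag_chain(llm, docs, embeddings):
    from langchain_community.vectorstores import FAISS
    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables import RunnablePassthrough
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    # Split the documentation into overlapping chunks for retrieval
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(docs)

    # Index the chunks in an in-memory FAISS vector store
    retriever = FAISS.from_documents(chunks, embeddings).as_retriever(
        search_kwargs={"k": 4})

    prompt = ChatPromptTemplate.from_template(
        "Answer the question using only the context below.\n\n"
        "Context:\n{context}\n\nQuestion: {question}"
    )

    def _format(retrieved):
        return "\n\n".join(d.page_content for d in retrieved)

    # LCEL chain: retrieve -> format -> prompt -> LLM -> plain text.
    # Calling .stream() on this chain yields string chunks, matching how
    # handle_prompt consumes rag_chain.stream(message).
    return (
        {"context": retriever | _format, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )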