# Mount Google Drive so the PDF corpus and the GGUF model file are reachable.
from google.colab import drive
drive.mount("/content/drive")
!pip install langchain sentence-transformers chromadb llama-cpp-python langchain_community pypdf
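# Overview of the pipeline below: mount Drive -> load PDFs -> chunk text ->
# embed with PubMedBERT -> index in Chroma -> retrieve top chunks -> answer
# with a local BioMistral-7B GGUF model served through llama.cpp.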
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import SentenceTransformerEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_community.llms import LlamaCpp
# Load every PDF in the data folder and split it into small overlapping chunks.
loader = PyPDFDirectoryLoader("/content/drive/MyDrive/BioMistral/Data")
docs = loader.load()

text_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
chunks = text_splitter.split_documents(docs)
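# Optional sanity check: confirm the PDFs loaded and the splitter produced
# reasonably sized chunks (the counts depend entirely on your own data).
print(f"{len(docs)} pages -> {len(chunks)} chunks")
print(chunks[0].page_content[:200])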
import os

# A Hugging Face token is expected in the environment (e.g. set via Colab
# secrets); fail early with a clear message if it is missing.
if not os.getenv("HUGGINGFACEHUB_API_TOKEN"):
    raise EnvironmentError("Set HUGGINGFACEHUB_API_TOKEN before running.")
# PubMedBERT sentence embeddings are a good fit for biomedical text.
embeddings = SentenceTransformerEmbeddings(model_name="NeuML/pubmedbert-base-embeddings")
vectorstore = Chroma.from_documents(chunks, embeddings)
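# Note: this index is in-memory only and is rebuilt on every run. To persist
# it across sessions, Chroma accepts a persist_directory (path is illustrative):
# vectorstore = Chroma.from_documents(
#     chunks, embeddings, persist_directory="/content/drive/MyDrive/BioMistral/chroma_db"
# )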
query="What are the major risk factors of heart disease?"
search_results=vectorstore.similarity_search(query)
search_results
retriever=vectorstore.as_retriever(search_kwargs={"k":5})
retriever.get_relevant_documents(query)
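# similarity_search queries the store directly; as_retriever wraps the same
# lookup in the Runnable interface so it can be piped into the chain below,
# returning the k=5 most similar chunks per query.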
llm = LlamaCpp(
    model_path="/content/drive/MyDrive/BioMistral/BioMistral-7B.Q4_K_M.gguf",
    temperature=0.2,
    max_tokens=2048,
    top_p=1,
    n_ctx=2048,  # llama.cpp defaults to a 512-token window, too small for max_tokens above
)
template="""
<|context|>
You are an Medical Assistant that follows the instruction and generate the accurate response based on the query and the context provided.
Please be truthful and give direct answers.
</s>
<|user|>
{query}
</s>
<|assistant|>
"""
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.output_parser import StrOutputParser
from langchain.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate.from_template(template)

def format_docs(docs):
    # Join the retrieved Document chunks into one context string for the prompt.
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    {"context": retriever | format_docs, "query": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

response = rag_chain.invoke(query)
response
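# How the chain works: invoking it with a plain question string sends the
# string both to the retriever (whose formatted chunks fill {context}) and
# through RunnablePassthrough (which fills {query}) before the prompt is
# rendered and handed to the model.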
import sys

# Simple REPL: type a question, or 'exit' to quit.
while True:
    user_input = input("Input query: ")
    if user_input == "exit":
        print("Exiting...")
        sys.exit()
    if user_input == "":
        continue
    result = rag_chain.invoke(user_input)
    print("Answer: ", result)
!pip install gradio
import gradio as gr
# Define a function to handle queries
def chatbot_ui(user_query):
    if not user_query.strip():
        return "Please enter a valid query."
    try:
        result = rag_chain.invoke(user_query)
        return result
    except Exception as e:
        return f"Error: {str(e)}"
# Create the Gradio interface
interface = gr.Interface(
    fn=chatbot_ui,  # Function to process the query
    inputs=gr.Textbox(label="Enter your medical query:", placeholder="Ask a medical question here..."),
    outputs=gr.Textbox(label="Chatbot Response"),
    title="Medical Assistant Chatbot",
    description="A chatbot made for heart patients.",
    examples=[
        ["What are the symptoms of diabetes?"],
        ["Explain the risk factors of heart disease."],
        ["How can I reduce cholesterol levels naturally?"],
    ],
)
# Launch the Gradio interface
interface.launch(share=True)
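# share=True requests a temporary public URL from Gradio, which is the usual
# way to reach the app from inside a Colab runtime.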