import pandas as pd

df = pd.read_csv('./Mental_Health_FAQ.csv')
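# Optional sanity check (a quick sketch): confirm the CSV loaded with the
# 'Questions' and 'Answers' columns the loop below relies on.
print(df.shape)
print(df.columns.tolist())  # expected to include 'Questions' and 'Answers'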
from sentence_transformers import SentenceTransformer

# Build one retrievable context string per FAQ row in 'df'
context_data = []
for i in range(len(df)):
    context = f"Question: {df.iloc[i]['Questions']} Answer: {df.iloc[i]['Answers']}"
    context_data.append(context)
# print(context_data)
# Embed the contexts as a quick sanity check. Note: these embeddings are not
# what the vector store below uses; Chroma re-embeds the texts with its own
# embedding model when they are added.
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
context_embeddings = embedding_model.encode(context_data)
# print(f"Number of contexts: {len(context_data)}")
# print(f"Shape of embeddings: {context_embeddings.shape}")
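# Optional check (sketch): cosine similarity between the first two contexts;
# scores near 1 mean the encoder treats them as near-duplicates.
from sentence_transformers import util
print(util.cos_sim(context_embeddings[0], context_embeddings[1]))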
import os

# Get the Groq API key from the environment
groq_key = os.environ.get('new_chatAPI_key')
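# Fail fast if the key is missing (a small defensive sketch; 'new_chatAPI_key'
# is this script's own variable name, not a Groq convention).
if groq_key is None:
    raise RuntimeError("Environment variable 'new_chatAPI_key' is not set.")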
## LLM used for RAG
from langchain_groq import ChatGroq
llm = ChatGroq(model="llama-3.3-70b-versatile", api_key=groq_key)

## Embedding model
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")
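# Optional check (sketch): embed_query returns a single vector; its length is
# the embedding dimensionality Chroma will index for this model. The first
# call also triggers the model download, so this doubles as a setup test.
print(len(embed_model.embed_query("hello")))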
# Create the vector store (in-memory; pass persist_directory to keep it on disk)
from langchain_chroma import Chroma
vectorstore = Chroma(
    collection_name="medical_dataset_store",
    embedding_function=embed_model,
)
# Add the FAQ contexts to the vector store (Chroma embeds them with embed_model)
vectorstore.add_texts(context_data)
retriever = vectorstore.as_retriever()
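# Optional check (sketch): retrievers are Runnables, so .invoke returns the
# documents the chain below will stuff into the prompt's {context}.
for doc in retriever.invoke("What are the symptoms of depression?"):
    print(doc.page_content[:120])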
from langchain_core.prompts import PromptTemplate

template = """You are a mental health professional.
Use the provided context to answer the question.
If you don't know the answer, say so. Explain your answer in detail.
Do not discuss the context in your response; just provide the answer directly.
Context: {context}
Question: {question}
Answer:"""
rag_prompt = PromptTemplate.from_template(template)
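# Optional check (sketch): render the prompt with placeholder values to see
# exactly what the LLM will receive.
print(rag_prompt.format(context="(retrieved FAQ text)", question="What is anxiety?"))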
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

def format_docs(docs):
    # Join retrieved documents into plain text so {context} receives FAQ
    # content rather than stringified Document objects (a common LCEL refinement).
    return "\n\n".join(doc.page_content for doc in docs)

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
    | StrOutputParser()
)
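# One-off usage example (sketch): exercise the chain directly before wiring it
# into Gradio. Left commented out because it spends a Groq API call at startup.
# print(rag_chain.invoke("What does it mean to have a mental illness?"))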
import gradio as gr

def rag_memory_stream(message, history):
    # Stream the answer token by token; 'history' is required by the
    # ChatInterface signature but is not used, so each turn is independent.
    partial_text = ""
    for new_text in rag_chain.stream(message):
        partial_text += new_text
        yield partial_text
examples = [
    "I am not in a good mood",
    "What are the possible symptoms of depression?",
]

description = "A real-time AI app that answers mental health questions with the Groq API and LangChain"
title = "ThriveTalk Expert :) Try me!"
demo = gr.ChatInterface(
    fn=rag_memory_stream,
    type="messages",
    title=title,
    description=description,
    fill_height=True,
    examples=examples,
    theme="glass",
)
if __name__ == "__main__":
    demo.launch()