import os

import chainlit as cl
from dotenv import load_dotenv
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.prompts import ChatPromptTemplate
from langchain.retrievers import MultiQueryRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
load_dotenv()
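
# Optional sanity check (an addition, not part of the original submission):
# langchain_openai reads OPENAI_API_KEY from the environment, so fail fast
# with a clear error if the .env file did not provide it.
if not os.getenv("OPENAI_API_KEY"):
    raise RuntimeError("OPENAI_API_KEY is not set; add it to your .env file.")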
# START CODE
# Chat model used both to generate answers and to expand queries for retrieval.
openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
# Embedding model used to index the PDF chunks in FAISS.
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
def process_file(file: str):
    """Load a PDF (local path or URL) and split it into overlapping chunks."""
    pypdf_loader = PyMuPDFLoader(file)
    texts = pypdf_loader.load()
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=700,
        chunk_overlap=50,
    )
    documents = text_splitter.split_documents(texts)
    return documents
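
# Optional sketch (an addition, not in the original flow): RAG_pipeline below
# rebuilds the FAISS index on every message, which re-embeds the whole PDF per
# question. A small module-level cache like this would build the index once and
# reuse it; the helper name and cache key are illustrative, not part of the app.
_vector_store_cache: dict = {}

def get_vector_store(cache_key: str, documents):
    # Build the index the first time this key is seen, then reuse it.
    if cache_key not in _vector_store_cache:
        _vector_store_cache[cache_key] = FAISS.from_documents(documents, embeddings)
    return _vector_store_cache[cache_key]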
async def RAG_pipeline(question: str, documents: list):
    """Answer a question over the given document chunks with a multi-query RAG chain."""
    template = """
    Answer the question based only on the following context.
    If you cannot answer the question with the context, please respond with 'I don't know, can you provide more context?'.
    If the user question is not related to NVIDIA or the NVIDIA 10-K filings, then please respond with "I can only help you with the NVIDIA 10-K filings report.\n\n".
    Always answer in full sentences.
    If the user says something like "Ok" or "thank you" or any other phatic expression (small talk), then respond with "No problem! Always happy to help."
    Always end your answer with "\n\n\n Thank you for asking. What else can I help you with regarding the NVIDIA 10-K filings report?".

    Context:
    {context}

    Question:
    {input}
    """
    # Create the prompt template
    prompt = ChatPromptTemplate.from_template(template)
    # Initialize the FAISS vector store over the document chunks
    vector_store = FAISS.from_documents(documents, embeddings)
    # Initialize a retriever that fetches similar context
    retriever = vector_store.as_retriever()
    # Wrap it in a multi-query retriever, which uses the chat model to rephrase
    # the question into several alternative queries before retrieving
    retriever = MultiQueryRetriever.from_llm(
        retriever=retriever, llm=openai_chat_model)
    # Create a "stuff" document chain that feeds retrieved chunks into the prompt
    document_chain = create_stuff_documents_chain(openai_chat_model, prompt)
    # Combine the retriever and the document chain into a retrieval chain
    retrieval_chain = create_retrieval_chain(retriever, document_chain)
    # Run the chain on the question; ainvoke keeps this async handler from
    # blocking the event loop while waiting on OpenAI
    response = await retrieval_chain.ainvoke({"input": question})
    # Make sure the response contains an 'answer' key before using it
    if 'answer' in response:
        llm_answer = response['answer']
    else:
        llm_answer = '**EMPTY RESPONSE**'
    print("llm_answer: ", llm_answer)
    return llm_answer
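
# Optional (an addition, not in the original): MultiQueryRetriever logs the
# alternative queries it generates. Enabling its logger, as the LangChain docs
# suggest, helps when debugging why a chunk was or was not retrieved.
import logging
logging.basicConfig()
logging.getLogger("langchain.retrievers.multi_query").setLevel(logging.INFO)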
@cl.on_chat_start  # runs once at the start of each user session
async def start_chat():
    settings = {
        "model": "gpt-3.5-turbo",
        "temperature": 0,
        "max_tokens": 500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }
    print("A new chat session has started!")
    # Download and chunk the NVIDIA 10-K PDF
    documents = process_file(
        "https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf")
    # Cache the chunks and settings in the user session
    cl.user_session.set("documents", documents)
    cl.user_session.set("settings", settings)
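
# Note (an assumption, not in the original design): start_chat could also build
# the FAISS index here via the get_vector_store() sketch above, so the first
# question does not pay the embedding cost.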
@cl.on_message
async def main(message: cl.Message):
    print("Human asked: ", message.content)
    # Send an empty placeholder message while the pipeline runs
    msg = cl.Message(content="")
    await msg.send()
    # Brief pause so the placeholder renders before the heavy work starts
    await cl.sleep(2)
    # Retrieve the cached document chunks from the session
    document_chunks = cl.user_session.get("documents")
    # Run the RAG pipeline and wait for the answer
    response = await RAG_pipeline(message.content, document_chunks)
    # Update the placeholder with the answer, or with a fallback error message
    if response:
        msg.content = response
    else:
        msg.content = "Something went wrong! Please kindly refresh and try again 🤝"
    await msg.update()
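
# Launch locally with:
#   chainlit run app.py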