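# Chainlit app that answers questions about NVIDIA's 10-K filing using a
# LangChain RAG pipeline (FAISS + MultiQueryRetriever + gpt-3.5-turbo).
#
# A minimal sketch of how to launch it, assuming this file is saved as app.py
# (the usual Chainlit layout):
#
#   chainlit run app.py -w
#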
import chainlit as cl
from dotenv import load_dotenv

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.prompts import ChatPromptTemplate
from langchain.retrievers import MultiQueryRetriever
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings


# Load environment variables; an OPENAI_API_KEY entry in a local .env file is assumed.
load_dotenv()


# Shared chat model and embeddings used across the app
openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")


def process_file(file: str):

    # PyMuPDFLoader accepts a local path or a URL and returns one Document per page.
    pypdf_loader = PyMuPDFLoader(file)
    texts = pypdf_loader.load()

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=700,
        chunk_overlap=50
    )

    documents = text_splitter.split_documents(texts)

    return documents


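# RAG pipeline, roughly:
#   1. Build a FAISS index over the chunked documents (rebuilt on every call).
#   2. Wrap the FAISS retriever in a MultiQueryRetriever, which asks the LLM to
#      rephrase the question into several variants and merges their results.
#   3. "Stuff" the retrieved chunks into the prompt and query gpt-3.5-turbo.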
async def RAG_pipeline(question: str, documents: list):

    template = """
    
    Answer the question based only on the following context. 
    
    If you cannot answer the question with the context, please respond with 'I don't know, can you provide more context?'.

    If the user question is not related to NVIDIA or NVIDIA 10-k Filings then please respond with "I can only help you with NVIDIA 10-k Filings report.\n\n".

    Always answer in full sentence.
    
    If a user says things like "Ok" or "thank you" or "thank you" or anything that is related to  "phatic expressions" or "phatic communication" then respond with "No problem! Always happy to help."
    
    Always end your sentence with "\n\n\n Thank you for asking. What else can I help you regarding NVIDIA 10-k Filings report?".
    
    Context:
    {context}

    Question:
    {input}
    """

    # Create prompt template
    prompt = ChatPromptTemplate.from_template(template)

    # Initialize FAISS vector store
    vector_store = FAISS.from_documents(documents, embeddings)

    # Initialize a retriever to retrieve similar context
    retriever = vector_store.as_retriever()

    # Wrap the base retriever in a MultiQueryRetriever, which uses the LLM to
    # generate several rephrasings of the question and merges their results.
    retriever = MultiQueryRetriever.from_llm(
        retriever=retriever, llm=openai_chat_model)

    # Create a document chain using OpenAI chat model and a prompt
    document_chain = create_stuff_documents_chain(openai_chat_model, prompt)

    # Create a retrieval chain using a retriever and a document chain
    retrieval_chain = create_retrieval_chain(retriever, document_chain)

    # Run the retrieval chain asynchronously with the user's question
    response = await retrieval_chain.ainvoke({"input": question})

    # Fall back to a placeholder if the chain did not return an 'answer' key
    llm_answer = response.get('answer', '**EMPTY RESPONSE**')

    print("llm_answer: ", llm_answer)

    return llm_answer


@cl.on_chat_start  # marks a function that will be executed at the start of a user session
async def start_chat():
    settings = {
        "model": "gpt-3.5-turbo",
        "temperature": 0,
        "max_tokens": 500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }

    print("A new chat session has started!")

    # Download and chunk the NVIDIA 10-K filing PDF
    documents = process_file(
        "https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf")


    # Save session
    cl.user_session.set("documents", documents)
    cl.user_session.set("settings", settings)


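# Handle each incoming user message: fetch the chunks cached for this session
# and run them through the RAG pipeline.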
@cl.on_message
async def main(message: cl.Message):

    print("Human asked: ", message.content)

    msg = cl.Message(content="")
    await msg.send()

    # do some work
    await cl.sleep(2)

    # Retrieve the document chunks cached for this session
    document_chunks = cl.user_session.get("documents")

    # Run the RAG pipeline and wait for the answer
    response = await RAG_pipeline(message.content, document_chunks)

    
    # If we got an answer, send it to the user; otherwise report the failure
    if response:
        await cl.Message(content=response).send()
    else:
        await cl.Message(
            content="Something went wrong! Please kindly refresh and try again 🤝").send()