tensorkelechi committed
Commit b0e3bba · verified · 1 Parent(s): b835442

Create app.py

Files changed (1): app.py +146 -0
app.py ADDED
@@ -0,0 +1,146 @@
+ import os
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain_community.vectorstores import FAISS
+ from langchain.memory import ConversationBufferMemory
+ from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
+ from tempfile import NamedTemporaryFile
+ from dotenv import load_dotenv
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain.chains import ConversationalRetrievalChain
+ import streamlit as st
+ import nest_asyncio
+
+ nest_asyncio.apply()
+ load_dotenv()
+
+ # Initialize app resources
+ st.set_page_config(page_title="StudyAssist", page_icon=":book:")
+ st.title("StudyAssist (pharmassist-v0)")
+ st.write(
+     "An AI/RAG application to aid students in their studies, optimized for PHARM 028 students. In simpler terms: chat with your PDF."
+ )
+
+
+ @st.cache_resource
+ def initialize_resources():
+     # Cached so the Gemini client is created once and reused across reruns
+     llm_gemini = ChatGoogleGenerativeAI(
+         model="gemini-1.5-flash-latest", google_api_key=os.getenv("GOOGLE_API_KEY")
+     )
+     return llm_gemini
+
+ def get_retriever(pdf_file):
+     # Persist the upload to a temp file so PyPDFLoader can read it from disk
+     with NamedTemporaryFile(suffix=".pdf") as temp:
+         temp.write(pdf_file.getvalue())
+         temp.flush()
+         pdf_loader = PyPDFLoader(temp.name, extract_images=True)
+         pages = pdf_loader.load()
+
+     st.write(f"AI Chatbot for {pdf_file.name}")
+
+     underlying_embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,
+         chunk_overlap=20,
+         length_function=len,
+         is_separator_regex=False,
+         separators=["\n"],
+     )
+     documents = text_splitter.split_documents(pages)
+     vectorstore = FAISS.from_documents(documents, underlying_embeddings)
+     doc_retriever = vectorstore.as_retriever(
+         search_type="mmr", search_kwargs={"k": 5, "fetch_k": 10}
+     )
+
+     return doc_retriever
+
+
+ chat_model = initialize_resources()
+
+ # Streamlit UI
+ # Course list and PDF retrieval
+
+ courses = ["PMB", "PCL", "Kelechi_research"]  # "GSP", "CPM", "PCG", "PCH"
+ course_pdfs = None
+ doc_retriever = None
+ conversational_chain = None
+
+ # course = st.sidebar.selectbox("Choose course", (courses))
+ # docs_path = f"pdfs/{course}"
+ # course_pdfs = os.listdir(docs_path)
+ # pdfs = [os.path.join(docs_path, pdf) for pdf in course_pdfs]
+
+ course_material = "{Not selected}"
+
+
+ # @st.cache_resource
+ def query_response(query, _retriever):
+     # Note: the memory is rebuilt on every call, so earlier turns are not
+     # carried into later queries
+     memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+     conversational_chain = ConversationalRetrievalChain.from_llm(
+         llm=chat_model, retriever=_retriever, memory=memory, verbose=False
+     )
+     response = conversational_chain.run(query)
+
+     return response
+
+
+ if "doc" not in st.session_state:
+     st.session_state.doc = ""
+
+ course_material = st.file_uploader("or upload your own PDF", type="pdf")
+
+ # Build the retriever only once a file has actually been uploaded
+ if course_material is not None:
+     try:
+         doc_retriever = get_retriever(course_material)
+         st.success("File loading successful, vector DB initialized")
+     except Exception:
+         st.error("Could not process the PDF, try uploading it again")
+
+ # We store the conversation in the session state.
+ # This will be used to render the chat conversation.
+ # We initialize it with the first message we want to be greeted with.
+ if "messages" not in st.session_state:
+     st.session_state.messages = [
+         {"role": "assistant", "content": "Yoo, How far boss?"}
+     ]
+
+ if "current_response" not in st.session_state:
+     st.session_state.current_response = ""
+
+ # We loop through each message in the session state and render it as
+ # a chat message.
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # We take questions/instructions from the chat input to pass to the LLM
+ if user_prompt := st.chat_input("Your message here", key="user_input"):
+     # Add our input to the session state
+     st.session_state.messages.append({"role": "user", "content": user_prompt})
+
+     # Add our input to the chat window
+     with st.chat_message("user"):
+         st.markdown(user_prompt)
+
+     # Pass our input to the LLM chain and capture the final response
+     # once the model has finished generating.
+     response = query_response(user_prompt, doc_retriever)
+     # Add the response to the session state
+     st.session_state.messages.append({"role": "assistant", "content": response})
+
+     # Add the response to the chat window
+     with st.chat_message("assistant"):
+         st.markdown(response)
+
+ st.write("")
+ st.write("")
+
+
+ st.markdown(
+     """
+     <div style="text-align: center; padding: 1rem;">
+         Project by <a href="https://github.com/kelechi-c" target="_blank" style="color: white; font-weight: bold; text-decoration: none;">
+         kelechi(tensor)</a>
+     </div>
+     """,
+     unsafe_allow_html=True,
+ )
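
For reviewers who want to sanity-check the retrieval pipeline without launching Streamlit, here is a minimal headless sketch. It is not part of the commit: it reuses the same calls as app.py, while the "sample.pdf" path and the question string are placeholder assumptions, and it assumes GOOGLE_API_KEY is available in a local .env file.

```python
# Headless smoke test of the same RAG pipeline (hypothetical, not in the commit).
# Assumes GOOGLE_API_KEY in .env and a local "sample.pdf" (placeholder path).
import os

from dotenv import load_dotenv
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

load_dotenv()

# Load and chunk the PDF with the same splitter settings as app.py
pages = PyPDFLoader("sample.pdf").load()
splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=20, separators=["\n"]
)
documents = splitter.split_documents(pages)

# Embed, index with FAISS, and retrieve via MMR (k=5 out of fetch_k=10)
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
retriever = FAISS.from_documents(documents, embeddings).as_retriever(
    search_type="mmr", search_kwargs={"k": 5, "fetch_k": 10}
)

# Same chain setup as query_response(), minus the Streamlit UI
llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash-latest", google_api_key=os.getenv("GOOGLE_API_KEY")
)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)

print(chain.run("What are the key points of this document?"))
```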