pankajsingh3012 committed
Commit 23978c7 (verified) · Parent(s): 6bdadd9

Upload 2 files

Files changed (2)
  1. app.py +132 -0
  2. requirements (2).txt +8 -0
app.py ADDED
@@ -0,0 +1,132 @@
+# importing libraries
+import streamlit as st
+from PyPDF2 import PdfReader
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+import os
+import base64
+from langchain_community.embeddings import HuggingFaceEmbeddings
+import google.generativeai as genai
+from langchain.vectorstores import FAISS
+from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain.chains.question_answering import load_qa_chain
+from langchain.prompts import PromptTemplate
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# configure the Gemini API key from the environment
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+
+
+
+
+# read the uploaded PDFs and concatenate their raw text
+def get_pdf_text(pdf_docs):
+    text = ""
+    for pdf in pdf_docs:
+        pdf_reader = PdfReader(pdf)
+        for page in pdf_reader.pages:
+            text += page.extract_text() or ""
+    return text
+
+
+# split the raw text into overlapping chunks
+def get_text_chunks(text):
+    text_splitter = RecursiveCharacterTextSplitter(chunk_size=10000, chunk_overlap=1000)
+    chunks = text_splitter.split_text(text)
+    return chunks
+
+# create embeddings and store them in a local FAISS index
+def get_vector_store(text_chunks):
+    embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-large")
+    vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
+    vector_store.save_local("faiss_index")
+
+# define the question-answering chain
+def get_conversational_chain():
+
+    prompt_template = """
+    Answer the question as detailed as possible from the provided context, and make sure to provide all the details. If the answer is not in
+    the provided context, just say "answer is not available in the context"; don't provide a wrong answer.\n\n
+    Context:\n {context}\n
+    Question: \n{question}\n
+
+    Answer:
+    """
+
+    model = ChatGoogleGenerativeAI(model="gemini-pro",
+                                   temperature=0.3)
+
+    prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+
+    return chain
+
+
+# answer a user question against the stored index
+def user_input(user_question):
+    embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-large")
+
+    new_db = FAISS.load_local("faiss_index", embeddings)  # newer langchain releases may also require allow_dangerous_deserialization=True
+    docs = new_db.similarity_search(user_question)
+
+    chain = get_conversational_chain()
+
+
+    response = chain(
+        {"input_documents": docs, "question": user_question},
+        return_only_outputs=True)
+
+    print(response)
+    st.write("Reply: ", response["output_text"])
+
+
+
+# Streamlit interface
+def main():
+    st.set_page_config("Chat PDF")  # must be the first Streamlit call in the script
+    titleimg = "bg.jpeg"
+
+    # set a background image via inline CSS
+    def set_bg_hack(main_bg):
+        # background image extension
+        main_bg_ext = "jpeg"
+        st.markdown(
+            f"""
+            <style>
+            .stApp {{
+                background: url(data:image/{main_bg_ext};base64,{base64.b64encode(open(main_bg, "rb").read()).decode()});
+                background-repeat: no-repeat;
+                background-position: right 50% bottom 95%;
+                background-size: cover;
+                background-attachment: scroll;
+            }}
+            </style>
+            """,
+            unsafe_allow_html=True,
+        )
+
+    set_bg_hack(titleimg)
+
+    st.header("Chat with PDF 💁")
+
+    user_question = st.text_input("Ask a Question from the PDF Files")
+
+    if user_question:
+        user_input(user_question)
+
+    with st.sidebar:
+        st.title("Menu:")
+        pdf_docs = st.file_uploader("Upload your PDF Files and Click on the Submit & Process Button", accept_multiple_files=True)
+        if st.button("Submit & Process"):
+            with st.spinner("Processing..."):
+                raw_text = get_pdf_text(pdf_docs)
+                text_chunks = get_text_chunks(raw_text)
+                get_vector_store(text_chunks)
+                st.success("Done")
+
+
+
+if __name__ == "__main__":
+    main()
requirements (2).txt ADDED
@@ -0,0 +1,8 @@
+streamlit
+google-generativeai
+python-dotenv
+langchain
+PyPDF2
+chromadb
+faiss-cpu
+langchain_google_genai
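For quick local testing, the sketch below (hypothetical, not part of this commit) reuses get_conversational_chain from app.py to query the saved faiss_index from a plain Python script. It assumes app.py has already been run once through the sidebar flow so the index exists on disk, that GOOGLE_API_KEY is set in a .env file or the environment, and that langchain-community and sentence-transformers are available alongside the packages listed above (HuggingFaceEmbeddings relies on them even though they are not listed explicitly).

# query_index.py - hypothetical helper, not part of this commit
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

from app import get_conversational_chain  # importing app.py also loads .env and configures the Gemini key

question = "What is this document about?"  # example question, replace as needed

# rebuild the same embeddings used at indexing time and load the persisted index
embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-large")
# newer langchain releases may also require allow_dangerous_deserialization=True here
db = FAISS.load_local("faiss_index", embeddings)
docs = db.similarity_search(question)

# run the same "stuff" QA chain the app uses and print the answer
chain = get_conversational_chain()
response = chain({"input_documents": docs, "question": question}, return_only_outputs=True)
print(response["output_text"])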