realmitratushar committed
Commit 3b76b56 · verified · 1 Parent(s): 998b78f

Upload 3 files

Files changed (3)
  1. .gitignore +5 -0
  2. app.py +100 -0
  3. requirements.txt +10 -0
.gitignore ADDED
@@ -0,0 +1,5 @@
+ .venv
+ venv/
+
+ .env
+
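Note that `.env` is excluded from version control above, so the Google API key has to be supplied locally. A minimal sketch of the `.env` file that `load_dotenv()` in app.py reads (the value is a placeholder, not a real key):

# .env — not committed; read by load_dotenv() in app.py
GOOGLE_API_KEY=your-google-api-key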
app.py ADDED
@@ -0,0 +1,100 @@
+ import streamlit as st
+ from PyPDF2 import PdfReader
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ import os
+
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings
+ import google.generativeai as genai
+ from langchain_community.vectorstores import FAISS
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain.chains.question_answering import load_qa_chain
+ from langchain_core.prompts import PromptTemplate
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+ # Initialize session state
+ if 'processed' not in st.session_state:
+     st.session_state.processed = False
+
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+ @st.cache_data
+ def get_pdf_text(pdf_docs):
+     # Extract raw text from every page of every uploaded PDF
+     text = ""
+     for pdf in pdf_docs:
+         pdf_reader = PdfReader(pdf)
+         for page in pdf_reader.pages:
+             text += page.extract_text() or ""  # extract_text() can return None for image-only pages
+     return text
+
+ @st.cache_data
+ def get_text_chunks(text):
+     # Split the combined text into overlapping chunks for embedding
+     text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=500)
+     chunks = text_splitter.split_text(text)
+     return chunks
+
+ @st.cache_data
+ def get_vector_store(chunks):
+     # Embed the chunks and persist a FAISS index to disk
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     vector_store = FAISS.from_texts(chunks, embedding=embeddings)
+     vector_store.save_local("faiss_index")
+
+ @st.cache_resource
+ def get_conversation_chain():
+     prompt_template = """
+     Answer the question as detailed as possible from the provided context and make sure to provide all the details. If the answer is not in
+     the provided context, just say "answer is not available in the context"; don't provide a wrong answer.\n\n
+     Context:\n {context}?\n
+     Question: \n{question}\n
+
+     Answer:
+     """
+     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.4)
+     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+     return chain
+
+ def process_pdfs(pdf_docs):
+     raw_text = get_pdf_text(pdf_docs)
+     text_chunks = get_text_chunks(raw_text)
+     get_vector_store(text_chunks)
+     st.session_state.processed = True
+     return "PDFs processed successfully!"
+
+ def user_input(user_question):
+     # Reload the persisted FAISS index and answer the question over the retrieved chunks
+     embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
+     new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+     docs = new_db.similarity_search(user_question)
+     chain = get_conversation_chain()
+     response = chain(
+         {"input_documents": docs, "question": user_question},
+         return_only_outputs=True
+     )
+     return response["output_text"]
+
+ def main():
+     st.title("Chat with multiple PDFs")
+
+     tab1, tab2 = st.tabs(["Upload PDFs", "Chat"])
+
+     with tab1:
+         pdf_docs = st.file_uploader("Upload your PDF files", type=['pdf'], accept_multiple_files=True)
+         if st.button("Process"):
+             with st.spinner("Processing PDFs..."):
+                 status = process_pdfs(pdf_docs)
+                 st.success(status)
+
+     with tab2:
+         if not st.session_state.processed:
+             st.warning("Please upload and process PDFs first")
+         else:
+             user_question = st.text_input("Ask a question from the PDF files")
+             if st.button("Submit"):
+                 with st.spinner("Generating response..."):
+                     response = user_input(user_question)
+                     st.write(response)
+
+ if __name__ == "__main__":
+     main()
+
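For reference, the processing flow above persists a FAISS index to the local "faiss_index" directory, which can also be queried outside Streamlit. A minimal sketch, assuming the index has already been built by the app and GOOGLE_API_KEY is set in .env (the query string is a made-up example):

from dotenv import load_dotenv
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores import FAISS

load_dotenv()  # expects GOOGLE_API_KEY, as in app.py

# Reload the index saved by get_vector_store() with the same embedding model
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)

# Print the top matching chunks for an example question
for doc in db.similarity_search("What is this document about?", k=3):
    print(doc.page_content[:200])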
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ streamlit
+ google-generativeai
+ python-dotenv
+ langchain
+ PyPDF2
+ faiss-cpu
+ ipykernel
+ langchain-google-genai
+ langchain-text-splitters
+ langchain-community
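For local use, a reasonable setup (an assumption, not a run procedure documented in this repo) is to install these dependencies into a virtual environment and launch the app with Streamlit:

pip install -r requirements.txt
streamlit run app.py

Of these packages, ipykernel is not imported by app.py and appears to be included only for notebook experimentation.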