hunterXdk committed
Commit abce492 · verified · 1 Parent(s): c4b0a5b

deleting google dependency

Files changed (1): app.py (+109 -110)
app.py CHANGED
@@ -1,111 +1,110 @@
 import os
 import streamlit as st
 from PyPDF2 import PdfReader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.embeddings import HuggingFaceEmbeddings

 from langchain.vectorstores import FAISS
-from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch


 # Load a Hugging Face model (e.g., LLaMA or Falcon)
 model_name = "mixedbread-ai/mxbai-embed-2d-large-v1"  # Replace with your preferred model
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")


 def get_pdf_text(pdf_docs):
     text = ""
     for pdf in pdf_docs:
         pdf_reader = PdfReader(pdf)
         for page in pdf_reader.pages:
             text += page.extract_text()
     return text

 # chunk_size = 1000, chunk_overlap = 200 (for shorter PDFs)
 def get_text_chunks(text):
     text_splitter = RecursiveCharacterTextSplitter(
         chunk_size=10000,
         chunk_overlap=1000,
         # length_function=len
     )
     chunks = text_splitter.split_text(text)
     return chunks

 # Converting into a vector store (persisted locally)
 def get_vector_store(text_chunks):
     # embeddings = GoogleGenerativeAIEmbeddings(model='embedding-gecko-001')
     # embeddings = GoogleGenerativeAIEmbeddings(model='models/embedding-001')
     embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
     vector_store = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
     vector_store.save_local("faiss_index")
     # return vector_store


 def chat_with_huggingface(context, query):
     prompt_template = """
     Answer the query as detailed as possible from the provided context.
     If the answer is not in the context, just say, "Answer is not available in the provided documents".
     Context: {context}
     Query: {query}
     Answer:
     """
     inputs = tokenizer(prompt_template, return_tensors="pt").to(model.device)
     outputs = model.generate(**inputs, max_length=500, temperature=0.3)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)

 def get_conversation_chain():
     def huggingface_chain(inputs):
         context = inputs["input_documents"][0].page_content  # Extract context from FAISS search
         query = inputs["question"]
         return {"output_text": chat_with_huggingface(context, query)}

     return huggingface_chain

 def user_input(user_question):
     # embeddings = GoogleGenerativeAIEmbeddings(model='embedding-gecko-001')
     embeddings = GoogleGenerativeAIEmbeddings(model='models/embedding-001')

     # Loading the embeddings
     new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
     docs = new_db.similarity_search(user_question)

     chain = get_conversation_chain()

     response = chain(
         {"input_documents": docs, "question": user_question},
         return_only_outputs=True)

     print(response)
     st.write("Reply: ", response["output_text"])

 # Frontend page processor
 def main():
     st.set_page_config(page_title="PDF Chatbot")
     st.header("PDF Chatbot made for Pooja")

     user_question = st.text_input("Puchiye kuch apne documents se:")  # Hindi: "Ask your documents something:"

     if user_question:
         user_input(user_question)

     with st.sidebar:
         st.title("Menu:")
         pdf_docs = st.file_uploader(
             "Apne PDFs yaha pe upload karo then click on 'Process'",  # Hindi: "Upload your PDFs here, then click 'Process'"
             accept_multiple_files=True)
         if st.button("Submit & Process"):
             with st.spinner("Ruko Padh raha hu..."):  # Hindi: "Wait, I'm reading..."
                 raw_text = get_pdf_text(pdf_docs)
                 text_chunks = get_text_chunks(raw_text)
                 get_vector_store(text_chunks)
             st.success("Saare documents padh liya. Ab swaal pucho 😤")  # Hindi: "Read all the documents. Now ask your questions."


 if __name__ == '__main__':
     main()
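
Note: mixedbread-ai/mxbai-embed-2d-large-v1 is a sentence-embedding model, so loading it with AutoModelForCausalLM for text generation will not work as intended. A minimal sketch of loading an actual chat-capable causal LM instead; the TinyLlama checkpoint is purely illustrative (an assumption, not something this repo pins):

    # Sketch only, not part of commit abce492: swap in a genuine causal LM.
    # TinyLlama here is an illustrative choice; any chat-tuned model works.
    from transformers import AutoModelForCausalLM, AutoTokenizer
    import torch

    model_name = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=torch.float16, device_map="auto")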
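
Note: chat_with_huggingface tokenizes the raw template string, so the model sees the literal placeholders {context} and {query}; the retrieved context and the user's question are never substituted in. Also, max_length=500 counts prompt tokens too, and temperature has no effect unless sampling is enabled. A hedged sketch of the fix, keeping the rest of the flow as committed:

    # Sketch only, not part of this commit.
    def chat_with_huggingface(context, query):
        # Substitute the retrieved context and the user query into the prompt.
        prompt = (
            "Answer the query as detailed as possible from the provided context.\n"
            'If the answer is not in the context, just say, '
            '"Answer is not available in the provided documents".\n'
            f"Context: {context}\n"
            f"Query: {query}\n"
            "Answer:\n"
        )
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        # max_new_tokens bounds only the reply; do_sample lets temperature apply.
        outputs = model.generate(**inputs, max_new_tokens=500,
                                 do_sample=True, temperature=0.3)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)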
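
Note: despite the commit message, user_input still calls GoogleGenerativeAIEmbeddings, which is no longer imported anywhere after this change, so asking a question raises a NameError. The index is also built in get_vector_store with HuggingFaceEmbeddings, and a FAISS index should be loaded with the same embedding model it was built with. Finally, huggingface_chain accepts a single dict, so the return_only_outputs=True keyword raises a TypeError. A minimal sketch under those assumptions:

    # Sketch only, not part of this commit: finish removing the Google dependency.
    def user_input(user_question):
        # Same embedding model used in get_vector_store, so query vectors
        # are comparable with the stored index vectors.
        embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-L6-v2")
        new_db = FAISS.load_local("faiss_index", embeddings,
                                  allow_dangerous_deserialization=True)
        docs = new_db.similarity_search(user_question)

        chain = get_conversation_chain()
        # huggingface_chain takes exactly one dict argument.
        response = chain({"input_documents": docs, "question": user_question})
        st.write("Reply: ", response["output_text"])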