changed imports
app.py CHANGED
@@ -1,20 +1,22 @@
-
-import
-from langchain_groq import ChatGroq
+# Langchain imports
+from langchain_community.vectorstores.faiss import FAISS
 from langchain_community.document_loaders import WebBaseLoader
-# from langchain_community.embeddings import OllamaEmbeddings
-from langchain_google_genai import GoogleGenerativeAIEmbeddings
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.chains.combine_documents import create_stuff_documents_chain
 from langchain_core.prompts import ChatPromptTemplate
 from langchain.chains import create_retrieval_chain
-from langchain_community.vectorstores.faiss import FAISS
 
+# Embedding and model imports
+from langchain_google_genai import GoogleGenerativeAIEmbeddings
+from langchain_groq import ChatGroq
+
+# Other
+import streamlit as st
+import os
 import time
 from PyPDF2 import PdfReader
 import tempfile
 
-
 st.title("Ask your questions from pdf(s) or website")
 option = None
 
@@ -55,6 +57,11 @@ def llm_model():
     print("Response time :", time.process_time()-start)
     st.write(response['answer'])
 
+    with st.expander("Did not like the response? Check out more here"):
+        for i, doc in enumerate(response['context']):
+            st.write(doc.page_content)
+            st.write("-----------------------------")
+
     st.session_state.embeddings = GoogleGenerativeAIEmbeddings(model = 'models/embedding-001')
     st.session_state.text_splitter = RecursiveCharacterTextSplitter(chunk_size =1000, chunk_overlap= 200)
 
@@ -76,10 +83,4 @@ if option:
     st.session_state.vector = FAISS.from_texts(st.session_state.final_documents,st.session_state.embeddings)
     llm_model()
 
-# with st.expander("Document Similarity Search"):
-#     for i, doc in enumerate(response['context']):
-#         st.write(doc.page_content)
-#         st.write("-----------------------------")
-
-
 
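For reference, a minimal sketch (not part of this commit) of how the imports grouped above are typically wired into a retrieval chain. The Groq model name, prompt text, and sample chunks are placeholder assumptions; the point is that create_retrieval_chain returns a dict whose 'answer' key is what gets written to the page and whose 'context' key holds the retrieved documents that the new expander iterates.

# Minimal sketch, assuming GROQ_API_KEY and GOOGLE_API_KEY are set in the environment.
# Model name, prompt text, and sample chunks below are placeholders, not taken from this commit.
from langchain_groq import ChatGroq
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain_community.vectorstores.faiss import FAISS
from langchain_core.prompts import ChatPromptTemplate
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains import create_retrieval_chain

llm = ChatGroq(model="llama3-8b-8192")  # placeholder Groq model name
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

# Build the FAISS index from already-split text chunks (in app.py these come
# from the RecursiveCharacterTextSplitter over the PDF or web page text).
chunks = ["chunk one ...", "chunk two ..."]  # placeholder chunks
vector = FAISS.from_texts(chunks, embeddings)

# The prompt for create_stuff_documents_chain must expose a {context} variable.
prompt = ChatPromptTemplate.from_template(
    "Answer the question using only this context:\n{context}\n\nQuestion: {input}"
)
document_chain = create_stuff_documents_chain(llm, prompt)
retrieval_chain = create_retrieval_chain(vector.as_retriever(), document_chain)

response = retrieval_chain.invoke({"input": "What is this document about?"})
print(response["answer"])   # the generated answer, shown via st.write
print(response["context"])  # retrieved Documents, iterated by the new expander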