Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -5,7 +5,7 @@ import logging
|
|
5 |
from langchain.document_loaders import PDFPlumberLoader
|
6 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
7 |
from langchain.prompts import ChatPromptTemplate
|
8 |
-
from langchain.llms import [imported name truncated in page capture]
|
9 |
from transformers import pipeline
|
10 |
|
11 |
# Configure logging
|
@@ -13,7 +13,7 @@ logging.basicConfig(level=logging.INFO)
|
|
13 |
logger = logging.getLogger(__name__)
|
14 |
|
15 |
# Page configuration
|
16 |
-
st.set_page_config(page_title="DeepSeek Chatbot", page_icon="🤖", layout="centered")
|
17 |
|
18 |
# Initialize session state for chat history
|
19 |
if "messages" not in st.session_state:
|
@@ -70,10 +70,13 @@ def generate_response_with_langchain(question, context):
|
|
70 |
"""
|
71 |
|
72 |
prompt = ChatPromptTemplate.from_template(prompt_template)
|
73 |
-
|
|
|
|
|
|
|
74 |
|
75 |
# Use LangChain to generate an answer
|
76 |
-
chain = prompt | [right-hand side truncated in page capture]
|
77 |
response = chain.invoke({"question": question, "context": context})
|
78 |
return response
|
79 |
|
@@ -94,10 +97,10 @@ if uploaded_file:
|
|
94 |
documents = process_pdf(uploaded_file)
|
95 |
context = "\n\n".join([doc.page_content for doc in documents])
|
96 |
|
97 |
-
#
|
98 |
prompt_input = "Ask a question about the PDF content"
|
99 |
|
100 |
-
# Show the [comment truncated in page capture]
|
101 |
prompt = st.chat_input(prompt_input) if documents else None
|
102 |
|
103 |
if prompt:
|
|
|
5 |
from langchain.document_loaders import PDFPlumberLoader
|
6 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
7 |
from langchain.prompts import ChatPromptTemplate
|
8 |
+
from langchain.llms import HuggingFaceLLM
|
9 |
from transformers import pipeline
|
10 |
|
11 |
# Configure logging
|
|
|
13 |
logger = logging.getLogger(__name__)
|
14 |
|
15 |
# Page configuration
|
16 |
+
st.set_page_config(page_title="DeepSeek Chatbot RAG", page_icon="🤖", layout="centered")
|
17 |
|
18 |
# Initialize session state for chat history
|
19 |
if "messages" not in st.session_state:
|
|
|
70 |
"""
|
71 |
|
72 |
prompt = ChatPromptTemplate.from_template(prompt_template)
|
73 |
+
|
74 |
+
# Initialize HuggingFace model with LangChain's HuggingFaceLLM
|
75 |
+
hf_pipeline = pipeline("text-generation", model=selected_model)
|
76 |
+
llm = HuggingFaceLLM(pipeline=hf_pipeline)
|
77 |
|
78 |
# Use LangChain to generate an answer
|
79 |
+
chain = prompt | llm
|
80 |
response = chain.invoke({"question": question, "context": context})
|
81 |
return response
|
82 |
|
|
|
97 |
documents = process_pdf(uploaded_file)
|
98 |
context = "\n\n".join([doc.page_content for doc in documents])
|
99 |
|
100 |
+
# Show the PDF-based question input if the PDF is uploaded
|
101 |
prompt_input = "Ask a question about the PDF content"
|
102 |
|
103 |
+
# Show the chat input if PDF is uploaded
|
104 |
prompt = st.chat_input(prompt_input) if documents else None
|
105 |
|
106 |
if prompt:
|