Mattral committed on
Commit
63b2477
·
verified ·
1 Parent(s): e17dfb8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -7
app.py CHANGED
@@ -5,7 +5,8 @@ import logging
5
  from langchain.document_loaders import PDFPlumberLoader
6
  from langchain.text_splitter import RecursiveCharacterTextSplitter
7
  from langchain.prompts import ChatPromptTemplate
8
- from langchain.llms import HuggingFaceLLM
 
9
  from transformers import pipeline
10
 
11
  # Configure logging
@@ -13,7 +14,7 @@ logging.basicConfig(level=logging.INFO)
13
  logger = logging.getLogger(__name__)
14
 
15
  # Page configuration
16
- st.set_page_config(page_title="DeepSeek Chatbot RAG", page_icon="🤖", layout="centered")
17
 
18
  # Initialize session state for chat history
19
  if "messages" not in st.session_state:
@@ -71,13 +72,15 @@ def generate_response_with_langchain(question, context):
71
 
72
  prompt = ChatPromptTemplate.from_template(prompt_template)
73
 
74
- # Initialize HuggingFace model with LangChain's HuggingFaceLLM
75
  hf_pipeline = pipeline("text-generation", model=selected_model)
76
- llm = HuggingFaceLLM(pipeline=hf_pipeline)
77
 
78
- # Use LangChain to generate an answer
79
- chain = prompt | llm
80
- response = chain.invoke({"question": question, "context": context})
 
 
81
  return response
82
 
83
  # Chat interface
 
5
  from langchain.document_loaders import PDFPlumberLoader
6
  from langchain.text_splitter import RecursiveCharacterTextSplitter
7
  from langchain.prompts import ChatPromptTemplate
8
+ from langchain.chains import LLMChain # This is used for chaining prompts and models
9
+ from langchain.llms import HuggingFacePipeline
10
  from transformers import pipeline
11
 
12
  # Configure logging
 
14
  logger = logging.getLogger(__name__)
15
 
16
  # Page configuration
17
+ st.set_page_config(page_title="DeepSeek Chatbot - ruslanmv.com", page_icon="🤖", layout="centered")
18
 
19
  # Initialize session state for chat history
20
  if "messages" not in st.session_state:
 
72
 
73
  prompt = ChatPromptTemplate.from_template(prompt_template)
74
 
75
+ # Initialize HuggingFace pipeline
76
  hf_pipeline = pipeline("text-generation", model=selected_model)
77
+ huggingface_llm = HuggingFacePipeline(pipeline=hf_pipeline)
78
 
79
+ # Set up LangChain's LLMChain
80
+ chain = LLMChain(prompt=prompt, llm=huggingface_llm)
81
+
82
+ # Use the chain to invoke the model with context and question
83
+ response = chain.run({"question": question, "context": context})
84
  return response
85
 
86
  # Chat interface