Mattral committed
Commit da06dc3 · verified · 1 Parent(s): 452bd06

Update app.py

Files changed (1)
  1. app.py +27 -48
app.py CHANGED
@@ -147,62 +147,41 @@ if input_type == 'URL':
     if base_url:
         urls = get_page_urls(base_url)
         retriever = get_retriever(urls)
+        llm_chain = create_chain(retriever)
 elif input_type == 'Upload PDF':
     uploaded_file = st.file_uploader("Upload your PDF here:", type="pdf")
     if uploaded_file:
         pdf_text = process_pdf(uploaded_file)
-        # Assume we process the PDF text into a format that can be used by your LLM
-        urls = [pdf_text] # This should be adjusted to match your system's needs
-        retriever = get_retriever(urls) # Make sure your retriever can handle raw text if not, adapt it.
+        # Process the PDF text into a format that can be used by your LLM
+        urls = [pdf_text] # Adapt as needed for your system
+        retriever = get_retriever(urls) # Ensure your retriever can handle raw text; if not, adapt it.
         llm_chain = create_chain(retriever)
 
 # We store the conversation in the session state.
 # This will be used to render the chat conversation.
 # We initialize it with the first message we want to be greeted with
 
+# Initialize chat session state for storing messages and responses
 if "messages" not in st.session_state:
-    st.session_state.messages = [
-        {"role": "assistant", "content": "How may I help you today?"}
-    ]
+    st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]
 
 if "current_response" not in st.session_state:
     st.session_state.current_response = ""
 
-# We loop through each message in the session state and render it as
-# a chat message.
+# Render the chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-
-# We initialize the quantized LLM from a local path.
-# Currently most parameters are fixed but we can make them
-# configurable.
-llm_chain = create_chain(retriever)
-
-# We take questions/instructions from the chat input to pass to the LLM
-if user_prompt := st.chat_input("Your message here", key="user_input"):
-
-    # Add our input to the session state
-    st.session_state.messages.append(
-        {"role": "user", "content": user_prompt}
-    )
-
-    # Add our input to the chat window
-    with st.chat_message("user"):
-        st.markdown(user_prompt)
+# Input and response handling
+if llm_chain and (user_prompt := st.chat_input("Your message here", key="user_input")):
+    # Add user input to the session state and chat window
+    st.session_state.messages.append({"role": "user", "content": user_prompt})
+    with st.chat_message("user"):
+        st.markdown(user_prompt)
 
-    # Pass our input to the llm chain and capture the final responses.
-    # It is worth noting that the Stream Handler is already receiving the
-    # streaming response as the llm is generating. We get our response
-    # here once the llm has finished generating the complete response.
-    response = llm_chain.run(user_prompt)
-
-    # Add the response to the session state
-    st.session_state.messages.append(
-        {"role": "assistant", "content": response}
-    )
-
-    # Add the response to the chat window
-    with st.chat_message("assistant"):
-        st.markdown(response)
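One thing the new guard relies on: `if llm_chain and (...)` assumes `llm_chain` is bound on every path, yet if no URL is entered and no PDF is uploaded, neither branch assigns it and the check raises a NameError. A minimal sketch of one way to make the guard safe, under the assumption that `input_type` comes from a widget such as `st.radio` (the selector and URL widget sit outside this hunk; `get_page_urls`, `get_retriever`, `create_chain`, and `process_pdf` are app.py's own helpers):

import streamlit as st  # repeated here only to keep the sketch self-contained

llm_chain = None  # sketch: bind the name up front so the guard below cannot NameError

# Assumed selector and URL widget; the real ones are not shown in this diff.
input_type = st.radio("Choose input type:", ("URL", "Upload PDF"))
if input_type == 'URL':
    base_url = st.text_input("Enter the base URL:")
    if base_url:
        urls = get_page_urls(base_url)
        retriever = get_retriever(urls)
        llm_chain = create_chain(retriever)
elif input_type == 'Upload PDF':
    uploaded_file = st.file_uploader("Upload your PDF here:", type="pdf")
    if uploaded_file:
        pdf_text = process_pdf(uploaded_file)
        retriever = get_retriever([pdf_text])
        llm_chain = create_chain(retriever)

With `llm_chain = None` in place, the `if llm_chain and (user_prompt := ...)` guard simply skips the chat input until a source is configured.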
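The retriever comment in the diff ("Ensure your retriever can handle raw text; if not, adapt it") leaves the adaptation open. Below is a sketch of one possible raw-text path for `get_retriever`, built on LangChain's text splitter and an in-memory FAISS index; the FAISS/HuggingFaceEmbeddings stack, chunk sizes, and `k` are assumptions, not taken from this repo:

from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

def get_retriever(texts):
    # Hypothetical raw-text variant: split each input string into
    # overlapping chunks sized for the embedding model.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    docs = splitter.create_documents(texts)
    # Embed the chunks into an in-memory FAISS store and expose it
    # through LangChain's retriever interface.
    store = FAISS.from_documents(docs, HuggingFaceEmbeddings())
    return store.as_retriever(search_kwargs={"k": 4})

Note this covers only the raw-text case the PDF branch needs (`[pdf_text]`); the app's real `get_retriever` receives URLs, so a faithful adaptation would first fetch each page's text before splitting.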
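`process_pdf` is called in both versions but not defined in this hunk. A hypothetical sketch of such a helper using pypdf (the library choice and the function's shape are assumptions):

from pypdf import PdfReader

def process_pdf(uploaded_file):
    # st.file_uploader returns a file-like object that PdfReader can read directly.
    reader = PdfReader(uploaded_file)
    # Concatenate every page's text; pages without extractable text yield None,
    # which the `or ""` turns into an empty string.
    return "\n".join(page.extract_text() or "" for page in reader.pages)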