Mattral committed on
Commit 74bf15b · verified · 1 Parent(s): aa8e6f0

Update app.py

Files changed (1)
  1. app.py +25 -26
app.py CHANGED
@@ -173,35 +173,34 @@ elif input_type == 'Upload PDF':
             st.markdown(message["content"])


-    if retriever:
-        # We initialize the quantized LLM from a local path.
-        # Currently most parameters are fixed but we can make them
-        # configurable.
-        llm_chain = create_chain(retriever)
+    # We initialize the quantized LLM from a local path.
+    # Currently most parameters are fixed but we can make them
+    # configurable.
+    llm_chain = create_chain(retriever)

-        # We take questions/instructions from the chat input to pass to the LLM
-        if user_prompt := st.chat_input("Your message here", key="user_input"):
+    # We take questions/instructions from the chat input to pass to the LLM
+    if user_prompt := st.chat_input("Your message here", key="user_input"):

-            # Add our input to the session state
-            st.session_state.messages.append(
-                {"role": "user", "content": user_prompt}
-            )
+        # Add our input to the session state
+        st.session_state.messages.append(
+            {"role": "user", "content": user_prompt}
+        )

-            # Add our input to the chat window
-            with st.chat_message("user"):
-                st.markdown(user_prompt)
+        # Add our input to the chat window
+        with st.chat_message("user"):
+            st.markdown(user_prompt)

-            # Pass our input to the llm chain and capture the final responses.
-            # It is worth noting that the Stream Handler is already receiving the
-            # streaming response as the llm is generating. We get our response
-            # here once the llm has finished generating the complete response.
-            response = llm_chain.run(user_prompt)
+        # Pass our input to the llm chain and capture the final responses.
+        # It is worth noting that the Stream Handler is already receiving the
+        # streaming response as the llm is generating. We get our response
+        # here once the llm has finished generating the complete response.
+        response = llm_chain.run(user_prompt)

-            # Add the response to the session state
-            st.session_state.messages.append(
-                {"role": "assistant", "content": response}
-            )
+        # Add the response to the session state
+        st.session_state.messages.append(
+            {"role": "assistant", "content": response}
+        )

-            # Add the response to the chat window
-            with st.chat_message("assistant"):
-                st.markdown(response)
+        # Add the response to the chat window
+        with st.chat_message("assistant"):
+            st.markdown(response)
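
The helper create_chain(retriever) that this hunk calls is not part of the commit, so the sketch below is only an assumption about how it might be wired up: a LangChain RetrievalQA chain over a quantized llama-cpp model, with a StreamHandler callback that pushes tokens into the Streamlit UI while the model generates. The class name StreamHandler echoes the comment in the diff; the imports, model path, and generation parameters are illustrative, not taken from this repository.

```python
import streamlit as st
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chains import RetrievalQA
from langchain.llms import LlamaCpp


class StreamHandler(BaseCallbackHandler):
    """Write each newly generated token into a Streamlit container."""

    def __init__(self, container, initial_text=""):
        self.container = container
        self.text = initial_text

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        self.text += token
        self.container.markdown(self.text)


def create_chain(retriever):
    # Hypothetical local path and fixed generation parameters; the commit
    # does not show the real values.
    llm = LlamaCpp(
        model_path="models/model.gguf",
        temperature=0.0,
        max_tokens=512,
        streaming=True,
        callbacks=[StreamHandler(st.empty())],
    )
    # RetrievalQA stuffs the retrieved chunks into the prompt;
    # chain.run(question) returns the final answer string, matching how
    # llm_chain.run(user_prompt) is used above.
    return RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
```

With a chain shaped like this, llm_chain.run(user_prompt) returns the complete answer only after generation finishes, while the callback has already streamed the partial text into the page, which is what the "Stream Handler" comment in the diff describes.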