mavinsao committed on
Commit
3a9a00e
·
verified ·
1 Parent(s): 980d851

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -6
app.py CHANGED
@@ -73,6 +73,12 @@ for message in st.session_state.messages:
73
  with st.chat_message(message["role"]):
74
  st.markdown(message["content"])
75
 
 
 
 
 
 
 
76
  # Accept user input
77
  if prompt := st.chat_input("What are you looking to learn?"):
78
  # Add user message to chat history
@@ -81,15 +87,14 @@ if prompt := st.chat_input("What are you looking to learn?"):
81
  with st.chat_message("user"):
82
  st.markdown(prompt)
83
 
84
- # Assistant response generation with a streaming effect
85
  with st.chat_message("assistant"):
86
  response = qa_chain({"question": prompt})
87
  response_text = response["answer"]
88
 
89
- # Stream the response character by character
90
- for char in response_text:
91
- st.markdown(char, unsafe_allow_html=True)
92
- time.sleep(0.01) # Adjust this delay to control typing speed
93
 
94
  # Add assistant response to chat history
95
- st.session_state.messages.append({"role": "assistant", "content": response_text})
 
73
  with st.chat_message(message["role"]):
74
  st.markdown(message["content"])
75
 
76
# Streamed response generator
def response_generator(response, delay=0.05):
    """Yield *response* one word at a time to simulate a typing effect.

    Splits on whitespace, so consecutive spaces/newlines in the source text
    are collapsed to single spaces in the streamed output.

    Args:
        response: The full text to stream.
        delay: Seconds to pause after each word; controls the apparent
            typing speed. Defaults to 0.05 (the original hard-coded value).

    Yields:
        Each word of *response* followed by a single trailing space.
    """
    for word in response.split():
        yield word + " "
        time.sleep(delay)
81
+
82
  # Accept user input
83
  if prompt := st.chat_input("What are you looking to learn?"):
84
  # Add user message to chat history
 
87
  with st.chat_message("user"):
88
  st.markdown(prompt)
89
 
90
+ # Assistant response generation with streaming effect
91
  with st.chat_message("assistant"):
92
  response = qa_chain({"question": prompt})
93
  response_text = response["answer"]
94
 
95
+ # Stream the response word by word
96
+ for word in response_generator(response_text):
97
+ st.markdown(word, unsafe_allow_html=True)
 
98
 
99
  # Add assistant response to chat history
100
+ st.session_state.messages.append({"role": "assistant", "content": response_text})