Update app.py
app.py CHANGED
@@ -276,7 +276,22 @@ def process_user_input(user_question):
         create_file(filename, user_question, message.content)
 
         #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
-
+
+def divide_prompt(prompt, max_length):
+    words = prompt.split()
+    chunks = []
+    current_chunk = []
+    current_length = 0
+    for word in words:
+        if len(word) + current_length <= max_length:
+            current_length += len(word) + 1  # Adding 1 to account for spaces
+            current_chunk.append(word)
+        else:
+            chunks.append(' '.join(current_chunk))
+            current_chunk = [word]
+            current_length = len(word)
+    chunks.append(' '.join(current_chunk))  # Append the final chunk
+    return chunks
 
 def main():
     # Sidebar and global
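
The new divide_prompt helper greedily packs whole words into chunks whose joined length stays at or under max_length characters; words are never split, so a single word longer than max_length still becomes its own oversized chunk. A quick sanity check of the chunker, assuming the function above is in scope (the input string and limit are made up for illustration):

    sections = divide_prompt('the quick brown fox jumps over the lazy dog', max_length=15)
    print(sections)
    # ['the quick brown', 'fox jumps over', 'the lazy dog']
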
@@ -333,7 +348,21 @@ def main():
 
     if st.button('💬 Chat'):
         st.write('Reasoning with your inputs...')
-
+
+        #response = chat_with_model(user_prompt, ''.join(list(document_sections,)), model_choice) # *************************************
+
+        # Divide the user_prompt into smaller sections
+        user_prompt_sections = divide_prompt(user_prompt, max_length)
+        full_response = ''
+        for prompt_section in user_prompt_sections:
+            # Process each section with the model
+            response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
+            full_response += response + '\n'  # Combine the responses
+
+        #st.write('Response:')
+        #st.write(full_response)
+
+        response = full_response
         st.write('Response:')
         st.write(response)
 
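
In the chat handler, the single chat_with_model call is replaced by a loop: the user prompt is split with divide_prompt, each chunk is sent to the model alongside the full document context, and the per-chunk answers are concatenated into one response. Each chunk is answered independently, so the model never sees an earlier chunk's reply. Note that max_length is read from the enclosing scope and is not defined in this diff, so it must be set elsewhere in app.py. A minimal sketch of the same flow as a standalone function, assuming the surrounding app provides chat_with_model, document_sections and model_choice; the max_length default is an illustrative guess, not part of this commit:

    def chunked_chat(user_prompt, document_sections, model_choice, max_length=3000):
        # Join the uploaded document sections into one context string,
        # then answer each prompt chunk against that same context.
        context = ''.join(document_sections)
        answers = [chat_with_model(chunk, context, model_choice)
                   for chunk in divide_prompt(user_prompt, max_length)]
        return '\n'.join(answers)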