Update app.py
app.py CHANGED
@@ -307,6 +307,45 @@ def process_video(user_name, video_input, user_prompt):
     save_data()
     return video_response
 
+# Callback function for text processing
+def process_text_callback(column_name):
+    text_input = st.session_state[f"{column_name}_text"]
+    selected_model = st.session_state[f"{column_name}_model"]
+    temp_values = st.session_state[f"{column_name}_temp"]
+    if text_input:
+        process_text(st.session_state.current_user['name'], text_input, selected_model, temp_values)
+        st.session_state[f"{column_name}_text"] = ""  # Clear the input after processing
+
+# Callback function for image processing
+def process_image_callback(column_name):
+    text_input = st.session_state[f"{column_name}_image_text"]
+    uploaded_files = st.session_state[f"{column_name}_image_upload"]
+    if text_input and uploaded_files:
+        for image_input in uploaded_files:
+            image_bytes = image_input.read()
+            process_image(st.session_state.current_user['name'], image_bytes, text_input)
+        st.session_state[f"{column_name}_image_text"] = ""  # Clear the input after processing
+        st.session_state[f"{column_name}_image_upload"] = None  # Clear the file uploader
+
+# Callback function for audio processing
+def process_audio_callback(column_name):
+    text_input = st.session_state[f"{column_name}_audio_text"]
+    uploaded_files = st.session_state[f"{column_name}_audio_upload"]
+    if uploaded_files:
+        for audio_input in uploaded_files:
+            process_audio(st.session_state.current_user['name'], audio_input, text_input)
+        st.session_state[f"{column_name}_audio_text"] = ""  # Clear the input after processing
+        st.session_state[f"{column_name}_audio_upload"] = None  # Clear the file uploader
+
+# Callback function for video processing
+def process_video_callback(column_name):
+    text_input = st.session_state[f"{column_name}_video_text"]
+    video_input = st.session_state[f"{column_name}_video_upload"]
+    if video_input and text_input:
+        process_video(st.session_state.current_user['name'], video_input, text_input)
+        st.session_state[f"{column_name}_video_text"] = ""  # Clear the input after processing
+        st.session_state[f"{column_name}_video_upload"] = None  # Clear the file uploader
+
 # Main function for each column
 def main_column(column_name):
     st.markdown(f"##### {column_name}")
@@ -316,40 +355,20 @@ def main_column(column_name):
     option = st.selectbox(f"Select an option for {column_name}", ("Text", "Image", "Audio", "Video"), key=f"{column_name}_option")
 
     if option == "Text":
-
-
-        if text_input:
-            process_text(st.session_state.current_user['name'], text_input, selected_model, temp_values)
-            # Clear the input after processing
-            st.session_state[f"{column_name}_text"] = ""
+        st.text_input(f"Enter your text for {column_name}:", key=f"{column_name}_text")
+        st.button(f"Process Text for {column_name}", on_click=process_text_callback, args=(column_name,))
     elif option == "Image":
-
-
-
-        for image_input in uploaded_files:
-            image_bytes = image_input.read()
-            process_image(st.session_state.current_user['name'], image_bytes, text_input)
-            # Clear the inputs after processing
-            st.session_state[f"{column_name}_image_text"] = ""
-            st.session_state[f"{column_name}_image_upload"] = None
+        st.text_input(f"Enter text prompt to use with Image context for {column_name}:", key=f"{column_name}_image_text")
+        st.file_uploader(f"Upload images for {column_name}", type=["png", "jpg", "jpeg"], accept_multiple_files=True, key=f"{column_name}_image_upload")
+        st.button(f"Process Image for {column_name}", on_click=process_image_callback, args=(column_name,))
     elif option == "Audio":
-
-
-
-        for audio_input in uploaded_files:
-            process_audio(st.session_state.current_user['name'], audio_input, text_input)
-            # Clear the inputs after processing
-            st.session_state[f"{column_name}_audio_text"] = ""
-            st.session_state[f"{column_name}_audio_upload"] = None
+        st.text_input(f"Enter text prompt to use with Audio context for {column_name}:", key=f"{column_name}_audio_text")
+        st.file_uploader(f"Upload an audio file for {column_name}", type=["mp3", "wav"], accept_multiple_files=True, key=f"{column_name}_audio_upload")
+        st.button(f"Process Audio for {column_name}", on_click=process_audio_callback, args=(column_name,))
     elif option == "Video":
-
-
-
-        if video_input and text_input:
-            process_video(st.session_state.current_user['name'], video_input, text_input)
-            # Clear the inputs after processing
-            st.session_state[f"{column_name}_video_text"] = ""
-            st.session_state[f"{column_name}_video_upload"] = None
+        st.file_uploader(f"Upload a video file for {column_name}", type=["mp4"], key=f"{column_name}_video_upload")
+        st.text_input(f"Enter text prompt to use with Video context for {column_name}:", key=f"{column_name}_video_text")
+        st.button(f"Process Video for {column_name}", on_click=process_video_callback, args=(column_name,))
 
 # Main Streamlit app
 st.title("Personalized Real-Time Chat")
@@ -390,4 +409,4 @@ with col2:
 
 # Run the Streamlit app
 if __name__ == "__main__":
-    st.markdown("\n[by Aaron Wacker](https://huggingface.co/spaces/awacke1/).")
+    st.markdown("\n[by Aaron Wacker](https://huggingface.co/spaces/awacke1/ChatStreamlitMultiplayer).")
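For reference, the session-state reset that the new callbacks rely on can be exercised in isolation. Below is a minimal sketch of the pattern, assuming a recent Streamlit release; the process_text stub, the user name, model string, and temperature are placeholders, not the app's real values. Writing to a widget's st.session_state key is allowed inside an on_click callback because callbacks run at the start of the next rerun, before the widget is re-instantiated; the same assignment made after the widget exists raises a StreamlitAPIException. (st.file_uploader keys generally cannot be set through st.session_state at all, so the None resets in the upload callbacks may need verifying against the installed Streamlit version.)

import streamlit as st

# Hypothetical stand-in for the app's process_text(); only the clearing pattern matters here.
def process_text(user_name, text, model, temperature):
    st.session_state.setdefault("history", []).append(f"{user_name}: {text}")

def process_text_callback(column_name):
    text_input = st.session_state[f"{column_name}_text"]
    if text_input:
        process_text("demo-user", text_input, "placeholder-model", 0.5)
        # Safe here: the callback runs before widgets are re-created on the next rerun,
        # so the text_input's session_state key can still be written.
        st.session_state[f"{column_name}_text"] = ""

column_name = "Left"
st.text_input(f"Enter your text for {column_name}:", key=f"{column_name}_text")
st.button(f"Process Text for {column_name}", on_click=process_text_callback, args=(column_name,))
st.write(st.session_state.get("history", []))

Launching this with streamlit run shows the input box emptying after each click while the processed entries accumulate in history, which is the behavior the refactored main_column branches aim for.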