
import streamlit as st
from streamlit_player import st_player  # imported but not used in this script

# Set the title and description
st.title('Evolving Zero Shot Classification Multimodal Agentic AI')
st.write('Created with DeepSeek-V3-Base')
st.write('Images generated via: prithivMLmods/FLUX-LoRA-DLC | prithivMLmods/Fashion-Hut-Modeling-LoRA | Purz/neon-sign')
st.write('Prediction probabilities are derived from: Zero Shot Image Classification')
st.write('Enhanced Goal Oriented Autonomy')
st.write('Additional multimodal agentic AI layer of user interaction derived from a non-organic algorithm (agentic AI).')
st.write('Implements natural language stopping (complete thoughts | sentences) for the LLM output')
st.write('Responses are derived from the mlx-community/Qwen2.5-7B-4bit LLM')
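# --- Hedged sketch (defined here for reference; not called by this app) ----
# The source does not show how the "Zero Shot Image Classification"
# probabilities mentioned above were produced. The function below is a
# minimal sketch of one common approach, assuming the Hugging Face
# transformers pipeline and a CLIP-style checkpoint such as
# 'openai/clip-vit-base-patch32'; the model name and candidate labels are
# illustrative assumptions, not taken from the original project.
def zero_shot_probabilities(image_path, candidate_labels):
    """Return label/score pairs for an image via zero-shot classification."""
    from transformers import pipeline  # imported lazily so the app runs without it
    classifier = pipeline(
        'zero-shot-image-classification',
        model='openai/clip-vit-base-patch32',  # assumed checkpoint
    )
    # The pipeline returns a list of {'label': ..., 'score': ...} dicts,
    # sorted by descending probability.
    return classifier(image_path, candidate_labels=candidate_labels)
    # Example: zero_shot_probabilities('photo.png', ['neon sign', 'fashion model'])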
# Play the demo videos in the specified order
video_paths = [
    'classification_output.mp4',
    'follow_up_output.mp4',
    'response_output.mp4',
    'classification_output_2.mp4',
    'follow_up_output_2.mp4',
    'response_output_2.mp4',
]
for video_path in video_paths:
    with open(video_path, 'rb') as video_file:
        st.video(video_file.read())
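
# --- Hedged sketch (defined here for reference; not called by this app) ----
# The app states that responses come from the mlx-community/Qwen2.5-7B-4bit
# LLM and that generation stops at complete thoughts / sentences. The sketch
# below shows one plausible way to do that with the mlx-lm package: generate
# a bounded number of tokens, then truncate at the last sentence-ending
# punctuation mark. The exact prompts and generation settings used by the
# original project are not shown in the source and are assumed here.
def generate_complete_sentences(prompt, max_tokens=256):
    """Generate a response and trim it back to the last complete sentence."""
    from mlx_lm import load, generate  # imported lazily; requires Apple MLX
    model, tokenizer = load('mlx-community/Qwen2.5-7B-4bit')
    text = generate(model, tokenizer, prompt=prompt, max_tokens=max_tokens)
    # Keep everything up to and including the last '.', '!', or '?' so the
    # response ends on a complete sentence rather than mid-thought.
    cut = max(text.rfind('.'), text.rfind('!'), text.rfind('?'))
    return text[:cut + 1] if cut != -1 else text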