import os

from dotenv import load_dotenv

load_dotenv()  # load API keys from a local .env file before project modules are imported

import streamlit as st
from st_audiorec import st_audiorec
from langchain_mistralai import ChatMistralAI

from Modules.Speech2Text.transcribe import transcribe
from Modules.rag import rag_chain

mistral_api_key = os.getenv("MISTRAL_API_KEY")

def format_messages(messages):
    """Flatten a list of {"role", "content"} chat messages into a single prompt string."""
    formatted_messages = ""
    for message in messages:
        role = message["role"]
        content = message["content"]
        formatted_messages += f"{role}: {content}\n"
    return formatted_messages

st.set_page_config(layout="wide", initial_sidebar_state="collapsed")
# Create two columns
col1, col2 = st.columns(2)
video_uploaded = None
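# Mistral chat model (not invoked directly in this file; answers below come from rag_chain)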
llm = ChatMistralAI(model="mistral-large-latest", mistral_api_key=mistral_api_key, temperature=0)

# First column containers
with col1:
    st.subheader("Audio Recorder")
    recorded = False
    temp_path = 'data/temp_audio/audio_file.wav'
    wav_audio_data = st_audiorec()
    if wav_audio_data is not None:
        # Make sure the temp directory exists, then write the recording to disk
        os.makedirs(os.path.dirname(temp_path), exist_ok=True)
        with open(temp_path, 'wb') as f:
            f.write(wav_audio_data)
        # Transcribe the saved audio into a text instruction
        instruction = transcribe(temp_path)
        print(instruction)
        recorded = True


    st.subheader("LLM answering")
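    # Chat flow: replay the stored history, append the transcribed instruction as the
    # new user turn, then answer it with the RAG chain.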
    if recorded:
        if "messages" not in st.session_state:
            st.session_state.messages = []
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

        st.session_state.messages.append({"role": "user", "content": instruction})
        with st.chat_message("user"):
            st.markdown(instruction)

        with st.chat_message("assistant"):
            # Build the answer from the RAG chain and keep it in the chat history
            response = rag_chain.invoke(instruction)
            st.session_state.messages.append({"role": "assistant", "content": response})
            st.markdown(response)

    st.subheader("Movement Analysis")
    # TODO
# Second column containers
with col2:
    st.subheader("Sports Agenda")
    # TODO
    st.subheader("Video Analysis")
    ask_video = st.empty()
    if video_uploaded is None:
        video_uploaded = ask_video.file_uploader("Choose a video file", type=["mp4", "ogg", "webm"])
    if video_uploaded:
        ask_video.empty()
        with st.spinner("Processing video"):
            pass  # TODO: video processing pipeline
        _left, mid, _right = st.columns(3)
        with mid:
            st.video(video_uploaded)

    st.subheader("Graph Displayer")
    # TODO