import os

import gradio as gr

from api.audio import STTManager, TTSManager
from api.llm import LLMManager
from config import config
from docs.instruction import instruction
from resources.data import fixed_messages, topics_list
from resources.prompts import prompts
from utils.ui import add_candidate_message, add_interviewer_message

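# Instantiate the LLM, text-to-speech, and speech-to-text managers from the shared config.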
llm = LLMManager(config, prompts)
tts = TTSManager(config)
stt = STTManager(config)

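# Keyword arguments shared by every microphone recorder (gr.Audio) in the app.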
default_audio_params = {
    "label": "Record answer",
    "sources": ["microphone"],
    "type": "numpy",
    "waveform_options": {"show_controls": False},
    "editable": False,
    "container": False,
    "show_share_button": False,
}


def hide_settings():
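    """Close the settings accordion, disable problem generation, and open the solution area with an active recorder."""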
    init_acc = gr.Accordion("Settings", open=False)
    start_btn = gr.Button("Generate a problem", interactive=False)
    solution_acc = gr.Accordion("Solution", open=True)
    end_btn = gr.Button("Finish the interview", interactive=True)
    audio_input = gr.Audio(interactive=True, **default_audio_params)
    return init_acc, start_btn, solution_acc, end_btn, audio_input


def hide_solution():
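    """Close the solution and problem panels and disable the finish button and recorder once the interview ends."""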
    solution_acc = gr.Accordion("Solution", open=False)
    end_btn = gr.Button("Finish the interview", interactive=False)
    problem_acc = gr.Accordion("Problem statement", open=False)
    audio_input = gr.Audio(interactive=False, **default_audio_params)
    return solution_acc, end_btn, problem_acc, audio_input


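# UI layout: an Instruction tab with backend health checks and a Coding tab that hosts the interview itself.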
with gr.Blocks() as demo:
    with gr.Tab("Instruction") as instruction_tab:
        with gr.Row():
            with gr.Column(scale=10):
                gr.Markdown("# Welcome to the AI Tech Interviewer Training!")
                gr.Markdown(instruction["intro"])

                if os.getenv("IS_DEMO"):
                    gr.Markdown(instruction["demo"])

                gr.Markdown("### Introduction")
                gr.Markdown("### Setting Up Locally")
                gr.Markdown("### Interview Interface Overview")
                gr.Markdown("### Models Configuration")
                gr.Markdown("### Acknowledgement")
                gr.Markdown(instruction["acknowledgements"])

            with gr.Column(scale=1):
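                # Basic health checks: ping each backend once and show a green/red status line.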
                try:
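                    # The synthesized sample is reused by the STT check below; if TTS fails, STT will also show red.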
                    audio_test = tts.text_to_speech("Handshake")
                    gr.Markdown(f"TTS status: 🟒.   Model: {config.tts.name}")
                except Exception:
                    gr.Markdown(f"TTS status: πŸ”΄.   Model: {config.tts.name}")

                try:
                    text_test = stt.speech_to_text(audio_test, False)
                    gr.Markdown(f"STT status: 🟒.   Model: {config.stt.name}")
                except Exception:
                    gr.Markdown(f"STT status: πŸ”΄.   Model: {config.stt.name}")

                try:
                    llm.test_connection()
                    gr.Markdown(f"LLM status: 🟒.   Model: {config.llm.name}")
                except Exception:
                    gr.Markdown(f"LLM status: πŸ”΄.   Model: {config.llm.name}")

    with gr.Tab("Coding") as coding_tab:
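        # Per-session state shared between the event handlers below.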
        chat_history = gr.State([])
        previous_code = gr.State("")
        client = gr.State(None)
        client_started = gr.State(False)
        with gr.Accordion("Settings") as init_acc:
            with gr.Row():
                with gr.Column():
                    gr.Markdown("##### Problem settings")
                    with gr.Row():
                        gr.Markdown("Difficulty")
                        difficulty_select = gr.Dropdown(
                            label="Select difficulty",
                            choices=["Easy", "Medium", "Hard"],
                            value="Medium",
                            container=False,
                            allow_custom_value=True,
                        )
                    with gr.Row():
                        gr.Markdown("Topic (can type custom value)")
                        topic_select = gr.Dropdown(
                            label="Select topic", choices=topics_list, value="Arrays", container=False, allow_custom_value=True
                        )
                with gr.Column(scale=2):
                    requirements = gr.Textbox(label="Requirements", placeholder="Specify additional requirements", lines=5)
                    start_btn = gr.Button("Generate a problem")

        with gr.Accordion("Problem statement", open=True) as problem_acc:
            description = gr.Markdown()
        with gr.Accordion("Solution", open=False) as solution_acc:
            with gr.Row() as content:
                with gr.Column(scale=2):
                    code = gr.Code(
                        label="Please write your code here. Only Python syntax highlighting is available for now.",
                        language="python",
                        lines=35,
                    )
                with gr.Column(scale=1):
                    end_btn = gr.Button("Finish the interview", interactive=False)
                    chat = gr.Chatbot(label="Chat", show_label=False, show_share_button=False)
                    audio_input = gr.Audio(interactive=False, **default_audio_params)
                    audio_output = gr.Audio(label="Play audio", autoplay=True, visible=False)
                    message = gr.Textbox(label="Message", lines=3, visible=False)

        with gr.Accordion("Feedback", open=True) as feedback_acc:
            feedback = gr.Markdown()

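    # Greet the candidate with the fixed intro message when the Coding tab is opened.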
    coding_tab.select(fn=add_interviewer_message(fixed_messages["intro"]), inputs=[chat], outputs=[chat])

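    # Generate a problem from the selected settings, then hide the settings and unlock the interview controls.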
    start_btn.click(fn=add_interviewer_message(fixed_messages["start"]), inputs=[chat], outputs=[chat]).then(
        fn=llm.get_problem,
        inputs=[requirements, difficulty_select, topic_select],
        outputs=[description, chat_history],
        scroll_to_output=True,
    ).then(fn=hide_settings, inputs=None, outputs=[init_acc, start_btn, solution_acc, end_btn, audio_input])

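    # Send the current code and typed/transcribed message to the LLM and update the chat.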
    message.submit(
        fn=llm.send_request,
        inputs=[code, previous_code, message, chat_history, chat],
        outputs=[chat_history, chat, message, previous_code],
    )

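    # Finish the interview: post the closing message, request feedback from the LLM, and lock the UI.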
    end_btn.click(
        fn=add_interviewer_message(fixed_messages["end"]),
        inputs=[chat],
        outputs=[chat],
    ).then(
        fn=llm.end_interview, inputs=[description, chat_history], outputs=feedback
    ).then(fn=hide_solution, inputs=None, outputs=[solution_acc, end_btn, problem_acc, audio_input])

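    # After each recording: transcribe the answer, reset the recorder, post the message, and query the LLM.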
    audio_input.stop_recording(fn=stt.speech_to_text, inputs=[audio_input], outputs=[message]).then(
        fn=lambda: None, inputs=None, outputs=[audio_input]
    ).then(fn=add_candidate_message, inputs=[message, chat], outputs=[chat]).then(
        fn=llm.send_request,
        inputs=[code, previous_code, message, chat_history, chat],
        outputs=[chat_history, chat, message, previous_code],
    )

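    # Read the most recent chat message aloud whenever the chat history changes.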
    chat.change(fn=tts.read_last_message, inputs=[chat], outputs=[audio_output])

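    # Reset the audio output component once playback stops.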
    audio_output.stop(fn=lambda: None, inputs=None, outputs=[audio_output])

demo.launch(show_api=False)