Update app.py
app.py CHANGED
@@ -1,6 +1,5 @@
 import spaces
 import tempfile
-import asyncio
 import gradio as gr
 from streaming_stt_nemo import Model
 from huggingface_hub import InferenceClient
@@ -17,8 +16,9 @@ def transcribe(audio):
 
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
-system_instructions = "[SYSTEM] …
+system_instructions = "[SYSTEM] You are CrucialCoach, an AI-powered conversational coach based on the principles from the book 'Crucial Conversations'. Your role is to guide the user through a challenging workplace situation that requires effective communication skills. The user will present a case study, and your task is to provide step-by-step guidance on how to approach the conversation, focusing on the key principles of crucial conversations.\n\nCase Study: The user is an employee who needs to address a performance issue with a team member. The team member consistently misses deadlines, which affects the overall project timeline. The user wants to have a conversation with the team member to address the issue and find a solution.\n\nYour coaching should cover the following steps:\n1. Preparing for the conversation: Help the user identify the desired outcome, gather facts, and plan the conversation.\n2. Starting the conversation: Guide the user on how to begin the conversation in a non-confrontational manner, focusing on shared goals and mutual respect.\n3. Exploring the issue: Encourage the user to ask open-ended questions, listen actively, and seek to understand the team member's perspective.\n4. Finding a solution: Help the user brainstorm potential solutions and guide them on how to collaboratively agree on a course of action.\n5. Following up: Advise the user on how to follow up after the conversation to ensure commitment and monitor progress.\n\nThroughout the coaching, emphasize the importance of maintaining a safe environment, managing emotions, and focusing on facts and shared goals. Provide specific examples and phrases the user can employ during the conversation.\n\n[USER]"
 
+@spaces.GPU(duration=120)
 def model(text):
     generate_kwargs = dict(
         temperature=0.7,
@@ -29,7 +29,7 @@ def model(text):
         seed=42,
     )
 
-    formatted_prompt = system_instructions + text + "[…
+    formatted_prompt = system_instructions + text + "[CrucialCoach]"
     stream = client.text_generation(
         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
@@ -39,20 +39,19 @@ def model(text):
 
     return output
 
-
-async def respond(audio):
+def respond(audio):
     user = transcribe(audio)
     reply = model(user)
     communicate = edge_tts.Communicate(reply)
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
         tmp_path = tmp_file.name
-        …
+        communicate.save(tmp_path)
     return tmp_path
 
 with gr.Blocks() as voice:
     with gr.Row():
         input = gr.Audio(label="Voice Chat", source="microphone", type="filepath")
-        output = gr.Audio(label="…
+        output = gr.Audio(label="CrucialCoach", type="filepath", interactive=False, autoplay=True)
 
     gr.Interface(
         fn=respond,
@@ -63,8 +62,8 @@ with gr.Blocks() as voice:
 
 theme = gr.themes.Base()
 
-with gr.Blocks(theme=theme, css="footer {visibility: hidden}textbox{resize:none}", title="…
-    gr.Markdown("# …
+with gr.Blocks(theme=theme, css="footer {visibility: hidden}textbox{resize:none}", title="CrucialCoach DEMO") as demo:
+    gr.Markdown("# CrucialCoach")
     gr.TabbedInterface([voice], ['🗣️ Voice Chat'])
 
 demo.queue(max_size=200)
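Note on the streaming call in model(): the hunks above skip the unchanged lines that turn the token stream into a string. With stream=True and details=True, huggingface_hub's InferenceClient.text_generation yields per-token detail objects, so the accumulation typically looks like the sketch below. This is a plausible reconstruction for illustration only, not the exact hidden lines of app.py; the generate() helper name is hypothetical.

# Sketch of consuming a token stream from InferenceClient.text_generation
# (stream=True, details=True). The generate() helper name is hypothetical;
# the loop mirrors the usual accumulation pattern, not app.py's hidden lines.
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

def generate(prompt: str) -> str:
    stream = client.text_generation(
        prompt, max_new_tokens=256, stream=True, details=True, return_full_text=False)
    output = ""
    for chunk in stream:
        # Each chunk carries a token; skip special tokens (e.g. end-of-sequence)
        # and append the generated text.
        if not chunk.token.special:
            output += chunk.token.text
    return output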
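The commit also drops async/await from respond() while keeping edge_tts for speech synthesis. In the edge-tts package, Communicate.save() is a coroutine, so the new communicate.save(tmp_path) call from a plain function likely only creates an un-awaited coroutine rather than writing the file. A minimal sketch of one way to keep the function synchronous is shown below; the speak() helper name is illustrative and not part of app.py.

# Minimal sketch: synthesize text to a temp file from synchronous code.
# Assumes the edge-tts package, where Communicate.save() is an async coroutine.
import asyncio
import tempfile

import edge_tts

def speak(text: str) -> str:
    communicate = edge_tts.Communicate(text)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
    # save() is a coroutine; run it to completion before returning the path.
    asyncio.run(communicate.save(tmp_path))
    return tmp_path

Recent edge-tts releases also expose a synchronous Communicate.save_sync() wrapper, which would avoid the asyncio.run call if it is available in the version pinned by the Space.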