import tempfile

import edge_tts
import gradio as gr
import spaces
from huggingface_hub import InferenceClient
from streaming_stt_nemo import Model

default_lang = "en"

# Speech-to-text engine (NeMo streaming STT), one instance per supported language.
engines = {default_lang: Model(default_lang)}


def transcribe(audio):
    """Transcribe the recorded audio file to text."""
    lang = "en"
    model = engines[lang]
    text = model.stt_file(audio)[0]
    return text


client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

system_instructions = (
    "[SYSTEM] You are CrucialCoach, an AI-powered conversational coach based on the "
    "principles from the book 'Crucial Conversations'. Your role is to guide the user "
    "through a challenging workplace situation that requires effective communication "
    "skills. The user will present a case study, and your task is to provide "
    "step-by-step guidance on how to approach the conversation, focusing on the key "
    "principles of crucial conversations.\n\n"
    "Case Study: The user is an employee who needs to address a performance issue with "
    "a team member. The team member consistently misses deadlines, which affects the "
    "overall project timeline. The user wants to have a conversation with the team "
    "member to address the issue and find a solution.\n\n"
    "Your coaching should cover the following steps:\n"
    "1. Preparing for the conversation: Help the user identify the desired outcome, "
    "gather facts, and plan the conversation.\n"
    "2. Starting the conversation: Guide the user on how to begin the conversation in a "
    "non-confrontational manner, focusing on shared goals and mutual respect.\n"
    "3. Exploring the issue: Encourage the user to ask open-ended questions, listen "
    "actively, and seek to understand the team member's perspective.\n"
    "4. Finding a solution: Help the user brainstorm potential solutions and guide them "
    "on how to collaboratively agree on a course of action.\n"
    "5. Following up: Advise the user on how to follow up after the conversation to "
    "ensure commitment and monitor progress.\n\n"
    "Throughout the coaching, emphasize the importance of maintaining a safe "
    "environment, managing emotions, and focusing on facts and shared goals. Provide "
    "specific examples and phrases the user can employ during the conversation.\n\n"
    "[USER]"
)


@spaces.GPU(duration=120)
def model(text):
    """Generate a coaching reply from Mixtral for the transcribed user text."""
    generate_kwargs = dict(
        temperature=0.7,
        max_new_tokens=512,
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = system_instructions + text + "[CrucialCoach]"
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        if response.token.text:
            output += response.token.text
    return output


async def respond(audio):
    """Full pipeline: speech -> text -> Mixtral reply -> speech via edge-tts."""
    user = transcribe(audio)
    reply = model(user)
    communicate = edge_tts.Communicate(reply)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
    # edge_tts.Communicate.save is a coroutine and must be awaited.
    await communicate.save(tmp_path)
    return tmp_path


with gr.Blocks() as voice:
    with gr.Row():
        audio_input = gr.Audio(
            label="Voice Chat",
            sources="microphone",
            type="filepath",
        )
        audio_output = gr.Audio(
            label="CrucialCoach",
            type="filepath",
            interactive=False,
            autoplay=True,
        )
        gr.Interface(
            fn=respond,
            inputs=[audio_input],
            outputs=[audio_output],
            live=True,
        )

theme = gr.themes.Base()

with gr.Blocks(
    theme=theme,
    css="footer {visibility: hidden} textarea {resize: none}",
    title="CrucialCoach DEMO",
) as demo:
    gr.Markdown("# CrucialCoach")
    gr.TabbedInterface([voice], ["🗣️ Voice Chat"])

demo.queue(max_size=200)
demo.launch()