Update app.py
app.py CHANGED
@@ -27,7 +27,7 @@ def infer(audio, openai_api_key):
 
     portrait_link = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)
 
-    return gr.Textbox.update(value=whisper_result, visible=True), portrait_link, gr.Textbox.update(value=gpt_response[1], visible=True), gr.update(visible=True), gr.
+    return gr.Textbox.update(value=whisper_result, visible=True), portrait_link, gr.Textbox.update(value=gpt_response[1], visible=True), gr.update(visible=True), gr.update(visible=True)
 
 def try_api(message, openai_api_key):
 
@@ -82,7 +82,7 @@ def call_api(message, openai_api_key):
     return str(response.choices[0].text).split("\n",2)[2]
 
 def clean_components():
-    return gr.Audio.update(source="microphone",type="filepath", label="Audio input", show_label=True, elem_id="record_btn"), gr.HTML.update(visible=False), gr.Textbox.update(visible=False), gr.
+    return gr.Audio.update(source="microphone",type="filepath", label="Audio input", show_label=True, elem_id="record_btn"), gr.HTML.update(visible=False), gr.Textbox.update(visible=False), gr.update(visible=False)
 
 title = """
 <div style="text-align: center; max-width: 500px; margin: 0 auto;">
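Both changes complete a return tuple that previously broke off at a trailing `gr.`, so the last output component of the event handler received no valid update. The code uses the Gradio 3.x update API (`gr.Textbox.update`, `gr.update`), where a handler must return one update object per entry in its `outputs` list, matched positionally. A minimal sketch of that pattern, with hypothetical component and function names (not taken from this Space):

```python
import gradio as gr

def clear_all():
    # One update per output component; Gradio matches them to the
    # outputs list positionally, so the tuple length must equal len(outputs).
    return (
        gr.Textbox.update(value="", visible=False),  # transcript
        gr.HTML.update(visible=False),               # portrait
        gr.update(visible=False),                    # generic update works for any component
    )

with gr.Blocks() as demo:
    transcript = gr.Textbox(label="Transcript", visible=False)
    portrait = gr.HTML(visible=False)
    answer = gr.Textbox(label="Answer", visible=False)
    clear_btn = gr.Button("Clear")
    clear_btn.click(clear_all, inputs=None, outputs=[transcript, portrait, answer])

demo.launch()
```

With a truncated tuple (e.g. ending in a bare `gr.`), the module fails to import or the final component is never updated, which is what appending `gr.update(visible=True)` / `gr.update(visible=False)` in this commit appears to fix.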