"""GPT Talking Portrait — Gradio demo.

Chains three hosted Hugging Face Spaces into one voice-driven pipeline:
  1. whisper-to-chatGPT    : transcribe/translate the spoken question and
                             obtain a chatGPT text reply,
  2. IMS-Toucan            : synthesize that reply as English speech,
  3. one-shot-talking-face : animate a portrait image lip-synced to the speech.
"""
import gradio as gr
# import requests           # only needed by the commented-out fetch below
# from PIL import Image     # only needed by the commented-out Image.open below
import os

from share_btn import community_icon_html, loading_icon_html, share_js

# HF token used to call the one-shot-talking-face Space.
token = os.environ.get('HF_TOKEN')

# Remote Spaces loaded as callable pipelines.
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
talking_face = gr.Blocks.load(name="spaces/fffiloni/one-shot-talking-face",
                              api_key=token)


def infer(audio):
    """Run voice -> chatGPT -> TTS -> talking-head and reveal the share widgets.

    Parameters
    ----------
    audio : str
        Filepath of the recorded microphone clip (``type="filepath"`` input).

    Returns
    -------
    tuple
        ``(whisper_translation, portrait_video, *three gr.update(visible=True))``
        matching the ``outputs=`` list wired to ``send_btn`` below.
    """
    # NOTE(review): indexing assumes the Space returns
    # (whisper translation, chatGPT answer, ...) — confirm against that Space.
    whisper_to_gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)

    # Speak the chatGPT answer with an English voice.
    audio_response = tts(whisper_to_gpt_response[1], "English Text",
                         "English Accent", "English Speaker's Voice", fn_index=0)

    # Animate the bundled portrait image with the synthesized speech.
    portrait_link = talking_face("wise_woman_portrait.png", audio_response,
                                 fn_index=0)

    return (whisper_to_gpt_response[0], portrait_link,
            gr.update(visible=True), gr.update(visible=True),
            gr.update(visible=True))


title = """

GPT Talking Portrait

Use Whisper to ask, alive portrait responds !

"""

css = '''
    #col-container, #col-container-2 {max-width: 510px; margin-left: auto; margin-right: auto;}
    a {text-decoration-line: underline; font-weight: 600;}
    div#record_btn > .mt-6 { margin-top: 0!important; }
    div#record_btn > .mt-6 button { width: 100%; height: 40px; }
    .footer { margin-bottom: 45px; margin-top: 10px; text-align: center; border-bottom: 1px solid #e5e5e5; }
    .footer>p { font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white; }
    .dark .footer { border-color: #303030; }
    .dark .footer>p { background: #0b0f19; }
    .animate-spin { animation: spin 1s linear infinite; }
    @keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } }
    #share-btn-container { display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; }
    #share-btn { all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0; }
    #share-btn * { all: unset; }
    #share-btn-container div:nth-child(-n+2){ width: auto !important; min-height: 0px !important; }
    #share-btn-container .wrap { display: none !important; }
'''

with gr.Blocks(css=css) as demo:
    # Output column: title banner + the generated talking-portrait video.
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        gpt_response = gr.Video(label="Talking Portrait response",
                                elem_id="video_out")

    # Input column: microphone recorder, transcript echo, send + share widgets.
    # (presumably a sibling of col-container — both carry the same max-width
    # rule in the CSS; confirm against the deployed layout)
    with gr.Column(elem_id="col-container-2"):
        record_input = gr.Audio(source="microphone", type="filepath",
                                label="Audio input", show_label=True,
                                elem_id="record_btn")
        whisper_tr = gr.Textbox(label="whisper english translation",
                                elem_id="text_inp")
        send_btn = gr.Button("Send my request !")

        # Share widgets start hidden; infer() flips them visible on success.
        with gr.Group(elem_id="share-btn-container"):
            community_icon = gr.HTML(community_icon_html, visible=False)
            loading_icon = gr.HTML(loading_icon_html, visible=False)
            share_button = gr.Button("Share to community",
                                     elem_id="share-btn", visible=False)

    send_btn.click(infer,
                   inputs=[record_input],
                   outputs=[whisper_tr, gpt_response, share_button,
                            community_icon, loading_icon])
    # Share is handled entirely client-side by the share_js snippet.
    share_button.click(None, [], [], _js=share_js)

demo.queue(max_size=32, concurrency_count=20).launch(debug=True)