import os

# Pin the Gradio version this Space was built against before importing it,
# so the reinstall actually affects the running process.
os.system("pip uninstall -y gradio")
os.system("pip install gradio==2.7.5.2")

import gradio as gr
import numpy as np
import torch

from InferenceInterfaces.Meta_FastSpeech2 import Meta_FastSpeech2
def float2pcm(sig, dtype='int16'):
    """
    Convert a float waveform in [-1.0, 1.0] to integer PCM samples.

    Taken from https://gist.github.com/HudsonHuang/fbdf8e9af7993fe2a91620d3fb86a182
    """
    sig = np.asarray(sig)
    if sig.dtype.kind != 'f':
        raise TypeError("'sig' must be a float array")
    dtype = np.dtype(dtype)
    if dtype.kind not in 'iu':
        raise TypeError("'dtype' must be an integer type")
    i = np.iinfo(dtype)
    abs_max = 2 ** (i.bits - 1)
    offset = i.min + abs_max
    # Scale to the full integer range, clip to avoid overflow, then cast.
    return (sig * abs_max + offset).clip(i.min, i.max).astype(dtype)
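

# Illustrative values (hypothetical input, not part of the original script):
#   float2pcm(np.array([0.0, 0.5, -1.0]))
#   -> array([     0,  16384, -32768], dtype=int16)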
class TTS_Interface:

    def __init__(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = Meta_FastSpeech2(device=self.device)
        # Cache the current settings so the model is only reconfigured when they change.
        self.current_speaker = "English Speaker's Voice"
        self.current_language = "English"
        self.language_id_lookup = {
            "English"   : "en",
            "German"    : "de",
            "Greek"     : "el",
            "Spanish"   : "es",
            "Finnish"   : "fi",
            "Russian"   : "ru",
            "Hungarian" : "hu",
            "Dutch"     : "nl",
            "French"    : "fr",
            "Polish"    : "pl",
            "Portuguese": "pt",
            "Italian"   : "it",
        }
        self.speaker_path_lookup = {
            "English Speaker's Voice"   : "reference_audios/english.wav",
            "German Speaker's Voice"    : "reference_audios/german.wav",
            "Greek Speaker's Voice"     : "reference_audios/greek.wav",
            "Spanish Speaker's Voice"   : "reference_audios/spanish.wav",
            "Finnish Speaker's Voice"   : "reference_audios/finnish.wav",
            "Russian Speaker's Voice"   : "reference_audios/russian.wav",
            "Hungarian Speaker's Voice" : "reference_audios/hungarian.wav",
            "Dutch Speaker's Voice"     : "reference_audios/dutch.wav",
            "French Speaker's Voice"    : "reference_audios/french.wav",
            "Polish Speaker's Voice"    : "reference_audios/polish.flac",
            "Portuguese Speaker's Voice": "reference_audios/portuguese.flac",
            "Italian Speaker's Voice"   : "reference_audios/italian.flac",
        }

    def read(self, prompt, language, speaker):
        # Only switch the language / speaker embedding when the selection changed.
        if self.current_language != language:
            self.model.set_language(self.language_id_lookup[language])
            self.current_language = language
        if self.current_speaker != speaker:
            self.model.set_utterance_embedding(self.speaker_path_lookup[speaker])
            self.current_speaker = speaker
        wav = self.model(prompt)
        # Gradio expects (sample_rate, numpy array); convert the waveform to 16-bit PCM.
        return 48000, float2pcm(wav.cpu().numpy())
meta_model = TTS_Interface()
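
# For reference, a direct call outside the UI would look like the following
# (the example text is hypothetical; the return format follows read() above):
#   sr, pcm = meta_model.read("Hello world.", "English", "English Speaker's Voice")
#   # sr == 48000, pcm is an int16 numpy array ready for Gradio's audio output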
article = "<p style='text-align: left'>This is still a work in progress; models will be exchanged for better ones as soon as they are done. All of these languages are spoken by a single model. Speakers can be transferred across languages. More languages will be added soon.</p><p style='text-align: center'><a href='https://github.com/DigitalPhonetics/IMS-Toucan' target='_blank'>Click here to learn more about the IMS Toucan Speech Synthesis Toolkit</a></p>"
iface = gr.Interface(fn=meta_model.read,
                     inputs=[gr.inputs.Textbox(lines=2,
                                               placeholder="write what you want the synthesis to read here...",
                                               label=" "),
                             gr.inputs.Dropdown(['English',
                                                 'German',
                                                 'Greek',
                                                 'Spanish',
                                                 'Finnish',
                                                 'Russian',
                                                 'Hungarian',
                                                 'Dutch',
                                                 'French',
                                                 'Polish',
                                                 'Portuguese',
                                                 'Italian'], type="value", default='English', label="Language Selection"),
                             gr.inputs.Dropdown(["English Speaker's Voice",
                                                 "German Speaker's Voice",
                                                 "Greek Speaker's Voice",
                                                 "Spanish Speaker's Voice",
                                                 "Finnish Speaker's Voice",
                                                 "Russian Speaker's Voice",
                                                 "Hungarian Speaker's Voice",
                                                 "Dutch Speaker's Voice",
                                                 "French Speaker's Voice",
                                                 "Polish Speaker's Voice",
                                                 "Portuguese Speaker's Voice",
                                                 "Italian Speaker's Voice"], type="value", default="English Speaker's Voice", label="Speaker Selection")],
                     outputs=gr.outputs.Audio(type="numpy", label=None),
                     layout="vertical",
                     title="IMS Toucan Multilingual Multispeaker Demo",
                     thumbnail="Utility/toucan.png",
                     theme="default",
                     allow_flagging="never",
                     allow_screenshot=False,
                     article=article)
iface.launch(enable_queue=True)