|
import gradio as gr |
|
from transformers import VitsModel, AutoTokenizer |
|
import torch |
|
import logging |
|
|
|
|
|
# Basic console logging so model-loading progress is visible at startup.
logging.basicConfig(level=logging.INFO)

logger = logging.getLogger(__name__)

# Languages supported by the sudoping01/malian-tts checkpoints; each key is
# also a subfolder name in that Hugging Face repo.
languages = ["bambara", "boomu", "dogon", "pular", "songhoy", "tamasheq"]

# Filled in at import time by the model-loading loop below (one entry per language).
models = {}
tokenizers = {}

# One sample sentence per language, shown when the user clicks "Load Example".
examples = {
    "bambara": "An filɛ ni ye yɔrɔ minna ni an ye an sigi ka a layɛ yala an bɛ ka baara min kɛ ɛsike a kɛlen don ka Ɲɛ wa ?",
    "boomu": "Vunurobe wozomɛ pɛɛ, Poli we zo woro han Deeɓenu wara li Deeɓenu faralo zuun. Lo we baba a lo wara yi see ɓa Zuwifera ma ɓa Gɛrɛkela wa.",
    "dogon": "Pɔɔlɔ, kubɔ lugo joo le, bana dɛin dɛin le, inɛw Ama titiyaanw le digɛu, Ama, emɛ babe bɛrɛ sɔɔ sɔi.",
    "pular": "Miɗo ndaarde saabe Laamɗo e saabe Iisaa Almasiihu caroyoowo wuurɓe e maayɓe oo, miɗo ndaardire saabe gartol makko ka num e Laamu makko",
    "songhoy": "Haya ka se beenediyo kokoyteraydi go hima nda huukoy foo ka fatta ja subaahi ka taasi goykoyyo ngu rezẽ faridi se",
    # The quotation marks embedded in the sentence must be escaped — the
    # original line left them bare, which made this an unterminated string
    # literal (SyntaxError at import).
    "tamasheq": "Issăɣlăy-tăn Ɣisa tangalt ḍarăt-a-wen, inn'-asăn: \"Tĕmmĕnĕya n-Măssinăɣ, tănifăqqa d-ăhalĕs ilan ašĕkrĕš-net; ăffăw-t ɣas, ifăl ehăn-net, immăɣ i-inaxdimăn issăxdăm ašĕkrĕš-net n-lăɣnăb\"",
}
|
|
|
|
|
# Eagerly load every language's model and tokenizer at import time so the
# UI never has to wait for a first-request download. Any failure aborts the
# app — there is no point starting the demo with a partial model set.
try:
    for lang in languages:
        # Lazy %s args: the string is only formatted if the record is emitted.
        logger.info("Loading model and tokenizer for %s...", lang)
        models[lang] = VitsModel.from_pretrained(
            "sudoping01/malian-tts", subfolder=f"models/{lang}"
        )
        tokenizers[lang] = AutoTokenizer.from_pretrained(
            "sudoping01/malian-tts", subfolder=f"models/{lang}"
        )
        logger.info("Successfully loaded %s", lang)
except Exception as e:
    logger.error("Failed to load models: %s", e)
    # Chain the original exception (`from e`) so the traceback keeps the root
    # cause; RuntimeError is more specific than bare Exception while still
    # being caught by any `except Exception` handler upstream.
    raise RuntimeError(f"Model loading failed: {e}") from e
|
|
|
def generate_audio(language, text):
    """Synthesize speech for *text* in *language*.

    Parameters
    ----------
    language : str
        A key of the module-level ``models``/``tokenizers`` dicts.
    text : str or None
        The text to synthesize. ``None`` or whitespace-only input is
        rejected with a friendly message instead of raising.

    Returns
    -------
    tuple
        ``((sample_rate, waveform), None)`` on success, or
        ``(None, error_message)`` on failure — one value per Gradio output
        (the audio component and the status textbox).
    """
    # Guard against None as well as empty/whitespace-only text; the original
    # called text.strip() directly and crashed with AttributeError on None.
    if not text or not text.strip():
        return None, "Please enter some text to synthesize."

    # Fail fast with a clear message rather than a KeyError traceback.
    if language not in models:
        return None, f"Unsupported language: {language}"

    try:
        model = models[language]
        tokenizer = tokenizers[language]

        inputs = tokenizer(text, return_tensors="pt")

        # Inference only — no gradients needed.
        with torch.no_grad():
            output = model(**inputs).waveform

        # Gradio's numpy audio output expects a (sample_rate, ndarray) pair.
        waveform = output.squeeze().cpu().numpy()
        sample_rate = model.config.sampling_rate

        return (sample_rate, waveform), None
    except Exception as e:
        # Broad catch is deliberate: this is the UI boundary, so surface any
        # inference failure as a status message instead of crashing the app.
        # logger.exception also records the full traceback.
        logger.exception("Error during inference for %s", language)
        return None, f"Error generating audio: {str(e)}"
|
|
|
def load_example(language):
    """Return the sample sentence for *language*, or a fallback message
    when no example exists for that key."""
    try:
        return examples[language]
    except KeyError:
        return "No example available"
|
|
|
|
|
# Build the Gradio UI. Layout: language dropdown + text input on one row,
# then a generate button, the audio player, and a (hidden) status textbox.
with gr.Blocks(title="Malian Languages TTS") as demo:
    # The original call here was `gr.Markdown("` — an unterminated string
    # literal (SyntaxError). Restored as a page header matching the title.
    gr.Markdown("# Malian Languages TTS")
    gr.Markdown("Select a language, enter text or load an example, and listen to the synthesized speech!")

    with gr.Row():
        language = gr.Dropdown(choices=languages, label="Language", value="bambara")
        with gr.Column():
            text = gr.Textbox(label="Input Text", lines=5, placeholder="Type your text here...")
            example_btn = gr.Button("Load Example")

    generate_btn = gr.Button("Generate Audio", variant="primary")
    # type="numpy" matches generate_audio's (sample_rate, ndarray) return.
    audio_output = gr.Audio(label="Generated Audio", type="numpy")
    # NOTE(review): generate_audio writes error text here, but the component
    # stays visible=False, so users never see it — confirm this is intended.
    error_msg = gr.Textbox(label="Status", visible=False)

    # Wire the buttons: generate returns (audio, error) pairs; the example
    # loader simply fills the text box for the selected language.
    generate_btn.click(
        fn=generate_audio,
        inputs=[language, text],
        outputs=[audio_output, error_msg],
    )
    example_btn.click(
        fn=load_example,
        inputs=language,
        outputs=text,
    )

demo.launch()