# Gradio text-to-speech demo for six Malian languages, using per-language
# VITS checkpoints hosted in the sudoping01/malian-tts repository.
import logging

import gradio as gr
import torch
from transformers import VitsModel, AutoTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


# Languages with a dedicated checkpoint under models/<lang> in the Hub repo.
languages = ["bambara", "boomu", "dogon", "pular", "songhoy", "tamasheq"]


# Per-language model and tokenizer caches, populated once at startup.
models = {}
tokenizers = {}


# One example sentence per language, filled into the textbox by "Load Example".
examples = {
    "bambara": "An filɛ ni ye yɔrɔ minna ni an ye an sigi ka a layɛ yala an bɛ ka baara min kɛ ɛsike a kɛlen don ka Ɲɛ wa ?",
    "boomu": "Vunurobe wozomɛ pɛɛ, Poli we zo woro han Deeɓenu wara li Deeɓenu faralo zuun. Lo we baba a lo wara yi see ɓa Zuwifera ma ɓa Gɛrɛkela wa.",
    "dogon": "Pɔɔlɔ, kubɔ lugo joo le, bana dɛin dɛin le, inɛw Ama titiyaanw le digɛu, Ama, emɛ babe bɛrɛ sɔɔ sɔi.",
    "pular": "Miɗo ndaarde saabe Laamɗo e saabe Iisaa Almasiihu caroyoowo wuurɓe e maayɓe oo, miɗo ndaardire saabe gartol makko ka num e Laamu makko",
    "songhoy": "Haya ka se beenediyo kokoyteraydi go hima nda huukoy foo ka fatta ja subaahi ka taasi goykoyyo ngu rezẽ faridi se",
    "tamasheq": "Toḍă tăfukt ɣas, issăɣră-dd măssi-s n-ašĕkrĕš ănaẓraf-net, inn'-as: 'Ǝɣĕr-dd inaxdimăn, tĕẓlĕd-asăn, sănt s-wi dd-ĕšrăynen har tĕkkĕd wi dd-ăzzarnen."
}


# Eagerly load every model and tokenizer at startup; fail fast if anything is missing.
try:
    for lang in languages:
        logger.info(f"Loading model and tokenizer for {lang}...")
        models[lang] = VitsModel.from_pretrained("sudoping01/malian-tts", subfolder=f"models/{lang}")
        tokenizers[lang] = AutoTokenizer.from_pretrained("sudoping01/malian-tts", subfolder=f"models/{lang}")
        logger.info(f"Successfully loaded {lang}")
except Exception as e:
    logger.error(f"Failed to load models: {str(e)}")
    raise RuntimeError(f"Model loading failed: {str(e)}") from e

def generate_audio(language, text):
    """Synthesize speech for `text` in the selected language."""
    if not text.strip():
        return None, "Please enter some text to synthesize."

    try:
        model = models[language]
        tokenizer = tokenizers[language]

        # Tokenize the input and run VITS inference without tracking gradients.
        inputs = tokenizer(text, return_tensors="pt")
        with torch.no_grad():
            output = model(**inputs).waveform

        # Gradio's Audio component (type="numpy") expects a (sample_rate, waveform) tuple.
        waveform = output.squeeze().cpu().numpy()
        sample_rate = model.config.sampling_rate

        return (sample_rate, waveform), None
    except Exception as e:
        logger.error(f"Error during inference for {language}: {str(e)}")
        return None, f"Error generating audio: {str(e)}"

def load_example(language):
    """Return the stored example sentence for the selected language."""
    return examples.get(language, "No example available")


with gr.Blocks(title="Malian Languages TTS") as demo:
    gr.Markdown("# Malian Languages Text-to-Speech")
    gr.Markdown("Select a language, enter text or load an example, and listen to the synthesized speech!")
    
    with gr.Row():
        language = gr.Dropdown(choices=languages, label="Language", value="bambara")
        with gr.Column():
            text = gr.Textbox(label="Input Text", lines=5, placeholder="Type your text here...")
            example_btn = gr.Button("Load Example")
    
    generate_btn = gr.Button("Generate Audio", variant="primary")
    audio_output = gr.Audio(label="Generated Audio", type="numpy")
    error_msg = gr.Textbox(label="Status", interactive=False)  # visible so error messages from generate_audio are shown
    
    # Connect buttons to functions
    generate_btn.click(
        fn=generate_audio,
        inputs=[language, text],
        outputs=[audio_output, error_msg]
    )
    example_btn.click(
        fn=load_example,
        inputs=language,
        outputs=text
    )


demo.launch()
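
# Usage note: running `python app.py` serves the UI on http://127.0.0.1:7860 by
# default; pass share=True to demo.launch() for a temporary public Gradio link.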