sudoping01 committed
Commit 3f4dd11 · verified · 1 Parent(s): 03af96e

Create app.py

Files changed (1)
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
+ import gradio as gr
+ from transformers import VitsModel, AutoTokenizer
+ import torch
+ import logging
+
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+
+ languages = ["bambara", "boomu", "dogon", "pular", "songhoy", "tamasheq"]
+
+
+ models = {}
+ tokenizers = {}
+
+
+ examples = {
+     "bambara": "An filɛ ni ye yɔrɔ minna ni an ye an sigi ka a layɛ yala an bɛ ka baara min kɛ ɛsike a kɛlen don ka Ɲɛ wa ?",
+     "boomu": "Vunurobe wozomɛ pɛɛ, Poli we zo woro han Deeɓenu wara li Deeɓenu faralo zuun. Lo we baba a lo wara yi see ɓa Zuwifera ma ɓa Gɛrɛkela wa.",
+     "dogon": "Pɔɔlɔ, kubɔ lugo joo le, bana dɛin dɛin le, inɛw Ama titiyaanw le digɛu, Ama, emɛ babe bɛrɛ sɔɔ sɔi.",
+     "pular": "Miɗo ndaarde saabe Laamɗo e saabe Iisaa Almasiihu caroyoowo wuurɓe e maayɓe oo, miɗo ndaardire saabe gartol makko ka num e Laamu makko",
+     "songhoy": "Haya ka se beenediyo kokoyteraydi go hima nda huukoy foo ka fatta ja subaahi ka taasi goykoyyo ngu rezẽ faridi se",
+     "tamasheq": "Issăɣlăy-tăn Ɣisa tangalt ḍarăt-a-wen, inn'-asăn: \"Tĕmmĕnĕya n-Măssinăɣ, tănifăqqa d-ăhalĕs ilan ašĕkrĕš-net; ăffăw-t ɣas, ifăl ehăn-net, immăɣ i-inaxdimăn issăxdăm ašĕkrĕš-net n-lăɣnăb\""
+ }
+
+
+ try:
+     for lang in languages:
+         logger.info(f"Loading model and tokenizer for {lang}...")
+         models[lang] = VitsModel.from_pretrained("sudoping01/malian-tts", subfolder=f"models/{lang}")
+         tokenizers[lang] = AutoTokenizer.from_pretrained("sudoping01/malian-tts", subfolder=f"models/{lang}")
+         logger.info(f"Successfully loaded {lang}")
+ except Exception as e:
+     logger.error(f"Failed to load models: {str(e)}")
+     raise Exception(f"Model loading failed: {str(e)}")
+
+ def generate_audio(language, text):
+     if not text.strip():
+         return None, "Please enter some text to synthesize."
+
+     try:
+
+         model = models[language]
+         tokenizer = tokenizers[language]
+
+
+         inputs = tokenizer(text, return_tensors="pt")
+
+
+         with torch.no_grad():
+             output = model(**inputs).waveform
+
+
+         waveform = output.squeeze().cpu().numpy()
+         sample_rate = model.config.sampling_rate
+
+         return (sample_rate, waveform), None
+     except Exception as e:
+         logger.error(f"Error during inference for {language}: {str(e)}")
+         return None, f"Error generating audio: {str(e)}"
+
+ def load_example(language):
+     return examples.get(language, "No example available")
+
+
+ with gr.Blocks(title="Malian Languages TTS") as demo:
+     gr.Markdown("# Malian Languages Text-to-Speech")
+     gr.Markdown("Select a language, enter text or load an example, and listen to the synthesized speech!")
+
+     with gr.Row():
+         language = gr.Dropdown(choices=languages, label="Language", value="bambara")
+         with gr.Column():
+             text = gr.Textbox(label="Input Text", lines=5, placeholder="Type your text here...")
+             example_btn = gr.Button("Load Example")
+
+     generate_btn = gr.Button("Generate Audio", variant="primary")
+     audio_output = gr.Audio(label="Generated Audio", type="numpy")
+     error_msg = gr.Textbox(label="Status", visible=False)
+
+     # Connect buttons to functions
+     generate_btn.click(
+         fn=generate_audio,
+         inputs=[language, text],
+         outputs=[audio_output, error_msg]
+     )
+     example_btn.click(
+         fn=load_example,
+         inputs=language,
+         outputs=text
+     )
+
+
+ demo.launch()
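
For quick verification outside the Gradio UI, the same checkpoints can be driven directly. The sketch below mirrors the loading and inference pattern from app.py above for a single language; the use of scipy.io.wavfile for saving and the output file name are illustrative assumptions, not part of this commit.

import torch
from transformers import VitsModel, AutoTokenizer
import scipy.io.wavfile  # assumption: scipy is available for writing the WAV file

lang = "bambara"  # any entry from the `languages` list above
model = VitsModel.from_pretrained("sudoping01/malian-tts", subfolder=f"models/{lang}")
tokenizer = AutoTokenizer.from_pretrained("sudoping01/malian-tts", subfolder=f"models/{lang}")

# The Bambara example sentence from app.py
text = "An filɛ ni ye yɔrɔ minna ni an ye an sigi ka a layɛ yala an bɛ ka baara min kɛ ɛsike a kɛlen don ka Ɲɛ wa ?"
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    waveform = model(**inputs).waveform.squeeze().cpu().numpy()

# Write the waveform at the model's sampling rate (output path is illustrative).
scipy.io.wavfile.write("bambara_sample.wav", rate=model.config.sampling_rate, data=waveform)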