---
license: cc-by-nc-4.0
language:
- de
base_model:
- HKUSTAudio/Llasa-1B-Multilingual
widget:
- src: examples/no_speaker_example.wav
---
<img src="https://huggingface.co/MultiLlasa/Llasa-1B-Multilingual-German/resolve/main/cover.webp" alt="Llasa German Logo" width="800" style="display:block; margin-left:auto; margin-right:auto"/>
# Llasa-1B-Multilingual-German
<a target="_blank" href="https://huggingface.co/spaces/SebastianBodza/llasa-1b-tts-german">
<img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="Open in HuggingFace"/>
</a>
<Gallery />
> This model was trained on top of [HKUSTAudio/Llasa-1B-Multilingual](https://huggingface.co/HKUSTAudio/Llasa-1B-Multilingual).
## Model Overview
This text-to-speech (TTS) model was trained on a custom dataset of **7,000 hours** of high-quality audio, consisting of permissively licensed podcasts, lectures, and other OER material.
## Training Details
- **Base Model:** HKUSTAudio/Llasa-1B-Multilingual
- **Dataset:** A custom dataset comprising **7,000 hours** of data.
- **Compute Resources:** The training was performed using **4x L40s GPUs**.
- **Raw Training Time:** Approximately **20 hours**, not including the data preprocessing with xcodec2 (note: training was restarted after 3 crashes).
Huge thanks to Hugging Face for their generous GPU grant! 🤗
## 👨‍💻 Installation
First install the following pip packages:
```bash
pip install xcodec2
pip install torch==2.6.0 torchaudio
```
Install the packages in the two separate steps shown above. If you get an error mentioning "flex attention", make sure `torch==2.6.0` is installed. If you get a torchaudio error, update torchaudio so that it matches torch 2.6.0.
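If you want to verify the setup before running the examples below, a quick sanity check like the following can help. This snippet is not part of the original instructions; it only confirms the installed torch version and that `xcodec2` imports cleanly:
```python
# Optional environment sanity check (not part of the original instructions)
import torch

print("torch:", torch.__version__)              # expected: 2.6.0
print("CUDA available:", torch.cuda.is_available())

try:
    from xcodec2.modeling_xcodec2 import XCodec2Model  # noqa: F401
    print("xcodec2 import: OK")
except ImportError as err:
    print("xcodec2 import failed:", err)
```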
## 🛠️ Usage
### 🎲 Random voice
A basic example using the Hugging Face Transformers library:
```python
import os
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import soundfile as sf

llasa_1b_german = 'MultiLlasa/Llasa-1B-Multilingual-German'

# Loading the model
tokenizer = AutoTokenizer.from_pretrained(llasa_1b_german)
model = AutoModelForCausalLM.from_pretrained(llasa_1b_german)
model.to('cuda')

# Load XCodec2 model
from xcodec2.modeling_xcodec2 import XCodec2Model
model_path = "HKUST-Audio/xcodec2"
Codec_model = XCodec2Model.from_pretrained(model_path)
Codec_model.cuda()

input_text = "\"Weißt du was, Hoppi\", sagte der weise Uhu, \"manchmal ist es gar nicht so wichtig, das Ende des Regenbogens zu finden. Das Schönste ist doch, dass wir alle zusammen dieses Abenteuer erleben!\""

def extract_speech_ids(speech_tokens_str):
    # Convert tokens like <|s_1234|> back to integer codec ids
    speech_ids = []
    for token_str in speech_tokens_str:
        if token_str.startswith('<|s_') and token_str.endswith('|>'):
            num_str = token_str[4:-2]
            num = int(num_str)
            speech_ids.append(num)
        else:
            print(f"Unexpected token: {token_str}")
    return speech_ids

with torch.no_grad():
    formatted_text = f"<|TEXT_UNDERSTANDING_START|>{input_text}<|TEXT_UNDERSTANDING_END|>"
    chat = [
        {"role": "user", "content": "Convert the text to speech:" + formatted_text},
        {"role": "assistant", "content": "<|SPEECH_GENERATION_START|>"}
    ]
    input_ids = tokenizer.apply_chat_template(
        chat,
        tokenize=True,
        return_tensors='pt',
        continue_final_message=True
    )
    input_ids = input_ids.to('cuda')
    speech_end_id = tokenizer.convert_tokens_to_ids('<|SPEECH_GENERATION_END|>')

    outputs = model.generate(
        input_ids,
        max_length=2048,
        eos_token_id=speech_end_id,
        do_sample=True,
        top_p=1,
        temperature=0.8,
    )

    # Drop the text prompt and the end token, keep only the generated speech tokens
    generated_ids = outputs[0][input_ids.shape[1]:-1]
    speech_tokens = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    speech_tokens = extract_speech_ids(speech_tokens)
    speech_tokens = torch.tensor(speech_tokens).cuda().unsqueeze(0).unsqueeze(0)

    # Decode the codec ids back to a 16 kHz waveform
    gen_wav = Codec_model.decode_code(speech_tokens)

sf.write("generation.wav", gen_wav[0, 0, :].cpu().numpy(), 16000)
```
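After the script finishes, `generation.wav` contains the synthesized speech at 16 kHz. As an optional convenience (assuming you run the example in a Jupyter notebook), you can inspect and play it like this:
```python
# Optional: inspect and play the generated file (assumes a Jupyter notebook)
import soundfile as sf
from IPython.display import Audio

audio, sr = sf.read("generation.wav")
print(f"Duration: {len(audio) / sr:.2f} s at {sr} Hz")
Audio(audio, rate=sr)
```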
### 🎯 Using a specific speaker
An example with a speaker reference:
```python
import torch
import torchaudio
import soundfile as sf
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Input your reference audio and, optionally, its transcription
sample_audio_path = "male.wav"
sample_audio_text = None  # Set to None to transcribe the reference audio with Whisper

# Input the target text here
target_text = "Und apropos Spannungen und Unfälle, in Stuttgart gibt es auch einige Schlagzeilen. Die Polizei sucht Zeugen, nachdem in der Stadt mehrere Autoscheiben eingeschlagen wurden. Und gestern kam es im Stuttgarter Osten zu einer Verfolgungsjagd mit einer jungen BMW-Fahrerin, die vor einer Polizeistreife geflüchtet ist."
output_filename = "no_speaker_example.wav"

#### Do not edit below ####
llasa_model_name = "MultiLlasa/Llasa-1B-Multilingual-German"

tokenizer = AutoTokenizer.from_pretrained(llasa_model_name)
model = AutoModelForCausalLM.from_pretrained(llasa_model_name)
model.to("cuda")

from xcodec2.modeling_xcodec2 import XCodec2Model
codec_model_path = "HKUST-Audio/xcodec2"
Codec_model = XCodec2Model.from_pretrained(codec_model_path)
Codec_model.cuda()

whisper_turbo_pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-large-v3-turbo",
    torch_dtype=torch.float16,
    device="cuda",
)

def ids_to_speech_tokens(speech_ids):
    # Convert integer codec ids to tokens like <|s_1234|>
    speech_tokens_str = []
    for speech_id in speech_ids:
        speech_tokens_str.append(f"<|s_{speech_id}|>")
    return speech_tokens_str

def extract_speech_ids(speech_tokens_str):
    # Convert tokens like <|s_1234|> back to integer codec ids
    speech_ids = []
    for token_str in speech_tokens_str:
        if token_str.startswith("<|s_") and token_str.endswith("|>"):
            speech_ids.append(int(token_str[4:-2]))
        else:
            print(f"Unexpected token: {token_str}")
    return speech_ids

waveform, sample_rate = torchaudio.load(sample_audio_path)

# Trim the reference audio to at most 15 seconds and pad with 0.5 s of silence
max_secs = 15
if len(waveform[0]) / sample_rate > max_secs:
    print("Warning: Trimming audio to first 15 secs.")
    waveform = waveform[:, : sample_rate * max_secs]
waveform = torch.nn.functional.pad(waveform, (0, int(sample_rate * 0.5)), "constant", 0)

# Downmix to mono and resample to the 16 kHz expected by XCodec2
if waveform.size(0) > 1:
    waveform = torch.mean(waveform, dim=0, keepdim=True)
prompt_wav = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(waveform)

if sample_audio_text is None:
    print("Transcribing audio...")
    transcription = whisper_turbo_pipe(waveform[0].numpy())["text"].strip()
else:
    transcription = sample_audio_text
print("Transcription:", transcription)

if len(target_text) == 0:
    raise ValueError("Target text must be provided!")
elif len(target_text) > 500:
    print("Text is too long; trimming to first 500 characters.")
    target_text = target_text[:500]

input_text = transcription + " " + target_text

with torch.no_grad():
    # Encode the reference audio to codec ids and use them as the generation prefix
    vq_code_prompt = Codec_model.encode_code(input_waveform=prompt_wav)
    vq_code_prompt = vq_code_prompt[0, 0, :]
    speech_ids_prefix = ids_to_speech_tokens(vq_code_prompt)

    formatted_text = f"<|TEXT_UNDERSTANDING_START|>{input_text}<|TEXT_UNDERSTANDING_END|>"
    chat = [
        {"role": "user", "content": "Convert the text to speech:" + formatted_text},
        {"role": "assistant", "content": "<|SPEECH_GENERATION_START|>" + "".join(speech_ids_prefix)}
    ]
    input_ids = tokenizer.apply_chat_template(chat, tokenize=True, return_tensors="pt", continue_final_message=True)
    input_ids = input_ids.to("cuda")
    speech_end_id = tokenizer.convert_tokens_to_ids("<|SPEECH_GENERATION_END|>")

    outputs = model.generate(
        input_ids,
        max_length=2048,
        eos_token_id=speech_end_id,
        do_sample=True,
        top_p=1,
        temperature=0.8,
        min_new_tokens=4,  # Fix so the model does not stop immediately
    )

    # Keep the prompt codec tokens plus the newly generated ones, drop the end token
    generated_ids = outputs[0][input_ids.shape[1] - len(speech_ids_prefix) : -1]
    speech_tokens = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    speech_tokens = extract_speech_ids(speech_tokens)
    speech_tokens = torch.tensor(speech_tokens).cuda().unsqueeze(0).unsqueeze(0)

    # Decode to a waveform and cut off the reference-prompt portion
    gen_wav = Codec_model.decode_code(speech_tokens)
    gen_wav = gen_wav[:, :, prompt_wav.shape[1]:]

sf.write(output_filename, gen_wav[0, 0, :].cpu().numpy(), 16000)
```
## Tips
- With a reference speaker, audio glitches can occur. Increasing the temperature usually gives better results; see the sketch below.
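One way to act on this tip is a simple retry loop that raises the temperature whenever the generation comes back suspiciously short, a rough stand-in for detecting a glitched output. This is only a sketch, not part of the original pipeline; it reuses `model`, `input_ids`, `speech_end_id`, and `speech_ids_prefix` from the speaker-reference example above, and the length threshold is an assumption you should tune:
```python
# Sketch: retry generation with increasing temperature if the output looks too short.
# Reuses model, input_ids, speech_end_id and speech_ids_prefix from the example above.
for temperature in (0.8, 0.9, 1.0):
    outputs = model.generate(
        input_ids,
        max_length=2048,
        eos_token_id=speech_end_id,
        do_sample=True,
        top_p=1,
        temperature=temperature,
        min_new_tokens=4,
    )
    # Prompt codec tokens plus newly generated ones, without the end token
    generated_ids = outputs[0][input_ids.shape[1] - len(speech_ids_prefix):-1]
    if generated_ids.shape[0] > len(speech_ids_prefix) + 50:  # assumed threshold, tune as needed
        break
```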
## License
This project is licensed under the [CC-BY-NC-4.0 license](https://creativecommons.org/licenses/by-nc/4.0/).
## Acknowledgments
- **Hugging Face:** Thanks for the GPU grant that made this project possible.
- [**HKUSTAudio:**](https://huggingface.co/HKUSTAudio/Llasa-1B-Multilingual) Thanks for open-sourcing the model together with great inference, training, and preprocessing (xcodec2) scripts!