Commit bcf08c1
Parent(s): 33fe743

init

Note: this view is limited to 50 files because the commit contains too many changes.
- README.md +5 -6
 - app.py +257 -0
 - cosyvoice/__init__.py +0 -0
 - cosyvoice/__pycache__/__init__.cpython-310.pyc +0 -0
 - cosyvoice/__pycache__/__init__.cpython-38.pyc +0 -0
 - cosyvoice/bin/inference.py +114 -0
 - cosyvoice/bin/train.py +136 -0
 - cosyvoice/cli/__init__.py +0 -0
 - cosyvoice/cli/__pycache__/__init__.cpython-310.pyc +0 -0
 - cosyvoice/cli/__pycache__/__init__.cpython-38.pyc +0 -0
 - cosyvoice/cli/__pycache__/cosyvoice.cpython-310.pyc +0 -0
 - cosyvoice/cli/__pycache__/cosyvoice.cpython-38.pyc +0 -0
 - cosyvoice/cli/__pycache__/frontend.cpython-310.pyc +0 -0
 - cosyvoice/cli/__pycache__/frontend.cpython-38.pyc +0 -0
 - cosyvoice/cli/__pycache__/model.cpython-310.pyc +0 -0
 - cosyvoice/cli/__pycache__/model.cpython-38.pyc +0 -0
 - cosyvoice/cli/cosyvoice.py +83 -0
 - cosyvoice/cli/frontend.py +183 -0
 - cosyvoice/cli/model.py +60 -0
 - cosyvoice/dataset/__init__.py +0 -0
 - cosyvoice/dataset/__pycache__/__init__.cpython-310.pyc +0 -0
 - cosyvoice/dataset/__pycache__/__init__.cpython-38.pyc +0 -0
 - cosyvoice/dataset/__pycache__/processor.cpython-310.pyc +0 -0
 - cosyvoice/dataset/__pycache__/processor.cpython-38.pyc +0 -0
 - cosyvoice/dataset/dataset.py +160 -0
 - cosyvoice/dataset/processor.py +369 -0
 - cosyvoice/flow/__pycache__/decoder.cpython-310.pyc +0 -0
 - cosyvoice/flow/__pycache__/decoder.cpython-38.pyc +0 -0
 - cosyvoice/flow/__pycache__/flow.cpython-310.pyc +0 -0
 - cosyvoice/flow/__pycache__/flow.cpython-38.pyc +0 -0
 - cosyvoice/flow/__pycache__/flow_matching.cpython-310.pyc +0 -0
 - cosyvoice/flow/__pycache__/flow_matching.cpython-38.pyc +0 -0
 - cosyvoice/flow/__pycache__/length_regulator.cpython-310.pyc +0 -0
 - cosyvoice/flow/__pycache__/length_regulator.cpython-38.pyc +0 -0
 - cosyvoice/flow/decoder.py +222 -0
 - cosyvoice/flow/flow.py +141 -0
 - cosyvoice/flow/flow_matching.py +138 -0
 - cosyvoice/flow/length_regulator.py +49 -0
 - cosyvoice/hifigan/__pycache__/f0_predictor.cpython-310.pyc +0 -0
 - cosyvoice/hifigan/__pycache__/f0_predictor.cpython-38.pyc +0 -0
 - cosyvoice/hifigan/__pycache__/generator.cpython-310.pyc +0 -0
 - cosyvoice/hifigan/__pycache__/generator.cpython-38.pyc +0 -0
 - cosyvoice/hifigan/f0_predictor.py +55 -0
 - cosyvoice/hifigan/generator.py +391 -0
 - cosyvoice/llm/__pycache__/llm.cpython-310.pyc +0 -0
 - cosyvoice/llm/__pycache__/llm.cpython-38.pyc +0 -0
 - cosyvoice/llm/llm.py +206 -0
 - cosyvoice/transformer/__init__.py +0 -0
 - cosyvoice/transformer/__pycache__/__init__.cpython-310.pyc +0 -0
 - cosyvoice/transformer/__pycache__/__init__.cpython-38.pyc +0 -0
 
    	
README.md CHANGED
@@ -1,13 +1,12 @@
 ---
-title: BreezyVoice 
-emoji: 
-colorFrom: 
-colorTo: 
+title: BreezyVoice
+emoji: 🏆
+colorFrom: green
+colorTo: green
 sdk: gradio
-sdk_version: 5.
+sdk_version: 5.12.0
 app_file: app.py
 pinned: false
-short_description: Playground of BreezyVoice
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

app.py ADDED
@@ -0,0 +1,257 @@
+# Copyright (c) 2025 MediaTek Research Inc (authors: Chan-Jan Hsu)
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Liu Yue)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os
+import sys
+ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
+
+import argparse
+import gradio as gr
+import numpy as np
+import torch
+torch.set_num_threads(1)
+import torchaudio
+import random
+import librosa
+from transformers import pipeline
+import subprocess
+from scipy.signal import resample
+
+import logging
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+from cosyvoice.cli.cosyvoice import CosyVoice
+from cosyvoice.utils.file_utils import load_wav, speed_change
+
+#logging.basicConfig(level=logging.DEBUG,
+#                    format='%(asctime)s %(levelname)s %(message)s')
+
+def generate_seed():
+    seed = random.randint(1, 100000000)
+    return {
+        "__type__": "update",
+        "value": seed
+    }
+
+def set_all_random_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+
+max_val = 0.8
+def postprocess(speech, top_db=60, hop_length=220, win_length=440):
+    speech, _ = librosa.effects.trim(
+        speech, top_db=top_db,
+        frame_length=win_length,
+        hop_length=hop_length
+    )
+    if speech.abs().max() > max_val:
+        speech = speech / speech.abs().max() * max_val
+    speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1)
+    return speech
+
+def generate_audio(tts_text, prompt_text, prompt_wav_upload, prompt_wav_record, seed, select_which):
+    if select_which == "上傳檔案" and prompt_wav_upload is not None:
+        prompt_wav = prompt_wav_upload
+    elif select_which == "麥克風" and prompt_wav_record is not None:
+        prompt_wav = prompt_wav_record
+    else:
+        prompt_wav = None
+    # if instruct mode, please make sure that model is iic/CosyVoice-300M-Instruct and not cross_lingual mode
+
+    prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr))
+    set_all_random_seed(seed)
+    output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k)
+    speed_factor = 1
+    if speed_factor != 1.0:
+        #try:
+            #audio_data, sample_rate = speed_change(output["tts_speech"], target_sr, str(speed_factor))
+            #audio_data = audio_data.numpy().flatten()
+        new_length = int(len(output['tts_speech']) / speed_factor)
+        audio_data = resample(output['tts_speech'], new_length)
+        # except Exception as e:
+        #     print(f"Failed to change speed of audio: \n{e}")
+    else:
+        audio_data = output['tts_speech'].numpy().flatten()
+
+    return (target_sr, audio_data)
+
+
+def generate_text(prompt_wav_upload, prompt_wav_record, select_which):
+    # Determine which input to use based on the selection in select_which
+    if select_which == "上傳檔案" and prompt_wav_upload is not None:
+        prompt_wav = prompt_wav_upload
+        LAST_UPLOADED = "upload"
+    elif select_which == "麥克風" and prompt_wav_record is not None:
+        prompt_wav = prompt_wav_record
+        LAST_UPLOADED = "record"
+    else:
+        prompt_wav = None
+        LAST_UPLOADED = None
+    print(select_which)
+    # Process with ASR pipeline
+    if prompt_wav:
+        results = asr_pipeline(prompt_wav)
+        return results['text']
+    return "No valid input detected."
+
+# LAST_UPLOADED = ""
+# def switch_selected(select_which):
+#     # Check the file type (assuming WAV file)
+#     if select_which == "上傳檔案" and prompt_wav_upload is not None:
+#         prompt_wav = prompt_wav_upload
+#         LAST_UPLOADED = "upload"
+#     elif select_which == "麥克風" and prompt_wav_record is not None:
+#         prompt_wav = prompt_wav_record
+#     return "麥克風"
+
+def demo_get_audio(tts_text):
+    sample_wav = 'sample.wav'
+    speech, sample_rate = torchaudio.load(sample_wav)
+
+    return sample_rate, speech
+def main():
+    with gr.Blocks(title="BreezyVoice 語音合成系統", theme="default") as demo:
+        # Title and About section at the top
+        gr.Markdown("# BreezyVoice 語音合成系統")
+        gr.Markdown(
+            """## 僅需5秒語音樣本,就可輸出擬真人聲。
+
+            <img src="https://raw.githubusercontent.com/Splend1d/BreezyVoice/main/images/flowchart.png" alt="Flowchart" width="600"/>
+
+            #### 此沙盒使用 Huggingface CPU,請預期大於200 秒的推理時間,您可以考慮以下方法加速:
+            1. 複製這個 Space(僅當執行需要排隊時)
+            2. 複製至本地GPU執行(請參考[指南](https://huggingface.co/docs/hub/en/spaces-overview))或使用[kaggle](https://www.kaggle.com/code/a24998667/breezyvoice-playground)
+            3. 複製至本地CPU執行(請參考[指南](https://huggingface.co/docs/hub/en/spaces-overview))
+
+            為了加快推理速度,g2pw注音標註並未被啟動。
+
+            免責聲明:此沙盒在一次性容器地端執行,關閉後檔案將遭到刪除。此沙盒不屬於聯發創新基地,聯發創新基地無法獲得任何使用者輸入。"""
+        )
+
+        # All content arranged in a single column
+        with gr.Column():
+            # Configuration Section
+
+
+
+            # Grouping prompt audio inputs and auto speech recognition in one block using Markdown
+            gr.Markdown("### 步驟 1. 音訊樣本輸入 & 音訊樣本文本輸入")
+            gr.Markdown("選擇prompt音訊檔案或錄製prompt音訊,並手動校對自動產生的音訊樣本文本。")
+            prompt_wav_upload = gr.Audio(
+                sources='upload',
+                type='filepath',
+                label='選擇prompt音訊檔案(確保取樣率不低於16khz)'
+            )
+            prompt_wav_record = gr.Audio(
+                sources='microphone',
+                type='filepath',
+                label='錄製prompt音訊檔案'
+            )
+
+            with gr.Blocks():
+                select_which = gr.Radio(["上傳檔案", "麥克風"], label="音訊來源", interactive=True)
+            with gr.Blocks():
+                prompt_text = gr.Textbox(
+                    label="音訊樣本文本輸入(此欄位應與音檔內容完全相同)",
+                    lines=2,
+                    placeholder="音訊樣本文本"
+                )
+
+            # Automatic speech recognition when either prompt audio input changes
+            def a(X):
+                return "上傳檔案"
+            prompt_wav_upload.change(
+                fn=a,  #lambda file: "上傳檔案",
+                inputs=[prompt_wav_upload],
+                outputs=select_which
+            )
+
+
+
+
+
+            prompt_wav_record.change(
+                fn=lambda recording: "麥克風",
+                inputs=[prompt_wav_record],
+                outputs=select_which
+            )
+
+            select_which.change(
+                fn=generate_text,
+                inputs=[prompt_wav_upload, prompt_wav_record, select_which],
+                outputs=prompt_text
+            )
+            # select_which.change(
+            #     fn=switch_selected,
+            #     inputs=[select_which],
+            #     outputs=None
+            # )
+            # Input Section: Synthesis Text
+
+            gr.Markdown("### 步驟 2. 合成文本輸入")
+            tts_text = gr.Textbox(
+                label="輸入想要合成的文本",
+                lines=2,
+                placeholder="請輸入想要合成的文本...",
+                value="你好,歡迎光臨"
+            )
+
+
+            # Output Section
+            gr.Markdown("### 步驟 3. 合成音訊")
+            # Generation button for audio synthesis (triggered manually)
+
+            with gr.Accordion("進階設定", open=False):
+                seed = gr.Number(value=0, label="隨機推理種子")
+                #seed_button = gr.Button("隨機")
+                seed_button = gr.Button(value="\U0001F3B2生成隨機推理種子\U0001F3B2")
+                speed_factor = 1
+                # speed_factor = gr.Slider(
+                #     minimum=0.25,
+                #     maximum=4,
+                #     step=0.05,
+                #     label="語速",
+                #     value=1.0,
+                #     interactive=True
+                # )
+
+            generate_button = gr.Button("生成音訊")
+            audio_output = gr.Audio(label="合成音訊")
+
+            # Set up callbacks for seed generation and audio synthesis
+            seed_button.click(fn=generate_seed, inputs=[], outputs=seed)
+            generate_button.click(
+                fn=generate_audio,
+                inputs=[tts_text, prompt_text, prompt_wav_upload, prompt_wav_record, seed, select_which],
+                outputs=audio_output
+            )
+
+        demo.queue(max_size=4, default_concurrency_limit=2)
+        demo.launch()
+
+if __name__ == '__main__':
+    cosyvoice = CosyVoice('Splend1dchan/BreezyVoice')
+    asr_pipeline = pipeline(
+        "automatic-speech-recognition",
+        model="openai/whisper-tiny",
+        tokenizer="openai/whisper-tiny",
+        device=0  # Use GPU (if available); set to -1 for CPU
+    )
+    sft_spk = cosyvoice.list_avaliable_spks()
+    prompt_sr, target_sr = 16000, 22050
+    default_data = np.zeros(target_sr)
+    main()
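
The Gradio callbacks above boil down to one zero-shot call: load the 16 kHz speaker prompt, condition on its transcript, and synthesize the target text with cosyvoice.inference_zero_shot. Below is a minimal sketch of that path with the UI stripped away; the wav path and transcript are hypothetical placeholders, it assumes the same checkpoint and imports as app.py, and it skips the postprocess trimming step that app.py applies to the prompt.

# Minimal zero-shot synthesis sketch following app.py above (no Gradio).
# 'my_prompt.wav' and its transcript are hypothetical placeholders.
import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav

prompt_sr, target_sr = 16000, 22050                # sample rates used by app.py

cosyvoice = CosyVoice('Splend1dchan/BreezyVoice')
prompt_speech_16k = load_wav('my_prompt.wav', prompt_sr)   # prompt sampled at >=16 kHz
output = cosyvoice.inference_zero_shot(
    '你好,歡迎光臨',                        # text to synthesize
    'exact transcript of my_prompt.wav',    # must match the prompt audio exactly
    prompt_speech_16k,
)
torchaudio.save('output.wav', output['tts_speech'], sample_rate=target_sr)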
    	
cosyvoice/__init__.py ADDED
File without changes

cosyvoice/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (157 Bytes).

cosyvoice/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (158 Bytes).

cosyvoice/bin/inference.py ADDED
@@ -0,0 +1,114 @@
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
+import logging
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+import os
+
+import torch
+from torch.utils.data import DataLoader
+import torchaudio
+from hyperpyyaml import load_hyperpyyaml
+from tqdm import tqdm
+from cosyvoice.cli.model import CosyVoiceModel
+
+from cosyvoice.dataset.dataset import Dataset
+
+def get_args():
+    parser = argparse.ArgumentParser(description='inference with your model')
+    parser.add_argument('--config', required=True, help='config file')
+    parser.add_argument('--prompt_data', required=True, help='prompt data file')
+    parser.add_argument('--prompt_utt2data', required=True, help='prompt data file')
+    parser.add_argument('--tts_text', required=True, help='tts input file')
+    parser.add_argument('--llm_model', required=True, help='llm model file')
+    parser.add_argument('--flow_model', required=True, help='flow model file')
+    parser.add_argument('--hifigan_model', required=True, help='hifigan model file')
+    parser.add_argument('--gpu',
+                        type=int,
+                        default=-1,
+                        help='gpu id for this rank, -1 for cpu')
+    parser.add_argument('--mode',
+                        default='sft',
+                        choices=['sft', 'zero_shot'],
+                        help='inference mode')
+    parser.add_argument('--result_dir', required=True, help='asr result file')
+    args = parser.parse_args()
+    print(args)
+    return args
+
+
+def main():
+    args = get_args()
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(asctime)s %(levelname)s %(message)s')
+    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
+
+    # Init cosyvoice models from configs
+    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
+    device = torch.device('cuda' if use_cuda else 'cpu')
+    with open(args.config, 'r') as f:
+        configs = load_hyperpyyaml(f)
+
+    model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'])
+    model.load(args.llm_model, args.flow_model, args.hifigan_model)
+
+    test_dataset = Dataset(args.prompt_data, data_pipeline=configs['data_pipeline'], mode='inference', shuffle=False, partition=False, tts_file=args.tts_text, prompt_utt2data=args.prompt_utt2data)
+    test_data_loader = DataLoader(test_dataset, batch_size=None, num_workers=0)
+
+    del configs
+    os.makedirs(args.result_dir, exist_ok=True)
+    fn = os.path.join(args.result_dir, 'wav.scp')
+    f = open(fn, 'w')
+    with torch.no_grad():
+        for batch_idx, batch in tqdm(enumerate(test_data_loader)):
+            utts = batch["utts"]
+            assert len(utts) == 1, "inference mode only support batchsize 1"
+            text = batch["text"]
+            text_token = batch["text_token"].to(device)
+            text_token_len = batch["text_token_len"].to(device)
+            tts_text = batch["tts_text"]
+            tts_index = batch["tts_index"]
+            tts_text_token = batch["tts_text_token"].to(device)
+            tts_text_token_len = batch["tts_text_token_len"].to(device)
+            speech_token = batch["speech_token"].to(device)
+            speech_token_len = batch["speech_token_len"].to(device)
+            speech_feat = batch["speech_feat"].to(device)
+            speech_feat_len = batch["speech_feat_len"].to(device)
+            utt_embedding = batch["utt_embedding"].to(device)
+            spk_embedding = batch["spk_embedding"].to(device)
+            if args.mode == 'sft':
+                model_input = {'text': tts_text_token, 'text_len': tts_text_token_len,
+                               'llm_embedding': spk_embedding, 'flow_embedding': spk_embedding}
+            else:
+                model_input = {'text': tts_text_token, 'text_len': tts_text_token_len,
+                               'prompt_text': text_token, 'prompt_text_len': text_token_len,
+                               'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
+                               'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
+                               'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
+                               'llm_embedding': utt_embedding, 'flow_embedding': utt_embedding}
+            model_output = model.inference(**model_input)
+            tts_key = '{}_{}'.format(utts[0], tts_index[0])
+            tts_fn = os.path.join(args.result_dir, '{}.wav'.format(tts_key))
+            torchaudio.save(tts_fn, model_output['tts_speech'], sample_rate=22050)
+            f.write('{} {}\n'.format(tts_key, tts_fn))
+            f.flush()
+    f.close()
+    logging.info('Result wav.scp saved in {}'.format(fn))
+
+
+if __name__ == '__main__':
+    main()
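
inference.py writes individual wavs plus a Kaldi-style wav.scp index, one "utt_key wav_path" pair per line. A minimal sketch for walking those results; 'exp/results' is a placeholder for whatever --result_dir was passed.

# Read back the wav.scp written by cosyvoice/bin/inference.py above.
# 'exp/results' is a hypothetical --result_dir value.
import os
import torchaudio

result_dir = 'exp/results'
with open(os.path.join(result_dir, 'wav.scp')) as f:
    for line in f:
        tts_key, tts_fn = line.strip().split(maxsplit=1)
        speech, sr = torchaudio.load(tts_fn)   # saved above at 22050 Hz
        print(tts_key, tuple(speech.shape), sr)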
    	
cosyvoice/bin/train.py ADDED
@@ -0,0 +1,136 @@
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import argparse
+import datetime
+import logging
+logging.getLogger('matplotlib').setLevel(logging.WARNING)
+from copy import deepcopy
+import torch
+import torch.distributed as dist
+import deepspeed
+
+from hyperpyyaml import load_hyperpyyaml
+
+from torch.distributed.elastic.multiprocessing.errors import record
+
+from cosyvoice.utils.executor import Executor
+from cosyvoice.utils.train_utils import (
+    init_distributed,
+    init_dataset_and_dataloader,
+    init_optimizer_and_scheduler,
+    init_summarywriter, save_model,
+    wrap_cuda_model, check_modify_and_save_config)
+
+
+def get_args():
+    parser = argparse.ArgumentParser(description='training your network')
+    parser.add_argument('--train_engine',
+                        default='torch_ddp',
+                        choices=['torch_ddp', 'deepspeed'],
+                        help='Engine for paralleled training')
+    parser.add_argument('--model', required=True, help='model which will be trained')
+    parser.add_argument('--config', required=True, help='config file')
+    parser.add_argument('--train_data', required=True, help='train data file')
+    parser.add_argument('--cv_data', required=True, help='cv data file')
+    parser.add_argument('--checkpoint', help='checkpoint model')
+    parser.add_argument('--model_dir', required=True, help='save model dir')
+    parser.add_argument('--tensorboard_dir',
+                        default='tensorboard',
+                        help='tensorboard log dir')
+    parser.add_argument('--ddp.dist_backend',
+                        dest='dist_backend',
+                        default='nccl',
+                        choices=['nccl', 'gloo'],
+                        help='distributed backend')
+    parser.add_argument('--num_workers',
+                        default=0,
+                        type=int,
+                        help='num of subprocess workers for reading')
+    parser.add_argument('--prefetch',
+                        default=100,
+                        type=int,
+                        help='prefetch number')
+    parser.add_argument('--pin_memory',
+                        action='store_true',
+                        default=False,
+                        help='Use pinned memory buffers used for reading')
+    parser.add_argument('--deepspeed.save_states',
+                        dest='save_states',
+                        default='model_only',
+                        choices=['model_only', 'model+optimizer'],
+                        help='save model/optimizer states')
+    parser.add_argument('--timeout',
+                        default=30,
+                        type=int,
+                        help='timeout (in seconds) of cosyvoice_join.')
+    parser = deepspeed.add_config_arguments(parser)
+    args = parser.parse_args()
+    return args
+
+
+@record
+def main():
+    args = get_args()
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(asctime)s %(levelname)s %(message)s')
[diff truncated]
         
     | 
| 89 | 
         
            +
             
     | 
| 90 | 
         
            +
                override_dict = {k: None for k in ['llm', 'flow', 'hift'] if k != args.model}
         
     | 
| 91 | 
         
            +
                with open(args.config, 'r') as f:
         
     | 
| 92 | 
         
            +
                    configs = load_hyperpyyaml(f, overrides=override_dict)
         
     | 
| 93 | 
         
            +
                configs['train_conf'].update(vars(args))
         
     | 
| 94 | 
         
            +
             
     | 
| 95 | 
         
            +
                # Init env for ddp
         
     | 
| 96 | 
         
            +
                init_distributed(args)
         
     | 
| 97 | 
         
            +
             
     | 
| 98 | 
         
            +
                # Get dataset & dataloader
         
     | 
| 99 | 
         
            +
                train_dataset, cv_dataset, train_data_loader, cv_data_loader = \
         
     | 
| 100 | 
         
            +
                    init_dataset_and_dataloader(args, configs)
         
     | 
| 101 | 
         
            +
             
     | 
| 102 | 
         
            +
                # Do some sanity checks and save config to arsg.model_dir
         
     | 
| 103 | 
         
            +
                configs = check_modify_and_save_config(args, configs)
         
     | 
| 104 | 
         
            +
             
     | 
| 105 | 
         
            +
                # Tensorboard summary
         
     | 
| 106 | 
         
            +
                writer = init_summarywriter(args)
         
     | 
| 107 | 
         
            +
             
     | 
| 108 | 
         
            +
                # load checkpoint
         
     | 
| 109 | 
         
            +
                model = configs[args.model]
         
     | 
| 110 | 
         
            +
                if args.checkpoint is not None:
         
     | 
| 111 | 
         
            +
                    model.load_state_dict(torch.load(args.checkpoint, map_location='cpu'))
         
     | 
| 112 | 
         
            +
             
     | 
| 113 | 
         
            +
                # Dispatch model from cpu to gpu
         
     | 
| 114 | 
         
            +
                model = wrap_cuda_model(args, model)
         
     | 
| 115 | 
         
            +
             
     | 
| 116 | 
         
            +
                # Get optimizer & scheduler
         
     | 
| 117 | 
         
            +
                model, optimizer, scheduler = init_optimizer_and_scheduler(args, configs, model)
         
     | 
| 118 | 
         
            +
             
     | 
| 119 | 
         
            +
                # Save init checkpoints
         
     | 
| 120 | 
         
            +
                info_dict = deepcopy(configs['train_conf'])
         
     | 
| 121 | 
         
            +
                save_model(model, 'init', info_dict)
         
     | 
| 122 | 
         
            +
             
     | 
| 123 | 
         
            +
                # Get executor
         
     | 
| 124 | 
         
            +
                executor = Executor()
         
     | 
| 125 | 
         
            +
             
     | 
| 126 | 
         
            +
                # Start training loop
         
     | 
| 127 | 
         
            +
                for epoch in range(info_dict['max_epoch']):
         
     | 
| 128 | 
         
            +
                    executor.epoch = epoch
         
     | 
| 129 | 
         
            +
                    train_dataset.set_epoch(epoch)
         
     | 
| 130 | 
         
            +
                    dist.barrier()
         
     | 
| 131 | 
         
            +
                    group_join = dist.new_group(backend="gloo", timeout=datetime.timedelta(seconds=args.timeout))
         
     | 
| 132 | 
         
            +
                    executor.train_one_epoc(model, optimizer, scheduler, train_data_loader, cv_data_loader, writer, info_dict, group_join)
         
     | 
| 133 | 
         
            +
                    dist.destroy_process_group(group_join)
         
     | 
| 134 | 
         
            +
             
     | 
| 135 | 
         
            +
            if __name__ == '__main__':
         
     | 
| 136 | 
         
            +
                main()
         
     | 
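
For reference, a typical single-node launch of this script goes through torchrun (the elastic `@record` decorator above expects it). The flags come straight from `get_args()` above; every path and the process count are illustrative, not part of the commit:

torchrun --nnodes=1 --nproc_per_node=2 cosyvoice/bin/train.py \
    --train_engine torch_ddp \
    --model llm \
    --config conf/cosyvoice.yaml \
    --train_data data/train.data.list \
    --cv_data data/dev.data.list \
    --model_dir exp/cosyvoice/llm \
    --tensorboard_dir tensorboard/llm \
    --num_workers 2 \
    --pin_memory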
    	
cosyvoice/cli/__init__.py
ADDED
File without changes

cosyvoice/cli/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (161 Bytes)

cosyvoice/cli/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (162 Bytes)

cosyvoice/cli/__pycache__/cosyvoice.cpython-310.pyc
ADDED
Binary file (3.17 kB)

cosyvoice/cli/__pycache__/cosyvoice.cpython-38.pyc
ADDED
Binary file (3.11 kB)

cosyvoice/cli/__pycache__/frontend.cpython-310.pyc
ADDED
Binary file (7.07 kB)

cosyvoice/cli/__pycache__/frontend.cpython-38.pyc
ADDED
Binary file (6.87 kB)

cosyvoice/cli/__pycache__/model.cpython-310.pyc
ADDED
Binary file (2.19 kB)

cosyvoice/cli/__pycache__/model.cpython-38.pyc
ADDED
Binary file (2.19 kB)

cosyvoice/cli/cosyvoice.py
ADDED
@@ -0,0 +1,83 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from hyperpyyaml import load_hyperpyyaml
from huggingface_hub import snapshot_download
from cosyvoice.cli.frontend import CosyVoiceFrontEnd
from cosyvoice.cli.model import CosyVoiceModel

class CosyVoice:

    def __init__(self, model_dir):
        instruct = True if '-Instruct' in model_dir else False
        self.model_dir = model_dir
        if not os.path.exists(model_dir):
            model_dir = snapshot_download(model_dir)
        with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f:
            configs = load_hyperpyyaml(f)
        self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'],
                                          configs['feat_extractor'],
                                          '{}/campplus.onnx'.format(model_dir),
                                          '{}/speech_tokenizer_v1.onnx'.format(model_dir),
                                          '{}/spk2info.pt'.format(model_dir),
                                          instruct,
                                          configs['allowed_special'])
        self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift'])
        self.model.load('{}/llm.pt'.format(model_dir),
                        '{}/flow.pt'.format(model_dir),
                        '{}/hift.pt'.format(model_dir))
        del configs

    def list_avaliable_spks(self):
        spks = list(self.frontend.spk2info.keys())
        return spks

    def inference_sft(self, tts_text, spk_id):
        tts_speeches = []
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_sft(i, spk_id)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}

    def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k):
        prompt_text = self.frontend.text_normalize(prompt_text, split=False)
        tts_speeches = []
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}

    def inference_cross_lingual(self, tts_text, prompt_speech_16k):
        if self.frontend.instruct is True:
            raise ValueError('{} does not support cross_lingual inference'.format(self.model_dir))
        tts_speeches = []
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}

    def inference_instruct(self, tts_text, spk_id, instruct_text):
        if self.frontend.instruct is False:
            raise ValueError('{} does not support instruct inference'.format(self.model_dir))
        instruct_text = self.frontend.text_normalize(instruct_text, split=False)
        tts_speeches = []
        for i in self.frontend.text_normalize(tts_text, split=True):
            model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text)
            model_output = self.model.inference(**model_input)
            tts_speeches.append(model_output['tts_speech'])
        return {'tts_speech': torch.concat(tts_speeches, dim=1)}
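
A minimal usage sketch for the wrapper above. The model directory is illustrative (any checkpoint laid out as `__init__` expects would work), and the 22050 Hz save rate is an assumption based on the 22.05 kHz prompt features used by the frontend:

import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice

cosyvoice = CosyVoice('pretrained_models/CosyVoice-300M-SFT')  # hypothetical local dir or hub id
spk = cosyvoice.list_avaliable_spks()[0]                       # pick a built-in speaker
output = cosyvoice.inference_sft('Hello there.', spk)
# tts_speech is a (1, num_samples) waveform tensor
torchaudio.save('demo_sft.wav', output['tts_speech'], 22050)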
    	
cosyvoice/cli/frontend.py
ADDED
@@ -0,0 +1,183 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import onnxruntime
import torch
import numpy as np
import whisper
from typing import Callable
import torchaudio.compliance.kaldi as kaldi
import torchaudio
import os
import re
import inflect
import subprocess
try:
    import ttsfrd
    use_ttsfrd = True
except ImportError:
    print("failed to import ttsfrd, using WeTextProcessing instead")
    from tn.chinese.normalizer import Normalizer as ZhNormalizer
    from tn.english.normalizer import Normalizer as EnNormalizer
    use_ttsfrd = False
from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph


class CosyVoiceFrontEnd:

    def __init__(self,
                 get_tokenizer: Callable,
                 feat_extractor: Callable,
                 campplus_model: str,
                 speech_tokenizer_model: str,
                 spk2info: str = '',
                 instruct: bool = False,
                 allowed_special: str = 'all'):
        self.tokenizer = get_tokenizer()
        self.feat_extractor = feat_extractor
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
        self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option, providers=["CUDAExecutionProvider" if torch.cuda.is_available() else "CPUExecutionProvider"])
        if os.path.exists(spk2info):
            self.spk2info = torch.load(spk2info, map_location=self.device)
        self.instruct = instruct
        self.allowed_special = allowed_special
        self.inflect_parser = inflect.engine()
        self.use_ttsfrd = use_ttsfrd
        if self.use_ttsfrd:
            self.frd = ttsfrd.TtsFrontendEngine()
            ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
            if not os.path.exists('resource.zip'):
                # Download the ttsfrd resource bundle if it is not present
                subprocess.run("wget https://huggingface.co/FunAudioLLM/CosyVoice-ttsfrd/resolve/main/resource.zip".split())
            # Extract the bundle if it has not been unzipped yet
            if not os.path.exists('resource'):
                subprocess.run("unzip resource.zip".split())
            assert self.frd.initialize('{}/../../resource'.format(ROOT_DIR)) is True, 'failed to initialize ttsfrd resource'
            self.frd.set_lang_type('pinyin')
            self.frd.enable_pinyin_mix(True)
            self.frd.set_breakmodel_index(1)
        else:
            self.zh_tn_model = ZhNormalizer(remove_erhua=False, full_to_half=False)
            self.en_tn_model = EnNormalizer()

    def _extract_text_token(self, text):
        text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special)
        text_token = torch.tensor([text_token], dtype=torch.int32).to(self.device)
        text_token_len = torch.tensor([text_token.shape[1]], dtype=torch.int32).to(self.device)
        return text_token, text_token_len

    def _extract_speech_token(self, speech):
        feat = whisper.log_mel_spectrogram(speech, n_mels=128)
        speech_token = self.speech_tokenizer_session.run(None, {self.speech_tokenizer_session.get_inputs()[0].name: feat.detach().cpu().numpy(),
                                                                self.speech_tokenizer_session.get_inputs()[1].name: np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist()
        speech_token = torch.tensor([speech_token], dtype=torch.int32).to(self.device)
        speech_token_len = torch.tensor([speech_token.shape[1]], dtype=torch.int32).to(self.device)
        return speech_token, speech_token_len

    def _extract_spk_embedding(self, speech):
        feat = kaldi.fbank(speech,
                           num_mel_bins=80,
                           dither=0,
                           sample_frequency=16000)
        feat = feat - feat.mean(dim=0, keepdim=True)
        embedding = self.campplus_session.run(None, {self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
        embedding = torch.tensor([embedding]).to(self.device)
        return embedding

    def _extract_speech_feat(self, speech):
        speech_feat = self.feat_extractor(speech).squeeze(dim=0).transpose(0, 1).to(self.device)
        speech_feat = speech_feat.unsqueeze(dim=0)
        speech_feat_len = torch.tensor([speech_feat.shape[1]], dtype=torch.int32).to(self.device)
        return speech_feat, speech_feat_len

    def text_normalize(self, text, split=True):
        text = text.strip()
        if contains_chinese(text):
            if self.use_ttsfrd:
                text = self.frd.get_frd_extra_info(text, 'input')
            else:
                text = self.zh_tn_model.normalize(text)
            text = text.replace("\n", "")
            text = replace_blank(text)
            text = replace_corner_mark(text)
            text = text.replace(".", "、")
            text = text.replace(" - ", ",")
            text = remove_bracket(text)
            text = re.sub(r'[,,]+$', '。', text)
            texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "zh", token_max_n=80,
                                                token_min_n=60, merge_len=20,
                                                comma_split=False)]
        else:
            if self.use_ttsfrd:
                text = self.frd.get_frd_extra_info(text, 'input')
            else:
                text = self.en_tn_model.normalize(text)
            text = spell_out_number(text, self.inflect_parser)
            texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80,
                                                token_min_n=60, merge_len=20,
                                                comma_split=False)]
        if split is False:
            return text
        return texts

    def frontend_sft(self, tts_text, spk_id):
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        embedding = self.spk2info[spk_id]['embedding']
        model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, 'llm_embedding': embedding, 'flow_embedding': embedding}
        return model_input

    def frontend_zero_shot(self, tts_text, prompt_text, prompt_speech_16k):
        tts_text_token, tts_text_token_len = self._extract_text_token(tts_text)
        prompt_text_token, prompt_text_token_len = self._extract_text_token(prompt_text)
        prompt_speech_22050 = torchaudio.transforms.Resample(orig_freq=16000, new_freq=22050)(prompt_speech_16k)
        speech_feat, speech_feat_len = self._extract_speech_feat(prompt_speech_22050)
        speech_token, speech_token_len = self._extract_speech_token(prompt_speech_16k)
        embedding = self._extract_spk_embedding(prompt_speech_16k)
        model_input = {'text': tts_text_token, 'text_len': tts_text_token_len,
                       'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len,
                       'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len,
                       'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len,
                       'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len,
                       'llm_embedding': embedding, 'flow_embedding': embedding}
        return model_input

    def frontend_cross_lingual(self, tts_text, prompt_speech_16k):
        model_input = self.frontend_zero_shot(tts_text, '', prompt_speech_16k)
        # in cross-lingual mode, we remove the text prompt from the llm input
        del model_input['prompt_text']
        del model_input['prompt_text_len']
        del model_input['llm_prompt_speech_token']
        del model_input['llm_prompt_speech_token_len']
        return model_input

    def frontend_instruct(self, tts_text, spk_id, instruct_text):
        model_input = self.frontend_sft(tts_text, spk_id)
        # in instruct mode, we remove the spk embedding from the llm input due to information leakage
        del model_input['llm_embedding']
        instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '<endofprompt>')
        model_input['prompt_text'] = instruct_text_token
        model_input['prompt_text_len'] = instruct_text_token_len
        return model_input
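
To make the expected prompt format concrete, here is a small sketch of preparing audio for the zero-shot and cross-lingual entry points above ('prompt.wav' is a hypothetical file; the frontend expects mono 16 kHz float audio shaped (1, num_samples)):

import torchaudio

speech, sr = torchaudio.load('prompt.wav')
speech = speech.mean(dim=0, keepdim=True)  # downmix to mono
if sr != 16000:
    speech = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(speech)
# pass `speech` as prompt_speech_16k to frontend_zero_shot / frontend_cross_lingual
# (or to the CosyVoice.inference_* wrappers above)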
    	
cosyvoice/cli/model.py
ADDED
@@ -0,0 +1,60 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

class CosyVoiceModel:

    def __init__(self,
                 llm: torch.nn.Module,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module):
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.llm = llm
        self.flow = flow
        self.hift = hift

    def load(self, llm_model, flow_model, hift_model):
        self.llm.load_state_dict(torch.load(llm_model, map_location=self.device))
        self.llm.to(self.device).eval()
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device))
        self.flow.to(self.device).eval()
        self.hift.load_state_dict(torch.load(hift_model, map_location=self.device))
        self.hift.to(self.device).eval()

    def inference(self, text, text_len, flow_embedding, llm_embedding=torch.zeros(0, 192),
                  prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32),
                  llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                  flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                  prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32)):
        tts_speech_token = self.llm.inference(text=text.to(self.device),
                                              text_len=text_len.to(self.device),
                                              prompt_text=prompt_text.to(self.device),
                                              prompt_text_len=prompt_text_len.to(self.device),
                                              prompt_speech_token=llm_prompt_speech_token.to(self.device),
                                              prompt_speech_token_len=llm_prompt_speech_token_len.to(self.device),
                                              embedding=llm_embedding.to(self.device),
                                              beam_size=1,
                                              sampling=25,
                                              max_token_text_ratio=30,
                                              min_token_text_ratio=3)
        tts_mel = self.flow.inference(token=tts_speech_token,
                                      token_len=torch.tensor([tts_speech_token.size(1)], dtype=torch.int32).to(self.device),
                                      prompt_token=flow_prompt_speech_token.to(self.device),
                                      prompt_token_len=flow_prompt_speech_token_len.to(self.device),
                                      prompt_feat=prompt_speech_feat.to(self.device),
                                      prompt_feat_len=prompt_speech_feat_len.to(self.device),
                                      embedding=flow_embedding.to(self.device))
        tts_speech = self.hift.inference(mel=tts_mel).cpu()
        torch.cuda.empty_cache()
        return {'tts_speech': tts_speech}
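
Note the three-stage structure of `inference` above: the LLM autoregressively generates discrete speech tokens (`tts_speech_token`) from text, the flow model turns those tokens into a mel spectrogram (`tts_mel`) conditioned on the speaker embedding and any prompt features, and the HiFT vocoder renders the final waveform. The `llm_embedding=torch.zeros(0, 192)` default matches the instruct path, where the frontend deletes the speaker embedding before the LLM stage.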
    	
cosyvoice/dataset/__init__.py
ADDED
File without changes

cosyvoice/dataset/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (165 Bytes)

cosyvoice/dataset/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (166 Bytes)

cosyvoice/dataset/__pycache__/processor.cpython-310.pyc
ADDED
Binary file (10.8 kB)

cosyvoice/dataset/__pycache__/processor.cpython-38.pyc
ADDED
Binary file (11.1 kB)

cosyvoice/dataset/dataset.py
ADDED
@@ -0,0 +1,160 @@
# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang)
#               2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import json
import math
from functools import partial

import torch
import torch.distributed as dist
from torch.utils.data import IterableDataset
from cosyvoice.utils.file_utils import read_lists, read_json_lists


class Processor(IterableDataset):

    def __init__(self, source, f, *args, **kw):
        assert callable(f)
        self.source = source
        self.f = f
        self.args = args
        self.kw = kw

    def set_epoch(self, epoch):
        self.source.set_epoch(epoch)

    def __iter__(self):
        """ Return an iterator over the source dataset processed by the
            given processor.
        """
        assert self.source is not None
                    assert callable(self.f)
         
     | 
| 45 | 
         
            +
                    return self.f(iter(self.source), *self.args, **self.kw)
         
     | 
| 46 | 
         
            +
             
     | 
| 47 | 
         
            +
                def apply(self, f):
         
     | 
| 48 | 
         
            +
                    assert callable(f)
         
     | 
| 49 | 
         
            +
                    return Processor(self, f, *self.args, **self.kw)
         
     | 
| 50 | 
         
            +
             
     | 
| 51 | 
         
            +
             
     | 
| 52 | 
         
            +
            class DistributedSampler:
         
     | 
| 53 | 
         
            +
             
     | 
| 54 | 
         
            +
                def __init__(self, shuffle=True, partition=True):
         
     | 
| 55 | 
         
            +
                    self.epoch = -1
         
     | 
| 56 | 
         
            +
                    self.update()
         
     | 
| 57 | 
         
            +
                    self.shuffle = shuffle
         
     | 
| 58 | 
         
            +
                    self.partition = partition
         
     | 
| 59 | 
         
            +
             
     | 
| 60 | 
         
            +
                def update(self):
         
     | 
| 61 | 
         
            +
                    assert dist.is_available()
         
     | 
| 62 | 
         
            +
                    if dist.is_initialized():
         
     | 
| 63 | 
         
            +
                        self.rank = dist.get_rank()
         
     | 
| 64 | 
         
            +
                        self.world_size = dist.get_world_size()
         
     | 
| 65 | 
         
            +
                    else:
         
     | 
| 66 | 
         
            +
                        self.rank = 0
         
     | 
| 67 | 
         
            +
                        self.world_size = 1
         
     | 
| 68 | 
         
            +
                    worker_info = torch.utils.data.get_worker_info()
         
     | 
| 69 | 
         
            +
                    if worker_info is None:
         
     | 
| 70 | 
         
            +
                        self.worker_id = 0
         
     | 
| 71 | 
         
            +
                        self.num_workers = 1
         
     | 
| 72 | 
         
            +
                    else:
         
     | 
| 73 | 
         
            +
                        self.worker_id = worker_info.id
         
     | 
| 74 | 
         
            +
                        self.num_workers = worker_info.num_workers
         
     | 
| 75 | 
         
            +
                    return dict(rank=self.rank,
         
     | 
| 76 | 
         
            +
                                world_size=self.world_size,
         
     | 
| 77 | 
         
            +
                                worker_id=self.worker_id,
         
     | 
| 78 | 
         
            +
                                num_workers=self.num_workers)
         
     | 
| 79 | 
         
            +
             
     | 
| 80 | 
         
            +
                def set_epoch(self, epoch):
         
     | 
| 81 | 
         
            +
                    self.epoch = epoch
         
     | 
| 82 | 
         
            +
             
     | 
| 83 | 
         
            +
                def sample(self, data):
         
     | 
| 84 | 
         
            +
                    """ Sample data according to rank/world_size/num_workers
         
     | 
| 85 | 
         
            +
             
     | 
| 86 | 
         
            +
                        Args:
         
     | 
| 87 | 
         
            +
                            data(List): input data list
         
     | 
| 88 | 
         
            +
             
     | 
| 89 | 
         
            +
                        Returns:
         
     | 
| 90 | 
         
            +
                            List: data list after sample
         
     | 
| 91 | 
         
            +
                    """
         
     | 
| 92 | 
         
            +
                    data = list(range(len(data)))
         
     | 
| 93 | 
         
            +
                    # force datalist even
         
     | 
| 94 | 
         
            +
                    if self.partition:
         
     | 
| 95 | 
         
            +
                        if self.shuffle:
         
     | 
| 96 | 
         
            +
                            random.Random(self.epoch).shuffle(data)
         
     | 
| 97 | 
         
            +
                        if len(data) < self.world_size:
         
     | 
| 98 | 
         
            +
                            data = data * math.ceil(self.world_size / len(data))
         
     | 
| 99 | 
         
            +
                            data = data[:self.world_size]
         
     | 
| 100 | 
         
            +
                        data = data[self.rank::self.world_size]
         
     | 
| 101 | 
         
            +
                    if len(data) < self.num_workers:
         
     | 
| 102 | 
         
            +
                        data = data * math.ceil(self.num_workers / len(data))
         
     | 
| 103 | 
         
            +
                        data = data[:self.num_workers]
         
     | 
| 104 | 
         
            +
                    data = data[self.worker_id::self.num_workers]
         
     | 
| 105 | 
         
            +
                    return data
         
     | 
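# Worked example for DistributedSampler.sample(): with world_size=2,
# num_workers=2, shuffle=False and 8 shards, rank 0 first keeps indices
# [0, 2, 4, 6] (stride world_size); worker 1 of that rank then keeps
# [2, 6] (stride num_workers), so every (rank, worker) pair reads a
# disjoint quarter of the shard list.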


class DataList(IterableDataset):

    def __init__(self, lists, shuffle=True, partition=True):
        self.lists = lists
        self.sampler = DistributedSampler(shuffle, partition)

    def set_epoch(self, epoch):
        self.sampler.set_epoch(epoch)

    def __iter__(self):
        sampler_info = self.sampler.update()
        indexes = self.sampler.sample(self.lists)
        for index in indexes:
            data = dict(src=self.lists[index])
            data.update(sampler_info)
            yield data


def Dataset(data_list_file,
            data_pipeline,
            mode='train',
            shuffle=True,
            partition=True,
            tts_file='',
            prompt_utt2data=''):
    """ Construct dataset from arguments

        There are two shuffle stages in the Dataset. The first is a global
        shuffle at the shard/raw-file level; the second is a buffered
        shuffle at the training-sample level (see shuffle() in
        cosyvoice/dataset/processor.py).

        Args:
            data_list_file(str): file listing the data shards
            data_pipeline(List[callable]): processor functions applied in order
            mode(str): train/inference
            shuffle(bool): whether to shuffle the shard list
            partition(bool): whether to do data partition in terms of rank
            tts_file(str): json file of utt -> tts texts, inference mode only
            prompt_utt2data(str): json file mapping utt to its shard, inference mode only
    """
    assert mode in ['train', 'inference']
    lists = read_lists(data_list_file)
    if mode == 'inference':
        with open(tts_file) as f:
            tts_data = json.load(f)
        utt2lists = read_json_lists(prompt_utt2data)
        # filter unnecessary files in inference mode
        lists = list(set([utt2lists[utt] for utt in tts_data.keys() if utt2lists[utt] in lists]))
    dataset = DataList(lists,
                       shuffle=shuffle,
                       partition=partition)
    if mode == 'inference':
        # map partial arg tts_data in inference mode
        data_pipeline[0] = partial(data_pipeline[0], tts_data=tts_data)
    for func in data_pipeline:
        dataset = Processor(dataset, func, mode=mode)
    return dataset
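
To see how these pieces compose, here is a minimal, self-contained sketch with a hypothetical toy stage (`scale` is not part of this repo; the real stages live in cosyvoice/dataset/processor.py, and this assumes torch.distributed is available, since DistributedSampler.update() asserts it):

    from cosyvoice.dataset.dataset import DataList, Processor

    def scale(data, factor=10, mode='train'):
        # Each pipeline stage consumes an iterator of sample dicts and yields
        # transformed dicts; DataList fills in the 'src' field.
        for sample in data:
            yield {**sample, 'value': sample['src'] * factor}

    data = DataList([1, 2, 3], shuffle=False, partition=False)
    data = Processor(data, scale, factor=10)
    print([s['value'] for s in data])  # [10, 20, 30]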
    	
cosyvoice/dataset/processor.py
ADDED
@@ -0,0 +1,369 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random

import pyarrow.parquet as pq
from io import BytesIO
import torch
import torchaudio
from torch.nn.utils.rnn import pad_sequence
import torch.nn.functional as F

torchaudio.set_audio_backend('soundfile')

AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma'])


def parquet_opener(data, mode='train', tts_data={}):
    """ Open parquet shards and yield one sample dict per row.
        Inplace operation.

        Args:
            data(Iterable[{src}]): sample dicts whose 'src' field is a url
                or local parquet path

        Returns:
            Iterable[{src, utt, audio_data, text, ...}]
    """
    for sample in data:
        assert 'src' in sample
        url = sample['src']
        try:
            df = pq.read_table(url).to_pandas()
            for i in range(len(df)):
                if mode == 'inference' and df.loc[i, 'utt'] not in tts_data:
                    continue
                sample.update(dict(df.loc[i]))
                if mode == 'train':
                    # NOTE do not return sample directly, must initialize a new dict
                    yield {**sample}
                else:
                    for index, text in enumerate(tts_data[df.loc[i, 'utt']]):
                        yield {**sample, 'tts_index': index, 'tts_text': text}
        except Exception as ex:
            logging.warning('Failed to open {}, ex info {}'.format(url, ex))

def filter(data,
           max_length=10240,
           min_length=10,
           token_max_length=200,
           token_min_length=1,
           min_output_input_ratio=0.0005,
           max_output_input_ratio=1,
           mode='train'):
    """ Filter samples according to feature and label length.
        Inplace operation. (NOTE: shadows the builtin filter().)

        Args:
            data: Iterable[{key, wav, label, sample_rate}]
            max_length: drop utterances longer than max_length (10 ms frames)
            min_length: drop utterances shorter than min_length (10 ms frames)
            token_max_length: drop utterances whose token length is greater
                than token_max_length, especially when using char units for
                English modeling
            token_min_length: drop utterances whose token length is less
                than token_min_length
            min_output_input_ratio: minimum ratio of
                token_length / feats_length (10 ms frames)
            max_output_input_ratio: maximum ratio of
                token_length / feats_length (10 ms frames)

        Returns:
            Iterable[{key, wav, label, sample_rate}]
    """
    for sample in data:
        sample['speech'], sample['sample_rate'] = torchaudio.load(BytesIO(sample['audio_data']))
        del sample['audio_data']
        # sample['speech'] is a torch.Tensor; we count 100 frames per second
        num_frames = sample['speech'].size(1) / sample['sample_rate'] * 100
        if num_frames < min_length:
            continue
        if num_frames > max_length:
            continue
        if len(sample['text_token']) < token_min_length:
            continue
        if len(sample['text_token']) > token_max_length:
            continue
        if len(sample['speech_token']) == 0:
            continue
        if num_frames != 0:
            if len(sample['text_token']) / num_frames < min_output_input_ratio:
                continue
            if len(sample['text_token']) / num_frames > max_output_input_ratio:
                continue
        yield sample
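# Unit check for the thresholds above: num_frames counts 10 ms frames
# (samples / sample_rate * 100), so a 5 s clip at 16 kHz yields 500 frames,
# and the defaults keep utterances between 0.1 s (min_length=10) and
# ~102 s (max_length=10240).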


def resample(data, resample_rate=22050, min_sample_rate=16000, mode='train'):
    """ Resample data.
        Inplace operation.

        Args:
            data: Iterable[{key, wav, label, sample_rate}]
            resample_rate: target resample rate
            min_sample_rate: drop utterances whose original rate is lower

        Returns:
            Iterable[{key, wav, label, sample_rate}]
    """
    for sample in data:
        assert 'sample_rate' in sample
        assert 'speech' in sample
        sample_rate = sample['sample_rate']
        waveform = sample['speech']
        if sample_rate != resample_rate:
            if sample_rate < min_sample_rate:
                continue
            sample['sample_rate'] = resample_rate
            sample['speech'] = torchaudio.transforms.Resample(
                orig_freq=sample_rate, new_freq=resample_rate)(waveform)
        # peak-normalize clipped waveforms
        max_val = sample['speech'].abs().max()
        if max_val > 1:
            sample['speech'] /= max_val
        yield sample


def compute_fbank(data,
                  feat_extractor,
                  mode='train'):
    """ Extract fbank features.

        Args:
            data: Iterable[{key, wav, label, sample_rate}]

        Returns:
            Iterable[{key, feat, label}]
    """
    for sample in data:
        assert 'sample_rate' in sample
        assert 'speech' in sample
        assert 'utt' in sample
        assert 'text_token' in sample
        waveform = sample['speech']
        # resulting mat has shape (T, num_mels)
        mat = feat_extractor(waveform).squeeze(dim=0).transpose(0, 1)
        sample['speech_feat'] = mat
        del sample['speech']
        yield sample


def parse_embedding(data, normalize, mode='train'):
    """ Parse utt_embedding/spk_embedding into float tensors.

        Args:
            data: Iterable[{key, wav, label, sample_rate}]

        Returns:
            Iterable[{key, feat, label}]
    """
    for sample in data:
        sample['utt_embedding'] = torch.tensor(sample['utt_embedding'], dtype=torch.float32)
        sample['spk_embedding'] = torch.tensor(sample['spk_embedding'], dtype=torch.float32)
        if normalize:
            sample['utt_embedding'] = F.normalize(sample['utt_embedding'], dim=0)
            sample['spk_embedding'] = F.normalize(sample['spk_embedding'], dim=0)
        yield sample


def tokenize(data, get_tokenizer, allowed_special, mode='train'):
    """ Encode text into char or BPE tokens.
        Inplace operation.

        Args:
            data: Iterable[{key, wav, txt, sample_rate}]

        Returns:
            Iterable[{key, wav, txt, tokens, label, sample_rate}]
    """
    tokenizer = get_tokenizer()
    for sample in data:
        assert 'text' in sample
        sample['text_token'] = tokenizer.encode(sample['text'], allowed_special=allowed_special)
        if mode == 'inference':
            sample['tts_text_token'] = tokenizer.encode(sample['tts_text'], allowed_special=allowed_special)
        yield sample


def shuffle(data, shuffle_size=10000, mode='train'):
    """ Locally shuffle the data.

        Args:
            data: Iterable[{key, feat, label}]
            shuffle_size: buffer size for shuffle

        Returns:
            Iterable[{key, feat, label}]
    """
    buf = []
    for sample in data:
        buf.append(sample)
        if len(buf) >= shuffle_size:
            random.shuffle(buf)
            for x in buf:
                yield x
            buf = []
    # the samples left over
    random.shuffle(buf)
    for x in buf:
        yield x


def sort(data, sort_size=500, mode='train'):
    """ Sort the data by feature length.
        Sort is used after shuffle and before batch, so we can group
        utterances with similar lengths into a batch; `sort_size` should
        be less than `shuffle_size`.

        Args:
            data: Iterable[{key, feat, label}]
            sort_size: buffer size for sort

        Returns:
            Iterable[{key, feat, label}]
    """

    buf = []
    for sample in data:
        buf.append(sample)
        if len(buf) >= sort_size:
            buf.sort(key=lambda x: x['speech_feat'].size(0))
            for x in buf:
                yield x
            buf = []
    # the samples left over
    buf.sort(key=lambda x: x['speech_feat'].size(0))
    for x in buf:
        yield x


def static_batch(data, batch_size=16):
    """ Static batch the data by `batch_size`.

        Args:
            data: Iterable[{key, feat, label}]
            batch_size: batch size

        Returns:
            Iterable[List[{key, feat, label}]]
    """
    buf = []
    for sample in data:
        buf.append(sample)
        if len(buf) >= batch_size:
            yield buf
            buf = []
    if len(buf) > 0:
        yield buf


def dynamic_batch(data, max_frames_in_batch=12000, mode='train'):
    """ Dynamic batch the data until the total padded frames in a batch
        reach `max_frames_in_batch`.

        Args:
            data: Iterable[{key, feat, label}]
            max_frames_in_batch: max frames in one batch

        Returns:
            Iterable[List[{key, feat, label}]]
    """
    buf = []
    longest_frames = 0
    for sample in data:
        assert 'speech_feat' in sample
        assert isinstance(sample['speech_feat'], torch.Tensor)
        new_sample_frames = sample['speech_feat'].size(0)
        longest_frames = max(longest_frames, new_sample_frames)
        # padded cost: every sample in the batch is padded to the longest one
        frames_after_padding = longest_frames * (len(buf) + 1)
        if frames_after_padding > max_frames_in_batch:
            yield buf
            buf = [sample]
            longest_frames = new_sample_frames
        else:
            buf.append(sample)
    if len(buf) > 0:
        yield buf
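# Worked example for dynamic_batch(): with max_frames_in_batch=1200 and
# feature lengths 300, 500, 400, 900, the yielded batches are [300, 500],
# [400] and [900] - adding 400 after [300, 500] would cost 3 * 500 = 1500
# padded frames (> 1200), and adding 900 after [400] would cost
# 2 * 900 = 1800 (> 1200).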


def batch(data, batch_type='static', batch_size=16, max_frames_in_batch=12000, mode='train'):
    """ Wrapper for static/dynamic batch.
    """
    if mode == 'inference':
        return static_batch(data, 1)
    else:
        if batch_type == 'static':
            return static_batch(data, batch_size)
        elif batch_type == 'dynamic':
            return dynamic_batch(data, max_frames_in_batch)
        else:
            logging.fatal('Unsupported batch type {}'.format(batch_type))


def padding(data, use_spk_embedding, mode='train'):
    """ Pad each batch into training tensors.

        Args:
            data: Iterable[List[{key, feat, label}]]

        Returns:
            Iterable[Dict]: padded batch with token/feature tensors and lengths
    """
    for sample in data:
        assert isinstance(sample, list)
        # order samples by feature length, longest first (the time axis of
        # speech_feat is dim 0 after compute_fbank)
        speech_feat_len = torch.tensor([x['speech_feat'].size(0) for x in sample],
                                       dtype=torch.int32)
        order = torch.argsort(speech_feat_len, descending=True)

        utts = [sample[i]['utt'] for i in order]
        speech_token = [torch.tensor(sample[i]['speech_token']) for i in order]
        speech_token_len = torch.tensor([i.size(0) for i in speech_token], dtype=torch.int32)
        speech_token = pad_sequence(speech_token,
                                    batch_first=True,
                                    padding_value=0)
        speech_feat = [sample[i]['speech_feat'] for i in order]
        speech_feat_len = torch.tensor([i.size(0) for i in speech_feat], dtype=torch.int32)
        speech_feat = pad_sequence(speech_feat,
                                   batch_first=True,
                                   padding_value=0)
        text = [sample[i]['text'] for i in order]
        text_token = [torch.tensor(sample[i]['text_token']) for i in order]
        text_token_len = torch.tensor([i.size(0) for i in text_token], dtype=torch.int32)
        text_token = pad_sequence(text_token, batch_first=True, padding_value=0)
        utt_embedding = torch.stack([sample[i]['utt_embedding'] for i in order], dim=0)
        spk_embedding = torch.stack([sample[i]['spk_embedding'] for i in order], dim=0)
        batch = {
            "utts": utts,
            "speech_token": speech_token,
            "speech_token_len": speech_token_len,
            "speech_feat": speech_feat,
            "speech_feat_len": speech_feat_len,
            "text": text,
            "text_token": text_token,
            "text_token_len": text_token_len,
            "utt_embedding": utt_embedding,
            "spk_embedding": spk_embedding,
        }
        if mode == 'inference':
            tts_text = [sample[i]['tts_text'] for i in order]
            tts_index = [sample[i]['tts_index'] for i in order]
            tts_text_token = [torch.tensor(sample[i]['tts_text_token']) for i in order]
            tts_text_token_len = torch.tensor([i.size(0) for i in tts_text_token], dtype=torch.int32)
            tts_text_token = pad_sequence(tts_text_token, batch_first=True, padding_value=-1)
            batch.update({'tts_text': tts_text,
                          'tts_index': tts_index,
                          'tts_text_token': tts_text_token,
                          'tts_text_token_len': tts_text_token_len})
        if use_spk_embedding is True:
            batch["embedding"] = batch["spk_embedding"]
        else:
            batch["embedding"] = batch["utt_embedding"]
        yield batch
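
Taken together, these generator stages form the `data_pipeline` that `Dataset` in cosyvoice/dataset/dataset.py chains through `Processor` wrappers. A sketch of a plausible training pipeline follows; the exact stage order and arguments are normally assembled from the training YAML config, and `get_tokenizer` and `feat_extractor` are hypothetical stand-ins here:

    from functools import partial
    from cosyvoice.dataset import processor
    from cosyvoice.dataset.dataset import Dataset

    # get_tokenizer and feat_extractor are assumed to be built elsewhere
    # (e.g. instantiated from the config); they are not defined in this sketch.
    data_pipeline = [
        processor.parquet_opener,
        partial(processor.tokenize, get_tokenizer=get_tokenizer, allowed_special='all'),
        processor.filter,
        processor.resample,
        partial(processor.compute_fbank, feat_extractor=feat_extractor),
        partial(processor.parse_embedding, normalize=True),
        processor.shuffle,
        processor.sort,
        processor.batch,
        partial(processor.padding, use_spk_embedding=False),
    ]
    dataset = Dataset('train.data.list', data_pipeline=data_pipeline, mode='train')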
    	
cosyvoice/flow/__pycache__/decoder.cpython-310.pyc
ADDED
    Binary file (5.14 kB)

cosyvoice/flow/__pycache__/decoder.cpython-38.pyc
ADDED
    Binary file (5.22 kB)

cosyvoice/flow/__pycache__/flow.cpython-310.pyc
ADDED
    Binary file (4.14 kB)

cosyvoice/flow/__pycache__/flow.cpython-38.pyc
ADDED
    Binary file (4.11 kB)

cosyvoice/flow/__pycache__/flow_matching.cpython-310.pyc
ADDED
    Binary file (4.54 kB)

cosyvoice/flow/__pycache__/flow_matching.cpython-38.pyc
ADDED
    Binary file (4.55 kB)

cosyvoice/flow/__pycache__/length_regulator.cpython-310.pyc
ADDED
    Binary file (1.48 kB)

cosyvoice/flow/__pycache__/length_regulator.cpython-38.pyc
ADDED
    Binary file (1.46 kB)
    	
cosyvoice/flow/decoder.py
ADDED
@@ -0,0 +1,222 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from einops import pack, rearrange, repeat
from matcha.models.components.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, TimestepEmbedding, Upsample1D
from matcha.models.components.transformer import BasicTransformerBlock


class ConditionalDecoder(nn.Module):
    def __init__(
        self,
        in_channels,
        out_channels,
        channels=(256, 256),
        dropout=0.05,
        attention_head_dim=64,
        n_blocks=1,
        num_mid_blocks=2,
        num_heads=4,
        act_fn="snake",
    ):
        """
        This decoder requires an input with the same shape as the target. So, if your text content
        is shorter or longer than the output, please resample it before feeding it to the decoder.
        """
        super().__init__()
        channels = tuple(channels)
        self.in_channels = in_channels
        self.out_channels = out_channels

        self.time_embeddings = SinusoidalPosEmb(in_channels)
        time_embed_dim = channels[0] * 4
        self.time_mlp = TimestepEmbedding(
            in_channels=in_channels,
            time_embed_dim=time_embed_dim,
            act_fn="silu",
        )
        self.down_blocks = nn.ModuleList([])
        self.mid_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        output_channel = in_channels
        for i in range(len(channels)):  # pylint: disable=consider-using-enumerate
            input_channel = output_channel
            output_channel = channels[i]
            is_last = i == len(channels) - 1
            resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            downsample = (
                Downsample1D(output_channel) if not is_last else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )
            self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))

        for _ in range(num_mid_blocks):
            input_channel = channels[-1]
            output_channel = channels[-1]
            resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)

            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )

            self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))

        channels = channels[::-1] + (channels[0],)
        for i in range(len(channels) - 1):
            input_channel = channels[i] * 2
            output_channel = channels[i + 1]
            is_last = i == len(channels) - 2
            resnet = ResnetBlock1D(
                dim=input_channel,
                dim_out=output_channel,
                time_emb_dim=time_embed_dim,
            )
            transformer_blocks = nn.ModuleList(
                [
                    BasicTransformerBlock(
                        dim=output_channel,
                        num_attention_heads=num_heads,
                        attention_head_dim=attention_head_dim,
                        dropout=dropout,
                        activation_fn=act_fn,
                    )
                    for _ in range(n_blocks)
                ]
            )
            upsample = (
                Upsample1D(output_channel, use_conv_transpose=True)
                if not is_last
                else nn.Conv1d(output_channel, output_channel, 3, padding=1)
            )
            self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
        self.final_block = Block1D(channels[-1], channels[-1])
        self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
        self.initialize_weights()

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.GroupNorm):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x, mask, mu, t, spks=None, cond=None):
        """Forward pass of the UNet1DConditional model.

        Args:
            x (torch.Tensor): noisy input, shape (batch_size, in_channels, time)
            mask (torch.Tensor): output mask, shape (batch_size, 1, time)
            mu (torch.Tensor): encoder output, shape (batch_size, in_channels, time)
            t (torch.Tensor): timestep, shape (batch_size,)
            spks (torch.Tensor, optional): speaker embedding, shape (batch_size, condition_channels). Defaults to None.
            cond (torch.Tensor, optional): extra conditioning features, concatenated along the channel axis. Defaults to None.

        Returns:
            torch.Tensor: estimated vector field, shape (batch_size, out_channels, time)
        """
        t = self.time_embeddings(t)
        t = self.time_mlp(t)

        x = pack([x, mu], "b * t")[0]

        if spks is not None:
            spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
            x = pack([x, spks], "b * t")[0]
        if cond is not None:
            x = pack([x, cond], "b * t")[0]

        hiddens = []
        masks = [mask]
        for resnet, transformer_blocks, downsample in self.down_blocks:
            mask_down = masks[-1]
            x = resnet(x, mask_down, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            attn_mask = torch.matmul(mask_down.transpose(1, 2).contiguous(), mask_down)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            hiddens.append(x)  # Save hidden states for skip connections
            x = downsample(x * mask_down)
            masks.append(mask_down[:, :, ::2])
        masks = masks[:-1]
        mask_mid = masks[-1]

        for resnet, transformer_blocks in self.mid_blocks:
            x = resnet(x, mask_mid, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            attn_mask = torch.matmul(mask_mid.transpose(1, 2).contiguous(), mask_mid)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()

        for resnet, transformer_blocks, upsample in self.up_blocks:
            mask_up = masks.pop()
            skip = hiddens.pop()
            x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0]
            x = resnet(x, mask_up, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            attn_mask = torch.matmul(mask_up.transpose(1, 2).contiguous(), mask_up)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            x = upsample(x * mask_up)
        x = self.final_block(x, mask_up)
        output = self.final_proj(x * mask_up)
        return output * mask
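As a sanity check, here is a minimal sketch of driving ConditionalDecoder with dummy tensors; it assumes the matcha-tts package is installed, and all shapes are hypothetical. Note that in_channels must equal the total channel count after x, mu, and any conditioning tensors are packed together (here 80 + 80 + 80 for x, mu, and spks, with cond left unset):

    import torch
    from cosyvoice.flow.decoder import ConditionalDecoder

    decoder = ConditionalDecoder(in_channels=240, out_channels=80,
                                 channels=(256, 256), act_fn="gelu")
    x = torch.randn(2, 80, 128)      # noisy mel-spectrogram
    mu = torch.randn(2, 80, 128)     # encoder output, same shape as the target
    mask = torch.ones(2, 1, 128)     # all 128 frames are valid
    t = torch.rand(2)                # one flow timestep per batch item
    spks = torch.randn(2, 80)        # projected speaker embedding
    out = decoder(x, mask, mu, t, spks=spks)
    print(out.shape)                 # torch.Size([2, 80, 128])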
    	
cosyvoice/flow/flow.py
ADDED
@@ -0,0 +1,141 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from typing import Dict, Optional
import torch
import torch.nn as nn
from torch.nn import functional as F
from omegaconf import DictConfig
from cosyvoice.utils.mask import make_pad_mask


class MaskedDiffWithXvec(torch.nn.Module):
    def __init__(self,
                 input_size: int = 512,
                 output_size: int = 80,
                 spk_embed_dim: int = 192,
                 output_type: str = "mel",
                 vocab_size: int = 4096,
                 input_frame_rate: int = 50,
                 only_mask_loss: bool = True,
                 encoder: torch.nn.Module = None,
                 length_regulator: torch.nn.Module = None,
                 decoder: torch.nn.Module = None,
                 decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1, 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine', 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}), 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64, 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}},
                 mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050, 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logging.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.length_regulator = length_regulator
        self.only_mask_loss = only_mask_loss

    def forward(
            self,
            batch: dict,
            device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        token = batch['speech_token'].to(device)
        token_len = batch['speech_token_len'].to(device)
        feat = batch['speech_feat'].to(device)
        feat_len = batch['speech_feat_len'].to(device)
        embedding = batch['embedding'].to(device)

        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # mask and embed the speech tokens
        mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # encode the speech tokens
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        h, h_lengths = self.length_regulator(h, feat_len)

        # get conditions: for half of the samples, expose a random prefix
        # (up to 30% of the utterance) of the target mel as the condition
        conds = torch.zeros(feat.shape, device=token.device)
        for i, j in enumerate(feat_len):
            if random.random() < 0.5:
                continue
            index = random.randint(0, int(0.3 * j))
            conds[i, :index] = feat[i, :index]
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(feat_len)).to(h)
        # align the target mel length with the length-regulated encoder output
        feat = F.interpolate(feat.unsqueeze(dim=1), size=h.shape[1:], mode="nearest").squeeze(dim=1)
        loss, _ = self.decoder.compute_loss(
            feat.transpose(1, 2).contiguous(),
            mask.unsqueeze(1),
            h.transpose(1, 2).contiguous(),
            embedding,
            cond=conds
        )
        return {'loss': loss}

    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding):
        assert token.shape[0] == 1
        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat prompt and target speech tokens
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(embedding)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask

        # encode the speech tokens
        h, h_lengths = self.encoder(token, token_len)
        h = self.encoder_proj(h)
        # 50 Hz tokens -> seconds -> samples at 22050 Hz -> mel frames at hop 256
        feat_len = (token_len / 50 * 22050 / 256).int()
        h, h_lengths = self.length_regulator(h, feat_len)

        # get conditions: the prompt mel frames are used as the condition prefix
        conds = torch.zeros([1, feat_len.max().item(), self.output_size], device=token.device)
        if prompt_feat.shape[1] != 0:
            for i, j in enumerate(prompt_feat_len):
                conds[i, :j] = prompt_feat[i]
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(feat_len)).to(h)
        feat = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask.unsqueeze(1),
            spks=embedding,
            cond=conds,
            n_timesteps=10
        )
        if prompt_feat.shape[1] != 0:
            # strip the prompt frames so only the newly generated mel is returned
            feat = feat[:, :, prompt_feat.shape[1]:]
        return feat
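The feat_len conversion in inference above maps the 50 Hz speech-token rate to a mel-frame count at the 22050 Hz sample rate and hop size 256 given in mel_feat_conf. A quick check of the arithmetic:

    import torch

    token_len = torch.tensor([100])                  # 100 tokens at 50 Hz = 2 s of speech
    feat_len = (token_len / 50 * 22050 / 256).int()
    print(feat_len)                                  # tensor([172]): 2 s * 22050 / 256 = 172.3, truncated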
    	
cosyvoice/flow/flow_matching.py
ADDED
@@ -0,0 +1,138 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from matcha.models.components.flow_matching import BASECFM


class ConditionalCFM(BASECFM):
    def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
        super().__init__(
            n_feats=in_channels,
            cfm_params=cfm_params,
            n_spks=n_spks,
            spk_emb_dim=spk_emb_dim,
        )
        self.t_scheduler = cfm_params.t_scheduler
        self.training_cfg_rate = cfm_params.training_cfg_rate
        self.inference_cfg_rate = cfm_params.inference_cfg_rate
        in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0)
        # Just change the architecture of the estimator here
        self.estimator = estimator

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
        """Generate a mel-spectrogram by integrating the flow-matching ODE from noise.

        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of ODE solver steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond (torch.Tensor, optional): conditioning features passed through to the estimator

        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """
        z = torch.randn_like(mu) * temperature
        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device)
        if self.t_scheduler == 'cosine':
            # cosine schedule: finer steps near t=0, coarser near t=1
            t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond)

    def solve_euler(self, x, t_span, mu, mask, spks, cond):
        """
        Fixed-step Euler solver for the flow-matching ODE.
        Args:
            x (torch.Tensor): random noise
            t_span (torch.Tensor): n_timesteps interpolated
                shape: (n_timesteps + 1,)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond (torch.Tensor, optional): conditioning features passed through to the estimator
        """
        t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]

        # I am storing this because I can later plot it by putting a debugger here and saving it to a file
        # Or in future might add like a return_all_steps flag
        sol = []

        for step in range(1, len(t_span)):
            dphi_dt = self.estimator(x, mask, mu, t, spks, cond)
            # Classifier-free guidance inference as introduced in VoiceBox:
            # dphi_dt = (1 + w) * v(x | cond) - w * v(x | 0), with w = inference_cfg_rate
            if self.inference_cfg_rate > 0:
                cfg_dphi_dt = self.estimator(
                    x, mask,
                    torch.zeros_like(mu), t,
                    torch.zeros_like(spks) if spks is not None else None,
                    torch.zeros_like(cond)
                )
                dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt -
                           self.inference_cfg_rate * cfg_dphi_dt)
            x = x + dt * dphi_dt
            t = t + dt
            sol.append(x)
            if step < len(t_span) - 1:
                dt = t_span[step + 1] - t

        return sol[-1]

    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Computes the conditional flow-matching loss

        Args:
            x1 (torch.Tensor): Target
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): target mask
                shape: (batch_size, 1, mel_timesteps)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            loss: conditional flow matching loss
            y: conditional flow
                shape: (batch_size, n_feats, mel_timesteps)
        """
        b, _, t = mu.shape

        # random timestep
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t = 1 - torch.cos(t * 0.5 * torch.pi)
        # sample noise p(x_0)
        z = torch.randn_like(x1)

        # OT-CFM interpolant y = (1 - (1 - sigma_min) t) z + t x1 and its target velocity u
        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z

        # during training, we randomly drop the condition to trade off mode coverage and sample fidelity
        if self.training_cfg_rate > 0:
         
     | 
| 131 | 
         
            +
                        cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate
         
     | 
| 132 | 
         
            +
                        mu = mu * cfg_mask.view(-1, 1, 1)
         
     | 
| 133 | 
         
            +
                        spks = spks * cfg_mask.view(-1, 1)
         
     | 
| 134 | 
         
            +
                        cond = cond * cfg_mask.view(-1, 1, 1)
         
     | 
| 135 | 
         
            +
             
     | 
| 136 | 
         
            +
                    pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond)
         
     | 
| 137 | 
         
            +
                    loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1])
         
     | 
| 138 | 
         
            +
                    return loss, y
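
Editor's note: a minimal sketch of the fixed-step Euler integration performed by solve_euler above, with a toy estimator standing in for the conditional decoder (all names and shapes below are illustrative, not part of the repo):

    import torch

    def estimator(x, mask, mu, t, spks, cond):
        # toy vector field that simply pulls the sample toward mu
        return mu - x

    B, D, T, steps = 2, 80, 120, 10
    x = torch.randn(B, D, T)                  # x_0 ~ N(0, I)
    mu = torch.zeros(B, D, T)                 # encoder output (toy)
    mask = torch.ones(B, 1, T)
    t_span = torch.linspace(0, 1, steps + 1)  # n_timesteps + 1 knots
    t, dt = t_span[0], t_span[1] - t_span[0]
    for step in range(1, len(t_span)):
        x = x + dt * estimator(x, mask, mu, t, None, None)
        t = t + dt
        if step < len(t_span) - 1:
            dt = t_span[step + 1] - t
    # x now approximates a sample conditioned on mu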
         
        cosyvoice/flow/length_regulator.py
    ADDED
    
@@ -0,0 +1,49 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import torch.nn as nn
from torch.nn import functional as F
from cosyvoice.utils.mask import make_pad_mask


class InterpolateRegulator(nn.Module):
    def __init__(
            self,
            channels: int,
            sampling_ratios: Tuple,
            out_channels: int = None,
            groups: int = 1,
    ):
        super().__init__()
        self.sampling_ratios = sampling_ratios
        out_channels = out_channels or channels
        model = nn.ModuleList([])
        if len(sampling_ratios) > 0:
            for _ in sampling_ratios:
                module = nn.Conv1d(channels, channels, 3, 1, 1)
                norm = nn.GroupNorm(groups, channels)
                act = nn.Mish()
                model.extend([module, norm, act])
        model.append(
            nn.Conv1d(channels, out_channels, 1, 1)
        )
        self.model = nn.Sequential(*model)

    def forward(self, x, ylens=None):
        # x in (B, T, D)
        mask = (~make_pad_mask(ylens)).to(x).unsqueeze(-1)
        x = F.interpolate(x.transpose(1, 2).contiguous(), size=ylens.max(), mode='nearest')
        out = self.model(x).transpose(1, 2).contiguous()
        olens = ylens
        return out * mask, olens
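
Editor's note: a usage sketch for InterpolateRegulator, assuming the repo is on PYTHONPATH; all sizes are illustrative:

    import torch
    from cosyvoice.flow.length_regulator import InterpolateRegulator

    regulator = InterpolateRegulator(channels=80, sampling_ratios=(1, 1))
    x = torch.randn(2, 50, 80)        # (B, T_in, D) encoder features
    ylens = torch.tensor([80, 64])    # target mel lengths per item
    out, olens = regulator(x, ylens)  # out: (2, 80, 80); frames past 64 in item 2 are masked to zero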
         
    	
        cosyvoice/hifigan/__pycache__/f0_predictor.cpython-310.pyc
    ADDED
    
Binary file (1.36 kB).

cosyvoice/hifigan/__pycache__/f0_predictor.cpython-38.pyc
ADDED

Binary file (1.35 kB).

cosyvoice/hifigan/__pycache__/generator.cpython-310.pyc
ADDED

Binary file (11.3 kB).

cosyvoice/hifigan/__pycache__/generator.cpython-38.pyc
ADDED

Binary file (11.3 kB).
    	
        cosyvoice/hifigan/f0_predictor.py
    ADDED
    
@@ -0,0 +1,55 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm


class ConvRNNF0Predictor(nn.Module):
    def __init__(self,
                 num_class: int = 1,
                 in_channels: int = 80,
                 cond_channels: int = 512
                 ):
        super().__init__()

        self.num_class = num_class
        self.condnet = nn.Sequential(
            weight_norm(
                nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
            weight_norm(
                nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
            ),
            nn.ELU(),
        )
        self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.condnet(x)
        x = x.transpose(1, 2)
        return torch.abs(self.classifier(x).squeeze(-1))
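
Editor's note: a usage sketch for ConvRNNF0Predictor; shapes are illustrative:

    import torch
    from cosyvoice.hifigan.f0_predictor import ConvRNNF0Predictor

    predictor = ConvRNNF0Predictor()  # defaults: 80 mel bins in, one F0 value per frame
    mel = torch.randn(2, 80, 200)     # (B, n_mels, T)
    f0 = predictor(mel)               # (B, T), non-negative thanks to the final abs()
    print(f0.shape)                   # torch.Size([2, 200])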
         
    	
        cosyvoice/hifigan/generator.py
    ADDED
    
@@ -0,0 +1,391 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""HIFI-GAN"""

import typing as tp
import numpy as np
from scipy.signal import get_window
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d
from torch.nn import ConvTranspose1d
from torch.nn.utils import remove_weight_norm
from torch.nn.utils import weight_norm
from torch.distributions.uniform import Uniform

from cosyvoice.transformer.activation import Snake
from cosyvoice.utils.common import get_padding
from cosyvoice.utils.common import init_weights


"""HiFi-GAN-based generator implementation.

This code is modified from https://github.com/jik876/hifi-gan,
https://github.com/kan-bayashi/ParallelWaveGAN and
https://github.com/NVIDIA/BigVGAN
"""


class ResBlock(torch.nn.Module):
    """Residual block module in HiFiGAN/BigVGAN."""
    def __init__(
        self,
        channels: int = 512,
        kernel_size: int = 3,
        dilations: tp.List[int] = [1, 3, 5],
    ):
        super(ResBlock, self).__init__()
        self.convs1 = nn.ModuleList()
        self.convs2 = nn.ModuleList()

        for dilation in dilations:
            self.convs1.append(
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=dilation,
                        padding=get_padding(kernel_size, dilation)
                    )
                )
            )
            self.convs2.append(
                weight_norm(
                    Conv1d(
                        channels,
                        channels,
                        kernel_size,
                        1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1)
                    )
                )
            )
        self.convs1.apply(init_weights)
        self.convs2.apply(init_weights)
        self.activations1 = nn.ModuleList([
            Snake(channels, alpha_logscale=False)
            for _ in range(len(self.convs1))
        ])
        self.activations2 = nn.ModuleList([
            Snake(channels, alpha_logscale=False)
            for _ in range(len(self.convs2))
        ])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for idx in range(len(self.convs1)):
            xt = self.activations1[idx](x)
            xt = self.convs1[idx](xt)
            xt = self.activations2[idx](xt)
            xt = self.convs2[idx](xt)
            x = xt + x
        return x

    def remove_weight_norm(self):
        for idx in range(len(self.convs1)):
            remove_weight_norm(self.convs1[idx])
            remove_weight_norm(self.convs2[idx])
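
# Editor's note: a minimal usage sketch (shapes are illustrative, not from the
# repo). ResBlock is shape-preserving: each stage applies Snake -> dilated
# conv -> Snake -> 1-dilated conv and adds the result back to its input.
#
#     block = ResBlock(channels=512, kernel_size=3, dilations=[1, 3, 5])
#     y = block(torch.randn(1, 512, 100))  # y.shape == (1, 512, 100)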
         

class SineGen(torch.nn.Module):
    """ Definition of sine generator
    SineGen(samp_rate, harmonic_num = 0,
            sine_amp = 0.1, noise_std = 0.003,
            voiced_threshold = 0,
            flag_for_pulse=False)
    samp_rate: sampling rate in Hz
    harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
    Note: when flag_for_pulse is True, the first time step of a voiced
        segment is always sin(np.pi) or cos(0)
    """

    def __init__(self, samp_rate, harmonic_num=0,
                 sine_amp=0.1, noise_std=0.003,
                 voiced_threshold=0):
        super(SineGen, self).__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        # generate uv signal
        uv = (f0 > self.voiced_threshold).type(torch.float32)
        return uv

    @torch.no_grad()
    def forward(self, f0):
        """
        :param f0: [B, 1, sample_len], Hz
        :return: [B, 1, sample_len]
        """

        F_mat = torch.zeros((f0.size(0), self.harmonic_num + 1, f0.size(-1))).to(f0.device)
        for i in range(self.harmonic_num + 1):
            F_mat[:, i: i + 1, :] = f0 * (i + 1) / self.sampling_rate

        theta_mat = 2 * np.pi * (torch.cumsum(F_mat, dim=-1) % 1)
        u_dist = Uniform(low=-np.pi, high=np.pi)
        phase_vec = u_dist.sample(sample_shape=(f0.size(0), self.harmonic_num + 1, 1)).to(F_mat.device)
        phase_vec[:, 0, :] = 0

        # generate sine waveforms
        sine_waves = self.sine_amp * torch.sin(theta_mat + phase_vec)

        # generate uv signal
        uv = self._f02uv(f0)

        # noise: for unvoiced segments the std should be comparable to sine_amp
        #        (std = self.sine_amp / 3 -> max value ~ self.sine_amp);
        #        for voiced segments it is self.noise_std
        noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
        noise = noise_amp * torch.randn_like(sine_waves)

        # first: set the unvoiced part to 0 by uv
        # then: add the noise
        sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise
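
# Editor's note: a usage sketch under assumed shapes. Given an F0 contour of
# shape [B, 1, T] in Hz, the forward pass returns [B, harmonic_num + 1, T]
# sines (one row per harmonic), the voiced mask uv, and the injected noise:
#
#     gen = SineGen(samp_rate=22050, harmonic_num=8)
#     f0 = torch.full((1, 1, 22050), 220.0)  # one second of A3
#     sines, uv, noise = gen(f0)             # sines: [1, 9, 22050]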
         

class SourceModuleHnNSF(torch.nn.Module):
    """ SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that the amplitude of noise in unvoiced regions is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
    uv (batchsize, length, 1)
    """

    def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0):
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std

        # to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshod)

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x):
        """
        Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length, 1)
        """
        # source for harmonic branch
        with torch.no_grad():
            sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2))
            sine_wavs = sine_wavs.transpose(1, 2)
            uv = uv.transpose(1, 2)
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))

        # source for noise branch, in the same shape as uv
        noise = torch.randn_like(uv) * self.sine_amp / 3
        return sine_merge, noise, uv
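
# Editor's note: an illustrative sketch. The linear layer collapses all
# harmonics into a single excitation channel, so for F0 input of shape
# (B, T, 1) every output is (B, T, 1):
#
#     src = SourceModuleHnNSF(sampling_rate=22050, upsample_scale=256,
#                             harmonic_num=8)
#     sine_merge, noise, uv = src(torch.zeros(1, 22050, 1))  # all-unvoiced input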
         

class HiFTGenerator(nn.Module):
    """
    HiFTNet Generator: Neural Source Filter + ISTFTNet
    https://arxiv.org/abs/2309.09493
    """
    def __init__(
            self,
            in_channels: int = 80,
            base_channels: int = 512,
            nb_harmonics: int = 8,
            sampling_rate: int = 22050,
            nsf_alpha: float = 0.1,
            nsf_sigma: float = 0.003,
            nsf_voiced_threshold: float = 10,
            upsample_rates: tp.List[int] = [8, 8],
            upsample_kernel_sizes: tp.List[int] = [16, 16],
            istft_params: tp.Dict[str, int] = {"n_fft": 16, "hop_len": 4},
            resblock_kernel_sizes: tp.List[int] = [3, 7, 11],
            resblock_dilation_sizes: tp.List[tp.List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
            source_resblock_kernel_sizes: tp.List[int] = [7, 11],
            source_resblock_dilation_sizes: tp.List[tp.List[int]] = [[1, 3, 5], [1, 3, 5]],
            lrelu_slope: float = 0.1,
            audio_limit: float = 0.99,
            f0_predictor: torch.nn.Module = None,
    ):
        super(HiFTGenerator, self).__init__()

        self.out_channels = 1
        self.nb_harmonics = nb_harmonics
        self.sampling_rate = sampling_rate
        self.istft_params = istft_params
        self.lrelu_slope = lrelu_slope
        self.audio_limit = audio_limit

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sampling_rate,
            upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"],
            harmonic_num=nb_harmonics,
            sine_amp=nsf_alpha,
            add_noise_std=nsf_sigma,
            voiced_threshod=nsf_voiced_threshold)
        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"])

        self.conv_pre = weight_norm(
            Conv1d(in_channels, base_channels, 7, 1, padding=3)
        )

        # Up
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        base_channels // (2**i),
                        base_channels // (2**(i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        # Down
        self.source_downs = nn.ModuleList()
        self.source_resblocks = nn.ModuleList()
        downsample_rates = [1] + upsample_rates[::-1][:-1]
        downsample_cum_rates = np.cumprod(downsample_rates)
        for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes,
                                          source_resblock_dilation_sizes)):
            if u == 1:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1)
                )
            else:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2))
                )

            self.source_resblocks.append(
                ResBlock(base_channels // (2 ** (i + 1)), k, d)
            )

        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = base_channels // (2**(i + 1))
            for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(ResBlock(ch, k, d))

        self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.reflection_pad = nn.ReflectionPad1d((1, 0))
        self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32))
        self.f0_predictor = f0_predictor

    def _f02source(self, f0: torch.Tensor) -> torch.Tensor:
        f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2)  # bs, n, t

        har_source, _, _ = self.m_source(f0)
        return har_source.transpose(1, 2)

    def _stft(self, x):
        spec = torch.stft(
            x,
            self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device),
            return_complex=True)
        spec = torch.view_as_real(spec)  # [B, F, TT, 2]
        return spec[..., 0], spec[..., 1]

    def _istft(self, magnitude, phase):
        magnitude = torch.clip(magnitude, max=1e2)
        real = magnitude * torch.cos(phase)
        img = magnitude * torch.sin(phase)
        inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device))
        return inverse_transform
         
     | 
| 337 | 
         
            +
             
     | 
| 338 | 
         
            +
                def forward(self, x: torch.Tensor) -> torch.Tensor:
         
     | 
| 339 | 
         
            +
                    f0 = self.f0_predictor(x)
         
     | 
| 340 | 
         
            +
                    s = self._f02source(f0)
         
     | 
| 341 | 
         
            +
             
     | 
| 342 | 
         
            +
                    s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
         
     | 
| 343 | 
         
            +
                    s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1)
         
     | 
| 344 | 
         
            +
             
     | 
| 345 | 
         
            +
                    x = self.conv_pre(x)
         
     | 
| 346 | 
         
            +
                    for i in range(self.num_upsamples):
         
     | 
| 347 | 
         
            +
                        x = F.leaky_relu(x, self.lrelu_slope)
         
     | 
| 348 | 
         
            +
                        x = self.ups[i](x)
         
     | 
| 349 | 
         
            +
             
     | 
| 350 | 
         
            +
                        if i == self.num_upsamples - 1:
         
     | 
| 351 | 
         
            +
                            x = self.reflection_pad(x)
         
     | 
| 352 | 
         
            +
             
     | 
| 353 | 
         
            +
                        # fusion
         
     | 
| 354 | 
         
            +
                        si = self.source_downs[i](s_stft)
         
     | 
| 355 | 
         
            +
                        si = self.source_resblocks[i](si)
         
     | 
| 356 | 
         
            +
                        x = x + si
         
     | 
| 357 | 
         
            +
             
     | 
| 358 | 
         
            +
                        xs = None
         
     | 
| 359 | 
         
            +
                        for j in range(self.num_kernels):
         
     | 
| 360 | 
         
            +
                            if xs is None:
         
     | 
| 361 | 
         
            +
                                xs = self.resblocks[i * self.num_kernels + j](x)
         
     | 
| 362 | 
         
            +
                            else:
         
     | 
| 363 | 
         
            +
                                xs += self.resblocks[i * self.num_kernels + j](x)
         
     | 
| 364 | 
         
            +
                        x = xs / self.num_kernels
         
     | 
| 365 | 
         
            +
             
     | 
| 366 | 
         
            +
                    x = F.leaky_relu(x)
         
     | 
| 367 | 
         
            +
                    x = self.conv_post(x)
         
     | 
| 368 | 
         
            +
                    magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :])
         
     | 
| 369 | 
         
            +
                    phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :])  # actually, sin is redundancy
         
     | 
| 370 | 
         
            +
             
     | 
| 371 | 
         
            +
                    x = self._istft(magnitude, phase)
         
     | 
| 372 | 
         
            +
                    x = torch.clamp(x, -self.audio_limit, self.audio_limit)
         
     | 
| 373 | 
         
            +
                    return x
         
     | 
| 374 | 
         
            +
             
     | 
| 375 | 
         
            +
                def remove_weight_norm(self):
         
     | 
| 376 | 
         
            +
                    print('Removing weight norm...')
         
     | 
| 377 | 
         
            +
                    for l in self.ups:
         
     | 
| 378 | 
         
            +
                        remove_weight_norm(l)
         
     | 
| 379 | 
         
            +
                    for l in self.resblocks:
         
     | 
| 380 | 
         
            +
                        l.remove_weight_norm()
         
     | 
| 381 | 
         
            +
                    remove_weight_norm(self.conv_pre)
         
     | 
| 382 | 
         
            +
                    remove_weight_norm(self.conv_post)
         
     | 
| 383 | 
         
            +
                    self.source_module.remove_weight_norm()
         
     | 
| 384 | 
         
            +
                    for l in self.source_downs:
         
     | 
| 385 | 
         
            +
                        remove_weight_norm(l)
         
     | 
| 386 | 
         
            +
                    for l in self.source_resblocks:
         
     | 
| 387 | 
         
            +
                        l.remove_weight_norm()
         
     | 
| 388 | 
         
            +
             
     | 
| 389 | 
         
            +
                @torch.inference_mode()
         
     | 
| 390 | 
         
            +
                def inference(self, mel: torch.Tensor) -> torch.Tensor:
         
     | 
| 391 | 
         
            +
                    return self.forward(x=mel)
         
     | 
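Note on the `_stft`/`_istft` pair above: `forward` treats the two halves of the `conv_post` output as log-magnitude (`exp`) and phase, then inverts with `torch.istft`. A self-contained round-trip sketch of that inversion, using the same periodic Hann window convention; the `n_fft`/`hop_len` values below are illustrative, not read from this Space's config:

import numpy as np
import torch
from scipy.signal import get_window

# illustrative sizes only (the real values live in the model's istft_params)
n_fft, hop_len = 16, 4
window = torch.from_numpy(get_window("hann", n_fft, fftbins=True).astype(np.float32))

x = torch.randn(1, 1024)  # dummy waveform batch
spec = torch.stft(x, n_fft, hop_len, n_fft, window=window, return_complex=True)
magnitude, phase = spec.abs(), spec.angle()

# same reconstruction as _istft(): rebuild the complex spectrum, then invert
real, imag = magnitude * torch.cos(phase), magnitude * torch.sin(phase)
y = torch.istft(torch.complex(real, imag), n_fft, hop_len, n_fft, window=window)

assert torch.allclose(x[:, :y.shape[1]], y, atol=1e-4)  # near-exact round trip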
    	
cosyvoice/llm/__pycache__/llm.cpython-310.pyc
ADDED
Binary file (6.31 kB)

cosyvoice/llm/__pycache__/llm.cpython-38.pyc
ADDED
Binary file (6.22 kB)
    	
cosyvoice/llm/llm.py
ADDED
@@ -0,0 +1,206 @@
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional, Union
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence, unpad_sequence
from cosyvoice.utils.common import IGNORE_ID
from cosyvoice.transformer.label_smoothing_loss import LabelSmoothingLoss
from cosyvoice.utils.common import th_accuracy


class TransformerLM(torch.nn.Module):
    def __init__(
            self,
            text_encoder_input_size: int,
            llm_input_size: int,
            llm_output_size: int,
            text_token_size: int,
            speech_token_size: int,
            text_encoder: torch.nn.Module,
            llm: torch.nn.Module,
            length_normalized_loss: bool = True,
            lsm_weight: float = 0.0,
            spk_embed_dim: int = 192,
    ):
        super().__init__()
        self.llm_input_size = llm_input_size
        self.speech_token_size = speech_token_size
        # 1. build text token inputs related modules
        self.text_embedding = torch.nn.Embedding(text_token_size, text_encoder_input_size)
        self.text_encoder = text_encoder
        self.text_encoder_affine_layer = nn.Linear(
            self.text_encoder.output_size(),
            llm_input_size
        )

        # 2. build speech token language model related modules
        self.sos_eos = 0
        self.task_id = 1
        self.llm_embedding = torch.nn.Embedding(2, llm_input_size)
        self.llm = llm
        self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 1)
        self.criterion_ce = LabelSmoothingLoss(
            size=speech_token_size + 1,
            padding_idx=IGNORE_ID,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss,
        )

        # 3. [Optional] build speech token related modules
        self.speech_embedding = torch.nn.Embedding(speech_token_size, llm_input_size)
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, llm_input_size)

    def encode(
            self,
            text: torch.Tensor,
            text_lengths: torch.Tensor,
    ):
        encoder_out, encoder_mask = self.text_encoder(text, text_lengths, decoding_chunk_size=1, num_decoding_left_chunks=-1)
        encoder_out_lens = encoder_mask.squeeze(1).sum(1)
        encoder_out = self.text_encoder_affine_layer(encoder_out)
        return encoder_out, encoder_out_lens

    def pad_unpad_sequence(self, sos_eos_emb, embedding, text_token, text_token_len, task_id_emb, speech_token, speech_token_len):
        text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True)
        speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True)
        lm_input = [torch.concat([sos_eos_emb.squeeze(dim=0), embedding[i], text_token[i], task_id_emb.squeeze(dim=0), speech_token[i]], dim=0) for i in range(len(text_token))]
        lm_input_len = torch.tensor([i.size(0) for i in lm_input], dtype=torch.int32)
        lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID)
        return lm_input, lm_input_len

    def forward(
            self,
            batch: dict,
            device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        """
        Args:
            batch: dict with keys
                text_token: (B, L)
                text_token_len: (B,)
                speech_token: (B, T)
                speech_token_len: (B,)
                embedding: (B, spk_embed_dim)
        """
        text_token = batch['text_token'].to(device)
        text_token_len = batch['text_token_len'].to(device)
        speech_token = batch['speech_token'].to(device)
        speech_token_len = batch['speech_token_len'].to(device)
        embedding = batch['embedding'].to(device)

        # 1. prepare llm_target
        lm_target = [torch.tensor([IGNORE_ID] * (2 + text_token_len[i]) + speech_token[i, :speech_token_len[i]].tolist() + [self.speech_token_size]) for i in range(text_token.size(0))]
        lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID).to(device)

        # 2. encode text_token
        text_token = self.text_embedding(text_token)
        text_token, text_token_len = self.encode(text_token, text_token_len)

        # 3. embedding projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)
        embedding = embedding.unsqueeze(1)

        # 4. sos_eos and task_id
        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)

        # 5. encode speech_token
        speech_token = self.speech_embedding(speech_token)

        # 6. unpad and pad
        lm_input, lm_input_len = self.pad_unpad_sequence(sos_eos_emb, embedding, text_token, text_token_len, task_id_emb, speech_token, speech_token_len)

        # 7. run lm forward
        lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device))
        logits = self.llm_decoder(lm_output)
        loss = self.criterion_ce(logits, lm_target)
        acc = th_accuracy(logits.view(-1, self.speech_token_size + 1), lm_target, ignore_label=IGNORE_ID)
        return {'loss': loss, 'acc': acc}

    def sampling_ids(
            self,
            weighted_scores: torch.Tensor,
            sampling: Union[bool, int, float] = True,
            beam_size: int = 1,
            ignore_eos: bool = True,
    ):
        while True:
            prob, indices = weighted_scores.softmax(dim=-1).topk(sampling)
            top_ids = prob.multinomial(beam_size, replacement=True)
            top_ids = indices[top_ids]
            if (not ignore_eos) or (self.speech_token_size not in top_ids):
                break
        return top_ids

    @torch.inference_mode()
    def inference(
            self,
            text: torch.Tensor,
            text_len: torch.Tensor,
            prompt_text: torch.Tensor,
            prompt_text_len: torch.Tensor,
            prompt_speech_token: torch.Tensor,
            prompt_speech_token_len: torch.Tensor,
            embedding: torch.Tensor,
            beam_size: int = 1,
            sampling: int = 25,
            max_token_text_ratio: float = 20,
            min_token_text_ratio: float = 2,
    ) -> torch.Tensor:
        device = text.device
        text = torch.concat([prompt_text, text], dim=1)
        text_len += prompt_text_len
        text = self.text_embedding(text)

        # 1. encode text
        text, text_len = self.encode(text, text_len)

        # 2. encode embedding
        if embedding.shape[0] != 0:
            embedding = F.normalize(embedding, dim=1)
            embedding = self.spk_embed_affine_layer(embedding)
            embedding = embedding.unsqueeze(dim=1)
        else:
            embedding = torch.zeros(1, 0, self.llm_input_size).to(device)

        # 3. concat llm_input
        sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1)
        task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1)
        if prompt_speech_token_len != 0:
            prompt_speech_token_emb = self.speech_embedding(prompt_speech_token)
        else:
            prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size).to(device)
        lm_input = torch.concat([sos_eos_emb, embedding, text, task_id_emb, prompt_speech_token_emb], dim=1)

        # 4. cal min/max_length
        min_len = int((text_len - prompt_text_len) * min_token_text_ratio)
        max_len = int((text_len - prompt_text_len) * max_token_text_ratio)

        # 5. step by step decode
        out_tokens = []
        offset = 0
        att_cache, cnn_cache = torch.zeros((0, 0, 0, 0), device=lm_input.device), torch.zeros((0, 0, 0, 0), device=lm_input.device)
        for i in range(max_len):
            y_pred, att_cache, cnn_cache = self.llm.forward_chunk(lm_input, offset=0, required_cache_size=-1, att_cache=att_cache, cnn_cache=cnn_cache,
                                                                  att_mask=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]), device=lm_input.device)).to(torch.bool))
            logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1)
            top_ids = self.sampling_ids(logp.squeeze(dim=0), sampling, beam_size, ignore_eos=i < min_len).item()
            if top_ids == self.speech_token_size:
                break
            out_tokens.append(top_ids)
            offset += lm_input.size(1)
            lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1)

        return torch.tensor([out_tokens], dtype=torch.int64, device=device)
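For reference, each step of the decode loop above draws one speech token with `sampling_ids`: softmax the logits, keep the top-`sampling` candidates, draw one with `multinomial`, and resample whenever the EOS id (`speech_token_size`) comes up before `min_len` tokens have been emitted; the loop runs at least 2x and at most 20x the new text length under the default ratios. A standalone sketch of that draw, with an invented vocabulary size:

import torch

speech_token_size = 10          # hypothetical; EOS id == speech_token_size
sampling, beam_size = 3, 1      # top-k width and number of draws

def sampling_ids(weighted_scores, ignore_eos=True):
    # same loop as TransformerLM.sampling_ids: reject EOS draws while ignore_eos holds
    while True:
        prob, indices = weighted_scores.softmax(dim=-1).topk(sampling)
        top_ids = indices[prob.multinomial(beam_size, replacement=True)]
        if (not ignore_eos) or (speech_token_size not in top_ids):
            return top_ids

logp = torch.randn(speech_token_size + 1)   # fake per-step logits
print(sampling_ids(logp).item())            # a non-EOS id from the top-3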
    	
cosyvoice/transformer/__init__.py
ADDED
File without changes

cosyvoice/transformer/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (169 Bytes)

cosyvoice/transformer/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (170 Bytes)