maxmax20160403 committed
Commit cdde032 · 1 Parent(s): 7c30511

Upload app.py

Files changed (1)
  1. app.py +215 -0
app.py ADDED
@@ -0,0 +1,215 @@
+ import sys, os
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+ import torch
+
+ from omegaconf import OmegaConf
+ from pitch import load_csv_pitch
+ from grad.utils import fix_len_compatibility
+ from grad.model import GradTTS
+ from bigvgan.model.generator import Generator
+
+ import gradio as gr
+ import numpy as np
+ import soundfile
+ import librosa
+ import logging
+
+ # Silence noisy third-party loggers.
+ logging.getLogger('numba').setLevel(logging.WARNING)
+ logging.getLogger('markdown_it').setLevel(logging.WARNING)
+ logging.getLogger('urllib3').setLevel(logging.WARNING)
+ logging.getLogger('matplotlib').setLevel(logging.WARNING)
+
+
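+ # Copy weights that match the model's state dict from a checkpoint,
+ # keeping the freshly initialized value for any key the checkpoint lacks.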
+ def load_gvc_model(checkpoint_path, model):
+     assert os.path.isfile(checkpoint_path)
+     checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
+     saved_state_dict = checkpoint_dict["model"]
+     state_dict = model.state_dict()
+     new_state_dict = {}
+     for k, v in state_dict.items():
+         try:
+             new_state_dict[k] = saved_state_dict[k]
+         except KeyError:
+             print("%s is not in the checkpoint" % k)
+             new_state_dict[k] = v
+     model.load_state_dict(new_state_dict)
+     return model
+
+
+ def load_bigv_model(checkpoint_path, model):
+     assert os.path.isfile(checkpoint_path)
+     checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
+     saved_state_dict = checkpoint_dict["model_g"]
+     state_dict = model.state_dict()
+     new_state_dict = {}
+     for k, v in state_dict.items():
+         try:
+             new_state_dict[k] = saved_state_dict[k]
+         except KeyError:
+             print("%s is not in the checkpoint" % k)
+             new_state_dict[k] = v
+     model.load_state_dict(new_state_dict)
+     return model
+
+
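+ # Run Grad-TTS diffusion inference on one chunk: zero-pad the inputs to a
+ # decoder-compatible length, then crop the output back to the true length.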
+ @torch.no_grad()
+ def gvc_main(device, model, _vec, _pit, spk, temperature=1.015):
+     l_vec = _vec.shape[0]
+     d_vec = _vec.shape[1]
+     lengths_fix = fix_len_compatibility(l_vec)
+     lengths = torch.LongTensor([l_vec]).to(device)
+     vec = torch.zeros((1, lengths_fix, d_vec), dtype=torch.float32).to(device)
+     pit = torch.zeros((1, lengths_fix), dtype=torch.float32).to(device)
+     vec[0, :l_vec, :] = _vec
+     pit[0, :l_vec] = _pit
+     y_enc, y_dec = model(lengths, vec, pit, spk, n_timesteps=10, temperature=temperature)
+     y_dec = y_dec.squeeze(0)
+     y_dec = y_dec[:, :l_vec]
+     return y_dec
+
+
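+ # Full conversion pipeline: extract content vectors and pitch with the
+ # bundled scripts, synthesize mel with Grad-TTS, then vocode with NSF-BigVGAN.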
+ def svc_change(argswav, argsspk):
+     argsvec = "svc_tmp.ppg.npy"
+     os.system(f"python hubert/inference.py -w {argswav} -v {argsvec}")
+     argspit = "svc_tmp.pit.npy"
+     os.system(f"python pitch/inference.py -w {argswav} -p {argspit}")
+
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+     hps = OmegaConf.load('configs/base.yaml')
+
+     print('Initializing Grad-TTS...')
+     model = GradTTS(hps.grad.n_mels, hps.grad.n_vecs, hps.grad.n_pits, hps.grad.n_spks, hps.grad.n_embs,
+                     hps.grad.n_enc_channels, hps.grad.filter_channels,
+                     hps.grad.dec_dim, hps.grad.beta_min, hps.grad.beta_max, hps.grad.pe_scale)
+     print('Number of encoder parameters = %.2fm' % (model.encoder.nparams / 1e6))
+     print('Number of decoder parameters = %.2fm' % (model.decoder.nparams / 1e6))
+
+     load_gvc_model('grad_pretrain/gvc.pretrain.pth', model)
+     model.eval()
+     model.to(device)
+
+     spk = np.load(argsspk)
+     spk = torch.FloatTensor(spk)
+
+     vec = np.load(argsvec)
+     vec = np.repeat(vec, 2, 0)  # upsample content vectors to the mel frame rate
+     vec = torch.FloatTensor(vec)
+
+     pit = load_csv_pitch(argspit)
+     pit = np.array(pit)
+     pit = torch.FloatTensor(pit)
+
+     # Trim pitch and content vectors to a common length.
+     len_pit = pit.size()[0]
+     len_vec = vec.size()[0]
+     len_min = min(len_pit, len_vec)
+     pit = pit[:len_min]
+     vec = vec[:len_min, :]
+
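+     # Convert in overlapping chunks so long inputs fit in memory;
+     # hop_frame frames of context are trimmed from each side after inference.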
+     with torch.no_grad():
+         spk = spk.unsqueeze(0).to(device)
+
+         all_frame = len_min
+         hop_frame = 8
+         out_chunk = 2400  # 24 s of mel frames per chunk
+         out_index = 0
+         mel = None
+
+         while out_index < all_frame:
+             if out_index == 0:  # start frame
+                 cut_s = 0
+                 cut_s_out = 0
+             else:
+                 cut_s = out_index - hop_frame
+                 cut_s_out = hop_frame
+
+             if out_index + out_chunk + hop_frame > all_frame:  # end frame
+                 cut_e = all_frame
+                 cut_e_out = -1
+             else:
+                 cut_e = out_index + out_chunk + hop_frame
+                 cut_e_out = -1 * hop_frame
+
+             sub_vec = vec[cut_s:cut_e, :].to(device)
+             sub_pit = pit[cut_s:cut_e].to(device)
+
+             sub_out = gvc_main(device, model, sub_vec, sub_pit, spk, 1.015)
+             sub_out = sub_out[:, cut_s_out:cut_e_out]  # trim the overlap margins
+
+             out_index = out_index + out_chunk
+             if mel is None:
+                 mel = sub_out
+             else:
+                 mel = torch.cat((mel, sub_out), -1)
+             if cut_e == all_frame:
+                 break
+
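+     # Release the acoustic model before loading the NSF-BigVGAN vocoder.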
+     del model
+     del hps
+     del spk
+     del vec
+     del sub_vec
+     del sub_pit
+     del sub_out
+
+     hps = OmegaConf.load('./bigvgan/configs/nsf_bigvgan.yaml')
+     model = Generator(hps)
+     load_bigv_model('./bigvgan_pretrain/nsf_bigvgan_pretrain_32K.pth', model)
+     model.eval()
+     model.to(device)
+
+     # Align pitch and mel lengths before vocoding.
+     len_pit = pit.size()[0]
+     len_mel = mel.size()[1]
+     len_min = min(len_pit, len_mel)
+     pit = pit[:len_min]
+     mel = mel[:, :len_min]
+
+     with torch.no_grad():
+         mel = mel.unsqueeze(0).to(device)
+         pit = pit.unsqueeze(0).to(device)
+         audio = model.inference(mel, pit)
+         audio = audio.cpu().detach().numpy()
+
+         pitwav = model.pitch2wav(pit)  # pitch excitation signal (not returned)
+         pitwav = pitwav.cpu().detach().numpy()
+
+     return audio
+
+
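+ # Gradio handler: validate and normalize the upload, resample to 16 kHz,
+ # cap the length at 100 s, then run the conversion pipeline.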
+ def svc_main(sid, input_audio):
+     if input_audio is None:
+         return "You need to upload an audio file", None
+     sampling_rate, audio = input_audio
+     # Normalize integer PCM from Gradio to float32 in [-1, 1].
+     audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+     if len(audio.shape) > 1:
+         audio = librosa.to_mono(audio.transpose(1, 0))
+     if sampling_rate != 16000:
+         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+     if len(audio) > 16000 * 100:  # cap input at 100 seconds
+         audio = audio[:16000 * 100]
+     wav_path = "temp.wav"
+     soundfile.write(wav_path, audio, 16000, format="wav")
+     out_audio = svc_change(wav_path, f"configs/singers/singer00{sid}.npy")
+     return "Success", (32000, out_audio)
+
+
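+ # Minimal Gradio Blocks UI: singer selection, audio upload, and outputs.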
+ app = gr.Blocks()
+ with app:
+     with gr.Tabs():
+         with gr.TabItem("Grad-SVC"):
+             gr.Markdown(value="""
+                 Based on the open-source dataset Multi-Singer:
+
+                 https://github.com/Multi-Singer/Multi-Singer.github.io
+
+                 Built on diffusion technology
+                 """)
+             sid = gr.Dropdown(label="Singer timbre", choices=[
+                 "22", "33", "47", "51"], value="47")
+             vc_input3 = gr.Audio(label="Upload audio")
+             vc_submit = gr.Button("Convert", variant="primary")
+             vc_output1 = gr.Textbox(label="Status")
+             vc_output2 = gr.Audio(label="Converted audio")
+             vc_submit.click(svc_main, [sid, vc_input3], [vc_output1, vc_output2])
+
+ app.launch()