Delete app.py
Browse files
app.py
DELETED
@@ -1,141 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import torch
|
3 |
-
import commons
|
4 |
-
import utils
|
5 |
-
from models import SynthesizerTrn
|
6 |
-
from text.symbols import symbols
|
7 |
-
from text import text_to_sequence
|
8 |
-
import random
|
9 |
-
import os
|
10 |
-
import datetime
|
11 |
-
import numpy as np
|
12 |
-
|
13 |
-
|
14 |
-
def get_text(text, hps):
    """Convert raw text to a LongTensor of symbol ids according to *hps*."""
    sequence = text_to_sequence(text, hps.data.text_cleaners)
    # Optionally interleave a blank (0) token between symbols, as configured.
    if hps.data.add_blank:
        sequence = commons.intersperse(sequence, 0)
    return torch.LongTensor(sequence)
|
20 |
-
|
21 |
-
|
22 |
-
def tts(txt, emotion, index, hps, net_g, random_emotion_root):
    """Synthesize speech for *txt* with a chosen emotion embedding.

    Args:
        txt: input text to synthesize.
        emotion: path prefix of a reference-emotion file (``<emotion>.emo.npy``
            must exist on disk), or the literal string ``"random_sample"`` to
            draw a random embedding file from *random_emotion_root*.
        index: speaker id handed to the model as ``sid``.
        hps: hyper-parameters loaded from the model config file.
        net_g: the synthesizer network (SynthesizerTrn) used for inference.
        random_emotion_root: directory containing ``*.emo.npy`` embeddings.

    Returns:
        A 1-D float numpy array holding the generated waveform.

    Raises:
        ValueError: if *emotion* is neither an existing ``.emo.npy`` prefix
            nor ``"random_sample"``.
    """
    stn_tst = get_text(txt, hps)
    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
        sid = torch.LongTensor([index])  # appoint character
        if os.path.exists(f"{emotion}.emo.npy"):
            emo = torch.FloatTensor(np.load(f"{emotion}.emo.npy")).unsqueeze(0)
        elif emotion == "random_sample":
            # Keep sampling until we pick a file that still exists on disk.
            while True:
                rand_wav = random.sample(os.listdir(random_emotion_root), 1)[0]
                if os.path.exists(f"{random_emotion_root}/{rand_wav}"):
                    break
            emo = torch.FloatTensor(np.load(f"{random_emotion_root}/{rand_wav}")).unsqueeze(0)
            print(f"{random_emotion_root}/{rand_wav}")
        else:
            # BUG FIX: this branch used to only print a warning and fall
            # through with `emo` unbound, crashing with NameError at infer().
            # Fail fast with a clear error instead.
            raise ValueError("emotion参数不正确")

        audio = \
            net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=0.667,
                        noise_scale_w=0.8, length_scale=1, emo=emo)[0][
                0, 0].data.float().numpy()
    return audio
|
46 |
-
|
47 |
-
|
48 |
-
def random_generate(txt, index, hps, net_g, random_emotion_root):
    """Synthesize *txt* using a randomly sampled emotion embedding."""
    return tts(
        txt,
        emotion='random_sample',
        index=index,
        hps=hps,
        net_g=net_g,
        random_emotion_root=random_emotion_root,
    )
|
53 |
-
|
54 |
-
|
55 |
-
def charaterRoot(name):
    """Map a character display name to (emotion root directory, speaker id).

    Keeps the historical side effect of updating the module-level
    ``random_emotion_root`` global.  (Function name typo preserved for
    compatibility with existing callers.)

    Args:
        name: character display name, e.g. '九条都'.

    Returns:
        Tuple of (random_emotion_root, index).

    Raises:
        ValueError: if *name* is not a known character.  (Previously an
            unknown name crashed with UnboundLocalError at the return.)
    """
    global random_emotion_root
    roots = {
        '九条都': ("./9nineEmo/my", 0),
        '新海天': ("./9nineEmo/sr", 1),
        '结城希亚': ("./9nineEmo/na", 2),
        '蕾娜': ("./9nineEmo/gt", 3),
        '索菲': ("./9nineEmo/sf", 4),
    }
    if name not in roots:
        raise ValueError(f"unknown character: {name}")
    random_emotion_root, index = roots[name]
    return random_emotion_root, index
|
73 |
-
|
74 |
-
|
75 |
-
def configSelect(config):
    """Return (config_file, checkpoint_path) for the requested model flavour.

    Keeps the historical side effect of updating the module-level
    ``config_file`` / ``checkPonit`` globals.

    Args:
        config: either 'mul' (multi-speaker model) or 'single'.

    Returns:
        Tuple of (config_file, checkpoint_path).

    Raises:
        ValueError: if *config* is not 'mul' or 'single'.  (Previously an
            unknown value crashed with UnboundLocalError at the return.)
    """
    global checkPonit, config_file
    options = {
        'mul': ("./configs/9nine_multi.json", "logs/9nineM/G_252000.pth"),
        'single': ("./configs/sora.json", "logs/sora/G_341200.pth"),
    }
    if config not in options:
        raise ValueError(f"unknown config: {config}")
    config_file, checkPonit = options[config]
    return config_file, checkPonit
|
84 |
-
|
85 |
-
|
86 |
-
def runVits(name, config, txt):
    """Build the synthesizer for *name*/*config* and synthesize *txt*.

    Args:
        name: character display name (see charaterRoot).
        config: model flavour, 'mul' or 'single' (see configSelect).
        txt: text to synthesize.

    Returns:
        (sampling_rate, audio) tuple suitable for a gradio Audio output.
    """
    # Removed the redundant `checkPonit = checkPoint` aliasing of the
    # original — one consistently named local is enough.
    config_file, check_point = configSelect(config)
    random_emotion_root, index = charaterRoot(name=name)
    hps = utils.get_hparams_from_file(config_file)
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model)
    _ = net_g.eval()

    # NOTE(review): the model is rebuilt and the checkpoint reloaded on every
    # request; fine for a demo, but cache these if latency matters.
    _ = utils.load_checkpoint(check_point, net_g, None)
    audio = random_generate(txt=txt, index=index, random_emotion_root=random_emotion_root,
                            net_g=net_g, hps=hps)
    return (hps.data.sampling_rate, audio)
|
103 |
-
|
104 |
-
|
105 |
-
def nineMul(name, txt):
    """Gradio handler: synthesize *txt* with the multi-speaker model."""
    audio = runVits(name, 'mul', txt)
    return "multiple model success", audio
|
109 |
-
|
110 |
-
|
111 |
-
def nineSingle(name,txt):
    """Gradio handler for the single-model tab.

    NOTE(review): this passes 'mul' to runVits, so the "single model" tab
    actually serves the multi-speaker checkpoint.  The 'single' branch of
    configSelect is unused here — possibly intentional; confirm before
    changing.
    """
    # name = "新海天"
    audio = runVits(name, 'mul', txt)
    return "single model success", audio
|
116 |
-
|
117 |
-
# --- Gradio UI -------------------------------------------------------------
app = gr.Blocks()
with app:
    with gr.Tabs():
        with gr.TabItem("9nine multiple model"):
            character = gr.Radio(['九条都', '新海天', '结城希亚', '蕾娜', '索菲'], label='character',
                                 info="select character you want")

            text = gr.TextArea(label="input content", value="祭りに行っただよね、知らない女の子と一緒にいて。")

            # BUG FIX: 'privite' is not a valid gradio Button variant
            # ('primary' / 'secondary' / 'stop'); 'primary' was intended.
            submit = gr.Button("generate", variant='primary')
            message = gr.Textbox(label="Message")
            audio = gr.Audio(label="output")
            submit.click(nineMul, [character, text], [message, audio])
        with gr.TabItem("9nine single model"):
            character = gr.Radio(['新海天'], label='character',
                                 info="select character you want")

            text = gr.TextArea(label="input content", value="祭りに行っただよね、知らない女の子と一緒にいて。")

            submit = gr.Button("generate", variant='primary')
            message = gr.Textbox(label="Message")
            audio = gr.Audio(label="output")
            submit.click(nineSingle, [character, text], [message, audio])

app.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|