# Gradio demo: VITS text-to-speech for Date A Live characters.
# (Removed Hugging Face page-scrape residue that was accidentally pasted here;
# it was not Python and broke the file.)
import gradio as gr
# import matplotlib.pyplot as plt
import logging
# logger = logging.getLogger(__name__)
import os
import json
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import commons
import utils
from data_utils import TextAudioLoader, TextAudioCollate, TextAudioSpeakerLoader, TextAudioSpeakerCollate
from models import SynthesizerTrn
from text.symbols import symbols
from text import text_to_sequence
import time
def get_text(text, hps):
    """Convert raw text into a 1-D LongTensor of symbol ids.

    Applies the cleaners configured in ``hps.data.text_cleaners`` and, when
    ``hps.data.add_blank`` is set, interleaves blank tokens (id 0) between
    symbols as the VITS training setup expects.
    """
    sequence = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        sequence = commons.intersperse(sequence, 0)
    return torch.LongTensor(sequence)
def load_model(config_path, pth_path):
    """Build the synthesizer, load a checkpoint, and switch to eval mode.

    Publishes ``dev``, ``hps_ms`` and ``net_g`` as module globals so that
    ``infer`` can use them. Returns a status string for display in the UI.
    """
    global dev, hps_ms, net_g
    use_cuda = torch.cuda.is_available()
    dev = torch.device("cuda:0" if use_cuda else "cpu")
    hps_ms = utils.get_hparams_from_file(config_path)
    model = SynthesizerTrn(
        len(symbols),
        hps_ms.data.filter_length // 2 + 1,
        hps_ms.train.segment_size // hps_ms.data.hop_length,
        **hps_ms.model,
    )
    net_g = model.to(dev)
    net_g.eval()
    utils.load_checkpoint(pth_path, net_g)
    return f"{pth_path}加载成功!"
def infer(c_id, text):
    """Synthesize speech for speaker ``c_id`` from ``text``.

    Requires ``load_model`` to have been called first (reads the module
    globals ``hps_ms``, ``dev`` and ``net_g``). Returns the waveform as a
    float numpy array.
    """
    sequence = get_text(text, hps_ms)
    with torch.no_grad():
        inputs = sequence.unsqueeze(0).to(dev)
        lengths = torch.LongTensor([sequence.size(0)]).to(dev)
        speaker = torch.LongTensor([c_id]).to(dev)
        result = net_g.infer(
            inputs, lengths, sid=speaker,
            noise_scale=.667, noise_scale_w=0.8, length_scale=1,
        )
        audio = result[0][0, 0].data.cpu().float().numpy()
    return audio
# Default checkpoint and hyper-parameter config used by the "load model" button.
pth_path = "model/G_70000.pth"
config_path = "configs/config.json"
# Display name -> speaker id for the multi-speaker VITS model.
# NOTE(review): ids start at 1, while a Gradio dropdown with type="index"
# yields 0-based positions — callers must add 1 before passing to infer().
character_dict = {
    "十香": 1,
    "折纸": 2,
    "狂三": 3,
    "四糸乃": 4,
    "琴里": 5,
    "夕弦": 6,
    "耶俱矢": 7,
    "美九": 8,
    "凛祢": 9,
    "凛绪": 10,
    "鞠亚": 11,
    "鞠奈": 12,
    "真那": 13,
}
# --- Gradio UI -------------------------------------------------------------
app = gr.Blocks()
with app:
    gr.HTML("""
            <div
                style="width: 100%;padding-top:116px;background-image: url('https://huggingface.co/spaces/tumuyan/vits-miki/resolve/main/bg.webp');;background-size:cover">
                <div>
                    <div>
                        <h4 class="h-sign" style="font-size: 12px;">
                            这是一个使用<a href="https://github.com/thesupersonic16/DALTools" target="_blank">thesupersonic16/DALTools</a>提供的解包音频作为数据集,
                            使用<a href="https://github.com/jaywalnut310/vits" target="_blank">VITS</a>技术训练的语音合成demo。
                        </h4>
                    </div>
                </div>
            </div>
            """)
    tmp = gr.Markdown("")
    with gr.Tabs():
        with gr.TabItem("Basic"):
            # FIX: gr.Raw() does not exist in Gradio — the horizontal layout
            # container is gr.Row().
            with gr.Row():
                model_submit = gr.Button("加载/重载模型", variant="primary")
                output_1 = gr.Markdown("")
            with gr.Row():
                tts_input1 = gr.TextArea(
                    label="请输入文本(仅支持日语)", value="你好,世界!")
                # FIX: choices was `[character_dict.keys]` — a single-element
                # list holding a bound method, not the character names.
                # NOTE(review): dropped `optional=False`; it is not a valid
                # Dropdown kwarg in current Gradio releases — confirm against
                # the pinned gradio version.
                tts_input2 = gr.Dropdown(
                    choices=list(character_dict.keys()),
                    type="index", label="选择角色")
                tts_submit = gr.Button("用文本合成", variant="primary")
                tts_output2 = gr.Audio(label="Output")
            # FIX: click() inputs must be Gradio components; the original
            # passed plain path strings. Close over the module-level paths.
            model_submit.click(
                lambda: load_model(config_path, pth_path), [], [output_1])
            # FIX: `tts_input2+1` tried to add an int to a component (a
            # TypeError). The dropdown yields a 0-based index while speaker
            # ids in character_dict start at 1, so offset in the callback.
            tts_submit.click(
                lambda idx, text: infer(idx + 1, text),
                [tts_input2, tts_input1], [tts_output2])
    gr.HTML("""
            <div style="text-align:center">
                仅供学习交流,不可用于商业或非法用途
                <br/>
                使用本项目模型直接或间接生成的音频,必须声明由AI技术或VITS技术合成
            </div>
            """)
app.launch()