diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..83154ac15527935d5d435f6c8c086866be1caf68
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,38 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+doom/cover.png filter=lfs diff=lfs merge=lfs -text
+zenyatta/cover.png filter=lfs diff=lfs merge=lfs -text
+pretrained_models/doom/cover.png filter=lfs diff=lfs merge=lfs -text
+pretrained_models/zenyatta/cover.png filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fb07aa3eb740e5d6b84172cbf73d2523768269ee
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,382 @@
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+##
+## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
+
+# User-specific files
+*.rsuser
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+*.userprefs
+
+# Mono auto generated files
+mono_crash.*
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+[Ww][Ii][Nn]32/
+[Aa][Rr][Mm]/
+[Aa][Rr][Mm]64/
+bld/
+[Bb]in/
+[Oo]bj/
+[Oo]ut/
+[Ll]og/
+[Ll]ogs/
+
+# Visual Studio 2015/2017 cache/options directory
+.vs/
+# Uncomment if you have tasks that create the project's static files in wwwroot
+#wwwroot/
+
+# Visual Studio 2017 auto generated files
+Generated\ Files/
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+# NUnit
+*.VisualState.xml
+TestResult.xml
+nunit-*.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+# Benchmark Results
+BenchmarkDotNet.Artifacts/
+
+# .NET Core
+project.lock.json
+project.fragment.lock.json
+artifacts/
+
+# ASP.NET Scaffolding
+ScaffoldingReadMe.txt
+
+# StyleCop
+StyleCopReport.xml
+
+# Files built by Visual Studio
+*_i.c
+*_p.c
+*_h.h
+*.ilk
+*.meta
+*.obj
+*.iobj
+*.pch
+*.pdb
+*.ipdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*_wpftmp.csproj
+*.log
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Chutzpah Test files
+_Chutzpah*
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opendb
+*.opensdf
+*.sdf
+*.cachefile
+*.VC.db
+*.VC.VC.opendb
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+*.sap
+
+# Visual Studio Trace Files
+*.e2e
+
+# TFS 2012 Local Workspace
+$tf/
+
+# Guidance Automation Toolkit
+*.gpState
+
+# ReSharper is a .NET coding add-in
+_ReSharper*/
+*.[Rr]e[Ss]harper
+*.DotSettings.user
+
+# TeamCity is a build add-in
+_TeamCity*
+
+# DotCover is a Code Coverage Tool
+*.dotCover
+
+# AxoCover is a Code Coverage Tool
+.axoCover/*
+!.axoCover/settings.json
+
+# Coverlet is a free, cross platform Code Coverage Tool
+coverage*.json
+coverage*.xml
+coverage*.info
+
+# Visual Studio code coverage results
+*.coverage
+*.coveragexml
+
+# NCrunch
+_NCrunch_*
+.*crunch*.local.xml
+nCrunchTemp_*
+
+# MightyMoose
+*.mm.*
+AutoTest.Net/
+
+# Web workbench (sass)
+.sass-cache/
+
+# Installshield output folder
+[Ee]xpress/
+
+# DocProject is a documentation generator add-in
+DocProject/buildhelp/
+DocProject/Help/*.HxT
+DocProject/Help/*.HxC
+DocProject/Help/*.hhc
+DocProject/Help/*.hhk
+DocProject/Help/*.hhp
+DocProject/Help/Html2
+DocProject/Help/html
+
+# Click-Once directory
+publish/
+
+# Publish Web Output
+*.[Pp]ublish.xml
+*.azurePubxml
+# Note: Comment the next line if you want to checkin your web deploy settings,
+# but database connection strings (with potential passwords) will be unencrypted
+*.pubxml
+*.publishproj
+
+# Microsoft Azure Web App publish settings. Comment the next line if you want to
+# checkin your Azure Web App publish settings, but sensitive information contained
+# in these scripts will be unencrypted
+PublishScripts/
+
+# NuGet Packages
+*.nupkg
+# NuGet Symbol Packages
+*.snupkg
+# The packages folder can be ignored because of Package Restore
+**/[Pp]ackages/*
+# except build/, which is used as an MSBuild target.
+!**/[Pp]ackages/build/
+# Uncomment if necessary however generally it will be regenerated when needed
+#!**/[Pp]ackages/repositories.config
+# NuGet v3's project.json files produces more ignorable files
+*.nuget.props
+*.nuget.targets
+
+# Microsoft Azure Build Output
+csx/
+*.build.csdef
+
+# Microsoft Azure Emulator
+ecf/
+rcf/
+
+# Windows Store app package directories and files
+AppPackages/
+BundleArtifacts/
+Package.StoreAssociation.xml
+_pkginfo.txt
+*.appx
+*.appxbundle
+*.appxupload
+
+# Visual Studio cache files
+# files ending in .cache can be ignored
+*.[Cc]ache
+# but keep track of directories ending in .cache
+!?*.[Cc]ache/
+
+# Others
+ClientBin/
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.jfm
+*.pfx
+*.publishsettings
+orleans.codegen.cs
+
+# Including strong name files can present a security risk
+# (https://github.com/github/gitignore/pull/2483#issue-259490424)
+#*.snk
+
+# Since there are multiple workflows, uncomment next line to ignore bower_components
+# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
+#bower_components/
+
+# RIA/Silverlight projects
+Generated_Code/
+
+# Backup & report files from converting an old project file
+# to a newer Visual Studio version. Backup files are not needed,
+# because we have git ;-)
+_UpgradeReport_Files/
+Backup*/
+UpgradeLog*.XML
+UpgradeLog*.htm
+ServiceFabricBackup/
+*.rptproj.bak
+
+# SQL Server files
+*.mdf
+*.ldf
+*.ndf
+
+# Business Intelligence projects
+*.rdl.data
+*.bim.layout
+*.bim_*.settings
+*.rptproj.rsuser
+*- [Bb]ackup.rdl
+*- [Bb]ackup ([0-9]).rdl
+*- [Bb]ackup ([0-9][0-9]).rdl
+
+# Microsoft Fakes
+FakesAssemblies/
+
+# GhostDoc plugin setting file
+*.GhostDoc.xml
+
+# Node.js Tools for Visual Studio
+.ntvs_analysis.dat
+node_modules/
+
+# Visual Studio 6 build log
+*.plg
+
+# Visual Studio 6 workspace options file
+*.opt
+
+# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
+*.vbw
+
+# Visual Studio LightSwitch build output
+**/*.HTMLClient/GeneratedArtifacts
+**/*.DesktopClient/GeneratedArtifacts
+**/*.DesktopClient/ModelManifest.xml
+**/*.Server/GeneratedArtifacts
+**/*.Server/ModelManifest.xml
+_Pvt_Extensions
+
+# Paket dependency manager
+.paket/paket.exe
+paket-files/
+
+# FAKE - F# Make
+.fake/
+
+# CodeRush personal settings
+.cr/personal
+
+# Python Tools for Visual Studio (PTVS)
+__pycache__/
+
+
+# Cake - Uncomment if you are using it
+# tools/**
+# !tools/packages.config
+
+# Tabs Studio
+*.tss
+
+# Telerik's JustMock configuration file
+*.jmconfig
+
+# BizTalk build output
+*.btp.cs
+*.btm.cs
+*.odx.cs
+*.xsd.cs
+
+# OpenCover UI analysis results
+OpenCover/
+
+# Azure Stream Analytics local run output
+ASALocalRun/
+
+# MSBuild Binary and Structured Log
+*.binlog
+
+# NVidia Nsight GPU debugger configuration file
+*.nvuser
+
+# MFractors (Xamarin productivity tool) working folder
+.mfractor/
+
+# Local History for Visual Studio
+.localhistory/
+
+# BeatPulse healthcheck temp database
+healthchecksdb
+
+# Backup folder for Package Reference Convert tool in Visual Studio 2017
+MigrationBackup/
+
+# Ionide (cross platform F# VS Code tools) working folder
+.ionide/
+
+# Fody - auto-generated XML schema
+FodyWeavers.xsd
+
+# build
+build
+monotonic_align/core.c
+*.o
+*.so
+*.dll
+
+# data
+/config.json
+/*.pth
+*.wav
+/monotonic_align/monotonic_align
+/resources
+/MoeGoe.spec
+/dist/MoeGoe
+/dist
+
+.idea
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2e44ec5507a21c84647346865c876ce2b48db560
--- /dev/null
+++ b/README.md
@@ -0,0 +1,14 @@
+---
+title: Vits Models
+emoji: 🏃
+colorFrom: pink
+colorTo: indigo
+sdk: gradio
+sdk_version: 3.17.0
+app_file: app.py
+pinned: false
+license: apache-2.0
+duplicated_from: sayashi/vits-models
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffcfee009308052863d7569a661fa3adebe6332e
--- /dev/null
+++ b/app.py
@@ -0,0 +1,291 @@
+# coding=utf-8
+import os
+import re
+import argparse
+import utils
+import commons
+import json
+import torch
+import gradio as gr
+from models import SynthesizerTrn
+from text import text_to_sequence, _clean_text
+from torch import no_grad, LongTensor
+import gradio.processing_utils as gr_processing_utils
+import logging
+logging.getLogger('numba').setLevel(logging.WARNING)
+limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces
+
+hps_ms = utils.get_hparams_from_file(r'config/config.json')
+
+audio_postprocess_ori = gr.Audio.postprocess
+
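+# Wrap gr.Audio.postprocess so generated audio is returned as a base64 data URL
+# instead of a server file path.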
+def audio_postprocess(self, y):
+ data = audio_postprocess_ori(self, y)
+ if data is None:
+ return None
+ return gr_processing_utils.encode_url_or_file_to_base64(data["name"])
+
+
+gr.Audio.postprocess = audio_postprocess
+
+def get_text(text, hps, is_symbol):
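+    """Convert input text (or raw symbols) into a LongTensor of symbol ids, interspersing blanks if configured."""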
+ text_norm, clean_text = text_to_sequence(text, hps.symbols, [] if is_symbol else hps.data.text_cleaners)
+ if hps.data.add_blank:
+ text_norm = commons.intersperse(text_norm, 0)
+ text_norm = LongTensor(text_norm)
+ return text_norm, clean_text
+
+def create_tts_fn(net_g_ms, speaker_id):
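+    """Build a Gradio handler that synthesizes speech with the captured model and speaker id."""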
+ def tts_fn(text, language, noise_scale, noise_scale_w, length_scale, is_symbol):
+ text = text.replace('\n', ' ').replace('\r', '').replace(" ", "")
+ if limitation:
+            text_len = len(re.sub(r"\[([A-Z]{2})\]", "", text))
+ max_len = 100
+ if is_symbol:
+ max_len *= 3
+ if text_len > max_len:
+ return "Error: Text is too long", None
+ if not is_symbol:
+ if language == 0:
+ text = f"[ZH]{text}[ZH]"
+ elif language == 1:
+ text = f"[JA]{text}[JA]"
+ else:
+ text = f"{text}"
+ stn_tst, clean_text = get_text(text, hps_ms, is_symbol)
+ with no_grad():
+ x_tst = stn_tst.unsqueeze(0).to(device)
+ x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device)
+ sid = LongTensor([speaker_id]).to(device)
+ audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=noise_scale, noise_scale_w=noise_scale_w,
+ length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
+
+ return "Success", (22050, audio)
+ return tts_fn
+
+def create_to_symbol_fn(hps):
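+    """Build a handler that turns the input text into cleaned phoneme symbols when symbol input is enabled."""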
+ def to_symbol_fn(is_symbol_input, input_text, temp_lang):
+ if temp_lang == 0:
+ clean_text = f'[ZH]{input_text}[ZH]'
+ elif temp_lang == 1:
+ clean_text = f'[JA]{input_text}[JA]'
+ else:
+ clean_text = input_text
+ return _clean_text(clean_text, hps.data.text_cleaners) if is_symbol_input else ''
+
+ return to_symbol_fn
+def change_lang(language):
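+    # Default (noise_scale, noise_scale_w, length_scale) values for the selected language.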
+ if language == 0:
+ return 0.6, 0.668, 1.2
+ elif language == 1:
+ return 0.6, 0.668, 1
+ else:
+ return 0.6, 0.668, 1
+
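+# JS template (str.format-ed with a per-tab audio_id) that finds the rendered audio
+# element and triggers a client-side download named after the input text.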
+download_audio_js = """
+() =>{{
+ let root = document.querySelector("body > gradio-app");
+ if (root.shadowRoot != null)
+ root = root.shadowRoot;
+ let audio = root.querySelector("#tts-audio-{audio_id}").querySelector("audio");
+ let text = root.querySelector("#input-text-{audio_id}").querySelector("textarea");
+ if (audio == undefined)
+ return;
+ text = text.value;
+ if (text == undefined)
+ text = Math.floor(Math.random()*100000000);
+ audio = audio.src;
+ let oA = document.createElement("a");
+ oA.download = text.substr(0, 20)+'.wav';
+ oA.href = audio;
+ document.body.appendChild(oA);
+ oA.click();
+ oA.remove();
+}}
+"""
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--device', type=str, default='cpu')
+ parser.add_argument('--api', action="store_true", default=False)
+ parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
+ parser.add_argument("--all", action="store_true", default=False, help="enable all models")
+ args = parser.parse_args()
+ device = torch.device(args.device)
+ categories = ["Blue Archive", "Lycoris Recoil"]
+ others = {
+ "Princess Connect! Re:Dive": "https://huggingface.co/spaces/sayashi/vits-models-pcr",
+ "Genshin Impact": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
+ "Honkai Impact 3rd": "https://huggingface.co/spaces/sayashi/vits-models-genshin-bh3",
+ "Overwatch 2": "https://huggingface.co/spaces/sayashi/vits-models-ow2",
+ }
+ if args.all:
+ categories = ["Blue Archive", "Lycoris Recoil", "Princess Connect! Re:Dive", "Genshin Impact", "Honkai Impact 3rd", "Overwatch 2"]
+ others = {}
+ models = []
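+    # pretrained_models/info.json maps each model directory to its metadata
+    # (speaker id, display names, cover image, example text, language, type, enable flag).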
+ with open("pretrained_models/info.json", "r", encoding="utf-8") as f:
+ models_info = json.load(f)
+ for i, info in models_info.items():
+ if info['title'].split("-")[0] not in categories or not info['enable']:
+ continue
+ sid = info['sid']
+ name_en = info['name_en']
+ name_zh = info['name_zh']
+ title = info['title']
+ cover = f"pretrained_models/{i}/{info['cover']}"
+ example = info['example']
+ language = info['language']
+ net_g_ms = SynthesizerTrn(
+ len(hps_ms.symbols),
+ hps_ms.data.filter_length // 2 + 1,
+ hps_ms.train.segment_size // hps_ms.data.hop_length,
+ n_speakers=hps_ms.data.n_speakers if info['type'] == "multi" else 0,
+ **hps_ms.model)
+ utils.load_checkpoint(f'pretrained_models/{i}/{i}.pth', net_g_ms, None)
+ _ = net_g_ms.eval().to(device)
+ models.append((sid, name_en, name_zh, title, cover, example, language, net_g_ms, create_tts_fn(net_g_ms, sid), create_to_symbol_fn(hps_ms)))
+ with gr.Blocks() as app:
+ gr.Markdown(
+            "# vits-models\n"
+ "## Please do not generate content that could infringe upon the rights or cause harm to individuals or organizations.\n"
+ "## 请不要生成会对个人以及组织造成侵害的内容\n"
+ "\n\n"
+            "[Open In Colab](https://colab.research.google.com/drive/10QOk9NPgoKZUXkIhhuVaZ7SYra1MPMKH?usp=share_link)\n\n"
+            "[Duplicate this Space](https://huggingface.co/spaces/sayashi/vits-models?duplicate=true)\n\n"
+            "[Finetune your own model](https://github.com/SayaSS/vits-finetuning)"
+ )
+
+ with gr.Tabs():
+ for category in categories:
+ with gr.TabItem(category):
+ with gr.TabItem("EN"):
+ for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
+ if title.split("-")[0] != category:
+ continue
+ with gr.TabItem(name_en):
+ with gr.Row():
+ gr.Markdown(
+                                        '<div align="center">'
+                                        f'<strong>{title}</strong>'
+                                        + (f'<br><img src="file/{cover}">' if cover else "")
+                                        + '</div>'
+ )
+ with gr.Row():
+ with gr.Column():
+ input_text = gr.Textbox(label="Text (100 words limitation)" if limitation else "Text", lines=5, value=example, elem_id=f"input-text-en-{name_en.replace(' ','')}")
+ lang = gr.Dropdown(label="Language", choices=["Chinese", "Japanese", "Mix(wrap the Chinese text with [ZH][ZH], wrap the Japanese text with [JA][JA])"],
+ type="index", value=language)
+ with gr.Accordion(label="Advanced Options", open=False):
+ symbol_input = gr.Checkbox(value=False, label="Symbol input")
+ symbol_list = gr.Dataset(label="Symbol list", components=[input_text],
+ samples=[[x] for x in hps_ms.symbols])
+ symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
+ btn = gr.Button(value="Generate", variant="primary")
+ with gr.Row():
+ ns = gr.Slider(label="noise_scale", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
+ nsw = gr.Slider(label="noise_scale_w", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
+ ls = gr.Slider(label="length_scale", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
+ with gr.Column():
+ o1 = gr.Textbox(label="Output Message")
+ o2 = gr.Audio(label="Output Audio", elem_id=f"tts-audio-en-{name_en.replace(' ','')}")
+ download = gr.Button("Download Audio")
+ btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2], api_name=f"tts-{name_en}")
+ download.click(None, [], [], _js=download_audio_js.format(audio_id=f"en-{name_en.replace(' ', '')}"))
+ lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
+ symbol_input.change(
+ to_symbol_fn,
+ [symbol_input, input_text, lang],
+ [input_text]
+ )
+ symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
+ _js=f"""
+ (i,symbols) => {{
+ let root = document.querySelector("body > gradio-app");
+ if (root.shadowRoot != null)
+ root = root.shadowRoot;
+ let text_input = root.querySelector("#input-text-en-{name_en.replace(' ', '')}").querySelector("textarea");
+ let startPos = text_input.selectionStart;
+ let endPos = text_input.selectionEnd;
+ let oldTxt = text_input.value;
+ let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
+ text_input.value = result;
+ let x = window.scrollX, y = window.scrollY;
+ text_input.focus();
+ text_input.selectionStart = startPos + symbols[i].length;
+ text_input.selectionEnd = startPos + symbols[i].length;
+ text_input.blur();
+ window.scrollTo(x, y);
+ return text_input.value;
+ }}""")
+ with gr.TabItem("中文"):
+ for (sid, name_en, name_zh, title, cover, example, language, net_g_ms, tts_fn, to_symbol_fn) in models:
+ if title.split("-")[0] != category:
+ continue
+ with gr.TabItem(name_zh):
+ with gr.Row():
+ gr.Markdown(
+                                        '<div align="center">'
+                                        f'<strong>{title}</strong>'
+                                        + (f'<br><img src="file/{cover}">' if cover else "")
+                                        + '</div>'
+ )
+ with gr.Row():
+ with gr.Column():
+ input_text = gr.Textbox(label="文本 (100字上限)" if limitation else "文本", lines=5, value=example, elem_id=f"input-text-zh-{name_zh}")
+ lang = gr.Dropdown(label="语言", choices=["中文", "日语", "中日混合(中文用[ZH][ZH]包裹起来,日文用[JA][JA]包裹起来)"],
+                                            type="index", value="中文" if language == "Chinese" else "日语")
+ with gr.Accordion(label="高级选项", open=False):
+ symbol_input = gr.Checkbox(value=False, label="符号输入")
+ symbol_list = gr.Dataset(label="符号列表", components=[input_text],
+ samples=[[x] for x in hps_ms.symbols])
+ symbol_list_json = gr.Json(value=hps_ms.symbols, visible=False)
+ btn = gr.Button(value="生成", variant="primary")
+ with gr.Row():
+ ns = gr.Slider(label="控制感情变化程度", minimum=0.1, maximum=1.0, step=0.1, value=0.6, interactive=True)
+ nsw = gr.Slider(label="控制音素发音长度", minimum=0.1, maximum=1.0, step=0.1, value=0.668, interactive=True)
+ ls = gr.Slider(label="控制整体语速", minimum=0.1, maximum=2.0, step=0.1, value=1.2 if language=="Chinese" else 1, interactive=True)
+ with gr.Column():
+ o1 = gr.Textbox(label="输出信息")
+ o2 = gr.Audio(label="输出音频", elem_id=f"tts-audio-zh-{name_zh}")
+ download = gr.Button("下载音频")
+ btn.click(tts_fn, inputs=[input_text, lang, ns, nsw, ls, symbol_input], outputs=[o1, o2])
+ download.click(None, [], [], _js=download_audio_js.format(audio_id=f"zh-{name_zh}"))
+ lang.change(change_lang, inputs=[lang], outputs=[ns, nsw, ls])
+ symbol_input.change(
+ to_symbol_fn,
+ [symbol_input, input_text, lang],
+ [input_text]
+ )
+ symbol_list.click(None, [symbol_list, symbol_list_json], [input_text],
+ _js=f"""
+ (i,symbols) => {{
+ let root = document.querySelector("body > gradio-app");
+ if (root.shadowRoot != null)
+ root = root.shadowRoot;
+ let text_input = root.querySelector("#input-text-zh-{name_zh}").querySelector("textarea");
+ let startPos = text_input.selectionStart;
+ let endPos = text_input.selectionEnd;
+ let oldTxt = text_input.value;
+ let result = oldTxt.substring(0, startPos) + symbols[i] + oldTxt.substring(endPos);
+ text_input.value = result;
+ let x = window.scrollX, y = window.scrollY;
+ text_input.focus();
+ text_input.selectionStart = startPos + symbols[i].length;
+ text_input.selectionEnd = startPos + symbols[i].length;
+ text_input.blur();
+ window.scrollTo(x, y);
+ return text_input.value;
+ }}""")
+ for category, link in others.items():
+ with gr.TabItem(category):
+ gr.Markdown(
+                    f'''
+                    <div align="center">
+                    <a href="{link}">Click to Go</a>
+                    </div>
+                    '''
+ )
+ app.queue(concurrency_count=1, api_open=args.api).launch(share=args.share)
diff --git a/attentions.py b/attentions.py
new file mode 100644
index 0000000000000000000000000000000000000000..86bc73b5fe98cc7b443e9078553920346c996707
--- /dev/null
+++ b/attentions.py
@@ -0,0 +1,300 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import commons
+from modules import LayerNorm
+
+
+class Encoder(nn.Module):
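+  """Stack of windowed relative-position self-attention and convolutional FFN blocks."""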
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+
+ self.drop = nn.Dropout(p_dropout)
+ self.attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask):
+ attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.attn_layers[i](x, x, attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class Decoder(nn.Module):
+ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
+ super().__init__()
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+
+ self.drop = nn.Dropout(p_dropout)
+ self.self_attn_layers = nn.ModuleList()
+ self.norm_layers_0 = nn.ModuleList()
+ self.encdec_attn_layers = nn.ModuleList()
+ self.norm_layers_1 = nn.ModuleList()
+ self.ffn_layers = nn.ModuleList()
+ self.norm_layers_2 = nn.ModuleList()
+ for i in range(self.n_layers):
+ self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
+ self.norm_layers_0.append(LayerNorm(hidden_channels))
+ self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
+ self.norm_layers_1.append(LayerNorm(hidden_channels))
+ self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
+ self.norm_layers_2.append(LayerNorm(hidden_channels))
+
+ def forward(self, x, x_mask, h, h_mask):
+ """
+ x: decoder input
+ h: encoder output
+ """
+ self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+ encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+ x = x * x_mask
+ for i in range(self.n_layers):
+ y = self.self_attn_layers[i](x, x, self_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_0[i](x + y)
+
+ y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+ y = self.drop(y)
+ x = self.norm_layers_1[i](x + y)
+
+ y = self.ffn_layers[i](x, x_mask)
+ y = self.drop(y)
+ x = self.norm_layers_2[i](x + y)
+ x = x * x_mask
+ return x
+
+
+class MultiHeadAttention(nn.Module):
+ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
+ super().__init__()
+ assert channels % n_heads == 0
+
+ self.channels = channels
+ self.out_channels = out_channels
+ self.n_heads = n_heads
+ self.p_dropout = p_dropout
+ self.window_size = window_size
+ self.heads_share = heads_share
+ self.block_length = block_length
+ self.proximal_bias = proximal_bias
+ self.proximal_init = proximal_init
+ self.attn = None
+
+ self.k_channels = channels // n_heads
+ self.conv_q = nn.Conv1d(channels, channels, 1)
+ self.conv_k = nn.Conv1d(channels, channels, 1)
+ self.conv_v = nn.Conv1d(channels, channels, 1)
+ self.conv_o = nn.Conv1d(channels, out_channels, 1)
+ self.drop = nn.Dropout(p_dropout)
+
+ if window_size is not None:
+ n_heads_rel = 1 if heads_share else n_heads
+ rel_stddev = self.k_channels**-0.5
+ self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+ self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+ nn.init.xavier_uniform_(self.conv_q.weight)
+ nn.init.xavier_uniform_(self.conv_k.weight)
+ nn.init.xavier_uniform_(self.conv_v.weight)
+ if proximal_init:
+ with torch.no_grad():
+ self.conv_k.weight.copy_(self.conv_q.weight)
+ self.conv_k.bias.copy_(self.conv_q.bias)
+
+ def forward(self, x, c, attn_mask=None):
+ q = self.conv_q(x)
+ k = self.conv_k(c)
+ v = self.conv_v(c)
+
+ x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+ x = self.conv_o(x)
+ return x
+
+ def attention(self, query, key, value, mask=None):
+ # reshape [b, d, t] -> [b, n_h, t, d_k]
+ b, d, t_s, t_t = (*key.size(), query.size(2))
+ query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+ key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+ value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+ scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+ if self.window_size is not None:
+ assert t_s == t_t, "Relative attention is only available for self-attention."
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+ rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
+ scores_local = self._relative_position_to_absolute_position(rel_logits)
+ scores = scores + scores_local
+ if self.proximal_bias:
+ assert t_s == t_t, "Proximal bias is only available for self-attention."
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+ if mask is not None:
+ scores = scores.masked_fill(mask == 0, -1e4)
+ if self.block_length is not None:
+ assert t_s == t_t, "Local attention is only available for self-attention."
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
+ scores = scores.masked_fill(block_mask == 0, -1e4)
+ p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
+ p_attn = self.drop(p_attn)
+ output = torch.matmul(p_attn, value)
+ if self.window_size is not None:
+ relative_weights = self._absolute_position_to_relative_position(p_attn)
+ value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
+ output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
+ output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
+ return output, p_attn
+
+ def _matmul_with_relative_values(self, x, y):
+ """
+ x: [b, h, l, m]
+ y: [h or 1, m, d]
+ ret: [b, h, l, d]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0))
+ return ret
+
+ def _matmul_with_relative_keys(self, x, y):
+ """
+ x: [b, h, l, d]
+ y: [h or 1, m, d]
+ ret: [b, h, l, m]
+ """
+ ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
+ return ret
+
+ def _get_relative_embeddings(self, relative_embeddings, length):
+ max_relative_position = 2 * self.window_size + 1
+ # Pad first before slice to avoid using cond ops.
+ pad_length = max(length - (self.window_size + 1), 0)
+ slice_start_position = max((self.window_size + 1) - length, 0)
+ slice_end_position = slice_start_position + 2 * length - 1
+ if pad_length > 0:
+ padded_relative_embeddings = F.pad(
+ relative_embeddings,
+ commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
+ else:
+ padded_relative_embeddings = relative_embeddings
+ used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
+ return used_relative_embeddings
+
+ def _relative_position_to_absolute_position(self, x):
+ """
+ x: [b, h, l, 2*l-1]
+ ret: [b, h, l, l]
+ """
+ batch, heads, length, _ = x.size()
+ # Concat columns of pad to shift from relative to absolute indexing.
+ x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
+
+ # Concat extra elements so to add up to shape (len+1, 2*len-1).
+ x_flat = x.view([batch, heads, length * 2 * length])
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
+
+ # Reshape and slice out the padded elements.
+ x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
+ return x_final
+
+ def _absolute_position_to_relative_position(self, x):
+ """
+ x: [b, h, l, l]
+ ret: [b, h, l, 2*l-1]
+ """
+ batch, heads, length, _ = x.size()
+ # padd along column
+ x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
+ x_flat = x.view([batch, heads, length**2 + length*(length -1)])
+ # add 0's in the beginning that will skew the elements after reshape
+ x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
+ x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
+ return x_final
+
+ def _attention_bias_proximal(self, length):
+ """Bias for self-attention to encourage attention to close positions.
+ Args:
+ length: an integer scalar.
+ Returns:
+ a Tensor with shape [1, 1, length, length]
+ """
+ r = torch.arange(length, dtype=torch.float32)
+ diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
+ return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
+
+
+class FFN(nn.Module):
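+  """Two-layer 1D-convolutional feed-forward block with optional causal padding."""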
+ def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.activation = activation
+ self.causal = causal
+
+ if causal:
+ self.padding = self._causal_padding
+ else:
+ self.padding = self._same_padding
+
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
+ self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
+ self.drop = nn.Dropout(p_dropout)
+
+ def forward(self, x, x_mask):
+ x = self.conv_1(self.padding(x * x_mask))
+ if self.activation == "gelu":
+ x = x * torch.sigmoid(1.702 * x)
+ else:
+ x = torch.relu(x)
+ x = self.drop(x)
+ x = self.conv_2(self.padding(x * x_mask))
+ return x * x_mask
+
+ def _causal_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = self.kernel_size - 1
+ pad_r = 0
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
+
+ def _same_padding(self, x):
+ if self.kernel_size == 1:
+ return x
+ pad_l = (self.kernel_size - 1) // 2
+ pad_r = self.kernel_size // 2
+ padding = [[0, 0], [0, 0], [pad_l, pad_r]]
+ x = F.pad(x, commons.convert_pad_shape(padding))
+ return x
diff --git a/commons.py b/commons.py
new file mode 100644
index 0000000000000000000000000000000000000000..40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9
--- /dev/null
+++ b/commons.py
@@ -0,0 +1,172 @@
+import math
+import torch
+from torch.nn import functional as F
+import torch.jit
+
+
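+# Replace torch.jit.script / script_method with no-op pass-throughs so the
+# modules below run as plain eager PyTorch instead of being TorchScript-compiled.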
+def script_method(fn, _rcb=None):
+ return fn
+
+
+def script(obj, optimize=True, _frames_up=0, _rcb=None):
+ return obj
+
+
+torch.jit.script_method = script_method
+torch.jit.script = script
+
+
+def init_weights(m, mean=0.0, std=0.01):
+ classname = m.__class__.__name__
+ if classname.find("Conv") != -1:
+ m.weight.data.normal_(mean, std)
+
+
+def get_padding(kernel_size, dilation=1):
+ return int((kernel_size*dilation - dilation)/2)
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def intersperse(lst, item):
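+  """Insert `item` between all elements of `lst` and at both ends: [a, b] -> [item, a, item, b, item]."""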
+ result = [item] * (len(lst) * 2 + 1)
+ result[1::2] = lst
+ return result
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+ """KL(P||Q)"""
+ kl = (logs_q - logs_p) - 0.5
+ kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
+ return kl
+
+
+def rand_gumbel(shape):
+ """Sample from the Gumbel distribution, protect from overflows."""
+ uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+ return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+ g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+ return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
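+  """Take a `segment_size`-frame slice from each batch element, starting at ids_str[i]."""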
+ ret = torch.zeros_like(x[:, :, :segment_size])
+ for i in range(x.size(0)):
+ idx_str = ids_str[i]
+ idx_end = idx_str + segment_size
+ ret[i] = x[i, :, idx_str:idx_end]
+ return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+ b, d, t = x.size()
+ if x_lengths is None:
+ x_lengths = t
+ ids_str_max = x_lengths - segment_size + 1
+ ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+ ret = slice_segments(x, ids_str, segment_size)
+ return ret, ids_str
+
+
+def get_timing_signal_1d(
+ length, channels, min_timescale=1.0, max_timescale=1.0e4):
+ position = torch.arange(length, dtype=torch.float)
+ num_timescales = channels // 2
+ log_timescale_increment = (
+ math.log(float(max_timescale) / float(min_timescale)) /
+ (num_timescales - 1))
+ inv_timescales = min_timescale * torch.exp(
+ torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+ scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+ signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+ signal = F.pad(signal, [0, 0, 0, channels % 2])
+ signal = signal.view(1, channels, length)
+ return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+ b, channels, length = x.size()
+ signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+ return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
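+  # Lower-triangular causal mask of shape [1, 1, length, length].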
+ mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+ return mask
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+ n_channels_int = n_channels[0]
+ in_act = input_a + input_b
+ t_act = torch.tanh(in_act[:, :n_channels_int, :])
+ s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+ acts = t_act * s_act
+ return acts
+
+
+def convert_pad_shape(pad_shape):
+ l = pad_shape[::-1]
+ pad_shape = [item for sublist in l for item in sublist]
+ return pad_shape
+
+
+def shift_1d(x):
+ x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+ return x
+
+
+def sequence_mask(length, max_length=None):
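+  # Boolean mask of shape [b, max_length], True where a position is within the sequence length.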
+ if max_length is None:
+ max_length = length.max()
+ x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+ return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+ """
+ duration: [b, 1, t_x]
+ mask: [b, 1, t_y, t_x]
+ """
+ device = duration.device
+
+ b, _, t_y, t_x = mask.shape
+ cum_duration = torch.cumsum(duration, -1)
+
+ cum_duration_flat = cum_duration.view(b * t_x)
+ path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
+ path = path.view(b, t_x, t_y)
+ path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
+ path = path.unsqueeze(1).transpose(2,3) * mask
+ return path
+
+
+def clip_grad_value_(parameters, clip_value, norm_type=2):
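+  # Clamp each gradient to [-clip_value, clip_value] and return the total grad norm
+  # (computed before clamping).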
+ if isinstance(parameters, torch.Tensor):
+ parameters = [parameters]
+ parameters = list(filter(lambda p: p.grad is not None, parameters))
+ norm_type = float(norm_type)
+ if clip_value is not None:
+ clip_value = float(clip_value)
+
+ total_norm = 0
+ for p in parameters:
+ param_norm = p.grad.data.norm(norm_type)
+ total_norm += param_norm.item() ** norm_type
+ if clip_value is not None:
+ p.grad.data.clamp_(min=-clip_value, max=clip_value)
+ total_norm = total_norm ** (1. / norm_type)
+ return total_norm
diff --git a/config/config.json b/config/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fed68c6a98507f540d2f384f8f749d136fbaee73
--- /dev/null
+++ b/config/config.json
@@ -0,0 +1,55 @@
+{
+ "train": {
+ "log_interval": 200,
+ "eval_interval": 1000,
+ "seed": 1234,
+ "epochs": 10000,
+ "learning_rate": 2e-4,
+ "betas": [0.8, 0.99],
+ "eps": 1e-9,
+ "batch_size": 32,
+ "fp16_run": true,
+ "lr_decay": 0.999875,
+ "segment_size": 8192,
+ "init_lr_ratio": 1,
+ "warmup_epochs": 0,
+ "c_mel": 45,
+ "c_kl": 1.0
+ },
+ "data": {
+ "training_files":"filelists/xiaoke_train.txt.cleaned",
+ "validation_files":"filelists/xiaoke_val.txt.cleaned",
+ "text_cleaners":["zh_ja_mixture_cleaners"],
+ "max_wav_value": 32768.0,
+ "sampling_rate": 22050,
+ "filter_length": 1024,
+ "hop_length": 256,
+ "win_length": 1024,
+ "n_mel_channels": 80,
+ "mel_fmin": 0.0,
+ "mel_fmax": null,
+ "add_blank": true,
+ "n_speakers": 804,
+ "cleaned_text": true
+ },
+ "model": {
+ "inter_channels": 192,
+ "hidden_channels": 192,
+ "filter_channels": 768,
+ "n_heads": 2,
+ "n_layers": 6,
+ "kernel_size": 3,
+ "p_dropout": 0.1,
+ "resblock": "1",
+ "resblock_kernel_sizes": [3,7,11],
+ "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]],
+ "upsample_rates": [8,8,2,2],
+ "upsample_initial_channel": 512,
+ "upsample_kernel_sizes": [16,16,4,4],
+ "n_layers_q": 3,
+ "use_spectral_norm": false,
+ "gin_channels": 256
+ },
+ "speakers": ["\u7279\u522b\u5468", "\u65e0\u58f0\u94c3\u9e7f", "\u4e1c\u6d77\u5e1d\u7687\uff08\u5e1d\u5b9d\uff0c\u5e1d\u738b\uff09", "\u4e38\u5584\u65af\u57fa", "\u5bcc\u58eb\u5947\u8ff9", "\u5c0f\u6817\u5e3d", "\u9ec4\u91d1\u8239", "\u4f0f\u7279\u52a0", "\u5927\u548c\u8d64\u9aa5", "\u5927\u6811\u5feb\u8f66", "\u8349\u4e0a\u98de", "\u83f1\u4e9a\u9a6c\u900a", "\u76ee\u767d\u9ea6\u6606", "\u795e\u9e70", "\u597d\u6b4c\u5267", "\u6210\u7530\u767d\u4ec1", "\u9c81\u9053\u592b\u8c61\u5f81\uff08\u7687\u5e1d\uff09", "\u6c14\u69fd", "\u7231\u4e3d\u6570\u7801", "\u661f\u4e91\u5929\u7a7a", "\u7389\u85fb\u5341\u5b57", "\u7f8e\u5999\u59ff\u52bf", "\u7435\u7436\u6668\u5149", "\u6469\u8036\u91cd\u70ae", "\u66fc\u57ce\u8336\u5ea7", "\u7f8e\u6d66\u6ce2\u65c1", "\u76ee\u767d\u8d56\u6069", "\u83f1\u66d9", "\u96ea\u4e2d\u7f8e\u4eba", "\u7c73\u6d74", "\u827e\u5c3c\u65af\u98ce\u795e", "\u7231\u4e3d\u901f\u5b50\uff08\u7231\u4e3d\u5feb\u5b50\uff09", "\u7231\u6155\u7ec7\u59ec", "\u7a3b\u8377\u4e00", "\u80dc\u5229\u5956\u5238", "\u7a7a\u4e2d\u795e\u5bab", "\u8363\u8fdb\u95ea\u8000", "\u771f\u673a\u4f36", "\u5ddd\u4e0a\u516c\u4e3b", "\u9ec4\u91d1\u57ce\uff08\u9ec4\u91d1\u57ce\u5e02\uff09", "\u6a31\u82b1\u8fdb\u738b", "\u91c7\u73e0", "\u65b0\u5149\u98ce", "\u4e1c\u5546\u53d8\u9769", "\u8d85\u7ea7\u5c0f\u6d77\u6e7e", "\u9192\u76ee\u98de\u9e70\uff08\u5bc4\u5bc4\u5b50\uff09", "\u8352\u6f20\u82f1\u96c4", "\u4e1c\u701b\u4f50\u6566", "\u4e2d\u5c71\u5e86\u5178", "\u6210\u7530\u5927\u8fdb", "\u897f\u91ce\u82b1", "\u6625\u4e3d\uff08\u4e4c\u62c9\u62c9\uff09", "\u9752\u7af9\u56de\u5fc6", "\u5fae\u5149\u98de\u9a79", "\u7f8e\u4e3d\u5468\u65e5", "\u5f85\u517c\u798f\u6765", "mr cb\uff08cb\u5148\u751f\uff09", "\u540d\u5c06\u6012\u6d9b\uff08\u540d\u5c06\u6237\u4ec1\uff09", "\u76ee\u767d\u591a\u4f2f", "\u4f18\u79c0\u7d20\u8d28", "\u5e1d\u738b\u5149\u8f89", "\u5f85\u517c\u8bd7\u6b4c\u5267", "\u751f\u91ce\u72c4\u675c\u65af", "\u76ee\u767d\u5584\u4fe1", "\u5927\u62d3\u592a\u9633\u795e", "\u53cc\u6da1\u8f6e\uff08\u4e24\u7acb\u76f4\uff0c\u4e24\u55b7\u5c04\uff0c\u4e8c\u9505\u5934\uff0c\u9006\u55b7\u5c04\uff09", "\u91cc\u89c1\u5149\u94bb\uff08\u8428\u6258\u8bfa\u91d1\u521a\u77f3\uff09", "\u5317\u90e8\u7384\u9a79", "\u6a31\u82b1\u5343\u4ee3\u738b", "\u5929\u72fc\u661f\u8c61\u5f81", "\u76ee\u767d\u963f\u5c14\u4e39", "\u516b\u91cd\u65e0\u654c", "\u9e64\u4e38\u521a\u5fd7", "\u76ee\u767d\u5149\u660e", "\u6210\u7530\u62dc\u4ec1\uff08\u6210\u7530\u8def\uff09", "\u4e5f\u6587\u6444\u8f89", "\u5c0f\u6797\u5386\u5947", "\u5317\u6e2f\u706b\u5c71", "\u5947\u9510\u9a8f", "\u82e6\u6da9\u7cd6\u971c", "\u5c0f\u5c0f\u8695\u8327", "\u9a8f\u5ddd\u624b\u7eb2\uff08\u7eff\u5e3d\u6076\u9b54\uff09", "\u79cb\u5ddd\u5f25\u751f\uff08\u5c0f\u5c0f\u7406\u4e8b\u957f\uff09", "\u4e59\u540d\u53f2\u60a6\u5b50\uff08\u4e59\u540d\u8bb0\u8005\uff09", "\u6850\u751f\u9662\u8475", "\u5b89\u5fc3\u6cfd\u523a\u523a\u7f8e", "\u6a2b\u672c\u7406\u5b50", "\u795e\u91cc\u7eeb\u534e\uff08\u9f9f\u9f9f\uff09", "\u7434", "\u7a7a\uff08\u7a7a\u54e5\uff09", "\u4e3d\u838e", "\u8367\uff08\u8367\u59b9\uff09", "\u82ad\u82ad\u62c9", "\u51ef\u4e9a", "\u8fea\u5362\u514b", "\u96f7\u6cfd", "\u5b89\u67cf", "\u6e29\u8fea", "\u9999\u83f1", "\u5317\u6597", "\u884c\u79cb", "\u9b48", "\u51dd\u5149", "\u53ef\u8389", "\u949f\u79bb", "\u83f2\u8c22\u5c14\uff08\u7687\u5973\uff09", "\u73ed\u5c3c\u7279", "\u8fbe\u8fbe\u5229\u4e9a\uff08\u516c\u5b50\uff09", "\u8bfa\u827e\u5c14\uff08\u5973\u4ec6\uff09", "\u4e03\u4e03", "\u91cd\u4e91", "\u7518\u96e8\uff08\u6930\u7f8a\uff09", "\u963f\u8d1d\u591a", 
"\u8fea\u5965\u5a1c\uff08\u732b\u732b\uff09", "\u83ab\u5a1c", "\u5c0f\u53ef", "\u7802\u7cd6", "\u8f9b\u7131", "\u7f57\u838e\u8389\u4e9a", "\u80e1\u6843", "\u67ab\u539f\u4e07\u53f6\uff08\u4e07\u53f6\uff09", "\u70df\u7eef", "\u5bb5\u5bab", "\u6258\u9a6c", "\u4f18\u83c8", "\u96f7\u7535\u5c06\u519b\uff08\u96f7\u795e\uff09", "\u65e9\u67da", "\u73ca\u745a\u5bab\u5fc3\u6d77\uff08\u5fc3\u6d77\uff0c\u6263\u6263\u7c73\uff09", "\u4e94\u90ce", "\u4e5d\u6761\u88df\u7f57", "\u8352\u6cf7\u4e00\u6597\uff08\u4e00\u6597\uff09", "\u57c3\u6d1b\u4f0a", "\u7533\u9e64", "\u516b\u91cd\u795e\u5b50\uff08\u795e\u5b50\uff09", "\u795e\u91cc\u7eeb\u4eba\uff08\u7eeb\u4eba\uff09", "\u591c\u5170", "\u4e45\u5c90\u5fcd", "\u9e7f\u91ce\u82d1\u5e73\u85cf", "\u63d0\u7eb3\u91cc", "\u67ef\u83b1", "\u591a\u8389", "\u4e91\u5807", "\u7eb3\u897f\u59b2\uff08\u8349\u795e\uff09", "\u6df1\u6e0a\u4f7f\u5f92", "\u59ae\u9732", "\u8d5b\u8bfa", "\u503a\u52a1\u5904\u7406\u4eba", "\u574e\u8482\u4e1d", "\u771f\u5f13\u5feb\u8f66", "\u79cb\u4eba", "\u671b\u65cf", "\u827e\u5c14\u83f2", "\u827e\u8389\u4e1d", "\u827e\u4f26", "\u963f\u6d1b\u74e6", "\u5929\u91ce", "\u5929\u76ee\u5341\u4e94", "\u611a\u4eba\u4f17-\u5b89\u5fb7\u70c8", "\u5b89\u987a", "\u5b89\u897f", "\u8475", "\u9752\u6728", "\u8352\u5ddd\u5e78\u6b21", "\u8352\u8c37", "\u6709\u6cfd", "\u6d45\u5ddd", "\u9ebb\u7f8e", "\u51dd\u5149\u52a9\u624b", "\u963f\u6258", "\u7afa\u5b50", "\u767e\u8bc6", "\u767e\u95fb", "\u767e\u6653", "\u767d\u672f", "\u8d1d\u96c5\u7279\u4e3d\u5947", "\u4e3d\u5854", "\u5931\u843d\u8ff7\u8fed", "\u7f2d\u4e71\u661f\u68d8", "\u4f0a\u7538", "\u4f0f\u7279\u52a0\u5973\u5b69", "\u72c2\u70ed\u84dd\u8c03", "\u8389\u8389\u5a05", "\u841d\u838e\u8389\u5a05", "\u516b\u91cd\u6a31", "\u516b\u91cd\u971e", "\u5361\u83b2", "\u7b2c\u516d\u591c\u60f3\u66f2", "\u5361\u841d\u5c14", "\u59ec\u5b50", "\u6781\u5730\u6218\u5203", "\u5e03\u6d1b\u59ae\u5a05", "\u6b21\u751f\u94f6\u7ffc", "\u7406\u4e4b\u5f8b\u8005%26\u5e0c\u513f", "\u7406\u4e4b\u5f8b\u8005", "\u8ff7\u57ce\u9a87\u5154", "\u5e0c\u513f", "\u9b47\u591c\u661f\u6e0a", "\u9ed1\u5e0c\u513f", "\u5e15\u6735\u83f2\u8389\u4e1d", "\u4e0d\u706d\u661f\u951a", "\u5929\u5143\u9a91\u82f1", "\u5e7d\u5170\u9edb\u5c14", "\u6d3e\u8499bh3", "\u7231\u9171", "\u7eef\u7389\u4e38", "\u5fb7\u4e3d\u838e", "\u6708\u4e0b\u521d\u62e5", "\u6714\u591c\u89c2\u661f", "\u66ae\u5149\u9a91\u58eb", "\u683c\u857e\u4fee", "\u7559\u4e91\u501f\u98ce\u771f\u541b", "\u6885\u6bd4\u4e4c\u65af", "\u4eff\u72b9\u5927", "\u514b\u83b1\u56e0", "\u5723\u5251\u5e7d\u5170\u9edb\u5c14", "\u5996\u7cbe\u7231\u8389", "\u7279\u65af\u62c9zero", "\u82cd\u7384", "\u82e5\u6c34", "\u897f\u7433", "\u6234\u56e0\u65af\u96f7\u5e03", "\u8d1d\u62c9", "\u8d64\u9e22", "\u9547\u9b42\u6b4c", "\u6e21\u9e26", "\u4eba\u4e4b\u5f8b\u8005", "\u7231\u8389\u5e0c\u96c5", "\u5929\u7a79\u6e38\u4fa0", "\u742a\u4e9a\u5a1c", "\u7a7a\u4e4b\u5f8b\u8005", "\u85aa\u708e\u4e4b\u5f8b\u8005", "\u4e91\u58a8\u4e39\u5fc3", "\u7b26\u534e", "\u8bc6\u4e4b\u5f8b\u8005", "\u7279\u74e6\u6797", "\u7ef4\u5c14\u8587", "\u82bd\u8863", "\u96f7\u4e4b\u5f8b\u8005", "\u65ad\u7f6a\u5f71\u821e", "\u963f\u6ce2\u5c3c\u4e9a", "\u698e\u672c", "\u5384\u5c3c\u65af\u7279", "\u6076\u9f99", "\u8303\u4e8c\u7237", "\u6cd5\u62c9", "\u611a\u4eba\u4f17\u58eb\u5175", "\u611a\u4eba\u4f17\u58eb\u5175a", "\u611a\u4eba\u4f17\u58eb\u5175b", "\u611a\u4eba\u4f17\u58eb\u5175c", "\u611a\u4eba\u4f17a", "\u611a\u4eba\u4f17b", "\u98de\u98de", "\u83f2\u5229\u514b\u65af", "\u5973\u6027\u8ddf\u968f\u8005", "\u9022\u5ca9", "\u6446\u6e21\u4eba", 
"\u72c2\u8e81\u7684\u7537\u4eba", "\u5965\u5179", "\u8299\u841d\u62c9", "\u8ddf\u968f\u8005", "\u871c\u6c41\u751f\u7269", "\u9ec4\u9ebb\u5b50", "\u6e0a\u4e0a", "\u85e4\u6728", "\u6df1\u89c1", "\u798f\u672c", "\u8299\u84c9", "\u53e4\u6cfd", "\u53e4\u7530", "\u53e4\u5c71", "\u53e4\u8c37\u6607", "\u5085\u4e09\u513f", "\u9ad8\u8001\u516d", "\u77ff\u5de5\u5192", "\u5143\u592a", "\u5fb7\u5b89\u516c", "\u8302\u624d\u516c", "\u6770\u62c9\u5fb7", "\u845b\u7f57\u4e3d", "\u91d1\u5ffd\u5f8b", "\u516c\u4fca", "\u9505\u5df4", "\u6b4c\u5fb7", "\u963f\u8c6a", "\u72d7\u4e09\u513f", "\u845b\u745e\u4e1d", "\u82e5\u5fc3", "\u963f\u5c71\u5a46", "\u602a\u9e1f", "\u5e7f\u7af9", "\u89c2\u6d77", "\u5173\u5b8f", "\u871c\u6c41\u536b\u5175", "\u5b88\u536b1", "\u50b2\u6162\u7684\u5b88\u536b", "\u5bb3\u6015\u7684\u5b88\u536b", "\u8d35\u5b89", "\u76d6\u4f0a", "\u963f\u521b", "\u54c8\u592b\u4e39", "\u65e5\u8bed\u963f\u8d1d\u591a\uff08\u91ce\u5c9b\u5065\u513f\uff09", "\u65e5\u8bed\u57c3\u6d1b\u4f0a\uff08\u9ad8\u57a3\u5f69\u9633\uff09", "\u65e5\u8bed\u5b89\u67cf\uff08\u77f3\u89c1\u821e\u83dc\u9999\uff09", "\u65e5\u8bed\u795e\u91cc\u7eeb\u534e\uff08\u65e9\u89c1\u6c99\u7ec7\uff09", "\u65e5\u8bed\u795e\u91cc\u7eeb\u4eba\uff08\u77f3\u7530\u5f70\uff09", "\u65e5\u8bed\u767d\u672f\uff08\u6e38\u4f50\u6d69\u4e8c\uff09", "\u65e5\u8bed\u82ad\u82ad\u62c9\uff08\u9b3c\u5934\u660e\u91cc\uff09", "\u65e5\u8bed\u5317\u6597\uff08\u5c0f\u6e05\u6c34\u4e9a\u7f8e\uff09", "\u65e5\u8bed\u73ed\u5c3c\u7279\uff08\u9022\u5742\u826f\u592a\uff09", "\u65e5\u8bed\u574e\u8482\u4e1d\uff08\u67da\u6728\u51c9\u9999\uff09", "\u65e5\u8bed\u91cd\u4e91\uff08\u9f50\u85e4\u58ee\u9a6c\uff09", "\u65e5\u8bed\u67ef\u83b1\uff08\u524d\u5ddd\u51c9\u5b50\uff09", "\u65e5\u8bed\u8d5b\u8bfa\uff08\u5165\u91ce\u81ea\u7531\uff09", "\u65e5\u8bed\u6234\u56e0\u65af\u96f7\u5e03\uff08\u6d25\u7530\u5065\u6b21\u90ce\uff09", "\u65e5\u8bed\u8fea\u5362\u514b\uff08\u5c0f\u91ce\u8d24\u7ae0\uff09", "\u65e5\u8bed\u8fea\u5965\u5a1c\uff08\u4e95\u6cfd\u8bd7\u7ec7\uff09", "\u65e5\u8bed\u591a\u8389\uff08\u91d1\u7530\u670b\u5b50\uff09", "\u65e5\u8bed\u4f18\u83c8\uff08\u4f50\u85e4\u5229\u5948\uff09", "\u65e5\u8bed\u83f2\u8c22\u5c14\uff08\u5185\u7530\u771f\u793c\uff09", "\u65e5\u8bed\u7518\u96e8\uff08\u4e0a\u7530\u4e3d\u5948\uff09", "\u65e5\u8bed\uff08\u7560\u4e2d\u7950\uff09", "\u65e5\u8bed\u9e7f\u91ce\u9662\u5e73\u85cf\uff08\u4e95\u53e3\u7950\u4e00\uff09", "\u65e5\u8bed\u7a7a\uff08\u5800\u6c5f\u77ac\uff09", "\u65e5\u8bed\u8367\uff08\u60a0\u6728\u78a7\uff09", "\u65e5\u8bed\u80e1\u6843\uff08\u9ad8\u6865\u674e\u4f9d\uff09", "\u65e5\u8bed\u4e00\u6597\uff08\u897f\u5ddd\u8d35\u6559\uff09", "\u65e5\u8bed\u51ef\u4e9a\uff08\u9e1f\u6d77\u6d69\u8f85\uff09", "\u65e5\u8bed\u4e07\u53f6\uff08\u5c9b\u5d0e\u4fe1\u957f\uff09", "\u65e5\u8bed\u523b\u6674\uff08\u559c\u591a\u6751\u82f1\u68a8\uff09", "\u65e5\u8bed\u53ef\u8389\uff08\u4e45\u91ce\u7f8e\u54b2\uff09", "\u65e5\u8bed\u5fc3\u6d77\uff08\u4e09\u68ee\u94c3\u5b50\uff09", "\u65e5\u8bed\u4e5d\u6761\u88df\u7f57\uff08\u6fd1\u6237\u9ebb\u6c99\u7f8e\uff09", "\u65e5\u8bed\u4e3d\u838e\uff08\u7530\u4e2d\u7406\u60e0\uff09", "\u65e5\u8bed\u83ab\u5a1c\uff08\u5c0f\u539f\u597d\u7f8e\uff09", "\u65e5\u8bed\u7eb3\u897f\u59b2\uff08\u7530\u6751\u7531\u52a0\u8389\uff09", "\u65e5\u8bed\u59ae\u9732\uff08\u91d1\u5143\u5bff\u5b50\uff09", "\u65e5\u8bed\u51dd\u5149\uff08\u5927\u539f\u6c99\u8036\u9999\uff09", "\u65e5\u8bed\u8bfa\u827e\u5c14\uff08\u9ad8\u5c3e\u594f\u97f3\uff09", "\u65e5\u8bed\u5965\u5179\uff08\u589e\u8c37\u5eb7\u7eaa\uff09", 
"\u65e5\u8bed\u6d3e\u8499\uff08\u53e4\u8d3a\u8475\uff09", "\u65e5\u8bed\u7434\uff08\u658b\u85e4\u5343\u548c\uff09", "\u65e5\u8bed\u4e03\u4e03\uff08\u7530\u6751\u7531\u52a0\u8389\uff09", "\u65e5\u8bed\u96f7\u7535\u5c06\u519b\uff08\u6cfd\u57ce\u7f8e\u96ea\uff09", "\u65e5\u8bed\u96f7\u6cfd\uff08\u5185\u5c71\u6602\u8f89\uff09", "\u65e5\u8bed\u7f57\u838e\u8389\u4e9a\uff08\u52a0\u9688\u4e9a\u8863\uff09", "\u65e5\u8bed\u65e9\u67da\uff08\u6d32\u5d0e\u7eeb\uff09", "\u65e5\u8bed\u6563\u5175\uff08\u67ff\u539f\u5f7b\u4e5f\uff09", "\u65e5\u8bed\u7533\u9e64\uff08\u5ddd\u6f84\u7eeb\u5b50\uff09", "\u65e5\u8bed\u4e45\u5c90\u5fcd\uff08\u6c34\u6865\u9999\u7ec7\uff09", "\u65e5\u8bed\u5973\u58eb\uff08\u5e84\u5b50\u88d5\u8863\uff09", "\u65e5\u8bed\u7802\u7cd6\uff08\u85e4\u7530\u831c\uff09", "\u65e5\u8bed\u8fbe\u8fbe\u5229\u4e9a\uff08\u6728\u6751\u826f\u5e73\uff09", "\u65e5\u8bed\u6258\u9a6c\uff08\u68ee\u7530\u6210\u4e00\uff09", "\u65e5\u8bed\u63d0\u7eb3\u91cc\uff08\u5c0f\u6797\u6c99\u82d7\uff09", "\u65e5\u8bed\u6e29\u8fea\uff08\u6751\u6fd1\u6b65\uff09", "\u65e5\u8bed\u9999\u83f1\uff08\u5c0f\u6cfd\u4e9a\u674e\uff09", "\u65e5\u8bed\u9b48\uff08\u677e\u5188\u796f\u4e1e\uff09", "\u65e5\u8bed\u884c\u79cb\uff08\u7686\u5ddd\u7eaf\u5b50\uff09", "\u65e5\u8bed\u8f9b\u7131\uff08\u9ad8\u6865\u667a\u79cb\uff09", "\u65e5\u8bed\u516b\u91cd\u795e\u5b50\uff08\u4f50\u4ed3\u7eeb\u97f3\uff09", "\u65e5\u8bed\u70df\u7eef\uff08\u82b1\u5b88\u7531\u7f8e\u91cc\uff09", "\u65e5\u8bed\u591c\u5170\uff08\u8fdc\u85e4\u7eeb\uff09", "\u65e5\u8bed\u5bb5\u5bab\uff08\u690d\u7530\u4f73\u5948\uff09", "\u65e5\u8bed\u4e91\u5807\uff08\u5c0f\u5ca9\u4e95\u5c0f\u9e1f\uff09", "\u65e5\u8bed\u949f\u79bb\uff08\u524d\u91ce\u667a\u662d\uff09", "\u6770\u514b", "\u963f\u5409", "\u6c5f\u821f", "\u9274\u79cb", "\u5609\u4e49", "\u7eaa\u82b3", "\u666f\u6f84", "\u7ecf\u7eb6", "\u666f\u660e", "\u664b\u4f18", "\u963f\u9e20", "\u9152\u5ba2", "\u4e54\u5c14", "\u4e54\u745f\u592b", "\u7ea6\u987f", "\u4e54\u4f0a\u65af", "\u5c45\u5b89", "\u541b\u541b", "\u987a\u5409", "\u7eaf\u4e5f", "\u91cd\u4f50", "\u5927\u5c9b\u7eaf\u5e73", "\u84b2\u6cfd", "\u52d8\u89e3\u7531\u5c0f\u8def\u5065\u4e09\u90ce", "\u67ab", "\u67ab\u539f\u4e49\u5e86", "\u836b\u5c71", "\u7532\u6590\u7530\u9f8d\u99ac", "\u6d77\u6597", "\u60df\u795e\u6674\u4e4b\u4ecb", "\u9e7f\u91ce\u5948\u5948", "\u5361\u7435\u8389\u4e9a", "\u51ef\u745f\u7433", "\u52a0\u85e4\u4fe1\u609f", "\u52a0\u85e4\u6d0b\u5e73", "\u80dc\u5bb6", "\u8305\u847a\u4e00\u5e86", "\u548c\u662d", "\u4e00\u6b63", "\u4e00\u9053", "\u6842\u4e00", "\u5e86\u6b21\u90ce", "\u963f\u8d24", "\u5065\u53f8", "\u5065\u6b21\u90ce", "\u5065\u4e09\u90ce", "\u5929\u7406", "\u6740\u624ba", "\u6740\u624bb", "\u6728\u5357\u674f\u5948", "\u6728\u6751", "\u56fd\u738b", "\u6728\u4e0b", "\u5317\u6751", "\u6e05\u60e0", "\u6e05\u4eba", "\u514b\u5217\u95e8\u7279", "\u9a91\u58eb", "\u5c0f\u6797", "\u5c0f\u6625", "\u5eb7\u62c9\u5fb7", "\u5927\u8089\u4e38", "\u7434\u7f8e", "\u5b8f\u4e00", "\u5eb7\u4ecb", "\u5e78\u5fb7", "\u9ad8\u5584", "\u68a2", "\u514b\u7f57\u7d22", "\u4e45\u4fdd", "\u4e5d\u6761\u9570\u6cbb", "\u4e45\u6728\u7530", "\u6606\u94a7", "\u83ca\u5730\u541b", "\u4e45\u5229\u987b", "\u9ed1\u7530", "\u9ed1\u6cfd\u4eac\u4e4b\u4ecb", "\u54cd\u592a", "\u5c9a\u59d0", "\u5170\u6eaa", "\u6f9c\u9633", "\u52b3\u4f26\u65af", "\u4e50\u660e", "\u83b1\u8bfa", "\u83b2", "\u826f\u5b50", "\u674e\u5f53", "\u674e\u4e01", "\u5c0f\u4e50", "\u7075", "\u5c0f\u73b2", "\u7433\u7405a", "\u7433\u7405b", "\u5c0f\u5f6c", "\u5c0f\u5fb7", "\u5c0f\u697d", "\u5c0f\u9f99", "\u5c0f\u5434", 
"\u5c0f\u5434\u7684\u8bb0\u5fc6", "\u7406\u6b63", "\u963f\u9f99", "\u5362\u5361", "\u6d1b\u6210", "\u7f57\u5de7", "\u5317\u98ce\u72fc", "\u5362\u6b63", "\u840d\u59e5\u59e5", "\u524d\u7530", "\u771f\u663c", "\u9ebb\u7eaa", "\u771f", "\u611a\u4eba\u4f17-\u9a6c\u514b\u897f\u59c6", "\u5973\u6027a", "\u5973\u6027b", "\u5973\u6027a\u7684\u8ddf\u968f\u8005", "\u963f\u5b88", "\u739b\u683c\u4e3d\u7279", "\u771f\u7406", "\u739b\u4e54\u4e3d", "\u739b\u6587", "\u6b63\u80dc", "\u660c\u4fe1", "\u5c06\u53f8", "\u6b63\u4eba", "\u8def\u7237", "\u8001\u7ae0", "\u677e\u7530", "\u677e\u672c", "\u677e\u6d66", "\u677e\u5742", "\u8001\u5b5f", "\u5b5f\u4e39", "\u5546\u4eba\u968f\u4ece", "\u4f20\u4ee4\u5175", "\u7c73\u6b47\u5c14", "\u5fa1\u8206\u6e90\u4e00\u90ce", "\u5fa1\u8206\u6e90\u6b21\u90ce", "\u5343\u5ca9\u519b\u6559\u5934", "\u5343\u5ca9\u519b\u58eb\u5175", "\u660e\u535a", "\u660e\u4fca", "\u7f8e\u94c3", "\u7f8e\u548c", "\u963f\u5e78", "\u524a\u6708\u7b51\u9633\u771f\u541b", "\u94b1\u773c\u513f", "\u68ee\u5f66", "\u5143\u52a9", "\u7406\u6c34\u53e0\u5c71\u771f\u541b", "\u7406\u6c34\u758a\u5c71\u771f\u541b", "\u6731\u8001\u677f", "\u6728\u6728", "\u6751\u4e0a", "\u6751\u7530", "\u6c38\u91ce", "\u957f\u91ce\u539f\u9f99\u4e4b\u4ecb", "\u957f\u6fd1", "\u4e2d\u91ce\u5fd7\u4e43", "\u83dc\u83dc\u5b50", "\u6960\u6960", "\u6210\u6fd1", "\u963f\u5185", "\u5b81\u7984", "\u725b\u5fd7", "\u4fe1\u535a", "\u4f38\u592b", "\u91ce\u65b9", "\u8bfa\u62c9", "\u7eaa\u9999", "\u8bfa\u66fc", "\u4fee\u5973", "\u7eaf\u6c34\u7cbe\u7075", "\u5c0f\u5ddd", "\u5c0f\u4ed3\u6faa", "\u5188\u6797", "\u5188\u5d0e\u7ed8\u91cc\u9999", "\u5188\u5d0e\u9646\u6597", "\u5965\u62c9\u592b", "\u8001\u79d1", "\u9b3c\u5a46\u5a46", "\u5c0f\u91ce\u5bfa", "\u5927\u6cb3\u539f\u4e94\u53f3\u536b\u95e8", "\u5927\u4e45\u4fdd\u5927\u4ecb", "\u5927\u68ee", "\u5927\u52a9", "\u5965\u7279", "\u6d3e\u8499", "\u6d3e\u84992", "\u75c5\u4ebaa", "\u75c5\u4ebab", "\u5df4\u987f", "\u6d3e\u6069", "\u670b\u4e49", "\u56f4\u89c2\u7fa4\u4f17", "\u56f4\u89c2\u7fa4\u4f17a", "\u56f4\u89c2\u7fa4\u4f17b", "\u56f4\u89c2\u7fa4\u4f17c", "\u56f4\u89c2\u7fa4\u4f17d", "\u56f4\u89c2\u7fa4\u4f17e", "\u94dc\u96c0", "\u963f\u80a5", "\u5174\u53d4", "\u8001\u5468\u53d4", "\u516c\u4e3b", "\u5f7c\u5f97", "\u4e7e\u5b50", "\u828a\u828a", "\u4e7e\u73ae", "\u7eee\u547d", "\u675e\u5e73", "\u79cb\u6708", "\u6606\u6069", "\u96f7\u7535\u5f71", "\u5170\u9053\u5c14", "\u96f7\u8499\u5fb7", "\u5192\u5931\u7684\u5e15\u62c9\u5fb7", "\u4f36\u4e00", "\u73b2\u82b1", "\u963f\u4ec1", "\u5bb6\u81e3\u4eec", "\u68a8\u7ed8", "\u8363\u6c5f", "\u620e\u4e16", "\u6d6a\u4eba", "\u7f57\u4f0a\u65af", "\u5982\u610f", "\u51c9\u5b50", "\u5f69\u9999", "\u9152\u4e95", "\u5742\u672c", "\u6714\u6b21\u90ce", "\u6b66\u58eba", "\u6b66\u58ebb", "\u6b66\u58ebc", "\u6b66\u58ebd", "\u73ca\u745a", "\u4e09\u7530", "\u838e\u62c9", "\u7b39\u91ce", "\u806a\u7f8e", "\u806a", "\u5c0f\u767e\u5408", "\u6563\u5175", "\u5bb3\u6015\u7684\u5c0f\u5218", "\u8212\u4f2f\u7279", "\u8212\u8328", "\u6d77\u9f99", "\u4e16\u5b50", "\u8c22\u5c14\u76d6", "\u5bb6\u4e01", "\u5546\u534e", "\u6c99\u5bc5", "\u963f\u5347", "\u67f4\u7530", "\u963f\u8302", "\u5f0f\u5927\u5c06", "\u6e05\u6c34", "\u5fd7\u6751\u52d8\u5175\u536b", "\u65b0\u4e4b\u4e1e", "\u5fd7\u7ec7", "\u77f3\u5934", "\u8bd7\u7fbd", "\u8bd7\u7b60", "\u77f3\u58ee", "\u7fd4\u592a", "\u6b63\u4e8c", "\u5468\u5e73", "\u8212\u6768", "\u9f50\u683c\u8299\u4e3d\u96c5", "\u5973\u58eb", "\u601d\u52e4", "\u516d\u6307\u4e54\u745f", "\u611a\u4eba\u4f17\u5c0f\u5175d", "\u611a\u4eba\u4f17\u5c0f\u5175a", 
"\u611a\u4eba\u4f17\u5c0f\u5175b", "\u611a\u4eba\u4f17\u5c0f\u5175c", "\u5434\u8001\u4e94", "\u5434\u8001\u4e8c", "\u6ed1\u5934\u9b3c", "\u8a00\u7b11", "\u5434\u8001\u4e03", "\u58eb\u5175h", "\u58eb\u5175i", "\u58eb\u5175a", "\u58eb\u5175b", "\u58eb\u5175c", "\u58eb\u5175d", "\u58eb\u5175e", "\u58eb\u5175f", "\u58eb\u5175g", "\u594f\u592a", "\u65af\u5766\u5229", "\u6387\u661f\u652b\u8fb0\u5929\u541b", "\u5c0f\u5934", "\u5927\u6b66", "\u9676\u4e49\u9686", "\u6749\u672c", "\u82cf\u897f", "\u5acc\u7591\u4ebaa", "\u5acc\u7591\u4ebab", "\u5acc\u7591\u4ebac", "\u5acc\u7591\u4ebad", "\u65af\u4e07", "\u5251\u5ba2a", "\u5251\u5ba2b", "\u963f\u4e8c", "\u5fe0\u80dc", "\u5fe0\u592b", "\u963f\u656c", "\u5b5d\u5229", "\u9e70\u53f8\u8fdb", "\u9ad8\u5c71", "\u4e5d\u6761\u5b5d\u884c", "\u6bc5", "\u7af9\u5185", "\u62d3\u771f", "\u5353\u4e5f", "\u592a\u90ce\u4e38", "\u6cf0\u52d2", "\u624b\u5c9b", "\u54f2\u5e73", "\u54f2\u592b", "\u6258\u514b", "\u5927boss", "\u963f\u5f3a", "\u6258\u5c14\u5fb7\u62c9", "\u65c1\u89c2\u8005", "\u5929\u6210", "\u963f\u5927", "\u8482\u739b\u4e4c\u65af", "\u63d0\u7c73", "\u6237\u7530", "\u963f\u4e09", "\u4e00\u8d77\u7684\u4eba", "\u5fb7\u7530", "\u5fb7\u957f", "\u667a\u6811", "\u5229\u5f66", "\u80d6\u4e4e\u4e4e\u7684\u65c5\u884c\u8005", "\u85cf\u5b9d\u4ebaa", "\u85cf\u5b9d\u4ebab", "\u85cf\u5b9d\u4ebac", "\u85cf\u5b9d\u4ebad", "\u963f\u7947", "\u6052\u96c4", "\u9732\u5b50", "\u8bdd\u5267\u56e2\u56e2\u957f", "\u5185\u6751", "\u4e0a\u91ce", "\u4e0a\u6749", "\u8001\u6234", "\u8001\u9ad8", "\u8001\u8d3e", "\u8001\u58a8", "\u8001\u5b59", "\u5929\u67a2\u661f", "\u8001\u4e91", "\u6709\u4e50\u658b", "\u4e11\u96c4", "\u4e4c\u7ef4", "\u74e6\u4eac", "\u83f2\u5c14\u6208\u9edb\u7279", "\u7ef4\u591a\u5229\u4e9a", "\u8587\u5c14", "\u74e6\u683c\u7eb3", "\u963f\u5916", "\u4f8d\u5973", "\u74e6\u62c9", "\u671b\u96c5", "\u5b9b\u70df", "\u742c\u7389", "\u6218\u58eba", "\u6218\u58ebb", "\u6e21\u8fba", "\u6e21\u90e8", "\u963f\u4f1f", "\u6587\u749f", "\u6587\u6e0a", "\u97e6\u5c14\u7eb3", "\u738b\u6273\u624b", "\u6b66\u6c9b", "\u6653\u98de", "\u8f9b\u7a0b", "\u661f\u706b", "\u661f\u7a00", "\u8f9b\u79c0", "\u79c0\u534e", "\u963f\u65ed", "\u5f90\u5218\u5e08", "\u77e2\u90e8", "\u516b\u6728", "\u5c71\u4e0a", "\u963f\u9633", "\u989c\u7b11", "\u5eb7\u660e", "\u6cf0\u4e45", "\u5b89\u6b66", "\u77e2\u7530\u5e78\u559c", "\u77e2\u7530\u8f9b\u559c", "\u4e49\u575a", "\u83ba\u513f", "\u76c8\u4e30", "\u5b9c\u5e74", "\u94f6\u674f", "\u9038\u8f69", "\u6a2a\u5c71", "\u6c38\u8d35", "\u6c38\u4e1a", "\u5609\u4e45", "\u5409\u5ddd", "\u4e49\u9ad8", "\u7528\u9ad8", "\u9633\u592a", "\u5143\u84c9", "\u73a5\u8f89", "\u6bd3\u534e", "\u6709\u9999", "\u5e78\u4e5f", "\u7531\u771f", "\u7ed3\u83dc", "\u97f5\u5b81", "\u767e\u5408", "\u767e\u5408\u534e", "\u5c24\u82cf\u6ce2\u592b", "\u88d5\u5b50", "\u60a0\u7b56", "\u60a0\u4e5f", "\u4e8e\u5ae3", "\u67da\u5b50", "\u8001\u90d1", "\u6b63\u8302", "\u5fd7\u6210", "\u82b7\u5de7", "\u77e5\u6613", "\u652f\u652f", "\u5468\u826f", "\u73e0\u51fd", "\u795d\u660e", "\u795d\u6d9b"],
+ "symbols": ["_", ",", ".", "!", "?", "-", "~", "\u2026", "A", "E", "I", "N", "O", "Q", "U", "a", "b", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "v", "w", "y", "z", "\u0283", "\u02a7", "\u02a6", "\u026f", "\u0279", "\u0259", "\u0265", "\u207c", "\u02b0", "`", "\u2192", "\u2193", "\u2191", " "]
+}
\ No newline at end of file
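
For reference, the "symbols" list above is the character/phoneme inventory the acoustic model was trained with. Below is a minimal, illustrative sketch (not part of this diff; the repo's own text front-end may differ) of how such a list is typically turned into the integer ids consumed by TextEncoder's nn.Embedding:

# Illustrative only: map each config symbol to an embedding id.
symbols = ["_", ",", ".", "!", "?", "a", "b", "d", "e"]  # truncated stand-in for the full list above
symbol_to_id = {s: i for i, s in enumerate(symbols)}

def text_to_sequence(cleaned_text):
    # drop characters the model has no symbol for
    return [symbol_to_id[ch] for ch in cleaned_text if ch in symbol_to_id]

print(text_to_sequence("a.e!"))  # -> [5, 2, 8, 3]
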
diff --git a/mel_processing.py b/mel_processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e252e76320522a8a4195a60665168f22769aec2
--- /dev/null
+++ b/mel_processing.py
@@ -0,0 +1,101 @@
+import torch
+import torch.utils.data
+from librosa.filters import mel as librosa_mel_fn
+
+MAX_WAV_VALUE = 32768.0
+
+
+def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
+ """
+ PARAMS
+ ------
+ C: compression factor
+ """
+ return torch.log(torch.clamp(x, min=clip_val) * C)
+
+
+def dynamic_range_decompression_torch(x, C=1):
+ """
+ PARAMS
+ ------
+ C: compression factor used to compress
+ """
+ return torch.exp(x) / C
+
+
+def spectral_normalize_torch(magnitudes):
+ output = dynamic_range_compression_torch(magnitudes)
+ return output
+
+
+def spectral_de_normalize_torch(magnitudes):
+ output = dynamic_range_decompression_torch(magnitudes)
+ return output
+
+
+mel_basis = {}
+hann_window = {}
+
+
+def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
+ if torch.min(y) < -1.:
+ print('min value is ', torch.min(y))
+ if torch.max(y) > 1.:
+ print('max value is ', torch.max(y))
+
+ global hann_window
+ dtype_device = str(y.dtype) + '_' + str(y.device)
+ wnsize_dtype_device = str(win_size) + '_' + dtype_device
+ if wnsize_dtype_device not in hann_window:
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+ y = y.squeeze(1)
+
+ spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+ return spec
+
+
+def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+ global mel_basis
+ dtype_device = str(spec.dtype) + '_' + str(spec.device)
+ fmax_dtype_device = str(fmax) + '_' + dtype_device
+ if fmax_dtype_device not in mel_basis:
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)  # keyword args stay compatible with newer librosa
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+ spec = spectral_normalize_torch(spec)
+ return spec
+
+
+def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+ if torch.min(y) < -1.:
+ print('min value is ', torch.min(y))
+ if torch.max(y) > 1.:
+ print('max value is ', torch.max(y))
+
+ global mel_basis, hann_window
+ dtype_device = str(y.dtype) + '_' + str(y.device)
+ fmax_dtype_device = str(fmax) + '_' + dtype_device
+ wnsize_dtype_device = str(win_size) + '_' + dtype_device
+ if fmax_dtype_device not in mel_basis:
+ mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
+ mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
+ if wnsize_dtype_device not in hann_window:
+ hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
+
+ y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
+ y = y.squeeze(1)
+
+ spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+ center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
+
+ spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+
+ spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+ spec = spectral_normalize_torch(spec)
+
+ return spec
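
A rough usage sketch for mel_processing.py. The hyperparameters below (22050 Hz, 1024-point FFT, 80 mel bands, hop 256) are common VITS defaults, not values read from this repo's configs:

import torch
from mel_processing import mel_spectrogram_torch

y = torch.rand(1, 22050) * 2 - 1  # one second of placeholder audio in [-1, 1]
mel = mel_spectrogram_torch(y, n_fft=1024, num_mels=80, sampling_rate=22050,
                            hop_size=256, win_size=1024, fmin=0.0, fmax=None)
print(mel.shape)  # torch.Size([1, 80, n_frames])
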
diff --git a/models.py b/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..8353b867f441de7e4d05aef980e672899c3a8889
--- /dev/null
+++ b/models.py
@@ -0,0 +1,533 @@
+import math
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+import commons
+import modules
+import attentions
+import monotonic_align
+
+from torch.nn import Conv1d, ConvTranspose1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+from commons import init_weights, get_padding
+
+
+class StochasticDurationPredictor(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+ super().__init__()
+ filter_channels = in_channels  # NOTE: this override should be removed in a future version.
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.log_flow = modules.Log()
+ self.flows = nn.ModuleList()
+ self.flows.append(modules.ElementwiseAffine(2))
+ for i in range(n_flows):
+ self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+ self.flows.append(modules.Flip())
+
+ self.post_pre = nn.Conv1d(1, filter_channels, 1)
+ self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+ self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+ self.post_flows = nn.ModuleList()
+ self.post_flows.append(modules.ElementwiseAffine(2))
+ for i in range(4):
+ self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+ self.post_flows.append(modules.Flip())
+
+ self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+ self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+ self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+ x = torch.detach(x)
+ x = self.pre(x)
+ if g is not None:
+ g = torch.detach(g)
+ x = x + self.cond(g)
+ x = self.convs(x, x_mask)
+ x = self.proj(x) * x_mask
+
+ if not reverse:
+ flows = self.flows
+ assert w is not None
+
+ logdet_tot_q = 0
+ h_w = self.post_pre(w)
+ h_w = self.post_convs(h_w, x_mask)
+ h_w = self.post_proj(h_w) * x_mask
+ e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+ z_q = e_q
+ for flow in self.post_flows:
+ z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+ logdet_tot_q += logdet_q
+ z_u, z1 = torch.split(z_q, [1, 1], 1)
+ u = torch.sigmoid(z_u) * x_mask
+ z0 = (w - u) * x_mask
+ logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2])
+ logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q
+
+ logdet_tot = 0
+ z0, logdet = self.log_flow(z0, x_mask)
+ logdet_tot += logdet
+ z = torch.cat([z0, z1], 1)
+ for flow in flows:
+ z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+ logdet_tot = logdet_tot + logdet
+ nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot
+ return nll + logq # [b]
+ else:
+ flows = list(reversed(self.flows))
+ flows = flows[:-2] + [flows[-1]] # remove a useless vflow
+ z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+ for flow in flows:
+ z = flow(z, x_mask, g=x, reverse=reverse)
+ z0, z1 = torch.split(z, [1, 1], 1)
+ logw = z0
+ return logw
+
+
+class DurationPredictor(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+ super().__init__()
+
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.gin_channels = gin_channels
+
+ self.drop = nn.Dropout(p_dropout)
+ self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2)
+ self.norm_1 = modules.LayerNorm(filter_channels)
+ self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2)
+ self.norm_2 = modules.LayerNorm(filter_channels)
+ self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+ def forward(self, x, x_mask, g=None):
+ x = torch.detach(x)
+ if g is not None:
+ g = torch.detach(g)
+ x = x + self.cond(g)
+ x = self.conv_1(x * x_mask)
+ x = torch.relu(x)
+ x = self.norm_1(x)
+ x = self.drop(x)
+ x = self.conv_2(x * x_mask)
+ x = torch.relu(x)
+ x = self.norm_2(x)
+ x = self.drop(x)
+ x = self.proj(x * x_mask)
+ return x * x_mask
+
+
+class TextEncoder(nn.Module):
+ def __init__(self,
+ n_vocab,
+ out_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout):
+ super().__init__()
+ self.n_vocab = n_vocab
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+
+ self.emb = nn.Embedding(n_vocab, hidden_channels)
+ nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
+
+ self.encoder = attentions.Encoder(
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+ self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths):
+ x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
+ x = torch.transpose(x, 1, -1) # [b, h, t]
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+
+ x = self.encoder(x * x_mask, x_mask)
+ stats = self.proj(x) * x_mask
+
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ return x, m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ n_flows=4,
+ gin_channels=0):
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.n_flows = n_flows
+ self.gin_channels = gin_channels
+
+ self.flows = nn.ModuleList()
+ for i in range(n_flows):
+ self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True))
+ self.flows.append(modules.Flip())
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ if not reverse:
+ for flow in self.flows:
+ x, _ = flow(x, x_mask, g=g, reverse=reverse)
+ else:
+ for flow in reversed(self.flows):
+ x = flow(x, x_mask, g=g, reverse=reverse)
+ return x
+
+
+class PosteriorEncoder(nn.Module):
+ def __init__(self,
+ in_channels,
+ out_channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ gin_channels=0):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+
+ self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+ self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+ self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+ def forward(self, x, x_lengths, g=None):
+ x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
+ x = self.pre(x) * x_mask
+ x = self.enc(x, x_mask, g=g)
+ stats = self.proj(x) * x_mask
+ m, logs = torch.split(stats, self.out_channels, dim=1)
+ z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+ return z, m, logs, x_mask
+
+
+class Generator(torch.nn.Module):
+ def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0):
+ super(Generator, self).__init__()
+ self.num_kernels = len(resblock_kernel_sizes)
+ self.num_upsamples = len(upsample_rates)
+ self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+ resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2
+
+ self.ups = nn.ModuleList()
+ for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
+ self.ups.append(weight_norm(
+ ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
+ k, u, padding=(k-u)//2)))
+
+ self.resblocks = nn.ModuleList()
+ for i in range(len(self.ups)):
+ ch = upsample_initial_channel//(2**(i+1))
+ for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
+ self.resblocks.append(resblock(ch, k, d))
+
+ self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+ self.ups.apply(init_weights)
+
+ if gin_channels != 0:
+ self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
+
+ def forward(self, x, g=None):
+ x = self.conv_pre(x)
+ if g is not None:
+ x = x + self.cond(g)
+
+ for i in range(self.num_upsamples):
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ x = self.ups[i](x)
+ xs = None
+ for j in range(self.num_kernels):
+ if xs is None:
+ xs = self.resblocks[i*self.num_kernels+j](x)
+ else:
+ xs += self.resblocks[i*self.num_kernels+j](x)
+ x = xs / self.num_kernels
+ x = F.leaky_relu(x)
+ x = self.conv_post(x)
+ x = torch.tanh(x)
+
+ return x
+
+ def remove_weight_norm(self):
+ print('Removing weight norm...')
+ for l in self.ups:
+ remove_weight_norm(l)
+ for l in self.resblocks:
+ l.remove_weight_norm()
+
+
+class DiscriminatorP(torch.nn.Module):
+ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
+ super(DiscriminatorP, self).__init__()
+ self.period = period
+ self.use_spectral_norm = use_spectral_norm
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))),
+ norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))),
+ ])
+ self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
+
+ def forward(self, x):
+ fmap = []
+
+ # 1d to 2d
+ b, c, t = x.shape
+ if t % self.period != 0: # pad first
+ n_pad = self.period - (t % self.period)
+ x = F.pad(x, (0, n_pad), "reflect")
+ t = t + n_pad
+ x = x.view(b, c, t // self.period, self.period)
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class DiscriminatorS(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(DiscriminatorS, self).__init__()
+ norm_f = spectral_norm if use_spectral_norm else weight_norm
+ self.convs = nn.ModuleList([
+ norm_f(Conv1d(1, 16, 15, 1, padding=7)),
+ norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
+ norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
+ norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
+ norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
+ norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
+ ])
+ self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
+
+ def forward(self, x):
+ fmap = []
+
+ for l in self.convs:
+ x = l(x)
+ x = F.leaky_relu(x, modules.LRELU_SLOPE)
+ fmap.append(x)
+ x = self.conv_post(x)
+ fmap.append(x)
+ x = torch.flatten(x, 1, -1)
+
+ return x, fmap
+
+
+class MultiPeriodDiscriminator(torch.nn.Module):
+ def __init__(self, use_spectral_norm=False):
+ super(MultiPeriodDiscriminator, self).__init__()
+ periods = [2,3,5,7,11]
+
+ discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
+ discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods]
+ self.discriminators = nn.ModuleList(discs)
+
+ def forward(self, y, y_hat):
+ y_d_rs = []
+ y_d_gs = []
+ fmap_rs = []
+ fmap_gs = []
+ for i, d in enumerate(self.discriminators):
+ y_d_r, fmap_r = d(y)
+ y_d_g, fmap_g = d(y_hat)
+ y_d_rs.append(y_d_r)
+ y_d_gs.append(y_d_g)
+ fmap_rs.append(fmap_r)
+ fmap_gs.append(fmap_g)
+
+ return y_d_rs, y_d_gs, fmap_rs, fmap_gs
+
+
+
+class SynthesizerTrn(nn.Module):
+ """
+ Synthesizer for Training
+ """
+
+ def __init__(self,
+ n_vocab,
+ spec_channels,
+ segment_size,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout,
+ resblock,
+ resblock_kernel_sizes,
+ resblock_dilation_sizes,
+ upsample_rates,
+ upsample_initial_channel,
+ upsample_kernel_sizes,
+ n_speakers=0,
+ gin_channels=0,
+ use_sdp=True,
+ **kwargs):
+
+ super().__init__()
+ self.n_vocab = n_vocab
+ self.spec_channels = spec_channels
+ self.inter_channels = inter_channels
+ self.hidden_channels = hidden_channels
+ self.filter_channels = filter_channels
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.kernel_size = kernel_size
+ self.p_dropout = p_dropout
+ self.resblock = resblock
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_initial_channel = upsample_initial_channel
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ self.segment_size = segment_size
+ self.n_speakers = n_speakers
+ self.gin_channels = gin_channels
+
+ self.use_sdp = use_sdp
+
+ self.enc_p = TextEncoder(n_vocab,
+ inter_channels,
+ hidden_channels,
+ filter_channels,
+ n_heads,
+ n_layers,
+ kernel_size,
+ p_dropout)
+ self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels)
+ self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels)
+ self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels)
+
+ if use_sdp:
+ self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels)
+ else:
+ self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels)
+
+ if n_speakers > 1:
+ self.emb_g = nn.Embedding(n_speakers, gin_channels)
+
+ def forward(self, x, x_lengths, y, y_lengths, sid=None):
+
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+ if self.n_speakers > 0:
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
+ else:
+ g = None
+
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
+ z_p = self.flow(z, y_mask, g=g)
+
+ with torch.no_grad():
+ # negative cross-entropy
+ s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]
+ neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s]
+ neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+ neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]
+ neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s]
+ neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4
+
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+ attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach()
+
+ w = attn.sum(2)
+ if self.use_sdp:
+ l_length = self.dp(x, x_mask, w, g=g)
+ l_length = l_length / torch.sum(x_mask)
+ else:
+ logw_ = torch.log(w + 1e-6) * x_mask
+ logw = self.dp(x, x_mask, g=g)
+ l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging
+
+ # expand prior
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)
+
+ z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size)
+ o = self.dec(z_slice, g=g)
+ return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
+
+ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
+ x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths)
+ if self.n_speakers > 0:
+ g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]
+ else:
+ g = None
+
+ if self.use_sdp:
+ logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w)
+ else:
+ logw = self.dp(x, x_mask, g=g)
+ w = torch.exp(logw) * x_mask * length_scale
+ w_ceil = torch.ceil(w)
+ y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()
+ y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype)
+ attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)
+ attn = commons.generate_path(w_ceil, attn_mask)
+
+ m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
+ logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t']
+
+ z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale
+ z = self.flow(z_p, y_mask, g=g, reverse=True)
+ o = self.dec((z * y_mask)[:,:,:max_len], g=g)
+ return o, attn, y_mask, (z, z_p, m_p, logs_p)
+
+ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
+ assert self.n_speakers > 0, "n_speakers has to be larger than 0."
+ g_src = self.emb_g(sid_src).unsqueeze(-1)
+ g_tgt = self.emb_g(sid_tgt).unsqueeze(-1)
+ z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src)
+ z_p = self.flow(z, y_mask, g=g_src)
+ z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True)
+ o_hat = self.dec(z_hat * y_mask, g=g_tgt)
+ return o_hat, y_mask, (z, z_p, z_hat)
+
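
A hedged end-to-end inference sketch for models.py. All hyperparameters below are the usual VITS defaults and are assumptions rather than values taken from this repo's configs; the repo's commons, attentions and transforms modules must be importable for this to run:

import torch
from models import SynthesizerTrn

net_g = SynthesizerTrn(
    n_vocab=100, spec_channels=513, segment_size=32,
    inter_channels=192, hidden_channels=192, filter_channels=768,
    n_heads=2, n_layers=6, kernel_size=3, p_dropout=0.1,
    resblock="1", resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
    upsample_rates=[8, 8, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4],
    n_speakers=0, gin_channels=0).eval()

x = torch.randint(1, 100, (1, 20))  # a fake phoneme-id sequence
x_lengths = torch.LongTensor([20])
with torch.no_grad():
    audio, attn, y_mask, _ = net_g.infer(
        x, x_lengths, noise_scale=0.667, noise_scale_w=0.8, length_scale=1.0)
print(audio.shape)  # (1, 1, n_samples)
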
diff --git a/modules.py b/modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..56ea4145eddf19dd330a3a41ab0183efc1686d83
--- /dev/null
+++ b/modules.py
@@ -0,0 +1,388 @@
+import math
+import numpy as np
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+import commons
+from commons import init_weights, get_padding
+from transforms import piecewise_rational_quadratic_transform
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+ def __init__(self, channels, eps=1e-5):
+ super().__init__()
+ self.channels = channels
+ self.eps = eps
+
+ self.gamma = nn.Parameter(torch.ones(channels))
+ self.beta = nn.Parameter(torch.zeros(channels))
+
+ def forward(self, x):
+ x = x.transpose(1, -1)
+ x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+ return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+ super().__init__()
+ self.in_channels = in_channels
+ self.hidden_channels = hidden_channels
+ self.out_channels = out_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+ assert n_layers > 1, "Number of layers should be larger than 1."
+
+ self.conv_layers = nn.ModuleList()
+ self.norm_layers = nn.ModuleList()
+ self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.relu_drop = nn.Sequential(
+ nn.ReLU(),
+ nn.Dropout(p_dropout))
+ for _ in range(n_layers-1):
+ self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
+ self.norm_layers.append(LayerNorm(hidden_channels))
+ self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask):
+ x_org = x
+ for i in range(self.n_layers):
+ x = self.conv_layers[i](x * x_mask)
+ x = self.norm_layers[i](x)
+ x = self.relu_drop(x)
+ x = x_org + self.proj(x)
+ return x * x_mask
+
+
+class DDSConv(nn.Module):
+ """
+ Dilated and Depth-Separable Convolution
+ """
+ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+ super().__init__()
+ self.channels = channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.p_dropout = p_dropout
+
+ self.drop = nn.Dropout(p_dropout)
+ self.convs_sep = nn.ModuleList()
+ self.convs_1x1 = nn.ModuleList()
+ self.norms_1 = nn.ModuleList()
+ self.norms_2 = nn.ModuleList()
+ for i in range(n_layers):
+ dilation = kernel_size ** i
+ padding = (kernel_size * dilation - dilation) // 2
+ self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+ groups=channels, dilation=dilation, padding=padding
+ ))
+ self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+ self.norms_1.append(LayerNorm(channels))
+ self.norms_2.append(LayerNorm(channels))
+
+ def forward(self, x, x_mask, g=None):
+ if g is not None:
+ x = x + g
+ for i in range(self.n_layers):
+ y = self.convs_sep[i](x * x_mask)
+ y = self.norms_1[i](y)
+ y = F.gelu(y)
+ y = self.convs_1x1[i](y)
+ y = self.norms_2[i](y)
+ y = F.gelu(y)
+ y = self.drop(y)
+ x = x + y
+ return x * x_mask
+
+
+class WN(torch.nn.Module):
+ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+ super(WN, self).__init__()
+ assert(kernel_size % 2 == 1)
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.gin_channels = gin_channels
+ self.p_dropout = p_dropout
+
+ self.in_layers = torch.nn.ModuleList()
+ self.res_skip_layers = torch.nn.ModuleList()
+ self.drop = nn.Dropout(p_dropout)
+
+ if gin_channels != 0:
+ cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
+ self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+ for i in range(n_layers):
+ dilation = dilation_rate ** i
+ padding = int((kernel_size * dilation - dilation) / 2)
+ in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
+ dilation=dilation, padding=padding)
+ in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+ self.in_layers.append(in_layer)
+
+ # last one is not necessary
+ if i < n_layers - 1:
+ res_skip_channels = 2 * hidden_channels
+ else:
+ res_skip_channels = hidden_channels
+
+ res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+ res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+ self.res_skip_layers.append(res_skip_layer)
+
+ def forward(self, x, x_mask, g=None, **kwargs):
+ output = torch.zeros_like(x)
+ n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+ if g is not None:
+ g = self.cond_layer(g)
+
+ for i in range(self.n_layers):
+ x_in = self.in_layers[i](x)
+ if g is not None:
+ cond_offset = i * 2 * self.hidden_channels
+ g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
+ else:
+ g_l = torch.zeros_like(x_in)
+
+ acts = commons.fused_add_tanh_sigmoid_multiply(
+ x_in,
+ g_l,
+ n_channels_tensor)
+ acts = self.drop(acts)
+
+ res_skip_acts = self.res_skip_layers[i](acts)
+ if i < self.n_layers - 1:
+ res_acts = res_skip_acts[:,:self.hidden_channels,:]
+ x = (x + res_acts) * x_mask
+ output = output + res_skip_acts[:,self.hidden_channels:,:]
+ else:
+ output = output + res_skip_acts
+ return output * x_mask
+
+ def remove_weight_norm(self):
+ if self.gin_channels != 0:
+ torch.nn.utils.remove_weight_norm(self.cond_layer)
+ for l in self.in_layers:
+ torch.nn.utils.remove_weight_norm(l)
+ for l in self.res_skip_layers:
+ torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+ super(ResBlock1, self).__init__()
+ self.convs1 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+ padding=get_padding(kernel_size, dilation[2])))
+ ])
+ self.convs1.apply(init_weights)
+
+ self.convs2 = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+ padding=get_padding(kernel_size, 1)))
+ ])
+ self.convs2.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c1, c2 in zip(self.convs1, self.convs2):
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c1(xt)
+ xt = F.leaky_relu(xt, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c2(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs1:
+ remove_weight_norm(l)
+ for l in self.convs2:
+ remove_weight_norm(l)
+
+
+class ResBlock2(torch.nn.Module):
+ def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
+ super(ResBlock2, self).__init__()
+ self.convs = nn.ModuleList([
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+ padding=get_padding(kernel_size, dilation[0]))),
+ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+ padding=get_padding(kernel_size, dilation[1])))
+ ])
+ self.convs.apply(init_weights)
+
+ def forward(self, x, x_mask=None):
+ for c in self.convs:
+ xt = F.leaky_relu(x, LRELU_SLOPE)
+ if x_mask is not None:
+ xt = xt * x_mask
+ xt = c(xt)
+ x = xt + x
+ if x_mask is not None:
+ x = x * x_mask
+ return x
+
+ def remove_weight_norm(self):
+ for l in self.convs:
+ remove_weight_norm(l)
+
+
+class Log(nn.Module):
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+ logdet = torch.sum(-y, [1, 2])
+ return y, logdet
+ else:
+ x = torch.exp(x) * x_mask
+ return x
+
+
+class Flip(nn.Module):
+ def forward(self, x, *args, reverse=False, **kwargs):
+ x = torch.flip(x, [1])
+ if not reverse:
+ logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+ return x, logdet
+ else:
+ return x
+
+
+class ElementwiseAffine(nn.Module):
+ def __init__(self, channels):
+ super().__init__()
+ self.channels = channels
+ self.m = nn.Parameter(torch.zeros(channels,1))
+ self.logs = nn.Parameter(torch.zeros(channels,1))
+
+ def forward(self, x, x_mask, reverse=False, **kwargs):
+ if not reverse:
+ y = self.m + torch.exp(self.logs) * x
+ y = y * x_mask
+ logdet = torch.sum(self.logs * x_mask, [1,2])
+ return y, logdet
+ else:
+ x = (x - self.m) * torch.exp(-self.logs) * x_mask
+ return x
+
+
+class ResidualCouplingLayer(nn.Module):
+ def __init__(self,
+ channels,
+ hidden_channels,
+ kernel_size,
+ dilation_rate,
+ n_layers,
+ p_dropout=0,
+ gin_channels=0,
+ mean_only=False):
+ assert channels % 2 == 0, "channels should be divisible by 2"
+ super().__init__()
+ self.channels = channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation_rate = dilation_rate
+ self.n_layers = n_layers
+ self.half_channels = channels // 2
+ self.mean_only = mean_only
+
+ self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+ self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
+ self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+ self.post.weight.data.zero_()
+ self.post.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels]*2, 1)
+ h = self.pre(x0) * x_mask
+ h = self.enc(h, x_mask, g=g)
+ stats = self.post(h) * x_mask
+ if not self.mean_only:
+ m, logs = torch.split(stats, [self.half_channels]*2, 1)
+ else:
+ m = stats
+ logs = torch.zeros_like(m)
+
+ if not reverse:
+ x1 = m + x1 * torch.exp(logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ logdet = torch.sum(logs, [1,2])
+ return x, logdet
+ else:
+ x1 = (x1 - m) * torch.exp(-logs) * x_mask
+ x = torch.cat([x0, x1], 1)
+ return x
+
+
+class ConvFlow(nn.Module):
+ def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
+ super().__init__()
+ self.in_channels = in_channels
+ self.filter_channels = filter_channels
+ self.kernel_size = kernel_size
+ self.n_layers = n_layers
+ self.num_bins = num_bins
+ self.tail_bound = tail_bound
+ self.half_channels = in_channels // 2
+
+ self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+ self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
+ self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
+ self.proj.weight.data.zero_()
+ self.proj.bias.data.zero_()
+
+ def forward(self, x, x_mask, g=None, reverse=False):
+ x0, x1 = torch.split(x, [self.half_channels]*2, 1)
+ h = self.pre(x0)
+ h = self.convs(h, x_mask, g=g)
+ h = self.proj(h) * x_mask
+
+ b, c, t = x0.shape
+ h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, c*(num_bins*3-1), t] -> [b, c, t, num_bins*3-1]
+
+ unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
+ unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
+ unnormalized_derivatives = h[..., 2 * self.num_bins:]
+
+ x1, logabsdet = piecewise_rational_quadratic_transform(x1,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=reverse,
+ tails='linear',
+ tail_bound=self.tail_bound
+ )
+
+ x = torch.cat([x0, x1], 1) * x_mask
+ logdet = torch.sum(logabsdet * x_mask, [1,2])
+ if not reverse:
+ return x, logdet
+ else:
+ return x
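
A quick invertibility sanity check for the flow layers in modules.py, using ElementwiseAffine as the simplest case (shapes and values are arbitrary; commons.py and transforms.py must be importable since modules.py imports them at module level):

import torch
from modules import ElementwiseAffine

torch.manual_seed(0)
x = torch.randn(2, 2, 7)
x_mask = torch.ones(2, 1, 7)

aff = ElementwiseAffine(2)
with torch.no_grad():
    aff.m.normal_()
    aff.logs.normal_()                      # give the affine non-trivial parameters
    y, logdet = aff(x, x_mask)              # forward: y = m + exp(logs) * x
    x_rec = aff(y, x_mask, reverse=True)    # reverse: x = (y - m) * exp(-logs)
print(torch.allclose(x, x_rec, atol=1e-5))  # expected: True
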
diff --git a/monotonic_align/__init__.py b/monotonic_align/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1199b58758d7b2f64d504a639bd8a2c62d055a2e
--- /dev/null
+++ b/monotonic_align/__init__.py
@@ -0,0 +1,20 @@
+from numpy import zeros, int32, float32
+from torch import from_numpy
+
+from .core import maximum_path_jit
+
+
+def maximum_path(neg_cent, mask):
+ """ numba optimized version.
+ neg_cent: [b, t_t, t_s]
+ mask: [b, t_t, t_s]
+ """
+ device = neg_cent.device
+ dtype = neg_cent.dtype
+ neg_cent = neg_cent.data.cpu().numpy().astype(float32)
+ path = zeros(neg_cent.shape, dtype=int32)
+
+ t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
+ t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
+ maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
+ return from_numpy(path).to(device=device, dtype=dtype)
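
A small sketch of how maximum_path is consumed: in SynthesizerTrn.forward the first time axis is the spectrogram length t_t and the second the text length t_s; the toy shapes below are arbitrary:

import torch
from monotonic_align import maximum_path

neg_cent = torch.randn(1, 6, 4)       # [b, t_t, t_s] alignment log-likelihoods
mask = torch.ones(1, 6, 4)            # [b, t_t, t_s] validity mask
path = maximum_path(neg_cent, mask)   # hard monotonic alignment, same shape
print(path.shape, path.sum().item())  # torch.Size([1, 6, 4]) 6.0 -- one text index per frame
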
diff --git a/monotonic_align/core.py b/monotonic_align/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8b06844fa479fa99a40e00f353d4af0b9a5d369
--- /dev/null
+++ b/monotonic_align/core.py
@@ -0,0 +1,36 @@
+import numba
+
+
+@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
+ nopython=True, nogil=True)
+def maximum_path_jit(paths, values, t_ys, t_xs):
+ b = paths.shape[0]
+ max_neg_val = -1e9
+ for i in range(int(b)):
+ path = paths[i]
+ value = values[i]
+ t_y = t_ys[i]
+ t_x = t_xs[i]
+
+ v_prev = v_cur = 0.0
+ index = t_x - 1
+
+ for y in range(t_y):
+ for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+ if x == y:
+ v_cur = max_neg_val
+ else:
+ v_cur = value[y - 1, x]
+ if x == 0:
+ if y == 0:
+ v_prev = 0.
+ else:
+ v_prev = max_neg_val
+ else:
+ v_prev = value[y - 1, x - 1]
+ value[y, x] += max(v_prev, v_cur)
+
+ for y in range(t_y - 1, -1, -1):
+ path[y, index] = 1
+ if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
+ index = index - 1
\ No newline at end of file
diff --git a/pretrained_models/abyssinvoker/abyssinvoker.pth b/pretrained_models/abyssinvoker/abyssinvoker.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1758406b3c00bfbc12605350772882b9172e5377
--- /dev/null
+++ b/pretrained_models/abyssinvoker/abyssinvoker.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7f20e5575d3f265c3b6a092751253df52726dd9b5d09252c267746c882698ea
+size 159706189
diff --git a/pretrained_models/abyssinvoker/cover.png b/pretrained_models/abyssinvoker/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..8b5536e8f104edc1259872a74582942a0202c987
Binary files /dev/null and b/pretrained_models/abyssinvoker/cover.png differ
diff --git a/pretrained_models/alice/alice.pth b/pretrained_models/alice/alice.pth
new file mode 100644
index 0000000000000000000000000000000000000000..257e86fc05aaa13c85b84bf0bcc93ab5819cbd51
--- /dev/null
+++ b/pretrained_models/alice/alice.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38ec9a70b25319dd05884242cf84c08ac337e27534f56fc90c5f5460251fb561
+size 159706189
diff --git a/pretrained_models/alice/cover.png b/pretrained_models/alice/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..f277dfa8989b745dc0da219859c1759d721daa58
Binary files /dev/null and b/pretrained_models/alice/cover.png differ
diff --git a/pretrained_models/ameth/ameth.pth b/pretrained_models/ameth/ameth.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ae55492ddb5650ad303ffa1ceabb077ce7139997
--- /dev/null
+++ b/pretrained_models/ameth/ameth.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:293a2d89ad400edfc9279bcf1cb3b32cfda484babdcafa71cabcfa1d1cad8495
+size 145471413
diff --git a/pretrained_models/ameth/cover.png b/pretrained_models/ameth/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..06b18bd35fc7f96475c3cbc5caaefb6ebef9e1f9
Binary files /dev/null and b/pretrained_models/ameth/cover.png differ
diff --git a/pretrained_models/asuna/asuna.pth b/pretrained_models/asuna/asuna.pth
new file mode 100644
index 0000000000000000000000000000000000000000..32036508f1d40b22fee6cb9f40565c325e78f925
--- /dev/null
+++ b/pretrained_models/asuna/asuna.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cdb530c944533c68a6169edb3acdedb1f3bd17e6e4a3dc71285c870e575a36a
+size 159706189
diff --git a/pretrained_models/asuna/cover.png b/pretrained_models/asuna/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..d8c9ce2dcf774582bc2cff971c27c50f9c35056a
Binary files /dev/null and b/pretrained_models/asuna/cover.png differ
diff --git a/pretrained_models/ayaka-jp/ayaka-jp.pth b/pretrained_models/ayaka-jp/ayaka-jp.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9a84eb2adb468348cde26cfd854161d32a203f3f
--- /dev/null
+++ b/pretrained_models/ayaka-jp/ayaka-jp.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa4dcf1ba1d782f85dcaf832ef4903872e801dee501f20cf7570841f6e4a0180
+size 159706189
diff --git a/pretrained_models/ayaka-jp/cover.png b/pretrained_models/ayaka-jp/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..eea64959df3fd0988b023f9efb526124c174f93e
Binary files /dev/null and b/pretrained_models/ayaka-jp/cover.png differ
diff --git a/pretrained_models/azusa/azusa.pth b/pretrained_models/azusa/azusa.pth
new file mode 100644
index 0000000000000000000000000000000000000000..c4013dffa24bf9ad1622ff22bfe610c5c79b44d3
--- /dev/null
+++ b/pretrained_models/azusa/azusa.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b29524ef3d13c8dfec9f1755fc2fb753ee40c2e313c1b7dccb2f805d57264911
+size 159706189
diff --git a/pretrained_models/azusa/cover.png b/pretrained_models/azusa/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..29795e1d907d516282f185dc2a83f942696c5176
Binary files /dev/null and b/pretrained_models/azusa/cover.png differ
diff --git a/pretrained_models/bronya/bronya.pth b/pretrained_models/bronya/bronya.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1ee207c1eddfbb57c8d85a00cd526fbef9dd9236
--- /dev/null
+++ b/pretrained_models/bronya/bronya.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03b67727a8cf31767d2ee266e86f03e6ec61aebe23a16663cf712ba22f0eaab5
+size 159706189
diff --git a/pretrained_models/bronya/cover.png b/pretrained_models/bronya/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..fd10213a60f84c49aa31e98dccc3a09e23515911
Binary files /dev/null and b/pretrained_models/bronya/cover.png differ
diff --git a/pretrained_models/chisato/chisato.pth b/pretrained_models/chisato/chisato.pth
new file mode 100644
index 0000000000000000000000000000000000000000..16f61d098ee7ae955ba0d7e36c6117c77853e681
--- /dev/null
+++ b/pretrained_models/chisato/chisato.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bfe2195080dd81503946f5d24f20ac0e337263a736c37dca22b54240bc310de
+size 145471413
diff --git a/pretrained_models/chisato/cover.png b/pretrained_models/chisato/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..70450e4236814aa7b7dc5574fb146979a85008b6
Binary files /dev/null and b/pretrained_models/chisato/cover.png differ
diff --git a/pretrained_models/doom/cover.png b/pretrained_models/doom/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..8c0bf2d7fe2e89a3305a3263f0e04de2b2255ce0
--- /dev/null
+++ b/pretrained_models/doom/cover.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5764d61453ed097338103b53fb7c5b0ced488db3a7806b35d913652cc262b5aa
+size 1027558
diff --git a/pretrained_models/doom/doom.pth b/pretrained_models/doom/doom.pth
new file mode 100644
index 0000000000000000000000000000000000000000..39ba9fba3244315f3f70a192a46ab84336977861
--- /dev/null
+++ b/pretrained_models/doom/doom.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee6f598ec8333f1dfd1e890a67e85ed3b2eaaf35f99f8eca02f4db8c70ab8d2d
+size 159706189
diff --git a/pretrained_models/echo/cover.png b/pretrained_models/echo/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..7744262797d789a79c323092856d2ab0f8c0a030
Binary files /dev/null and b/pretrained_models/echo/cover.png differ
diff --git a/pretrained_models/echo/echo.pth b/pretrained_models/echo/echo.pth
new file mode 100644
index 0000000000000000000000000000000000000000..8af951161c6cf8fddbb3e2e282601e4f74e3c600
--- /dev/null
+++ b/pretrained_models/echo/echo.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7050e1f5d8ed5dce02379aa77040cf4b3435e0c294b9260dd418c3072bd4ac70
+size 159706189
diff --git a/pretrained_models/eriko/cover.png b/pretrained_models/eriko/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..1283a6fedf204a4cc66edb7957c95a7b7bdcf6d8
Binary files /dev/null and b/pretrained_models/eriko/cover.png differ
diff --git a/pretrained_models/eriko/eriko.pth b/pretrained_models/eriko/eriko.pth
new file mode 100644
index 0000000000000000000000000000000000000000..3a83166f77019c897a7e658447e072b3f26eed68
--- /dev/null
+++ b/pretrained_models/eriko/eriko.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f211bfcad2d0512a8756002b14c4e5d2ed72ff7e0d0ac4fd038182fda696f81
+size 145471477
diff --git a/pretrained_models/eula/cover.png b/pretrained_models/eula/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..6bbe2118883ab9366f6f945ef1725fafea57ec49
Binary files /dev/null and b/pretrained_models/eula/cover.png differ
diff --git a/pretrained_models/eula/eula.pth b/pretrained_models/eula/eula.pth
new file mode 100644
index 0000000000000000000000000000000000000000..af36e7ef0c2ecfd917aad69887b48ffad2645cd0
--- /dev/null
+++ b/pretrained_models/eula/eula.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fbfaf3e5dbdca404efaf761cddd60630112bea395791accc96bff4b03039bac3
+size 159706189
diff --git a/pretrained_models/hatsune/cover.png b/pretrained_models/hatsune/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..d4b0619c4c1e83cab1762fa4316090f838e6b684
Binary files /dev/null and b/pretrained_models/hatsune/cover.png differ
diff --git a/pretrained_models/hatsune/hatsune.pth b/pretrained_models/hatsune/hatsune.pth
new file mode 100644
index 0000000000000000000000000000000000000000..da5cd6bc93fa2e8c0e2b71cc4e62ea4f56a2aab1
--- /dev/null
+++ b/pretrained_models/hatsune/hatsune.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93ff23ba52e7c6675d3a63baeb9531f5706a85666739c3378d4d0f1dc7b46df3
+size 159706189
diff --git a/pretrained_models/hina/cover.png b/pretrained_models/hina/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..ecf835edea709d6de8ab39518da433918ae3e54a
Binary files /dev/null and b/pretrained_models/hina/cover.png differ
diff --git a/pretrained_models/hina/hina.pth b/pretrained_models/hina/hina.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6e6be4e0ae5096c484d6f49219222b06d02a150c
--- /dev/null
+++ b/pretrained_models/hina/hina.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f1aafbf4ab512ffd076434f27ab8375709dd22eaf9e7ca5d2d0c96f8ccbde95
+size 159706189
diff --git a/pretrained_models/hiyori/cover.png b/pretrained_models/hiyori/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..94e5710a71de1be0b796e2dc3343dd1a4180760e
Binary files /dev/null and b/pretrained_models/hiyori/cover.png differ
diff --git a/pretrained_models/hiyori/hiyori.pth b/pretrained_models/hiyori/hiyori.pth
new file mode 100644
index 0000000000000000000000000000000000000000..fb64958f23e184dddc03dc4029e3dd055a3d6517
--- /dev/null
+++ b/pretrained_models/hiyori/hiyori.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52168d7d2d015236d1549bffc808b4084f7c3ee722893aec0ec0e4f560e421b9
+size 145471413
diff --git a/pretrained_models/hoshino/cover.png b/pretrained_models/hoshino/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..fb2dedd07c9e074658b2bc690bed6e698696122f
Binary files /dev/null and b/pretrained_models/hoshino/cover.png differ
diff --git a/pretrained_models/hoshino/hoshino.pth b/pretrained_models/hoshino/hoshino.pth
new file mode 100644
index 0000000000000000000000000000000000000000..b192f3739e003071c0acac70d8ebd05c30689191
--- /dev/null
+++ b/pretrained_models/hoshino/hoshino.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3510c37149fd6ecf791b0c056c6cb87ce3c5a53e06a240ca8aa48ddfe96b57b
+size 159706189
diff --git a/pretrained_models/info.json b/pretrained_models/info.json
new file mode 100644
index 0000000000000000000000000000000000000000..63bc61ff99fd2e8fac5c4a459ed089576927d49d
--- /dev/null
+++ b/pretrained_models/info.json
@@ -0,0 +1,398 @@
+{
+ "momoi": {
+ "enable": true,
+ "name_en": "Saiba Momoi",
+ "name_zh": "才羽桃井",
+ "title": "Blue Archive-才羽モモイ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "すずめの戸締り、みんなは何回みた〜?",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "yuni": {
+ "enable": true,
+ "name_en": "Yuni",
+ "name_zh": "优妮",
+ "title": "Princess Connect! Re:Dive-ユニ",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "それに新しいお菓子屋さんも出来てみんな買いものを楽しんでいます!",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "misora": {
+ "enable": true,
+ "name_en": "Misora",
+ "name_zh": "美空",
+ "title": "Princess Connect! Re:Dive-ミソラ",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "ただいま。お詫びにお前の好きなケーキを買ってきてやったから、一緒に喰おうな?",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "kyoka": {
+ "enable": true,
+ "name_en": "Kyoka",
+ "name_zh": "镜华",
+ "title": "Princess Connect! Re:Dive-キョウカ",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "まだまだ領内の発展に落ち着きは生まれそうにないわね",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "hiyori": {
+ "enable": true,
+ "name_en": "Hiyori",
+ "name_zh": "日和莉",
+ "title": "Princess Connect! Re:Dive-ヒヨリ",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "今日はいい天気ですね!",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "ameth": {
+ "enable": true,
+ "name_en": "Ameth",
+ "name_zh": "爱梅斯",
+ "title": "Princess Connect! Re:Dive-アメス",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "きょうは高気圧に緩やかに覆われるでしょう。沖縄と九州から北海道にかけて晴れる所が多くなりそうです。",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "hatsune": {
+ "enable": true,
+ "name_en": "Hatsune",
+ "name_zh": "柏崎初音",
+ "title": "Princess Connect! Re:Dive-柏崎初音",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "バトルでの役割や立ち回りをチェックしてみてくださいね!",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "eriko": {
+ "enable": true,
+ "name_en": "Eriko",
+ "name_zh": "惠理子",
+ "title": "Princess Connect! Re:Dive-倉石恵理子",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "プリンセスコネクト",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "pecorine": {
+ "enable": true,
+ "name_en": "Pecorine",
+ "name_zh": "佩可莉姆",
+ "title": "Princess Connect! Re:Dive-ペコリーヌ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね!",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "kokoro": {
+ "enable": true,
+ "name_en": "Kokoro",
+ "name_zh": "可可萝",
+ "title": "Princess Connect! Re:Dive-棗こころ",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "kyaru": {
+ "enable": true,
+ "name_en": "Kyaru",
+ "name_zh": "凯露",
+ "title": "Princess Connect! Re:Dive-キャル",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね!!",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "chisato": {
+ "enable": true,
+ "name_en": "Nishikigi Chisato",
+ "name_zh": "锦木千束",
+ "title": "Lycoris Recoil-錦木千束",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "takina": {
+ "enable": true,
+ "name_en": "Takina Inoue",
+ "name_zh": "井上泷奈",
+ "title": "Lycoris Recoil-井ノ上たきな",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "ayaka-jp": {
+ "enable": true,
+ "name_en": "ayaka-jp",
+ "name_zh": "神里绫华-日语",
+ "title": "Genshin Impact-神里綾華",
+ "cover": "cover.png",
+ "sid": 303,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "nahida-jp": {
+ "enable": true,
+ "name_en": "nahida-jp",
+ "name_zh": "纳西妲-日语",
+ "title": "Genshin Impact-ナヒーダ",
+ "cover": "cover.png",
+ "sid": 0,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "single"
+ },
+ "iroha": {
+ "enable": true,
+ "name_en": "Natsume Iroha",
+ "name_zh": "枣伊吕波",
+ "title": "Blue Archive-棗イロハ,",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "mika": {
+ "enable": true,
+ "name_en": "Misono Mika",
+ "name_zh": "圣园未花",
+ "title": "Blue Archive-聖園ミカ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "miyu": {
+ "enable": true,
+ "name_en": "Kasumizawa Miyu",
+ "name_zh": "霞泽美游",
+ "title": "Blue Archive-霞沢ミユ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "karin": {
+ "enable": false,
+ "name_en": "Kakudate Karin",
+ "name_zh": "角楯花凛",
+ "title": "Blue Archive-角楯カリン",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "asuna": {
+ "enable": false,
+ "name_en": "Ichinose Asuna",
+ "name_zh": "一之濑明日奈",
+ "title": "Blue Archive-一之瀬アスナ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "azusa": {
+ "enable": true,
+ "name_en": "Shirasu Azusa",
+ "name_zh": "白洲梓",
+ "title": "Blue Archive-白洲アズサ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "alice": {
+ "enable": true,
+ "name_en": "Tendou Alice",
+ "name_zh": "天童爱丽丝",
+ "title": "Blue Archive-天童アリス",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "shiroko": {
+ "enable": true,
+ "name_en": "Sunaookami Shiroko",
+ "name_zh": "砂狼白子",
+ "title": "Blue Archive-砂狼シロコ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "hoshino": {
+ "enable": false,
+ "name_en": "Takanasi Hosino",
+ "name_zh": "小鸟游星野",
+ "title": "Blue Archive-小鳥遊 ホシノ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "hina": {
+ "enable": true,
+ "name_en": "Sorasaki Hina",
+ "name_zh": "空崎日奈",
+ "title": "Blue Archive-空崎 ヒナ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "iori": {
+ "enable": true,
+ "name_en": "Shiromi Iori",
+ "name_zh": "银镜伊织",
+ "title": "Blue Archive-銀鏡イオリ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "izuna": {
+ "enable": true,
+ "name_en": "Kuda Izuna",
+ "name_zh": "久田泉奈",
+ "title": "Blue Archive-久田イズナ",
+ "cover": "cover.png",
+ "sid": 10,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "yuuka": {
+ "enable": true,
+ "name_en": "Hayase Yuuka",
+ "name_zh": "早濑优香",
+ "title": "Blue Archive-早瀬ユウカ",
+ "cover": "cover.png",
+ "sid": 40,
+ "example": "今日はいい天気ですね。",
+ "language": "Japanese",
+ "type": "multi"
+ },
+ "doom": {
+ "enable": true,
+ "name_en": "Doomfist",
+ "name_zh": "末日铁拳",
+ "title": "Overwatch 2-Doomfist",
+ "cover": "cover.png",
+ "sid": 93,
+ "example": "无需等待队列,并且没有长度限制",
+ "language": "Chinese",
+ "type": "multi"
+ },
+ "echo": {
+ "enable": true,
+ "name_en": "Echo",
+ "name_zh": "回声",
+ "title": "Overwatch 2-Echo",
+ "cover": "cover.png",
+ "sid": 93,
+ "example": "正在复制,派蒙",
+ "language": "Chinese",
+ "type": "multi"
+ },
+ "zenyatta": {
+ "enable": true,
+ "name_en": "Zenyatta",
+ "name_zh": "禅雅塔",
+ "title": "Overwatch 2-Zenyatta",
+ "cover": "cover.png",
+ "sid": 93,
+ "example": "今天晚上吃啥好呢",
+ "language": "Chinese",
+ "type": "multi"
+ },
+ "abyssinvoker": {
+ "enable": true,
+ "name_en": "Abyss Invoker",
+ "name_zh": "深渊使徒",
+ "title": "Genshin Impact-深渊使徒",
+ "cover": "cover.png",
+ "sid": 94,
+ "example": "今天晚上吃啥好呢",
+ "language": "Chinese",
+ "type": "multi"
+ },
+ "keqing": {
+ "enable": true,
+ "name_en": "Keqing",
+ "name_zh": "刻晴",
+ "title": "Genshin Impact-刻晴",
+ "cover": "cover.png",
+ "sid": 115,
+ "example": "今天晚上吃啥好呢",
+ "language": "Chinese",
+ "type": "multi"
+ },
+ "eula": {
+ "enable": true,
+ "name_en": "Eula",
+ "name_zh": "优菈",
+ "title": "Genshin Impact-优菈",
+ "cover": "cover.png",
+ "sid": 124,
+ "example": "今天晚上吃啥好呢",
+ "language": "Chinese",
+ "type": "multi"
+ },
+ "bronya": {
+ "enable": true,
+ "name_en": "Herrscher of Reason",
+ "name_zh": "理之律者",
+ "title": "Honkai Impact 3rd-理之律者",
+ "cover": "cover.png",
+ "sid": 193,
+ "example": "今天晚上吃啥好呢",
+ "language": "Chinese",
+ "type": "multi"
+ },
+ "theresa": {
+ "enable": true,
+ "name_en": "Theresa",
+ "name_zh": "德丽莎",
+ "title": "Honkai Impact 3rd-德丽莎",
+ "cover": "cover.png",
+ "sid": 193,
+ "example": "今天晚上吃啥好呢",
+ "language": "Chinese",
+ "type": "multi"
+ }
+}
\ No newline at end of file
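
An illustrative loader for pretrained_models/info.json; the paths follow the pretrained_models/<name>/<name>.pth layout visible in this diff, but the snippet itself is not part of the repo:

import json
import os

with open("pretrained_models/info.json", encoding="utf-8") as f:
    info = json.load(f)

for name, meta in info.items():
    if not meta["enable"]:
        continue  # skip entries flagged as disabled
    weights = os.path.join("pretrained_models", name, name + ".pth")
    cover = os.path.join("pretrained_models", name, meta["cover"])
    # "sid" picks the speaker inside a checkpoint when "type" is "multi"
    print(name, meta["name_en"], meta["language"], "sid =", meta["sid"], weights)
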
diff --git a/pretrained_models/iori/cover.png b/pretrained_models/iori/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..bc7b8320198f1ce970f75c9d22d7fcdd405cc7f4
Binary files /dev/null and b/pretrained_models/iori/cover.png differ
diff --git a/pretrained_models/iori/iori.pth b/pretrained_models/iori/iori.pth
new file mode 100644
index 0000000000000000000000000000000000000000..84a0f14d2b155d73518df0a745733ae1358d45a9
--- /dev/null
+++ b/pretrained_models/iori/iori.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e38336227ee7dfce8be61290461cae9396dedd45f90100a43466be9e341749c7
+size 159706189
diff --git a/pretrained_models/iroha/cover.png b/pretrained_models/iroha/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..7cf8f9be53d6c1a8230ce3463273310c49daf0bd
Binary files /dev/null and b/pretrained_models/iroha/cover.png differ
diff --git a/pretrained_models/iroha/iroha.pth b/pretrained_models/iroha/iroha.pth
new file mode 100644
index 0000000000000000000000000000000000000000..208bcd92ef714658de07bd24e199fdc1d704b825
--- /dev/null
+++ b/pretrained_models/iroha/iroha.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:045e9171a56078e5e517fc045b25651fa3ce4fa8afd4b1f98ecfc2b8e27cd4f9
+size 159706189
diff --git a/pretrained_models/izuna/cover.png b/pretrained_models/izuna/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..a3b6a9b1c5c90d3477dbe77cc9b6a64985737f2c
Binary files /dev/null and b/pretrained_models/izuna/cover.png differ
diff --git a/pretrained_models/izuna/izuna.pth b/pretrained_models/izuna/izuna.pth
new file mode 100644
index 0000000000000000000000000000000000000000..bfba8ab3ea2d774462e59769fd14d09f823f22d8
--- /dev/null
+++ b/pretrained_models/izuna/izuna.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81179e9ad3dae487a56a7bc868ac6e37cb3ded3577ec558ad87c5175647a24fc
+size 159706189
diff --git a/pretrained_models/karin/cover.png b/pretrained_models/karin/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..5a3b7ecbe2b66773443f31196c3c2687d88b8678
Binary files /dev/null and b/pretrained_models/karin/cover.png differ
diff --git a/pretrained_models/karin/karin.pth b/pretrained_models/karin/karin.pth
new file mode 100644
index 0000000000000000000000000000000000000000..93efe8a9fa1ee09d2da537e8318922006416019e
--- /dev/null
+++ b/pretrained_models/karin/karin.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9137becad3476d726300b50abf777e001bf0ae19cf427c4bc7e008d637c0436
+size 159706189
diff --git a/pretrained_models/keqing/cover.png b/pretrained_models/keqing/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..b360d63c9c72e7d4d4ea916174ba585c04277880
Binary files /dev/null and b/pretrained_models/keqing/cover.png differ
diff --git a/pretrained_models/keqing/keqing.pth b/pretrained_models/keqing/keqing.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4bc9be1b92345408d6e329093028fa0037284bfd
--- /dev/null
+++ b/pretrained_models/keqing/keqing.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:407ceaf8b1d765a14249b36ef731a8fb7ef99c863f4f684673d39da29a3e9460
+size 159706189
diff --git a/pretrained_models/kokoro/cover.png b/pretrained_models/kokoro/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..85f7cfb52158605ff5af8206830d043c4204e857
Binary files /dev/null and b/pretrained_models/kokoro/cover.png differ
diff --git a/pretrained_models/kokoro/kokoro.pth b/pretrained_models/kokoro/kokoro.pth
new file mode 100644
index 0000000000000000000000000000000000000000..ef541565bd5d38a77b9fef988f697dd5e993d367
--- /dev/null
+++ b/pretrained_models/kokoro/kokoro.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bfda9011cd85c2c80dac92f61db8beb007a76e969a9ea1f1d55007948d4a719
+size 145471413
diff --git a/pretrained_models/kyaru/cover.png b/pretrained_models/kyaru/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..297ce09b832bb0f7fbc7ab22288d51c257f1a59e
Binary files /dev/null and b/pretrained_models/kyaru/cover.png differ
diff --git a/pretrained_models/kyaru/kyaru.pth b/pretrained_models/kyaru/kyaru.pth
new file mode 100644
index 0000000000000000000000000000000000000000..4c76dfcde5188c889e8c5a558e9de053c8820c4a
--- /dev/null
+++ b/pretrained_models/kyaru/kyaru.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d70a08b235c9ade8ef85b566a18c176557280d166a96d241a0671885d59568a4
+size 159706189
diff --git a/pretrained_models/kyoka/cover.png b/pretrained_models/kyoka/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..b7109ebda54f760946a5ebf9fabd120fcefb4ba6
Binary files /dev/null and b/pretrained_models/kyoka/cover.png differ
diff --git a/pretrained_models/kyoka/kyoka.pth b/pretrained_models/kyoka/kyoka.pth
new file mode 100644
index 0000000000000000000000000000000000000000..aeb70820e8939b4c475ef7d06489d898ef09579c
--- /dev/null
+++ b/pretrained_models/kyoka/kyoka.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af05584069dae6f3e3519a55e8b77ef0536160a60376730c818ce56a9199c5a6
+size 159706189
diff --git a/pretrained_models/mika/cover.png b/pretrained_models/mika/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..af90a3742d4f61cc30579137306f37562538cbca
Binary files /dev/null and b/pretrained_models/mika/cover.png differ
diff --git a/pretrained_models/mika/mika.pth b/pretrained_models/mika/mika.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1cbc60a66dead5c298258ebc6229ddaf121abb96
--- /dev/null
+++ b/pretrained_models/mika/mika.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:095585ac6a8bcee11777b6572bec4835ff53c51ea4dabbaed08c931929cb71ac
+size 159706189
diff --git a/pretrained_models/misora/cover.png b/pretrained_models/misora/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..c9d0cf7b69192326f91a560ac05451763b88e6fc
Binary files /dev/null and b/pretrained_models/misora/cover.png differ
diff --git a/pretrained_models/misora/misora.pth b/pretrained_models/misora/misora.pth
new file mode 100644
index 0000000000000000000000000000000000000000..6dbe43fbade01065d881fc10d89d2db0f10cae8e
--- /dev/null
+++ b/pretrained_models/misora/misora.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c07bc0ac391a1b1d08adcc3137fa63a903e69e7b830a5e2d622d8fed461d9091
+size 145471413
diff --git a/pretrained_models/miyu/cover.png b/pretrained_models/miyu/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..393860cb7fe491b1316105b5793634433a08fb88
Binary files /dev/null and b/pretrained_models/miyu/cover.png differ
diff --git a/pretrained_models/miyu/miyu.pth b/pretrained_models/miyu/miyu.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a04bbe45cd9036476d486e68373a7e5506143889
--- /dev/null
+++ b/pretrained_models/miyu/miyu.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:512dc91b2d0d546870b49f526a089e33ec2318aa8ac5f53dadb7cb9507c9806d
+size 159706189
diff --git a/pretrained_models/momoi/cover.png b/pretrained_models/momoi/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..7ba5d34866b0e402e2804bb91dbdae90e5829ec5
Binary files /dev/null and b/pretrained_models/momoi/cover.png differ
diff --git a/pretrained_models/momoi/momoi.pth b/pretrained_models/momoi/momoi.pth
new file mode 100644
index 0000000000000000000000000000000000000000..251f796cfc474d6f136893e10e876a05bda4e7a9
--- /dev/null
+++ b/pretrained_models/momoi/momoi.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0460284930e3410cb882a2d87799c5d1218dc78fd87b741eea5448982d6b2985
+size 159706189
diff --git a/pretrained_models/nahida-jp/cover.png b/pretrained_models/nahida-jp/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..8ba98bc2ea9315e0c3a7584161cbef785c2779ca
Binary files /dev/null and b/pretrained_models/nahida-jp/cover.png differ
diff --git a/pretrained_models/nahida-jp/nahida-jp.pth b/pretrained_models/nahida-jp/nahida-jp.pth
new file mode 100644
index 0000000000000000000000000000000000000000..27e62e4e6b815536b49174376ba59c20f067dcde
--- /dev/null
+++ b/pretrained_models/nahida-jp/nahida-jp.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9796b8a28e8727e073641eaab2f67aa396636427e0a1293bbb4c34585d3ccddf
+size 145471413
diff --git a/pretrained_models/pecorine/cover.png b/pretrained_models/pecorine/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..edbd443885d4834f11c82d89621a2e9491e92eb0
Binary files /dev/null and b/pretrained_models/pecorine/cover.png differ
diff --git a/pretrained_models/pecorine/pecorine.pth b/pretrained_models/pecorine/pecorine.pth
new file mode 100644
index 0000000000000000000000000000000000000000..f0aecdd13107b5d934fbee90eaf7bd4b4b3e735f
--- /dev/null
+++ b/pretrained_models/pecorine/pecorine.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3394a1075a6bcd83280bc1b15cd449b664e7c214d96c93c33e86240f87d03fc5
+size 159706189
diff --git a/pretrained_models/shiroko/cover.png b/pretrained_models/shiroko/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..949e653fdf46f6b9b03cc1160dee7185356e3822
Binary files /dev/null and b/pretrained_models/shiroko/cover.png differ
diff --git a/pretrained_models/shiroko/shiroko.pth b/pretrained_models/shiroko/shiroko.pth
new file mode 100644
index 0000000000000000000000000000000000000000..a340ae9454aa1ab888f78710ef05b540f1db8b71
--- /dev/null
+++ b/pretrained_models/shiroko/shiroko.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dc1d2d5ff8ee028d320fc84d49627ae512a3db06695dad8954a4ecdcd11fed2
+size 159706189
diff --git a/pretrained_models/takina/cover.png b/pretrained_models/takina/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..61edd717b1615457a090cae05a8022cdc34538c9
Binary files /dev/null and b/pretrained_models/takina/cover.png differ
diff --git a/pretrained_models/takina/takina.pth b/pretrained_models/takina/takina.pth
new file mode 100644
index 0000000000000000000000000000000000000000..e40c981cc9fe27325005e0a9c2368ddec454de16
--- /dev/null
+++ b/pretrained_models/takina/takina.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03615f9966391d25c9e95611389edb46b3e1b0d3ba70519caefb1dabbc70c46c
+size 145471413
diff --git a/pretrained_models/theresa/cover.png b/pretrained_models/theresa/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..4bc394a352509d03ba3bbe1b7868b074e9e7db6a
Binary files /dev/null and b/pretrained_models/theresa/cover.png differ
diff --git a/pretrained_models/theresa/theresa.pth b/pretrained_models/theresa/theresa.pth
new file mode 100644
index 0000000000000000000000000000000000000000..79873439e48ba55498710e4c45e4b6d9a6ff2fd9
--- /dev/null
+++ b/pretrained_models/theresa/theresa.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfe9019d5f4279356db013ca167f1fd10e83a6cfbab1ffd1912db7d431027b72
+size 159706189
diff --git a/pretrained_models/yuni/cover.png b/pretrained_models/yuni/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..e0fcb99bd9a29a2a9f69835f31470ba91b019010
Binary files /dev/null and b/pretrained_models/yuni/cover.png differ
diff --git a/pretrained_models/yuni/yuni.pth b/pretrained_models/yuni/yuni.pth
new file mode 100644
index 0000000000000000000000000000000000000000..06879b8ae40ad06ce2ddffd8d4041ff6425a43e3
--- /dev/null
+++ b/pretrained_models/yuni/yuni.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f95ada54358fd6b46a1185a014a341963a6808c9ff9167ed0682ab8e0ce07ef8
+size 145471477
diff --git a/pretrained_models/yuuka/cover.png b/pretrained_models/yuuka/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..8483b06d5b5c4aba52fa398d24acf15a86d495e5
Binary files /dev/null and b/pretrained_models/yuuka/cover.png differ
diff --git a/pretrained_models/yuuka/yuuka.pth b/pretrained_models/yuuka/yuuka.pth
new file mode 100644
index 0000000000000000000000000000000000000000..7988e30d03e759059447d3ca302ccde013029dbd
--- /dev/null
+++ b/pretrained_models/yuuka/yuuka.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21fac223c418de4fd8a96250d9de4ceee5a13c88e5a6b665d82f20b63559a8fa
+size 159706189
diff --git a/pretrained_models/zenyatta/cover.png b/pretrained_models/zenyatta/cover.png
new file mode 100644
index 0000000000000000000000000000000000000000..b3e45d210c8bde688ebcb48f95d7e256241d94e9
--- /dev/null
+++ b/pretrained_models/zenyatta/cover.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a184421929aba82f615c18434d75ef1853b60563a65b0d2fe9421f7fa8c2c2bf
+size 1283257
diff --git a/pretrained_models/zenyatta/zenyatta.pth b/pretrained_models/zenyatta/zenyatta.pth
new file mode 100644
index 0000000000000000000000000000000000000000..1c33e311fd4d8ec69a340920ccc7481632a6a7bf
--- /dev/null
+++ b/pretrained_models/zenyatta/zenyatta.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed3eddf9780af94faaf2a28ce1f59af7a165d8190e05c06c0b28786f44cbd7ac
+size 159706189
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fdef1ec9f93a90d3469e3e3a073389fb3eaa8c67
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,17 @@
+Cython
+librosa
+matplotlib
+numpy
+phonemizer
+scipy
+tensorboard
+torch
+torchvision
+Unidecode
+pyopenjtalk
+ffmpeg
+jamo
+cn2an
+gradio==3.17.0
+pypinyin
+jieba
\ No newline at end of file
diff --git a/text/LICENSE b/text/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4ad4ed1d5e34d95c8380768ec16405d789cc6de4
--- /dev/null
+++ b/text/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Keith Ito
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/text/__init__.py b/text/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..663c4b6416affb53c9dc56dddbc8b2b65d4bf518
--- /dev/null
+++ b/text/__init__.py
@@ -0,0 +1,57 @@
+""" from https://github.com/keithito/tacotron """
+from text import cleaners
+from text.symbols import symbols
+
+
+# Mappings from symbol to numeric ID and vice versa:
+_symbol_to_id = {s: i for i, s in enumerate(symbols)}
+_id_to_symbol = {i: s for i, s in enumerate(symbols)}
+
+
+def text_to_sequence(text, symbols, cleaner_names):
+  '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
+    Args:
+      text: string to convert to a sequence
+      symbols: list of symbols used to build the symbol-to-ID mapping for this model
+      cleaner_names: names of the cleaner functions to run the text through
+    Returns:
+      A (sequence, clean_text) tuple: the list of integer symbol IDs and the cleaned text
+  '''
+ _symbol_to_id = {s: i for i, s in enumerate(symbols)}
+ sequence = []
+
+ clean_text = _clean_text(text, cleaner_names)
+ for symbol in clean_text:
+ if symbol not in _symbol_to_id.keys():
+ continue
+ symbol_id = _symbol_to_id[symbol]
+ sequence += [symbol_id]
+ return sequence, clean_text
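+
+# Illustrative usage sketch (the cleaner name below is an assumption; use whichever
+# cleaners the configs in this repo actually reference):
+#   seq, cleaned = text_to_sequence('こんにちは', symbols, ['japanese_cleaners'])
+#   # seq is the list of symbol IDs fed to the model, cleaned is the normalized text
+#   # produced by the cleaner pipeline.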
+
+
+def cleaned_text_to_sequence(cleaned_text):
+ '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
+ Args:
+ text: string to convert to a sequence
+ Returns:
+ List of integers corresponding to the symbols in the text
+ '''
+ sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
+ return sequence
+
+
+def sequence_to_text(sequence):
+ '''Converts a sequence of IDs back to a string'''
+ result = ''
+ for symbol_id in sequence:
+ s = _id_to_symbol[symbol_id]
+ result += s
+ return result
+
+
+def _clean_text(text, cleaner_names):
+ for name in cleaner_names:
+    cleaner = getattr(cleaners, name, None)
+ if not cleaner:
+ raise Exception('Unknown cleaner: %s' % name)
+ text = cleaner(text)
+ return text
diff --git a/text/cleaners.py b/text/cleaners.py
new file mode 100644
index 0000000000000000000000000000000000000000..68c9ad24d5a303b68a521fba2e8776c8cc867356
--- /dev/null
+++ b/text/cleaners.py
@@ -0,0 +1,475 @@
+""" from https://github.com/keithito/tacotron """
+
+'''
+Cleaners are transformations that run over the input text at both training and eval time.
+
+Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
+hyperparameter. Some cleaners are English-specific. You'll typically want to use:
+ 1. "english_cleaners" for English text
+ 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
+ the Unidecode library (https://pypi.python.org/pypi/Unidecode)
+ 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
+ the symbols in symbols.py to match your data).
+'''
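+
+# A minimal sketch of how these cleaners are typically wired up (illustrative only;
+# text/__init__.py resolves each name with getattr and applies them in order, and
+# english_cleaners is available only if it is defined further down in this file):
+#   from text import _clean_text
+#   _clean_text('Dr. Smith lives on St. John St.', ['english_cleaners'])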
+
+import re
+from unidecode import unidecode
+import pyopenjtalk
+from jamo import h2j, j2hcj
+from pypinyin import lazy_pinyin, BOPOMOFO
+import jieba, cn2an
+
+
+# This is a list of Korean classifiers preceded by pure Korean numerals.
+_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
+
+# Regular expression matching whitespace:
+_whitespace_re = re.compile(r'\s+')
+
+# Regular expression matching Japanese without punctuation marks:
+_japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
+
+# Regular expression matching non-Japanese characters or punctuation marks:
+_japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
+
+# List of (regular expression, replacement) pairs for abbreviations:
+_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('mrs', 'misess'),
+ ('mr', 'mister'),
+ ('dr', 'doctor'),
+ ('st', 'saint'),
+ ('co', 'company'),
+ ('jr', 'junior'),
+ ('maj', 'major'),
+ ('gen', 'general'),
+ ('drs', 'doctors'),
+ ('rev', 'reverend'),
+ ('lt', 'lieutenant'),
+ ('hon', 'honorable'),
+ ('sgt', 'sergeant'),
+ ('capt', 'captain'),
+ ('esq', 'esquire'),
+ ('ltd', 'limited'),
+ ('col', 'colonel'),
+ ('ft', 'fort'),
+]]
+
+# List of (hangul, hangul divided) pairs:
+_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
+ ('ㄳ', 'ㄱㅅ'),
+ ('ㄵ', 'ㄴㅈ'),
+ ('ㄶ', 'ㄴㅎ'),
+ ('ㄺ', 'ㄹㄱ'),
+ ('ㄻ', 'ㄹㅁ'),
+ ('ㄼ', 'ㄹㅂ'),
+ ('ㄽ', 'ㄹㅅ'),
+ ('ㄾ', 'ㄹㅌ'),
+ ('ㄿ', 'ㄹㅍ'),
+ ('ㅀ', 'ㄹㅎ'),
+ ('ㅄ', 'ㅂㅅ'),
+ ('ㅘ', 'ㅗㅏ'),
+ ('ㅙ', 'ㅗㅐ'),
+ ('ㅚ', 'ㅗㅣ'),
+ ('ㅝ', 'ㅜㅓ'),
+ ('ㅞ', 'ㅜㅔ'),
+ ('ㅟ', 'ㅜㅣ'),
+ ('ㅢ', 'ㅡㅣ'),
+ ('ㅑ', 'ㅣㅏ'),
+ ('ㅒ', 'ㅣㅐ'),
+ ('ㅕ', 'ㅣㅓ'),
+ ('ㅖ', 'ㅣㅔ'),
+ ('ㅛ', 'ㅣㅗ'),
+ ('ㅠ', 'ㅣㅜ')
+]]
+
+# List of (Latin alphabet, hangul) pairs:
+_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('a', '에이'),
+ ('b', '비'),
+ ('c', '시'),
+ ('d', '디'),
+ ('e', '이'),
+ ('f', '에프'),
+ ('g', '지'),
+ ('h', '에이치'),
+ ('i', '아이'),
+ ('j', '제이'),
+ ('k', '케이'),
+ ('l', '엘'),
+ ('m', '엠'),
+ ('n', '엔'),
+ ('o', '오'),
+ ('p', '피'),
+ ('q', '큐'),
+ ('r', '아르'),
+ ('s', '에스'),
+ ('t', '티'),
+ ('u', '유'),
+ ('v', '브이'),
+ ('w', '더블유'),
+ ('x', '엑스'),
+ ('y', '와이'),
+ ('z', '제트')
+]]
+
+# List of (Latin alphabet, bopomofo) pairs:
+_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('a', 'ㄟˉ'),
+ ('b', 'ㄅㄧˋ'),
+ ('c', 'ㄙㄧˉ'),
+ ('d', 'ㄉㄧˋ'),
+ ('e', 'ㄧˋ'),
+ ('f', 'ㄝˊㄈㄨˋ'),
+ ('g', 'ㄐㄧˋ'),
+ ('h', 'ㄝˇㄑㄩˋ'),
+ ('i', 'ㄞˋ'),
+ ('j', 'ㄐㄟˋ'),
+ ('k', 'ㄎㄟˋ'),
+ ('l', 'ㄝˊㄛˋ'),
+ ('m', 'ㄝˊㄇㄨˋ'),
+ ('n', 'ㄣˉ'),
+ ('o', 'ㄡˉ'),
+ ('p', 'ㄆㄧˉ'),
+ ('q', 'ㄎㄧㄡˉ'),
+ ('r', 'ㄚˋ'),
+ ('s', 'ㄝˊㄙˋ'),
+ ('t', 'ㄊㄧˋ'),
+ ('u', 'ㄧㄡˉ'),
+ ('v', 'ㄨㄧˉ'),
+ ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
+ ('x', 'ㄝˉㄎㄨˋㄙˋ'),
+ ('y', 'ㄨㄞˋ'),
+ ('z', 'ㄗㄟˋ')
+]]
+
+
+# List of (bopomofo, romaji) pairs:
+_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
+ ('ㄅㄛ', 'p⁼wo'),
+ ('ㄆㄛ', 'pʰwo'),
+ ('ㄇㄛ', 'mwo'),
+ ('ㄈㄛ', 'fwo'),
+ ('ㄅ', 'p⁼'),
+ ('ㄆ', 'pʰ'),
+ ('ㄇ', 'm'),
+ ('ㄈ', 'f'),
+ ('ㄉ', 't⁼'),
+ ('ㄊ', 'tʰ'),
+ ('ㄋ', 'n'),
+ ('ㄌ', 'l'),
+ ('ㄍ', 'k⁼'),
+ ('ㄎ', 'kʰ'),
+ ('ㄏ', 'h'),
+ ('ㄐ', 'ʧ⁼'),
+ ('ㄑ', 'ʧʰ'),
+ ('ㄒ', 'ʃ'),
+ ('ㄓ', 'ʦ`⁼'),
+ ('ㄔ', 'ʦ`ʰ'),
+ ('ㄕ', 's`'),
+ ('ㄖ', 'ɹ`'),
+ ('ㄗ', 'ʦ⁼'),
+ ('ㄘ', 'ʦʰ'),
+ ('ㄙ', 's'),
+ ('ㄚ', 'a'),
+ ('ㄛ', 'o'),
+ ('ㄜ', 'ə'),
+ ('ㄝ', 'e'),
+ ('ㄞ', 'ai'),
+ ('ㄟ', 'ei'),
+ ('ㄠ', 'au'),
+ ('ㄡ', 'ou'),
+ ('ㄧㄢ', 'yeNN'),
+ ('ㄢ', 'aNN'),
+ ('ㄧㄣ', 'iNN'),
+ ('ㄣ', 'əNN'),
+ ('ㄤ', 'aNg'),
+ ('ㄧㄥ', 'iNg'),
+ ('ㄨㄥ', 'uNg'),
+ ('ㄩㄥ', 'yuNg'),
+ ('ㄥ', 'əNg'),
+ ('ㄦ', 'əɻ'),
+ ('ㄧ', 'i'),
+ ('ㄨ', 'u'),
+ ('ㄩ', 'ɥ'),
+ ('ˉ', '→'),
+ ('ˊ', '↑'),
+ ('ˇ', '↓↑'),
+ ('ˋ', '↓'),
+ ('˙', ''),
+ (',', ','),
+ ('。', '.'),
+ ('!', '!'),
+ ('?', '?'),
+ ('—', '-')
+]]
+
+
+def expand_abbreviations(text):
+ for regex, replacement in _abbreviations:
+ text = re.sub(regex, replacement, text)
+ return text
+
+
+def lowercase(text):
+ return text.lower()
+
+
+def collapse_whitespace(text):
+ return re.sub(_whitespace_re, ' ', text)
+
+
+def convert_to_ascii(text):
+ return unidecode(text)
+
+
+def japanese_to_romaji_with_accent(text):
+ '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
+ sentences = re.split(_japanese_marks, text)
+ marks = re.findall(_japanese_marks, text)
+ text = ''
+ for i, sentence in enumerate(sentences):
+ if re.match(_japanese_characters, sentence):
+ if text!='':
+ text+=' '
+ labels = pyopenjtalk.extract_fullcontext(sentence)
+ for n, label in enumerate(labels):
+ phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
+ if phoneme not in ['sil','pau']:
+ text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
+ else:
+ continue
+ n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
+ a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
+ a2 = int(re.search(r"\+(\d+)\+", label).group(1))
+ a3 = int(re.search(r"\+(\d+)/", label).group(1))
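+                # The /A: field of each full-context label encodes accent information:
+                # roughly, a1 is the mora position relative to the accent nucleus,
+                # a2 is the mora's forward position within the accent phrase, and
+                # a3 counts backwards from the end of the phrase. Together with
+                # n_moras they decide where the ↑/↓ pitch marks are inserted below.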
+ if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
+ a2_next=-1
+ else:
+ a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
+ # Accent phrase boundary
+ if a3 == 1 and a2_next == 1:
+ text += ' '
+ # Falling
+ elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
+ text += '↓'
+ # Rising
+ elif a2 == 1 and a2_next == 2:
+ text += '↑'
+        if i < len(marks):
+            text += unidecode(marks[i]).replace(' ', '')
+    return text

[... remainder of text/cleaners.py, plus the diff header and module preamble (imports and DEFAULT_MIN_* constants) of the file containing the spline transforms, truncated ...]

+def searchsorted(bin_locations, inputs, eps=1e-6):
+    bin_locations[..., -1] += eps
+    return torch.sum(
+        inputs[..., None] >= bin_locations,
+        dim=-1
+    ) - 1
+
+
+def unconstrained_rational_quadratic_spline(inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ tails='linear',
+ tail_bound=1.,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
+ inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
+ outside_interval_mask = ~inside_interval_mask
+
+ outputs = torch.zeros_like(inputs)
+ logabsdet = torch.zeros_like(inputs)
+
+ if tails == 'linear':
+ unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
+ constant = np.log(np.exp(1 - min_derivative) - 1)
+ unnormalized_derivatives[..., 0] = constant
+ unnormalized_derivatives[..., -1] = constant
+
+ outputs[outside_interval_mask] = inputs[outside_interval_mask]
+ logabsdet[outside_interval_mask] = 0
+ else:
+ raise RuntimeError('{} tails are not implemented.'.format(tails))
+
+ outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
+ inputs=inputs[inside_interval_mask],
+ unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
+ unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
+ unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
+ inverse=inverse,
+ left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
+ min_bin_width=min_bin_width,
+ min_bin_height=min_bin_height,
+ min_derivative=min_derivative
+ )
+
+ return outputs, logabsdet
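+
+# Illustrative call (tensor shapes and tail_bound are assumptions; in VITS-style
+# flows the unnormalized_* parameters are predicted by a small conditioning network):
+#   y, logabsdet = unconstrained_rational_quadratic_spline(
+#       x, w, h, d, inverse=False, tails='linear', tail_bound=5.0)
+#   # inputs with |x| > tail_bound pass through the identity tails unchanged; the
+#   # rest go through the monotonic rational-quadratic spline defined below.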
+
+def rational_quadratic_spline(inputs,
+ unnormalized_widths,
+ unnormalized_heights,
+ unnormalized_derivatives,
+ inverse=False,
+ left=0., right=1., bottom=0., top=1.,
+ min_bin_width=DEFAULT_MIN_BIN_WIDTH,
+ min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
+ min_derivative=DEFAULT_MIN_DERIVATIVE):
+ if torch.min(inputs) < left or torch.max(inputs) > right:
+ raise ValueError('Input to a transform is not within its domain')
+
+ num_bins = unnormalized_widths.shape[-1]
+
+ if min_bin_width * num_bins > 1.0:
+ raise ValueError('Minimal bin width too large for the number of bins')
+ if min_bin_height * num_bins > 1.0:
+ raise ValueError('Minimal bin height too large for the number of bins')
+
+ widths = F.softmax(unnormalized_widths, dim=-1)
+ widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
+ cumwidths = torch.cumsum(widths, dim=-1)
+ cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
+ cumwidths = (right - left) * cumwidths + left
+ cumwidths[..., 0] = left
+ cumwidths[..., -1] = right
+ widths = cumwidths[..., 1:] - cumwidths[..., :-1]
+
+ derivatives = min_derivative + F.softplus(unnormalized_derivatives)
+
+ heights = F.softmax(unnormalized_heights, dim=-1)
+ heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
+ cumheights = torch.cumsum(heights, dim=-1)
+ cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
+ cumheights = (top - bottom) * cumheights + bottom
+ cumheights[..., 0] = bottom
+ cumheights[..., -1] = top
+ heights = cumheights[..., 1:] - cumheights[..., :-1]
+
+ if inverse:
+ bin_idx = searchsorted(cumheights, inputs)[..., None]
+ else:
+ bin_idx = searchsorted(cumwidths, inputs)[..., None]
+
+ input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
+ input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
+
+ input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
+ delta = heights / widths
+ input_delta = delta.gather(-1, bin_idx)[..., 0]
+
+ input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
+ input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
+
+ input_heights = heights.gather(-1, bin_idx)[..., 0]
+
+ if inverse:
+ a = (((inputs - input_cumheights) * (input_derivatives
+ + input_derivatives_plus_one
+ - 2 * input_delta)
+ + input_heights * (input_delta - input_derivatives)))
+ b = (input_heights * input_derivatives
+ - (inputs - input_cumheights) * (input_derivatives
+ + input_derivatives_plus_one
+ - 2 * input_delta))
+ c = - input_delta * (inputs - input_cumheights)
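+        # a, b and c above are the coefficients of the quadratic
+        # a*theta^2 + b*theta + c = 0 in the normalized bin position theta; inverting
+        # the monotonic rational-quadratic segment amounts to taking its root in
+        # [0, 1]. The root below is computed as 2c / (-b - sqrt(b^2 - 4ac)), an
+        # algebraically equivalent form of the quadratic formula that avoids
+        # cancellation between b and sqrt(discriminant).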
+
+ discriminant = b.pow(2) - 4 * a * c
+ assert (discriminant >= 0).all()
+
+ root = (2 * c) / (-b - torch.sqrt(discriminant))
+ outputs = root * input_bin_widths + input_cumwidths
+
+ theta_one_minus_theta = root * (1 - root)
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta)
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - root).pow(2))
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, -logabsdet
+ else:
+ theta = (inputs - input_cumwidths) / input_bin_widths
+ theta_one_minus_theta = theta * (1 - theta)
+
+ numerator = input_heights * (input_delta * theta.pow(2)
+ + input_derivatives * theta_one_minus_theta)
+ denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
+ * theta_one_minus_theta)
+ outputs = input_cumheights + numerator / denominator
+
+ derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
+ + 2 * input_delta * theta_one_minus_theta
+ + input_derivatives * (1 - theta).pow(2))
+ logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
+
+ return outputs, logabsdet
diff --git a/utils.py b/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee4b01ddfbe8173965371b29f770f3e87615fe71
--- /dev/null
+++ b/utils.py
@@ -0,0 +1,225 @@
+import os
+import sys
+import argparse
+import logging
+import json
+import subprocess
+import numpy as np
+import librosa
+import torch
+
+MATPLOTLIB_FLAG = False
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+logger = logging
+
+
+def load_checkpoint(checkpoint_path, model, optimizer=None):
+ assert os.path.isfile(checkpoint_path)
+ checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
+ iteration = checkpoint_dict['iteration']
+ learning_rate = checkpoint_dict['learning_rate']
+ if optimizer is not None:
+ optimizer.load_state_dict(checkpoint_dict['optimizer'])
+ saved_state_dict = checkpoint_dict['model']
+ if hasattr(model, 'module'):
+ state_dict = model.module.state_dict()
+ else:
+ state_dict = model.state_dict()
+  new_state_dict = {}
+  for k, v in state_dict.items():
+    try:
+      new_state_dict[k] = saved_state_dict[k]
+    except KeyError:
+      logger.info("%s is not in the checkpoint" % k)
+      new_state_dict[k] = v
+ if hasattr(model, 'module'):
+ model.module.load_state_dict(new_state_dict)
+ else:
+ model.load_state_dict(new_state_dict)
+ logger.info("Loaded checkpoint '{}' (iteration {})" .format(
+ checkpoint_path, iteration))
+ return model, optimizer, learning_rate, iteration
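+
+# Illustrative usage (the model class is hypothetical; any nn.Module whose state_dict
+# matches the checkpoint works, assuming the file stores the usual
+# iteration/learning_rate/model keys):
+#   net = SynthesizerTrn(...)  # hypothetical VITS-style model
+#   net, _, lr, iteration = load_checkpoint('pretrained_models/yuuka/yuuka.pth', net)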
+
+
+def plot_spectrogram_to_numpy(spectrogram):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger('matplotlib')
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(10,2))
+ im = ax.imshow(spectrogram, aspect="auto", origin="lower",
+ interpolation='none')
+ plt.colorbar(im, ax=ax)
+ plt.xlabel("Frames")
+ plt.ylabel("Channels")
+ plt.tight_layout()
+
+ fig.canvas.draw()
+  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def plot_alignment_to_numpy(alignment, info=None):
+ global MATPLOTLIB_FLAG
+ if not MATPLOTLIB_FLAG:
+ import matplotlib
+ matplotlib.use("Agg")
+ MATPLOTLIB_FLAG = True
+ mpl_logger = logging.getLogger('matplotlib')
+ mpl_logger.setLevel(logging.WARNING)
+ import matplotlib.pylab as plt
+ import numpy as np
+
+ fig, ax = plt.subplots(figsize=(6, 4))
+ im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
+ interpolation='none')
+ fig.colorbar(im, ax=ax)
+ xlabel = 'Decoder timestep'
+ if info is not None:
+ xlabel += '\n\n' + info
+ plt.xlabel(xlabel)
+ plt.ylabel('Encoder timestep')
+ plt.tight_layout()
+
+ fig.canvas.draw()
+  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
+ data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
+ plt.close()
+ return data
+
+
+def load_audio_to_torch(full_path, target_sampling_rate):
+ audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
+ return torch.FloatTensor(audio.astype(np.float32))
+
+
+def load_filepaths_and_text(filename, split="|"):
+ with open(filename, encoding='utf-8') as f:
+ filepaths_and_text = [line.strip().split(split) for line in f]
+ return filepaths_and_text
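+
+# Filelist lines are split on '|'; a typical (illustrative) line might look like:
+#   wavs/yuuka_0001.wav|40|今日はいい天気ですね。
+# where the middle field would be a speaker id — the exact column layout depends on
+# the data loader, which is not part of this diff.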
+
+
+def get_hparams(init=True):
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
+ help='JSON file for configuration')
+ parser.add_argument('-m', '--model', type=str, required=True,
+ help='Model name')
+
+ args = parser.parse_args()
+ model_dir = os.path.join("./logs", args.model)
+
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+
+ config_path = args.config
+ config_save_path = os.path.join(model_dir, "config.json")
+ if init:
+ with open(config_path, "r") as f:
+ data = f.read()
+ with open(config_save_path, "w") as f:
+ f.write(data)
+ else:
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+ hparams = HParams(**config)
+ hparams.model_dir = model_dir
+ return hparams
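+
+# Typical invocation from a training script (script name is an assumption):
+#   python train.py -c configs/base.json -m my_model
+# which copies the config to ./logs/my_model/config.json and returns it as HParams.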
+
+
+def get_hparams_from_dir(model_dir):
+ config_save_path = os.path.join(model_dir, "config.json")
+ with open(config_save_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+  hparams = HParams(**config)
+ hparams.model_dir = model_dir
+ return hparams
+
+
+def get_hparams_from_file(config_path):
+ with open(config_path, "r") as f:
+ data = f.read()
+ config = json.loads(data)
+
+  hparams = HParams(**config)
+ return hparams
+
+
+def check_git_hash(model_dir):
+ source_dir = os.path.dirname(os.path.realpath(__file__))
+ if not os.path.exists(os.path.join(source_dir, ".git")):
+ logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
+ source_dir
+ ))
+ return
+
+ cur_hash = subprocess.getoutput("git rev-parse HEAD")
+
+ path = os.path.join(model_dir, "githash")
+ if os.path.exists(path):
+ saved_hash = open(path).read()
+ if saved_hash != cur_hash:
+ logger.warn("git hash values are different. {}(saved) != {}(current)".format(
+ saved_hash[:8], cur_hash[:8]))
+ else:
+ open(path, "w").write(cur_hash)
+
+
+def get_logger(model_dir, filename="train.log"):
+ global logger
+ logger = logging.getLogger(os.path.basename(model_dir))
+ logger.setLevel(logging.DEBUG)
+
+ formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
+ if not os.path.exists(model_dir):
+ os.makedirs(model_dir)
+ h = logging.FileHandler(os.path.join(model_dir, filename))
+ h.setLevel(logging.DEBUG)
+ h.setFormatter(formatter)
+ logger.addHandler(h)
+ return logger
+
+
+class HParams():
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ if type(v) == dict:
+ v = HParams(**v)
+ self[k] = v
+
+ def keys(self):
+ return self.__dict__.keys()
+
+ def items(self):
+ return self.__dict__.items()
+
+ def values(self):
+ return self.__dict__.values()
+
+ def __len__(self):
+ return len(self.__dict__)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __setitem__(self, key, value):
+ return setattr(self, key, value)
+
+ def __contains__(self, key):
+ return key in self.__dict__
+
+ def __repr__(self):
+ return self.__dict__.__repr__()
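+
+
+# Minimal usage sketch (keys are illustrative, not the real config schema):
+#   hps = HParams(train={'batch_size': 16}, model={'hidden_channels': 192})
+#   hps.train.batch_size   # nested dicts become nested HParams with attribute access
+#   'model' in hps         # True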