# ----------------------------------------------------------------------------
# project-NN-Pytorch-scripts-master/project/07-asvspoof-ssl/model-W2V-Large1-fix-LLGF/model.py
# ----------------------------------------------------------------------------
#!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq
class SSLModel():
def __init__(self, cp_path, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args:
cp_path: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([cp_path])
self.model = model[0]
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" feature = extract_feat(input_data)
Args:
input_data: tensor, waveform, (batch, length)
Return:
feature: tensor, feature, (batch, frame_num, feat_dim)
"""
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
self.model.eval()
with torch.no_grad():
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
# Not an ideal way to hard-code the path, but it is fixed in this project
ssl_path = os.path.dirname(__file__) + '/../../../SSL_pretrained/libri960_big.pt'
# This model produces 1024 output feature dimensions (per frame)
ssl_orig_output_dim = 1024
# SSL model is declared as a global var since it is fixed
g_ssl_model = SSLModel(ssl_path, ssl_orig_output_dim)
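# A minimal usage sketch of the SSL front-end (illustrative only; it assumes
# the checkpoint above has been downloaded by the project's download script):
#
#   wav = torch.randn(2, 16000)              # (batch, length) waveform
#   emb = g_ssl_model.extract_feat(wav)      # -> (2, frame_num, 1024)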
#################
## Misc functions
#################
# A function to load the in/out-distribution label for OOD detection.
# It is just a placeholder in this project
g_attack_map = {}
def protocol_parse_general(protocol_filepaths, g_map, sep=' ', target_row=-1):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
The format is:
SPEAKER TRIAL_NAME - SPOOF_TYPE TAG
LA_0031 LA_E_5932896 - A13 spoof
LA_0030 LA_E_5849185 - - bonafide
...
input:
-----
    protocol_filepaths: string or list of strings, path(s) to the protocol file(s)
    g_map: dict, mapping from the protocol tag to the in/out-distribution label
    sep: string, default ' ', column separator in the protocol file
    target_row: int, default -1, use line[target_row] as the target label
    output:
    -------
    data_buffer: dict, data_buffer[filename] -> True (in-dist.), False (out-of-dist.)
"""
data_buffer = nii_asvspoof.CustomDict(missing_value=True)
if type(protocol_filepaths) is str:
tmp = [protocol_filepaths]
else:
tmp = protocol_filepaths
for protocol_filepath in tmp:
if len(protocol_filepath) and os.path.isfile(protocol_filepath):
with open(protocol_filepath, 'r') as file_ptr:
for line in file_ptr:
line = line.rstrip('\n')
cols = line.split(sep)
if g_map:
try:
data_buffer[cols[1]] = g_map[cols[target_row]]
except KeyError:
data_buffer[cols[1]] = False
else:
data_buffer[cols[1]] = True
return data_buffer
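# Illustrative example (hypothetical file names): given a protocol line
#   'LA_0031 LA_E_5932896 - A13 spoof'
# and g_map = {'A13': False}, sep=' ', target_row=-2, the parser stores
#   data_buffer['LA_E_5932896'] = g_map['A13'] = False   # out-of-dist.
# With an empty g_map (as in this project), every file is marked True.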
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_f = prj_conf.optional_argument
# Load CM protocol (if available)
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
        # Load OOD protocol (if available)
self.in_out_parser = protocol_parse_general(protocol_f, g_attack_map,
' ', -2)
# Working sampling rate
# torchaudio may be used to change sampling rate
#self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
#self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
#
self.v_feat_dim = [128]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.v_feat_dim)
# dimension of embedding vectors
        # here, the embedding is the activation before the final output layer
self.v_emd_dim = None
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
#
self.m_before_pooling = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# confidence predictor
self.m_conf = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, v_feat_dim in enumerate(self.v_feat_dim):
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Dropout(0.7)
)
)
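            # Note on channel sizes: nii_nn.MaxFeatureMap2D halves the channel
            # dimension by an element-wise max over the two halves of the
            # channels, which is why Conv2d(1, 64, ...) is followed by a layer
            # expecting 32 channels. A minimal sketch of the idea (not the
            # sandbox implementation itself):
            #
            #   def max_feature_map_2d(x):
            #       # x: (batch, channel, height, width), channel is even
            #       a, b = x.split(x.shape[1] // 2, dim=1)
            #       return torch.max(a, b)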
self.m_before_pooling.append(
torch_nn.Sequential(
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32),
nii_nn.BLSTMLayer((v_feat_dim//16) * 32,
(v_feat_dim//16) * 32)
)
)
if self.v_emd_dim is None:
self.v_emd_dim = (v_feat_dim // 16) * 32
else:
assert self.v_emd_dim == (v_feat_dim//16)*32, "v_emd_dim error"
self.m_output_act.append(
torch_nn.Linear((v_feat_dim // 16) * 32, self.v_out_class)
)
self.m_frontend.append(
torch_nn.Linear(g_ssl_model.out_dim, v_feat_dim)
)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_before_pooling = torch_nn.ModuleList(self.m_before_pooling)
# output
self.m_loss = torch_nn.CrossEntropyLoss()
self.m_temp = 1
self.m_lambda = 0.
self.m_e_m_in = -25.0
self.m_e_m_out = -7.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
        idx: int, index of the front-end (sub-model) configuration
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_ssl_feat = g_ssl_model.extract_feat(wav.squeeze(-1))
# return
return self.m_frontend[idx](x_ssl_feat)
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros(
[batch_size * self.v_submodels, self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (m_trans, m_be_pool, m_output) in \
enumerate(
zip(self.m_transform, self.m_before_pooling,
self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. (batch, channel, frame//N, feat_dim//N) ->
# (batch, frame//N, channel * feat_dim//N)
# where N is caused by conv with stride
hidden_features = hidden_features.permute(0, 2, 1, 3).contiguous()
frame_num = hidden_features.shape[1]
hidden_features = hidden_features.view(batch_size, frame_num, -1)
            # 4. pooling: pass through the BLSTM layers
            hidden_features_lstm = m_be_pool(hidden_features)
            # 5. residual connection, then average pooling over frames
            tmp_emb = (hidden_features_lstm + hidden_features).mean(1)
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_emb
return output_emb
def _compute_logit(self, feature_vec, inference=False):
"""
"""
# number of sub models
batch_size = feature_vec.shape[0]
# buffer to store output scores from sub-models
output_act = torch.zeros(
[batch_size * self.v_submodels, self.v_out_class],
device=feature_vec.device, dtype=feature_vec.dtype)
# compute scores for each sub-models
for idx, m_output in enumerate(self.m_output_act):
tmp_emb = feature_vec[idx*batch_size : (idx+1)*batch_size]
output_act[idx*batch_size : (idx+1)*batch_size] = m_output(tmp_emb)
        # output_act is [batch * submodel, output-class]
return output_act
def _compute_score(self, logits):
"""
"""
# [batch * submodel, output-class], logits
# [:, 1] denotes being bonafide
if logits.shape[1] == 2:
return logits[:, 1] - logits[:, 0]
else:
return logits[:, -1]
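    # Worked example for _compute_score (illustrative): with two classes and
    # logits = [[-1.2, 0.7]], the score is 0.7 - (-1.2) = 1.9; a larger score
    # means the trial leans towards bonafide (class index 1).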
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _clamp_prob(self, input_prob, clamp_val=1e-12):
return torch.clamp(input_prob, 0.0 + clamp_val, 1.0 - clamp_val)
def _get_in_out_indx(self, filenames):
in_indx = []
out_indx = []
for x, y in enumerate(filenames):
if self.in_out_parser[y]:
in_indx.append(x)
else:
out_indx.append(x)
return np.array(in_indx), np.array(out_indx)
def _energy(self, logits):
"""
"""
# - T \log \sum_y \exp (logits[x, y] / T)
eng = - self.m_temp * torch.logsumexp(logits / self.m_temp, dim=1)
return eng
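    # Worked example for _energy (illustrative): with T = 1 and
    # logits = [2.0, 0.0],
    #   energy = -log(exp(2.0) + exp(0.0)) ~= -2.127
    # i.e., more negative energy corresponds to a more confident prediction.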
def _loss(self, logits, targets, energy, in_indx, out_indx):
"""
"""
# loss over cross-entropy on in-dist. data
if len(in_indx):
loss = self.m_loss(logits[in_indx], targets[in_indx])
else:
loss = 0
# loss on energy of in-dist.data
if len(in_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(energy[in_indx] - self.m_e_m_in), 2).mean()
# loss on energy of out-dist. data
if len(out_indx):
loss += self.m_lambda * torch.pow(
torch_nn_func.relu(self.m_e_m_out - energy[out_indx]), 2).mean()
return loss
def forward(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # for too-short trials, print dummy scores and skip computation
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, %s, %d, %f, %f" % (
filename, target, 0.0, 0.0))
return None
feature_vec = self._compute_embedding(x, datalength)
logits = self._compute_logit(feature_vec)
energy = self._energy(logits)
in_indx, out_indx = self._get_in_out_indx(filenames)
if self.training:
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device, dtype=torch.long)
target_vec = target_vec.repeat(self.v_submodels)
# loss
loss = self._loss(logits, target_vec, energy, in_indx, out_indx)
return loss
else:
scores = self._compute_score(logits)
targets = self._get_target(filenames)
for filename, target, score, energytmp in \
zip(filenames, targets, scores, energy):
print("Output, %s, %d, %f, %f" % (
filename, target, score.item(), -energytmp.item()))
# don't write output score as a single file
return None
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
# ----------------------------------------------------------------------------
# project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/main.py
# ----------------------------------------------------------------------------
#!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper for active learning.
It is based on main_mergedataset.py.
Requires model.py and config.py (config_merge_datasets.py).
Usage: $ python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import copy
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager_AL as nii_nn_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper_base
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# Load data pool and create data loader
pool_lst = prj_conf.al_pool_list
pool_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.al_pool_set_name, \
pool_lst,
prj_conf.al_pool_in_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.al_pool_out_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if hasattr(prj_conf, 'val_input_dirs'):
val_input_dirs = prj_conf.val_input_dirs
else:
val_input_dirs = prj_conf.input_dirs
if hasattr(prj_conf, 'val_output_dirs'):
val_output_dirs = prj_conf.val_output_dirs
else:
val_output_dirs = prj_conf.output_dirs
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
val_input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
val_output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# pre-training using standard procedure
# change args
args_tmp = copy.deepcopy(args)
args_tmp.epochs = args.active_learning_pre_train_epoch_num
args_tmp.not_save_each_epoch = True
args_tmp.save_trained_name += '_pretrained'
args_tmp.active_learning_cycle_num = 0
pretraind_name = args_tmp.save_trained_name + args_tmp.save_model_ext
if args.active_learning_pre_train_epoch_num:
nii_warn.f_print_w_date("Normal training (warm-up) phase",level='h')
nii_warn.f_print("Normal training for {:d} epochs".format(
args.active_learning_pre_train_epoch_num))
op_wrapper_tmp = nii_op_wrapper.OptimizerWrapper(model, args_tmp)
loss_wrapper_tmp = prj_model.Loss(args_tmp)
            nii_nn_wrapper_base.f_train_wrapper(
                args_tmp, model, loss_wrapper_tmp, device, op_wrapper_tmp,
                trn_set, val_set, checkpoint)
checkpoint = torch.load(pretraind_name)
elif checkpoint is None:
if os.path.isfile(pretraind_name):
checkpoint = torch.load(pretraind_name)
nii_warn.f_print("Use pretrained model before active learning")
else:
nii_warn.f_print("Use seed model to initialize")
nii_warn.f_print_w_date("Active learning phase",level='h')
# start training
nii_nn_wrapper.f_train_wrapper(
args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, pool_set, val_set, checkpoint)
        # done with training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper_base.f_inference_wrapper(
args, model, device, test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
# ----------------------------------------------------------------------------
# project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/config_AL_train_toyset.py
# ----------------------------------------------------------------------------
#!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# this will be used as the name of cache files created for each set
#
# Name for the seed training set, in case you merge multiple data sets as
# a single training set, just specify the name for each subset.
# Here we only have 1 training subset
trn_set_name = ['asvspoof2019_toyset_trn']
# Name for the development set
val_set_name = ['asvspoof2019_toyset_val']
# For convenience, specify a path to the toy data set
# because config*.py will be copied into model-*/config_AL_train_toyset/NN
# we need to use ../../../
tmp = os.path.dirname(__file__) + '/../../../DATA/toy_example'
# File list for training and development sets
# (text file, one file name per line, without name extension)
# we need to provide one lst for each subset
# trn_list[n] will correspond to trn_set_name[n]
# for training set
trn_list = [tmp + '/scp/train.lst']
# for development set
val_list = [tmp + '/scp/val.lst']
# Directories for input data
# We need to provide the path to the directory that saves the input data.
# We assume waveforms for training and development of one subset
# are stored in the same directory.
# Hence, input_dirs[n] is for trn_set_name[n] and val_set_name[n]
#
# If you need to specify a separate val_input_dirs
# val_input_dirs = [[PATH_TO_DEVELOPMENT_SET]]
#
# Each input_dirs[n] is a list,
# for example, input_dirs[n] = [wav, speaker_label, augmented_wav, ...]
#
# Here, input for each file is a single waveform
input_dirs = [[tmp + '/train_dev']]
# Dimensions of input features
# What is the dimension of the input feature
# len(input_dims) should be equal to len(input_dirs[n])
#
# Here, input for each file is a single waveform, dimension is 1
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# len(input_exts) should be equal to len(input_dirs[n])
#
# Here, input file extension is .wav
# We use .wav not .flac
input_exts = ['.wav']
# Temporal resolution for input features
# This is not relevant for CM but for other projects
# len(input_reso) should be equal to len(input_dirs[n])
# Here, it is 1 for waveform
input_reso = [1]
# Whether input features should be z-normalized
# This is not relevant for CM but for other projects
# len(input_norm) should be equal to len(input_dirs[n])
# Here, it is False for waveform
# We don't normalize the waveform
input_norm = [False]
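# An illustrative (hypothetical) example with two input streams per file,
# e.g., a waveform plus a frame-level feature with a frame shift of 80 samples:
#   input_dirs = [[tmp + '/train_dev', tmp + '/train_dev_f0']]
#   input_dims = [1, 1]
#   input_exts = ['.wav', '.f0']
#   input_reso = [1, 80]
#   input_norm = [False, True]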
# Similar configurations for output features
# Here, we set output to empty because we will load
# the target labels from protocol rather than output feature
# '.bin' is also a place holder
output_dirs = [[] for x in input_dirs]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# ===
# For active learning pool data
# ===
# Similar configurations as above
#
# This is for demonstration, we still use the toy set as pool set.
# And we will merge the training and development sets as the pool set
#
# Name of the pool subsets
al_pool_set_name = ['pool_toyset_trn', 'pool_toyset_val']
# list of files for each pool subsets
al_pool_list = [tmp + '/scp/train.lst', tmp + '/scp/val.lst']
# list of input data directories
al_pool_in_dirs = [[tmp + '/train_dev'],
[tmp + '/train_dev']]
al_pool_out_dirs = [[] for x in al_pool_in_dirs]
# ===
# Waveform configuration
# ===
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 64000
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 8000
# Optional argument
# This is used to load the protocol(s)
# Multiple protocol files can be specified in the list
#
# Note that these protocols should cover all the
# training, development, and pool set data.
# Otherwise, the code will raise an error
#
# Here, this protocol will cover all the data in the toy set
optional_argument = [tmp + '/protocol.txt']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
#
# path to the SSL model (it is downloaded by 01_download.sh)
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
# this must be provided.
ssl_front_end_out_dim = 1024
#########################################################
## Configuration for inference stage
#########################################################
# This part is not used in this project
# They are place holders
test_set_name = trn_set_name + val_set_name
# List of test set data
# for convenience, you may directly load test_set list here
test_list = trn_list + val_list
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = input_dirs * 2
# Directories for output features, which are []
test_output_dirs = [[]] * 2
# ----------------------------------------------------------------------------
# project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/config_auto.py
# ----------------------------------------------------------------------------
#!/usr/bin/env python
"""
config.py
This configuration file reads environment variables
for configuration. It is used for scoring.
It assumes that input data will be waveform files (*.wav)
No need to change settings here
"""
import os
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
trn_set_name = ['']
val_set_name = ['']
trn_list = ['']
val_list = ['']
input_dirs = [['']]
input_dims = [1]
input_exts = ['.wav']
input_reso = [1]
input_norm = [False]
output_dirs = [[]]
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
# ASVspoof uses 16000 Hz
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
# For ASVspoof, we don't do truncate here
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
# For ASVspoof, we don't set minimum length of input trial
minimum_len = None
# Optional argument
# We will use this optional_argument to read protocol file
# When evaluating on a eval set without protocol file, set this to ['']
optional_argument = ['']
# ===
# pre-trained SSL model
# ===
# We will load this pre-trained SSL model as the front-end
# We need this because the model definition is written in
# this file.
# Its weight will be overwritten by the trained CM.
#
# path to the SSL model. It will be loaded as front-end
ssl_front_end_path = os.path.dirname(__file__) \
+ '/../../../SSL_pretrained/xlsr_53_56k.pt'
# dimension of the SSL model output
ssl_front_end_out_dim = 1024
#########################################################
## Configuration for inference stage
#########################################################
# We set environment variables
# No need to change
test_set_name = [os.getenv('TEMP_DATA_NAME')]
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [test_set_name[0] + '.lst']
# Directories for input features
# input_dirs = [[path_of_feature_1, path_of_feature_2, ..., ]]
# directory of the evaluation set waveform
test_input_dirs = [[os.getenv('TEMP_DATA_DIR')]]
# Directories for output features, which are [[]]
test_output_dirs = [[]]
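# Illustrative usage (hypothetical values): with TEMP_DATA_NAME=eval_set and
# TEMP_DATA_DIR=/path/to/wav exported in the shell, the code expects a file
# list eval_set.lst in the working directory and reads *.wav files from
# /path/to/wav.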
# ----------------------------------------------------------------------------
# project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Adv/model.py
# ----------------------------------------------------------------------------
#!/usr/bin/env python
"""
model.py for the active-learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data_knowing_train to select data for training
The above function is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import core_scripts.other_tools.display as nii_display
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device/dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
        # linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
# average pooling -> (batch, self.out_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
This is used for inference, output includes the logits and
confidence scores.
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
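    # Worked example for inference (illustrative): with logits = [[0.2, 1.5]],
    # the score is 1.5 - 0.2 = 1.3 (leaning bonafide), and the confidence is
    # the negative-energy value computed by nii_loss_util.neg_energy from the
    # same logits.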
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences, skip it
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data_knowing_train(self,
train_data_loader,
pool_data_loader,
num_sample):
"""idx = al_retrieve_data_knowing_train(
train_data_loader,
pool_data_loader,
num_sample)
Data retrival function for active learning
Args:
-----
train_data_loader: Pytorch DataLoader, for train data
pool_data_loader: Pytorch DataLoader, for pool data
num_sample: int, number of samples selected
Return
------
idx: list of index
"""
def _adv_attack(data, data_grad, epsilon=0.3):
            return data + data_grad * epsilon
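        # Note: this perturbation uses the raw gradient. A sign-based,
        # FGSM-style variant would be (a sketch, not used here):
        #
        #   def _adv_attack_sign(data, data_grad, epsilon=0.3):
        #       return data + epsilon * data_grad.sign()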
def _feat_dis(feat1, feat2):
# feat1 (batch, feat)
# feat2 (batch, feat)
edis = torch.cdist(feat1.unsqueeze(0), feat2.unsqueeze(0))[0]
return torch.min(edis, dim=0)[0]
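        # Illustrative shapes for _feat_dis: with feat1 (4, d) holding the
        # adversarial anchors and feat2 (8, d) holding a pool mini-batch,
        # edis is (4, 8) and the returned tensor is (8,): for each pool
        # sample, the Euclidean distance to its nearest adversarial anchor.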
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([pool_data_loader.dataset.__len__()])
conf_list = np.zeros([pool_data_loader.dataset.__len__()])
#
counter = 0
# get gradients
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(train_data_loader):
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device, dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
# To collect gradient
x.requires_grad = True
# Forward pass (copied from forward())
# We cannot directly use forward() because that function requires
# self.training, and mini-batch will be made balanced
feat_vec = self.m_front_end(x)
logits, _ = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
loss = self.m_ce_loss(logits, target_)
# Backward pass
self.zero_grad()
loss.backward()
# get gradient
data_grad = x.grad.data
break
# create adversarial example
perturbed_data = _adv_attack(x, data_grad)
# loop over the pool and find the nearest pool data
with torch.no_grad():
# feature vec for adversarial example
ad_feature_vec = self.m_front_end(perturbed_data)
_, ad_feature_vec = self.m_back_end(ad_feature_vec)
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(pool_data_loader):
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device, dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
or_feature_vec = self.m_front_end(x)
_, or_feature_vec = self.m_back_end(or_feature_vec)
scores = _feat_dis(ad_feature_vec, or_feature_vec)
                # add the distance scores and data indices to the buffer
conf_list[counter:counter+x.shape[0]] = np.array(
[x.item() for x in scores])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
        # select the pool samples closest to the adversarial anchors
sorted_idx = np.argsort(conf_list)
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
return return_idx
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
# ----------------------------------------------------------------------------
# project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Rem/model.py
# ----------------------------------------------------------------------------
#!/usr/bin/env python
"""
model.py for the active-learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_exclude_data to select data to be excluded from the pool,
   and a function al_retrieve_data to select data from the pool (for training).
Both functions are called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
        input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # move the model to the input's device/dtype if it is not there yet
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
# whether fix SSL or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
        # linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
# through the frame-level network
# (batch, frame_num, self.out_dim)
# average pooling -> (batch, self.out_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
This is used for inference, output includes the logits and
confidence scores.
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# too short sentences, skip it
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
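        # e.g., bat_siz=2, num_sys=3, num_aug=2, pad_len=64000 gives x_new of
        # shape (12, 64000); rows are ordered batch-major, bona fide system
        # first, with the num_aug augmented copies of each system adjacent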
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# randomly select data index
sorted_idx = np.arange(data_loader.dataset.__len__())
np.random.shuffle(sorted_idx)
return_idx = sorted_idx[0:num_sample]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
def al_exclude_data(self, data_loader, num_sample):
"""idx = al_exclude_data(data_loader, num_sample)
Function to select useless data from the pool and remove them
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# buffer
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([data_loader.dataset.__len__()])
conf_list = np.zeros([data_loader.dataset.__len__()])
#
counter = 0
# loop over the pool set
with torch.no_grad():
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(data_loader):
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device,
dtype=self.input_std.dtype)
else:
                    # note: nii_display is not imported in this file, so
                    # raise an error directly instead of nii_display.f_die
                    raise TypeError("data input is not a tensor")
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# add the energy (confidence score) and data index to the buffer
conf_list[counter:counter+x.shape[0]] = np.array(
[x.item() for x in energy])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
        # select data with low energy (i.e., high confidence; the model has
        # already seen this kind of data, so the data is of little use)
sorted_idx = np.argsort(conf_list)
# retrieve the data index
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
        # return the data index,
        # the corresponding samples will be removed from the pool set
return return_idx
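# For reference: a minimal sketch of the energy score that al_exclude_data
# above ranks by. This assumes the back-end follows the standard
# energy-based confidence formulation, E(x) = -T * logsumexp(logits / T),
# under which a LOWER value means a HIGHER model confidence (an assumption
# stated here, not a quote of the back-end code). The helper below is
# illustrative only and is not called anywhere in this model.
def _neg_energy_sketch(logits, temperature=1.0):
    # logits: tensor, (batch, num_class); returns tensor, (batch, )
    return -temperature * torch.logsumexp(logits / temperature, dim=-1)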
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,868 | 31.518822 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-PosE/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data for scoring the pool set data.
al_retrieve_data scores the pool set data and returns a list of data indices.
The returned indices will be used to retrieve the data from the pool.
al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
# nii_display provides f_die, used in al_retrieve_data below
import core_scripts.other_tools.display as nii_display
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # put the model on GPU if it is not there
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
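# Minimal usage sketch of SSLModel (the checkpoint path is a placeholder,
# not a file shipped with this project):
#   ssl = SSLModel('/path/to/ssl_checkpoint.pt', 1024)
#   emb = ssl.extract_feat(torch.randn(2, 16000))
# wav2vec 2.0-style encoders use a ~20 ms frame shift, so 1 s of 16 kHz
# audio yields roughly 49 frames, i.e., emb is about (2, 49, 1024).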
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
        # whether to fix the SSL front end or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
        # linear layer to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
        # average pooling over frames:
        # (batch, frame_num, feat_dim) -> (batch, feat_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the scores and
        the confidence (negative energy).
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
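# Score convention in BackEnd.inference above: with class index 1 = bona fide
# and 0 = spoof (see Model._get_target_vec below), logits[:, 1] - logits[:, 0]
# equals the log-odds log p(bonafide|x) - log p(spoof|x) under the softmax,
# so a higher score favors the bona fide class.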
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short; output placeholder scores
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# buffer
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([data_loader.dataset.__len__()])
conf_list = np.zeros([data_loader.dataset.__len__()])
#
counter = 0
# loop over the pool set
with torch.no_grad():
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(data_loader):
# feedforward pass
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device,
dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# add the energy (confidence score) and data index to the buffer
conf_list[counter:counter+x.shape[0]] = np.array(
[x.item() for x in energy])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
        # select the least useful data (those with low energy, i.e., high confidence)
sorted_idx = np.argsort(conf_list)
# retrieve the data index
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,133 | 31.651877 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-NegE/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data for scoring the pool set data.
al_retrieve_data scores the pool set data and returns a list of data indices.
The returned indices will be used to retrieve the data from the pool.
al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
# nii_display provides f_die, used in al_retrieve_data below
import core_scripts.other_tools.display as nii_display
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # put the model on GPU if it is not there
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
        # whether to fix the SSL front end or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
# linear linear to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
        # average pooling over frames:
        # (batch, frame_num, feat_dim) -> (batch, feat_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the scores and
        the confidence (negative energy).
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short; output placeholder scores
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# buffer
# note that data_loader.dataset.__len__() returns the number of
# individual samples, not the number of mini-batches
idx_list = np.zeros([data_loader.dataset.__len__()])
conf_list = np.zeros([data_loader.dataset.__len__()])
#
counter = 0
# loop over the pool set
with torch.no_grad():
for data_idx, (x, y, data_info, idx_orig) in \
enumerate(data_loader):
# feedforward pass
filenames = [nii_seq_tk.parse_filename(y) for y in data_info]
datalength = [nii_seq_tk.parse_length(y) for y in data_info]
if isinstance(x, torch.Tensor):
x = x.to(self.input_std.device,
dtype=self.input_std.dtype)
else:
nii_display.f_die("data input is not a tensor")
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# add the energy (confidence score) and data index to the buffer
conf_list[counter:counter+x.shape[0]] = np.array(
[x.item() for x in energy])
idx_list[counter:counter+x.shape[0]] = np.array(
idx_orig)
counter += x.shape[0]
        # select the most useful data (those with high energy, i.e., low confidence)
sorted_idx = np.argsort(conf_list)[::-1]
# retrieve the data index
return_idx = [idx_list[x] for x in sorted_idx[:num_sample]]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
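# Note on the selection direction: this NegE variant takes
# np.argsort(conf_list)[::-1], i.e., the highest-energy (least confident)
# pool samples; the PosE variant (../model-AL-PosE) takes the lowest-energy
# (most confident) ones, and the Pas variant (../model-AL-Pas) samples
# uniformly at random.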
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 19,138 | 31.66041 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/08-asvspoof-activelearn/model-AL-Pas/model.py | #!/usr/bin/env python
"""
model.py for Active learning model
This model.py consists of two parts:
1. A CM with SSL-based front-end and linear back-end.
The same model as 07-asvspoof-ssl/model-W2V-XLSR-ft-GF/model.py,
but the code is revised and simplified.
2. A function al_retrieve_data for scoring the pool set data.
al_retrieve_data scores the pool set data and returns a list of data indices.
The returned indices will be used to retrieve the data from the pool.
al_retrieve_data is called in core_scripts/nn_manager/nn_manager_AL.py.
Please check the training algorithm there.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import sandbox.eval_asvspoof as nii_asvspoof
import sandbox.util_bayesian as nii_bayesian
import sandbox.util_loss_metric as nii_loss_util
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2022, Xin Wang"
############################
## FOR pre-trained MODEL
############################
import fairseq as fq
class SSLModel(torch_nn.Module):
def __init__(self, mpath, ssl_orig_output_dim):
""" SSLModel(cp_path, ssl_orig_output_dim)
Args
----
mpath: string, path to the pre-trained SSL model
ssl_orig_output_dim: int, dimension of the SSL model output feature
"""
super(SSLModel, self).__init__()
md, _, _ = fq.checkpoint_utils.load_model_ensemble_and_task([mpath])
self.model = md[0]
# this should be loaded from md
self.out_dim = ssl_orig_output_dim
return
def extract_feat(self, input_data):
""" output = extract_feat(input_data)
input:
------
          input_data: tensor, (batch, length, 1) or (batch, length)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
        # put the model on GPU if it is not there
if next(self.model.parameters()).device != input_data.device \
or next(self.model.parameters()).dtype != input_data.dtype:
self.model.to(input_data.device, dtype=input_data.dtype)
# input should be in shape (batch, length)
if input_data.ndim == 3:
input_tmp = input_data[:, :, 0]
else:
input_tmp = input_data
# emb has shape [batch, length, dim]
emb = self.model(input_tmp, mask=False, features_only=True)['x']
return emb
##############
## FOR MODEL
##############
class FrontEnd(torch_nn.Module):
""" Front end wrapper
"""
def __init__(self, output_dim, mpath, ssl_out_dim, fix_ssl=False):
super(FrontEnd, self).__init__()
# dimension of output feature
self.out_dim = output_dim
        # whether to fix the SSL front end or not
self.flag_fix_ssl = fix_ssl
# ssl part
self.ssl_model = SSLModel(mpath, ssl_out_dim)
# post transformation part
self.m_front_end_process = torch_nn.Linear(
self.ssl_model.out_dim, self.out_dim)
return
def set_flag_fix_ssl(self, fix_ssl):
self.flag_fix_ssl = fix_ssl
return
def forward(self, wav):
""" output = front_end(wav)
input:
------
wav: tensor, (batch, length, 1)
output:
-------
output: tensor, (batch, frame_num, frame_feat_dim)
"""
if self.flag_fix_ssl:
self.ssl_model.eval()
with torch.no_grad():
x_ssl_feat = self.ssl_model.extract_feat(wav)
else:
x_ssl_feat = self.ssl_model.extract_feat(wav)
output = self.m_front_end_process(x_ssl_feat)
return output
class BackEnd(torch_nn.Module):
"""Back End Wrapper
"""
def __init__(self, input_dim, out_dim, num_classes,
dropout_rate, dropout_flag=True, dropout_trials=[1]):
super(BackEnd, self).__init__()
# input feature dimension
self.in_dim = input_dim
# output embedding dimension
self.out_dim = out_dim
# number of output classes
self.num_class = num_classes
# dropout rate
self.m_mcdp_rate = dropout_rate
self.m_mcdp_flag = dropout_flag
self.m_mcdp_num = dropout_trials
# linear linear to produce output logits
self.m_utt_level = torch_nn.Linear(self.out_dim, self.num_class)
return
def forward(self, feat):
""" logits, emb_vec = back_end_emb(feat)
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
logits: tensor, (batch, num_output_class)
emb_vec: tensor, (batch, emb_dim)
"""
        # average pooling over frames:
        # (batch, frame_num, feat_dim) -> (batch, feat_dim)
feat_utt = feat.mean(1)
# output linear
logits = self.m_utt_level(feat_utt)
return logits, feat_utt
def inference(self, feat):
"""scores, emb_vec, energy = inference(feat)
        This is used for inference; the output includes the scores and
        the confidence (negative energy).
input:
------
feat: tensor, (batch, frame_num, feat_feat_dim)
output:
-------
scores: tensor, (batch, 1)
emb_vec: tensor, (batch, emb_dim)
energy: tensor, (batch, 1)
"""
# logits
logits, feat_utt = self.forward(feat)
# logits -> score
scores = logits[:, 1] - logits[:, 0]
# compute confidence using negative energy
energy = nii_loss_util.neg_energy(logits)
return scores, feat_utt, energy
class MainLossModule(torch_nn.Module):
""" Loss wrapper
"""
def __init__(self):
super(MainLossModule, self).__init__()
self.m_loss = torch_nn.CrossEntropyLoss()
return
def forward(self, logits, target):
return self.m_loss(logits, target)
class FeatLossModule(torch_nn.Module):
""" Loss wrapper over features
Not used here
"""
def __init__(self):
super(FeatLossModule, self).__init__()
return
def forward(self, data, target):
"""
"""
return 0
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(
in_dim,out_dim, args, prj_conf, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
#self.model_debug = False
#self.validation = False
############################################
####
        # auxiliary
####
# flag of current training stage
# this variable will be overwritten
self.temp_flag = args.temp_flag
####
# Load protocol and prepare the target data for network training
####
protocol_f = prj_conf.optional_argument
self.protocol_parser = nii_asvspoof.protocol_parse_general(protocol_f)
####
# Bayesian parameter
####
self.m_mcdp_rate = None
self.m_mcdp_flag = True
# if [1], we will only do one inference
self.m_mcdropout_num = [1]
####
# Model definition
####
# front-end
# dimension of compressed front-end feature
self.v_feat_dim = 128
self.m_front_end = FrontEnd(self.v_feat_dim,
prj_conf.ssl_front_end_path,
prj_conf.ssl_front_end_out_dim)
# back-end
# dimension of utterance-level embedding vectors
self.v_emd_dim = self.v_feat_dim
# number of output classes
self.v_out_class = 2
self.m_back_end = BackEnd(self.v_feat_dim,
self.v_emd_dim,
self.v_out_class,
self.m_mcdp_rate,
self.m_mcdp_flag,
self.m_mcdropout_num)
#####
# Loss function
#####
self.m_ce_loss = MainLossModule()
self.m_cr_loss = FeatLossModule()
# weight for the feature loss
self.m_feat = 0.0
# done
return
def prepare_mean_std(self, in_dim, out_dim, args,
prj_conf, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but irrelevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but irrelevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but irrelevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but irrelevant to this code
"""
return y * self.output_std + self.output_mean
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def _get_target_vec(self, num_sys, num_aug, bs, device, dtype):
target = [1] * num_aug + [0 for x in range((num_sys-1) * num_aug)]
target = np.tile(target, bs)
target = torch.tensor(target, device=device, dtype=dtype)
return target
def __inference(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
        # skip utterances that are too short; output placeholder scores
if not self.training and x.shape[1] < 3000:
targets = self._get_target(filenames)
for filename, target in zip(filenames, targets):
print("Output, {:s}, {:d}, {:f}, {:f}, {:f}".format(
filename, target, 0.0, 0.0, 0.0))
return None
# front-end
feat_vec = self.m_front_end(x)
# back-end
scores, _, energy = self.m_back_end.inference(feat_vec)
# print output
targets = self._get_target(filenames)
for filename, target, score, eps in \
zip(filenames, targets, scores, energy):
print("Output, {:s}, {:d}, {:f}, {:f}".format(
filename, target, score.item(), eps.item()))
# don't write output score as a single file
return None
def __forward_single_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# front-end & back-end
feat_vec = self.m_front_end(x)
logits, emb_vec = self.m_back_end(feat_vec)
target = self._get_target(filenames)
target_ = torch.tensor(target, device=x.device, dtype=torch.long)
# loss
loss = self.m_ce_loss(logits, target_)
return loss
def __forward_multi_view(self, x, fileinfo):
"""
"""
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
# input will be (batchsize, length, 1+num_spoofed, num_aug)
bat_siz = x.shape[0]
pad_len = x.shape[1]
num_sys = x.shape[2]
num_aug = x.shape[3]
# to (batchsize * (1+num_spoofed) * num_aug, length)
x_new = x.permute(0, 2, 3, 1).contiguous().view(-1, pad_len)
datalen_tmp = np.repeat(datalength, num_sys * num_aug)
# target vector
# this is for CE loss
# [1, 0, 0, ..., 1, 0, 0 ...]
target = self._get_target_vec(num_sys, num_aug, bat_siz,
x.device, torch.long)
        # this is for contrastive loss (ignore the augmentation)
target_feat = self._get_target_vec(num_sys, 1, bat_siz,
x.device, torch.long)
# front-end & back-end
feat_vec = self.m_front_end(x_new)
logits, emb_vec = self.m_back_end(feat_vec)
# CE loss
loss_ce = self.m_ce_loss(logits, target)
if self.m_feat:
# feat loss
loss_cr_1 = 0
loss_cr_2 = 0
# reshape to multi-view format
# (batch, (1+num_spoof), nview, dimension...)
feat_vec_ = feat_vec.view(bat_siz, num_sys, num_aug, -1,
feat_vec.shape[-1])
emb_vec_ = emb_vec.view(bat_siz, num_sys, num_aug, -1)
for bat_idx in range(bat_siz):
loss_cr_1 += self.m_feat / bat_siz * self.m_cr_loss(
feat_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
loss_cr_2 += self.m_feat / bat_siz * self.m_cr_loss(
emb_vec_[bat_idx],
target_feat[bat_idx * num_sys :(bat_idx+1) * num_sys])
return [[loss_ce, loss_cr_1, loss_cr_2],
[True, True, True]]
else:
return loss_ce
def forward(self, x, fileinfo):
"""
"""
if self.training and x.shape[2] > 1:
# if training with multi-view data
return self.__forward_multi_view(x, fileinfo)
elif self.training:
return self.__forward_single_view(x, fileinfo)
else:
return self.__inference(x, fileinfo)
def get_embedding(self, x, fileinfo):
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
feature_vec = self._compute_embedding(x, datalength)
return feature_vec
def al_retrieve_data(self, data_loader, num_sample):
"""idx = al_retrieve_data(data_loader, num_sample)
        Data retrieval function for active learning
Args:
-----
data_loader: Pytorch DataLoader for the pool data set
num_sample: int, number of samples to be selected
Return
------
idx: list of index
"""
# randomly select data index
sorted_idx = np.arange(data_loader.dataset.__len__())
np.random.shuffle(sorted_idx)
return_idx = sorted_idx[0:num_sample]
# return the data index,
# the corresponding samples will be added to training set
return return_idx
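# Note: this passive (Pas) baseline ignores model confidence entirely and
# samples uniformly at random. np.random.shuffle draws from NumPy's global
# RNG, so the selection is reproducible only if that seed is fixed by the
# surrounding training script (an assumption, not enforced here).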
class Loss():
""" Wrapper for scripts, ignore it
"""
def __init__(self, args):
"""
"""
def compute(self, outputs, target):
"""
"""
return outputs
if __name__ == "__main__":
print("Definition of model")
| 17,572 | 31.009107 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/00_evaluate.py | #!/usr/bin/python
"""
Wrapper to parse the score file and compute EER and min tDCF
Usage:
python 00_evaluate.py log_file
"""
import os
import sys
import numpy as np
from sandbox import eval_asvspoof
def parse_txt(file_path):
bonafide = []
spoofed = []
with open(file_path, 'r') as file_ptr:
for line in file_ptr:
if line.startswith('Output,'):
temp = line.split(',')
flag = int(temp[2])
if flag:
bonafide.append(float(temp[-1]))
else:
spoofed.append(float(temp[-1]))
bonafide = np.array(bonafide)
spoofed = np.array(spoofed)
return bonafide, spoofed
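# Expected log line format (as produced by the "Output, ..." prints of the
# models in this project; the trial name below is a made-up example):
#   Output, LA_E_1234567, 1, 0.987654
# fields: tag, trial name, label (1 = bona fide / 0 = spoof), score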
if __name__ == "__main__":
data_path = sys.argv[1]
bonafide, spoofed = parse_txt(data_path)
mintDCF, eer, threshold = eval_asvspoof.tDCF_wrapper(bonafide, spoofed)
print("mintDCF: %f\tEER: %2.3f %%\tThreshold: %f" % (mintDCF, eer * 100,
threshold))
| 1,038 | 24.975 | 77 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done with training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/model.py | #!/usr/bin/env python
"""
model.py
Self defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dic, data_bufer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
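# e.g., a protocol row "SPK_02 TRIAL_002 - - bonafide" (made-up names; the
# trial name is taken from column 2, i.e., row[1]) yields
# data_buffer['TRIAL_002'] = 1, while rows whose last column is not
# 'bonafide' yield 0.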
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
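        # e.g., 10 * 16 * 750 = 120000 waveform samples (7.5 s at 16 kHz);
        # with frame_hop = 160, this keeps 120000 // 160 = 750 frames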
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors, which is input to oc-softmax layer
self.v_emd_dim = 256
# output class (1 for one-class softmax)
self.v_out_class = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# softmax
self.m_a_softmax = []
# it can handle models with multiple front-end configurations
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 512),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(256, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for a batch of data, pad or trim each trial independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_num, frame_feat_dim)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
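# A worked sketch of the pad/trim logic above (illustrative numbers):
# with trunc_len = 750 and a trial of datalength = 48000 samples,
# true_frame_num = 48000 // 160 = 300 < 750, so the 300 frames are
# tiled ceil(750 / 300) = 3 times and cut back to exactly 750 frames;
# a trial longer than 750 frames is instead cropped at a random offset.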
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub-models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_num, frame_feat_dim)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
# negative class scores
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
# positive class scores
x_phi_val = torch.zeros_like(x_cos_val)
# get scores
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return x_cos_val
else:
return [x_cos_val, x_phi_val]
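# Note on the two branches above: during training the list
# [x_cos_val, x_phi_val] is consumed by OCSoftmaxWithLoss; at inference
# only the cosine score is returned, and forward() prints its mean over
# sub-models as the trial-level score (for this one-class setup a higher
# cosine is presumably closer to the bona fide target direction).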
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
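# Minimal usage sketch (hypothetical; in practice the nn_manager wrapper
# drives these calls during training):
# model = Model(in_dim=1, out_dim=1, args=args)
# loss_wrapper = Loss(args)
# outputs = model(x, fileinfo) # -> [oc-softmax scores, target_vec, True]
# loss = loss_wrapper.compute(outputs, None) # 2nd argument is unused here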
if __name__ == "__main__":
print("Definition of model")
| 16,219 | 34.884956 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-ocsoftmax/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = tmp + '/scp/train.lst'
# val_list: list of files for the validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# (this project uses waveform input only, hence '.wav')
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, the temporal resolution of input acoustic features
# is typically waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncate input sequences so that the maximum length = truncate_seq
# A larger truncate_seq requires more GPU memory
# If you don't want truncation, set truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
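# (model.py reads optional_argument[0] as the protocol file path)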
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features; empty [] since no output feature is used
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
# done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
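# Hypothetical invocation (flag names inferred from the args.* attributes
# used above; check core_scripts.config_parse.arg_parse for the real options):
# training: python main.py --module-config config --module-model model
# inference: python main.py --inference --trained-model trained_network.pt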
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import sandbox.block_resnet as nii_resnet
import core_scripts.other_tools.debug as nii_debug
import core_modules.oc_softmax as nii_oc_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for the log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors, which will be input to the oc-softmax layer
self.v_emd_dim = 256
# output class (1 for one-class softmax)
self.v_out_class = 1
####
# create network
####
# backend
self.m_model = []
# front-end
self.m_frontend = []
# softmax layer for backend
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_model.append(
nii_resnet.ResNet(self.v_emd_dim)
)
self.m_frontend.append(
nii_front_end.LFCC(
self.frame_lens[idx], self.frame_hops[idx], self.fft_n[idx],
self.m_target_sr, self.lfcc_dim[idx], with_energy=True)
)
self.m_a_softmax.append(
nii_oc_softmax.OCAngleLayer(self.v_emd_dim)
)
self.m_model = torch_nn.ModuleList(self.m_model)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for a batch of data, pad or trim each trial independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# input to resnet should be (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp_buff
# return
return x_sp_amp
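# Note: unlike the LCNN variants, there is no final permute back to
# (batch, frame_num, frame_feat_dim); the ResNet backend consumes the
# buffer as (batch, frame_feat_dim, frame_num), unsqueezed to 4D later.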
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub-models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_model) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens, self.m_model)):
# extract feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_feat_dim, frame_num)
# 2. compute hidden features
features, final_output = m_model(x_sp_amp.unsqueeze(1))
output_emb[idx * batch_size : (idx+1) * batch_size] = features
return output_emb
def _compute_score(self, feature_vec, angle=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
# negative class scores
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
# positive class scores
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx], angle)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
return [x_cos_val, x_phi_val]
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)[0]
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_oc_softmax.OCSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 13,524 | 33.414758 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-restnet-ocsoftmax/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = tmp + '/scp/train.lst'
# val_list: list of files for the validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# (this project uses waveform input only, hence '.wav')
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, the temporal resolution of input acoustic features
# is typically waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncate input sequences so that the maximum length = truncate_seq
# A larger truncate_seq requires more GPU memory
# If you don't want truncation, set truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features; empty [] since no output feature is used
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
# done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_modules.a_softmax as nii_a_softmax
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
torch.manual_seed(1)
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
# floor for the log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors, which will be input to the a-softmax layer
self.v_emd_dim = 2
# output class (2 for a-softmax layer)
self.v_out_class = 2
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# softmax
self.m_a_softmax = []
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_a_softmax.append(
nii_a_softmax.AngleLayer(self.v_emd_dim, self.v_out_class)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
self.m_a_softmax = torch_nn.ModuleList(self.m_a_softmax)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for a batch of data, pad or trim each trial independently
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
# roughly, this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_num, frame_feat_dim)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub-models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-model
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract feature (LFCC)
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_num, frame_feat_dim)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# compute a-softmax output for each feature configuration
batch_size = feature_vec.shape[0] // self.v_submodels
# negative class scores
x_cos_val = torch.zeros(
[feature_vec.shape[0], self.v_out_class],
dtype=feature_vec.dtype, device=feature_vec.device)
# positive class scores
x_phi_val = torch.zeros_like(x_cos_val)
for idx in range(self.v_submodels):
s_idx = idx * batch_size
e_idx = idx * batch_size + batch_size
tmp1, tmp2 = self.m_a_softmax[idx](feature_vec[s_idx:e_idx],
inference)
x_cos_val[s_idx:e_idx] = tmp1
x_phi_val[s_idx:e_idx] = tmp2
if inference:
return torch_nn_func.softmax(x_cos_val, dim=1)[:, 1]
else:
return [x_cos_val, x_phi_val]
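# Sketch of the inference branch above: with v_out_class = 2, a softmax
# over the two class logits is taken and column 1 is kept, so the printed
# score should be the posterior of the bona fide class (index 1, matching
# the 1/0 labels from protocol_parse), a value in [0, 1].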
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
a_softmax_act = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target, device=x.device).long()
target_vec = target_vec.repeat(self.v_submodels)
return [a_softmax_act, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
score = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], score.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = nii_a_softmax.AngularSoftmaxWithLoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
if __name__ == "__main__":
print("Definition of model")
| 16,175 | 34.946667 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-a-softmax/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = tmp + '/scp/train.lst'
# val_list: list of files for the validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extension_of_feature_1, ...]
# (this project uses waveform input only, hence '.wav')
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, the temporal resolution of input acoustic features
# is typically waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncate input sequences so that the maximum length = truncate_seq
# A larger truncate_seq requires more GPU memory
# If you don't want truncation, set truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features; empty [] since no output feature is used
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The default training/inference process wrapper
Requires model.py and config.py
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers}
# Load file list and create data loader
trn_lst = nii_list_tool.read_list_from_text(prj_conf.trn_list)
trn_set = nii_dset.NIIDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate)
if prj_conf.val_list is not None:
val_lst = nii_list_tool.read_list_from_text(prj_conf.val_list)
val_set = nii_dset.NIIDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
# done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NIIDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 6,366 | 34.569832 | 74 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/model.py | #!/usr/bin/env python
"""
model.py
Self-defined model definition.
Usage:
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import sandbox.block_nn as nii_nn
import sandbox.util_frontend as nii_front_end
import core_scripts.other_tools.debug as nii_debug
import core_scripts.data_io.seq_info as nii_seq_tk
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
## util
##############
def protocol_parse(protocol_filepath):
""" Parse protocol of ASVspoof2019 and get bonafide/spoof for each trial
input:
-----
protocol_filepath: string, path to the protocol file
for convenience, I put train/dev/eval trials into a single protocol file
output:
-------
data_buffer: dict, data_buffer[filename] -> 1 (bonafide), 0 (spoof)
"""
data_buffer = {}
temp_buffer = np.loadtxt(protocol_filepath, dtype='str')
for row in temp_buffer:
if row[-1] == 'bonafide':
data_buffer[row[1]] = 1
else:
data_buffer[row[1]] = 0
return data_buffer
##############
## FOR MODEL
##############
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
##### required part, no need to change #####
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
# a flag for debugging (by default False)
self.model_debug = False
self.flag_validation = False
#####
####
# on input waveform and output target
####
# Load protocol and prepare the target data for network training
protocol_file = prj_conf.optional_argument[0]
self.protocol_parser = protocol_parse(protocol_file)
# Working sampling rate
# torchaudio may be used to change sampling rate
self.m_target_sr = 16000
####
# optional configs (not used)
####
# re-sampling (optional)
#self.m_resampler = torchaudio.transforms.Resample(
# prj_conf.wav_samp_rate, self.m_target_sr)
# vad (optional)
#self.m_vad = torchaudio.transforms.Vad(sample_rate = self.m_target_sr)
# flag for balanced class (temporary use)
self.v_flag = 1
####
# front-end configuration
# multiple front-end configurations may be used
# by default, use a single front-end
####
# frame shift (number of waveform points)
self.frame_hops = [160]
# frame length
self.frame_lens = [320]
# FFT length
self.fft_n = [512]
# LFCC dim (base component)
self.lfcc_dim = [20]
self.lfcc_with_delta = True
# window type
self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation (not used)
self.amp_floor = 0.00001
# number of frames to be kept for each trial
# 750 frames are quite long for ASVspoof2019 LA with frame_shift = 10ms
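        # e.g., with the default frame_hop = 160, 10 * 16 * 750 // 160 = 750
        # frames (i.e., 7.5 s of audio) are kept per trial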
self.v_truncate_lens = [10 * 16 * 750 // x for x in self.frame_hops]
# number of sub-models (by default, a single model)
self.v_submodels = len(self.frame_lens)
# dimension of embedding vectors
# here, the embedding is just the activation before sigmoid()
self.v_emd_dim = 1
####
# create network
####
# 1st part of the classifier
self.m_transform = []
# 2nd part of the classifier
self.m_output_act = []
# front-end
self.m_frontend = []
# it can handle models with multiple front-end configuration
# by default, only a single front-end
for idx, (trunc_len, fft_n, lfcc_dim) in enumerate(zip(
self.v_truncate_lens, self.fft_n, self.lfcc_dim)):
fft_n_bins = fft_n // 2 + 1
if self.lfcc_with_delta:
lfcc_dim = lfcc_dim * 3
self.m_transform.append(
torch_nn.Sequential(
torch_nn.Conv2d(1, 64, [5, 5], 1, padding=[2, 2]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 96, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 96, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(48, affine=False),
torch_nn.Conv2d(48, 128, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch.nn.MaxPool2d([2, 2], [2, 2]),
torch_nn.Conv2d(64, 128, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(64, affine=False),
torch_nn.Conv2d(64, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [1, 1], 1, padding=[0, 0]),
nii_nn.MaxFeatureMap2D(),
torch_nn.BatchNorm2d(32, affine=False),
torch_nn.Conv2d(32, 64, [3, 3], 1, padding=[1, 1]),
nii_nn.MaxFeatureMap2D(),
torch_nn.MaxPool2d([2, 2], [2, 2])
)
)
self.m_output_act.append(
torch_nn.Sequential(
torch_nn.Dropout(0.7),
torch_nn.Linear((trunc_len // 16) *
(lfcc_dim // 16) * 32, 160),
nii_nn.MaxFeatureMap2D(),
torch_nn.Linear(80, self.v_emd_dim)
)
)
self.m_frontend.append(
nii_front_end.LFCC(self.frame_lens[idx],
self.frame_hops[idx],
self.fft_n[idx],
self.m_target_sr,
self.lfcc_dim[idx],
with_energy=True)
)
self.m_transform = torch_nn.ModuleList(self.m_transform)
self.m_output_act = torch_nn.ModuleList(self.m_output_act)
self.m_frontend = torch_nn.ModuleList(self.m_frontend)
# output
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
""" prepare mean and std for data processing
This is required for the Pytorch project, but not relevant to this code
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
This is required for the Pytorch project, but not relevant to this code
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
This is required for the Pytorch project, but not relevant to this code
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
This is required for the Pytorch project, but not relevant to this code
"""
return y * self.output_std + self.output_mean
def _front_end(self, wav, idx, trunc_len, datalength):
""" simple fixed front-end to extract features
input:
------
wav: waveform
idx: idx of the trial in mini-batch
trunc_len: number of frames to be kept after truncation
datalength: list of data length in mini-batch
output:
-------
        x_sp_amp: front-end features, (batch, frame_num, frame_feat_dim)
"""
with torch.no_grad():
x_sp_amp = self.m_frontend[idx](wav.squeeze(-1))
# permute to (batch, frame_feat_dim, frame_num)
x_sp_amp = x_sp_amp.permute(0, 2, 1)
# make sure the buffer is long enough
x_sp_amp_buff = torch.zeros(
[x_sp_amp.shape[0], x_sp_amp.shape[1], trunc_len],
dtype=x_sp_amp.dtype, device=x_sp_amp.device)
# for batch of data, pad or trim each trial independently
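            # e.g., with frame_hop = 160 and trunc_len = 750: a trial of
            # 240000 samples (1500 frames) is trimmed to a random 750-frame
            # segment, while a trial of 48000 samples (300 frames) is
            # repeated and then cut back to 750 frames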
fs = self.frame_hops[idx]
for fileidx in range(x_sp_amp.shape[0]):
                # roughly this is the number of frames
true_frame_num = datalength[fileidx] // fs
if true_frame_num > trunc_len:
# trim randomly
pos = torch.rand([1]) * (true_frame_num-trunc_len)
pos = torch.floor(pos[0]).long()
tmp = x_sp_amp[fileidx, :, pos:trunc_len+pos]
x_sp_amp_buff[fileidx] = tmp
else:
rep = int(np.ceil(trunc_len / true_frame_num))
tmp = x_sp_amp[fileidx, :, 0:true_frame_num].repeat(1, rep)
x_sp_amp_buff[fileidx] = tmp[:, 0:trunc_len]
# permute to (batch, frame_num, frame_feat_dim)
x_sp_amp = x_sp_amp_buff.permute(0, 2, 1)
# return
return x_sp_amp
def _compute_embedding(self, x, datalength):
""" definition of forward method
Assume x (batchsize, length, dim)
Output x (batchsize * number_filter, output_dim)
"""
# resample if necessary
#x = self.m_resampler(x.squeeze(-1)).unsqueeze(-1)
# number of sub models
batch_size = x.shape[0]
# buffer to store output scores from sub-models
output_emb = torch.zeros([batch_size * self.v_submodels,
self.v_emd_dim],
device=x.device, dtype=x.dtype)
# compute scores for each sub-models
for idx, (fs, fl, fn, trunc_len, m_trans, m_output) in enumerate(
zip(self.frame_hops, self.frame_lens, self.fft_n,
self.v_truncate_lens,
self.m_transform, self.m_output_act)):
# extract front-end feature
x_sp_amp = self._front_end(x, idx, trunc_len, datalength)
# compute scores
# 1. unsqueeze to (batch, 1, frame_length, fft_bin)
# 2. compute hidden features
hidden_features = m_trans(x_sp_amp.unsqueeze(1))
# 3. flatten and transform through output function
tmp_score = m_output(torch.flatten(hidden_features, 1))
output_emb[idx * batch_size : (idx+1) * batch_size] = tmp_score
return output_emb
def _compute_score(self, feature_vec, inference=False):
"""
"""
# feature_vec is [batch * submodel, 1]
if inference:
return feature_vec.squeeze(1)
else:
return torch.sigmoid(feature_vec).squeeze(1)
def _get_target(self, filenames):
try:
return [self.protocol_parser[x] for x in filenames]
except KeyError:
print("Cannot find target data for %s" % (str(filenames)))
sys.exit(1)
def forward(self, x, fileinfo):
#with torch.no_grad():
# vad_waveform = self.m_vad(x.squeeze(-1))
# vad_waveform = self.m_vad(torch.flip(vad_waveform, dims=[1]))
# if vad_waveform.shape[-1] > 0:
# x = torch.flip(vad_waveform, dims=[1]).unsqueeze(-1)
# else:
# pass
filenames = [nii_seq_tk.parse_filename(y) for y in fileinfo]
datalength = [nii_seq_tk.parse_length(y) for y in fileinfo]
if self.training:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec)
# target
target = self._get_target(filenames)
target_vec = torch.tensor(target,
device=x.device, dtype=scores.dtype)
target_vec = target_vec.repeat(self.v_submodels)
return [scores, target_vec, True]
else:
feature_vec = self._compute_embedding(x, datalength)
scores = self._compute_score(feature_vec, True)
target = self._get_target(filenames)
print("Output, %s, %d, %f" % (filenames[0],
target[0], scores.mean()))
# don't write output score as a single file
return None
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
self.m_loss = torch_nn.BCELoss()
def compute(self, outputs, target):
"""
"""
loss = self.m_loss(outputs[0], outputs[1])
return loss
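# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# the model returns sigmoid scores in outputs[0] and 0/1 targets in
# outputs[1], so BCELoss applies directly.
def _demo_loss():
    scores = torch.sigmoid(torch.randn(4))    # mimics outputs[0]
    targets = torch.tensor([1., 0., 1., 0.])  # mimics outputs[1]
    print(Loss(None).compute([scores, targets, True], None).item())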
if __name__ == "__main__":
print("Definition of model")
| 15,225 | 34.741784 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/02-asvspoof/lfcc-lcnn-sigmoid/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'asvspoof2019_trn'
val_set_name = 'asvspoof2019_val'
# for convenience
tmp = '../DATA/asvspoof2019_LA/'
# File lists (text file, one data name per line, without name extension)
# trn_file_list: list of files for training set
trn_list = tmp + '/scp/train.lst'
# val_file_list: list of files for validation set. It can be None
val_list = tmp + '/scp/val.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [tmp + '/train_dev']
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.wav']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [1]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [False]
# Similar configurations for output features
output_dirs = []
output_dims = [1]
output_exts = ['.bin']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = None
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = None
# Optional argument
# Just a buffer for convenience
# It can contain anything
optional_argument = ['../DATA/asvspoof2019_LA/protocol.txt']
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = 'asvspoof2019_test'
# List of test set data
# for convenience, you may directly load test_set list here
test_list = tmp + '/scp/test.lst'
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = ['../DATA/asvspoof2019_LA/eval']
# Directories for output features, which are []
test_output_dirs = []
| 3,226 | 29.733333 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
    # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/model.py | #!/usr/bin/env python
"""
model.py for harmonic-plus-noise NSF with trainable sinc filter
version: 9
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
# Building blocks (torch.nn modules + dimension operation)
#
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over dilated BLSTM
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
Recurrency is conducted along "length"
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
        # permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
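# --------------------------------------------------------------------------
# Illustrative shape check (editor's addition, hypothetical demo function):
# BLSTMLayer keeps the time length and maps dim_in -> dim_out.
def _demo_blstm_layer():
    layer = BLSTMLayer(input_dim=80, output_dim=64)
    x = torch.randn(1, 100, 80)     # (batchsize, length, dim_in)
    print(layer(x).shape)           # torch.Size([1, 100, 64])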
#
# 1D dilated convolution that keep the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is optional
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.pad_mode = pad_mode
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0),
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
# permmute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False, \
pad_mode='replicate'):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False, \
pad_mode=pad_mode)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
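# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# a non-trainable, length-preserving smoother with a 5-point window.
def _demo_moving_average():
    smoother = MovingAverage(feature_dim=1, window_len=5)
    x = torch.randn(1, 200, 1)      # (batchsize, length, dim)
    print(smoother(x).shape)        # torch.Size([1, 200, 1]), smoothed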
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
flag_train: whether train the filter coefficients (default false)
Input data: (batchsize=1, length, feature_dim)
Output data: (batchsize=1, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
class TimeVarFIRFilter(torch_nn.Module):
""" TimeVarFIRFilter
Given sequences of filter coefficients and a signal, do filtering
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
For batch 0:
      For n in [0, sequence_length):
        output(0, n, 1) = \sum_{k=0}^{K-1} signal(0, n-k, 1) * coef(0, n, k)
Note: filter coef (0, n, :) is only used to compute the output
at (0, n, 1)
"""
def __init__(self):
super(TimeVarFIRFilter, self).__init__()
def forward(self, signal, f_coef):
"""
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
Output: (batchsize=1, signal_length, 1)
    For n in [0, sequence_length):
     output(0, n, 1) = \sum_{k=0}^{K-1} signal(0, n-k, 1) * coef(0, n, k)
This method may be not efficient:
Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K]
output [y_1, y_2, y_3, ..., y_N, *, * ... *]
= a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
"""
signal_l = signal.shape[1]
order_k = f_coef.shape[-1]
# pad to (batchsize=1, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
y = torch.zeros_like(signal)
# roll and weighted sum, only take [0:signal_length]
for k in range(order_k):
y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \
* f_coef[:, :, k:k+1]
# done
return y
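# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# a time-invariant 3-tap moving average expressed through the time-variant
# interface, i.e., every time step shares the same coefficients.
def _demo_time_var_fir():
    filt = TimeVarFIRFilter()
    signal = torch.randn(1, 100, 1)      # (batchsize, length, 1)
    coef = torch.ones(1, 100, 3) / 3.0   # (batchsize, length, K=3)
    print(filt(signal, coef).shape)      # torch.Size([1, 100, 1])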
# Sinc filter generator
class SincFilter(torch_nn.Module):
""" SincFilter
Given the cut-off-frequency, produce the low-pass and high-pass
windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1),
output filter coef is (batchsize=1, signal_length, filter_order).
For each time step in [1, signal_length), we calculate one
filter for low-pass sinc filter and another for high-pass filter.
Example:
import scipy
import scipy.signal
import numpy as np
filter_order = 31
cut_f = 0.2
sinc_layer = SincFilter(filter_order)
lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f)
w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1])
w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1])
plt.plot(w, 20*np.log10(np.abs(h1)))
plt.plot(w, 20*np.log10(np.abs(h2)))
plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0])
"""
def __init__(self, filter_order):
super(SincFilter, self).__init__()
        # Make the filter order an odd number
# [-(M-1)/2, ... 0, (M-1)/2]
#
self.half_k = (filter_order - 1) // 2
        self.order = self.half_k * 2 + 1
def hamming_w(self, n_index):
""" prepare hamming window for each time step
n_index (batchsize=1, signal_length, filter_order)
For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2]
...
output (batchsize=1, signal_length, filter_order)
output[0, 0, :] = hamming_window
output[0, 1, :] = hamming_window
...
"""
# Hamming window
return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order)
def sinc(self, x):
""" Normalized sinc-filter sin( pi * x) / pi * x
https://en.wikipedia.org/wiki/Sinc_function
Assume x (batchsize, signal_length, filter_order) and
x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order]
x[:, :, self.half_order] -> time index = 0, sinc(0)=1
"""
y = torch.zeros_like(x)
y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \
/ (np.pi * x[:, :, 0:self.half_k])
y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \
/ (np.pi * x[:, :, self.half_k+1:])
y[:,:,self.half_k] = 1
return y
def forward(self, cut_f):
""" lp_coef, hp_coef = forward(self, cut_f)
cut-off frequency cut_f (batchsize=1, length, dim = 1)
lp_coef: low-pass filter coefs (batchsize, length, filter_order)
hp_coef: high-pass filter coefs (batchsize, length, filter_order)
"""
# create the filter order index
with torch.no_grad():
# [- (M-1) / 2, ..., 0, ..., (M-1)/2]
lp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
# [[[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ],
# [[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ]]
lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
hp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
# temporary buffer of [-1^n] for gain norm in hp_coef
tmp_one = torch.pow(-1, hp_coef)
# unnormalized filter coefs with hamming window
lp_coef = cut_f * self.sinc(cut_f * lp_coef) \
* self.hamming_w(lp_coef)
hp_coef = (self.sinc(hp_coef) \
- cut_f * self.sinc(cut_f * hp_coef)) \
* self.hamming_w(hp_coef)
            # normalize the coefs so that the gain is 0 dB at 0 (low-pass)
            # and at pi (high-pass)
# sum_n lp_coef[n]
lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1)
# sum_n hp_coef[n] * -1^n
hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1)
lp_coef = lp_coef / lp_coef_norm
hp_coef = hp_coef / hp_coef_norm
# return normed coef
return lp_coef, hp_coef
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize=1, length, dim)
    Output tensor: (batchsize=1, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute it back to (batchsize=1, length, dim)
# and do two moving average
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
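# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# frame-rate features are repeated up_sampling_factor times and smoothed,
# e.g., 10 frames at 5 ms -> 800 waveform-rate vectors at 16 kHz.
def _demo_upsample_layer():
    upsamp = UpSampleLayer(feature_dim=64, up_sampling_factor=80,
                           smoothing=True)
    x = torch.randn(1, 10, 64)      # 10 frames
    print(upsamp(x).shape)          # torch.Size([1, 800, 64])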
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size,\
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_s]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
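# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# one block transforms a 1-dim excitation signal, conditioned on
# waveform-aligned hidden features.
def _demo_neural_filter_block():
    block = NeuralFilterBlock(signal_size=1, hidden_size=64)
    signal = torch.randn(1, 1600, 1)     # (batchsize, length, signal_size)
    context = torch.randn(1, 1600, 64)   # (batchsize, length, hidden_size)
    print(block(signal, context).shape)  # torch.Size([1, 1600, 1])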
#
# Sine waveform generator
#
# Sine waveform generator
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
flag_for_pulse: this SinGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2 * pi * \sum_{i=1}^{t} rad_i)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
            # This will not change F0: sin((x-1) * 2*pi) = sin(x * 2*pi)
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) \
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
            # voiced segment is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
                # each voiced segment
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
            # noise: for unvoiced segments, noise amplitude should be
            #        similar to sine_amp: std = self.sine_amp/3 -> max ~ sine_amp
            #        for voiced regions, the std is self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
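# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# a 1-second, 220 Hz F0 contour with two overtones; zeros in the F0 input
# would denote unvoiced regions filled with noise.
def _demo_sine_gen():
    gen = SineGen(samp_rate=16000, harmonic_num=2)
    f0 = torch.ones(1, 16000, 1) * 220.0
    sine_waves, uv, noise = gen(f0)
    print(sine_waves.shape, uv.shape)    # (1, 16000, 3), (1, 16000, 1)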
#####
## Model definition
##
## For condition module only provide Spectral feature to Filter block
class CondModuleHnSincNSF(torch_nn.Module):
""" Condition module for hn-sinc-NSF
Upsample and transform input features
CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
CondModuleHnSincNSF(input_dim, output_dim, up_sample,
blstm_s = 64, cnn_kernel_s = 3,
voiced_threshold = 0):
input_dim: sum of dimensions of input features
output_dim: dim of the feature Spec to be used by neural filter-block
up_sample: up sampling rate of input features
blstm_s: dimension of the features from blstm (default 64)
cnn_kernel_s: kernel size of CNN in condition module (default 3)
voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0):
super(CondModuleHnSincNSF, self).__init__()
# input feature dimension
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
#self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.cut_f_smooth = up_sample * 4
self.voiced_threshold = voiced_threshold
# the blstm layer
tmp_input_size = [input_dim, output_dim, output_dim]
tmp_output_size = [output_dim, output_dim, output_dim]
tmp = [Conv1dKeepLength(x, y, dilation_s = 1,
kernel_s = self.cnn_kernel_s)
for x, y in zip(tmp_input_size, tmp_output_size)]
self.l_conv1ds = torch_nn.ModuleList(tmp)
#self.l_conv1ds = BLSTMLayer(input_dim, self.blstm_s)
# the CNN layer (+1 dim for cut_off_frequence of sinc filter)
#self.l_conv1d = Conv1dKeepLength(self.blstm_s, \
# self.output_dim, \
# dilation_s = 1, \
# kernel_s = self.cnn_kernel_s)
# Upsampling layer for hidden features
self.l_upsamp = UpSampleLayer(self.output_dim, \
self.up_sample, True)
# separate layer for up-sampling normalized F0 values
self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
# Another smoothing layer to smooth the cut-off frequency
# for sinc filters. Use a larger window to smooth
self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth)
def get_cut_f(self, hidden_feat, f0):
""" cut_f = get_cut_f(self, feature, f0)
feature: (batchsize, length, dim=1)
f0: (batchsize, length, dim=1)
"""
# generate uv signal
uv = torch.ones_like(f0) * (f0 > self.voiced_threshold)
        # hidden_feat is in (-1, 1) after the tanh-activated conv1d;
        # scaled by 0.2 and shifted by 0.3, it falls in (0.1, 0.5)
        # voiced:   (0.1, 0.5) + 0.4 -> (0.5, 0.9)
        # unvoiced: (0.1, 0.5)
return hidden_feat * 0.2 + uv * 0.4 + 0.3
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
tmp = feature
for l_conv in self.l_conv1ds:
tmp = l_conv(tmp)
tmp = self.l_upsamp(tmp)
        # concatenate normed F0 with hidden spectral features
context = torch.cat((tmp[:, :, 0:self.output_dim-1], \
self.l_upsamp_f0_hi(feature[:, :, -1:])), \
dim=2)
# hidden feature for cut-off frequency
hidden_cut_f = tmp[:, :, self.output_dim-1:]
# directly up-sample F0 without smoothing
f0_upsamp = self.l_upsamp_F0(f0)
# get the cut-off-frequency from output of CNN
cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp)
# smooth the cut-off-frequency using fixed average smoothing
cut_f_smoothed = self.l_cut_f_smooth(cut_f)
# return
return context, f0_upsamp, cut_f_smoothed, hidden_cut_f
# For source module
class SourceModuleHnNSF(torch_nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshod: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshod)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
        noise_source (batchsize, length, 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
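# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# the source module merges all harmonics into one excitation channel and
# returns a separate noise source for the noise branch.
def _demo_source_module():
    source = SourceModuleHnNSF(sampling_rate=16000, harmonic_num=7)
    f0_upsampled = torch.ones(1, 16000, 1) * 220.0
    sine_merge, noise, uv = source(f0_upsampled)
    print(sine_merge.shape, noise.shape, uv.shape)  # each (1, 16000, 1)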
# For Filter module
class FilterModuleHnSincNSF(torch_nn.Module):
""" Filter for Hn-sinc-NSF
FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31,
block_num = 5, kernel_size = 3,
conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
sinc_order: order of the sinc filter
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
Usage:
output = FilterModuleHnSincNSF(har_source, noi_source, cut_f, context)
har_source: source for harmonic branch (batchsize, length, dim=1)
noi_source: source for noise branch (batchsize, length, dim=1)
cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1)
context: hidden features to be added (batchsize, length, dim)
output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, sinc_order = 31, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleHnSincNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.sinc_order = sinc_order
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# sinc filter generators and time-variant filtering layer
self.l_sinc_coef = SincFilter(self.sinc_order)
self.l_tv_filtering = TimeVarFIRFilter()
# done
def forward(self, har_component, noi_component, cond_feat, cut_f):
"""
"""
# harmonic component
for l_har_block in self.l_har_blocks:
har_component = l_har_block(har_component, cond_feat)
        # noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, cond_feat)
# get sinc filter coefficients
lp_coef, hp_coef = self.l_sinc_coef(cut_f)
# time-variant filtering
har_signal = self.l_tv_filtering(har_component, lp_coef)
noi_signal = self.l_tv_filtering(noi_component, hp_coef)
# get output
return har_signal + noi_signal
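# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function):
# random sources and conditions; cut-off frequencies are on the normalized
# (0, 1) axis, as produced by the condition module.
def _demo_filter_module():
    fmod = FilterModuleHnSincNSF(signal_size=1, hidden_size=64)
    har = torch.randn(1, 1600, 1)             # harmonic-branch source
    noi = torch.randn(1, 1600, 1)             # noise-branch source
    cond = torch.randn(1, 1600, 64)           # conditioning features
    cut_f = torch.ones(1, 1600, 1) * 0.2      # normalized cut-off frequency
    print(fmod(har, noi, cond, cut_f).shape)  # torch.Size([1, 1600, 1])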
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
# assume input_reso has the same value
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_s = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# number of harmonic overtones in source
self.harmonic_num = 7
# order of sinc-windowed-FIR-filter
self.sinc_order = 31
# the three modules
self.m_cond = CondModuleHnSincNSF(self.input_dim, \
self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s=self.cnn_kernel_s)
self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num,
self.sine_amp, self.noise_std)
self.m_filter = FilterModuleHnSincNSF(self.output_dim, \
self.hidden_dim, \
self.sinc_order, \
self.filter_block_num, \
self.cnn_kernel_s, \
self.cnn_num_in_block)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
        # assume x[:, :, -1] is F0; extract it before normalization
f0 = x[:, :, -1:]
# normalize the input features data
feat = self.normalize_input(x)
# condition module
# feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc,
# hidden-feature-for-cut-off-f
cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0)
# source module
# harmonic-source, noise-source (for noise branch), uv
har_source, noi_source, uv = self.m_source(f0_upsamped)
# neural filter module (including sinc-based FIR filtering)
# output
output = self.m_filter(har_source, noi_source, cond_feat, cut_f)
if self.training:
            # just in case we need to penalize the hidden feature for
# cut-off-freq.
return [output.squeeze(-1), hid_cut_f]
else:
return output.squeeze(-1)
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# fft length
self.fft_n = [512, 128, 2048]
# window type in stft
self.win = torch.hann_window
        # floor in log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# loss function
self.loss = torch_nn.MSELoss()
# weight to penalize hidden features for cut-off-frequency
# for experiments on CMU-arctic, ATR-F009, VCTK, cutoff_w = 0.0
self.cutoff_w = 0.0
return
def _stft(self, signal, fft_p, frame_shift, frame_len):
""" wrapper of torch.stft
Remember to use onesided=True, pad_mode="constant"
Signal (batchsize, length)
Output (batchsize, fft_p/2+1, frame_num, 2)
"""
        # to be compatible with different torch versions
        # (the legacy call without return_complex only applies to torch < 1.7)
        tmp_ver = torch.__version__.split('.')
        if tmp_ver[0].isnumeric() and int(tmp_ver[0]) < 2 and \
           tmp_ver[1].isnumeric() and int(tmp_ver[1]) < 7:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant")
else:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant", return_complex=False)
def _amp(self, x):
""" _amp(stft)
x_stft: (batchsize, fft_p/2+1, frame_num, 2)
output: (batchsize, fft_p/2+1, frame_num)
        output[x, y, z] = log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2
                              + floor)
"""
return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor)
def compute(self, outputs, target):
""" Loss().compute(outputs, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# hidden-feature for cut-off-frequency
cut_f = outputs[1]
# generated signal
output = outputs[0]
# convert from (batchsize=1, length, dim=1) to (1, length)
if target.ndim == 3:
target.squeeze_(-1)
# compute loss
loss = 0
for frame_shift, frame_len, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
x_stft = self._stft(output, fft_p, frame_shift, frame_len)
y_stft = self._stft(target, fft_p, frame_shift, frame_len)
x_sp_amp = self._amp(x_stft)
y_sp_amp = self._amp(y_stft)
loss += self.loss(x_sp_amp, y_sp_amp)
# A norm on cut_f, which forces sinc-cut-off-frequency
# to be close to the U/V-decided value
# Experiments on CMU-arctic, ATR-F009, and VCTK don't use it
# by setting self.cutoff_w = 0.0
# However, just in case
loss += self.cutoff_w * self.loss(cut_f, torch.zeros_like(cut_f))
return loss
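# --------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical demo function,
# assuming a torch version handled by the _stft branch above): the loss
# compares generated and target waveforms at three STFT resolutions; the
# hidden cut-off feature is regularized only when cutoff_w > 0.
def _demo_spectral_loss():
    loss_fn = Loss(None)
    generated = torch.randn(1, 16000)        # mimics outputs[0]
    hid_cut_f = torch.zeros(1, 16000, 1)     # mimics outputs[1]
    target = torch.randn(1, 16000, 1)
    print(loss_fn.compute([generated, hid_cut_f], target).item())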
if __name__ == "__main__":
print("Definition of model")
| 40,407 | 38.810837 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-sinc-nsf-10/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'cmu_all_trn'
val_set_name = 'cmu_all_val'
# for convenience
tmp = '../DATA/cmu-arctic-data-set'
# File lists (text file, one data name per line, without name extension)
# trn_file_list: list of files for training set
trn_list = [tmp + '/scp/train.lst']
# val_file_list: list of files for validation set. It can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [80, 1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.mfbsp', '.f0']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [80, 80]
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [True, True]
# Similar configurations for output features
output_dirs = [[tmp + '/wav_16k_norm']]
output_dims = [1]
output_exts = ['.wav']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 16000 * 3
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 80 * 50
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = ['cmu_all_test_tiny']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476',
'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476',
'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476',
'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']]
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Directories for output features, which are []
test_output_dirs = [[]]
| 3,430 | 32.31068 | 75 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/main.py | #!/usr/bin/env python
"""
main.py for project-NN-pytorch/projects
The training/inference process wrapper.
Dataset API is replaced with NII_MergeDataSetLoader.
It is more convenient to train model on corpora stored in different directories.
Requires model.py and config.py (config_merge_datasets.py)
Usage: $: python main.py [options]
"""
from __future__ import absolute_import
import os
import sys
import torch
import importlib
import core_scripts.other_tools.display as nii_warn
import core_scripts.data_io.default_data_io as nii_default_dset
import core_scripts.data_io.customize_dataset as nii_dset
import core_scripts.data_io.conf as nii_dconf
import core_scripts.other_tools.list_tools as nii_list_tool
import core_scripts.config_parse.config_parse as nii_config_parse
import core_scripts.config_parse.arg_parse as nii_arg_parse
import core_scripts.op_manager.op_manager as nii_op_wrapper
import core_scripts.nn_manager.nn_manager as nii_nn_wrapper
import core_scripts.startup_config as nii_startup
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
def main():
""" main(): the default wrapper for training and inference process
Please prepare config.py and model.py
"""
# arguments initialization
args = nii_arg_parse.f_args_parsed()
#
nii_warn.f_print_w_date("Start program", level='h')
nii_warn.f_print("Load module: %s" % (args.module_config))
nii_warn.f_print("Load module: %s" % (args.module_model))
prj_conf = importlib.import_module(args.module_config)
prj_model = importlib.import_module(args.module_model)
# initialization
nii_startup.set_random_seed(args.seed, args)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# prepare data io
if not args.inference:
params = {'batch_size': args.batch_size,
'shuffle': args.shuffle,
'num_workers': args.num_workers,
'sampler': args.sampler}
in_trans_fns = prj_conf.input_trans_fns \
if hasattr(prj_conf, 'input_trans_fns') else None
out_trans_fns = prj_conf.output_trans_fns \
if hasattr(prj_conf, 'output_trans_fns') else None
# Load file list and create data loader
trn_lst = prj_conf.trn_list
trn_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.trn_set_name, \
trn_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./',
params = params,
truncate_seq = prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = True,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
if prj_conf.val_list is not None:
val_lst = prj_conf.val_list
val_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.val_set_name,
val_lst,
prj_conf.input_dirs, \
prj_conf.input_exts, \
prj_conf.input_dims, \
prj_conf.input_reso, \
prj_conf.input_norm, \
prj_conf.output_dirs, \
prj_conf.output_exts, \
prj_conf.output_dims, \
prj_conf.output_reso, \
prj_conf.output_norm, \
'./', \
params = params,
truncate_seq= prj_conf.truncate_seq,
min_seq_len = prj_conf.minimum_len,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
else:
val_set = None
# initialize the model and loss function
model = prj_model.Model(trn_set.get_in_dim(), \
trn_set.get_out_dim(), \
args, prj_conf, trn_set.get_data_mean_std())
loss_wrapper = prj_model.Loss(args)
# initialize the optimizer
optimizer_wrapper = nii_op_wrapper.OptimizerWrapper(model, args)
# if necessary, resume training
if args.trained_model == "":
checkpoint = None
else:
checkpoint = torch.load(args.trained_model)
# start training
nii_nn_wrapper.f_train_wrapper(args, model,
loss_wrapper, device,
optimizer_wrapper,
trn_set, val_set, checkpoint)
        # done for training
else:
# for inference
# default, no truncating, no shuffling
params = {'batch_size': args.batch_size,
'shuffle': False,
'num_workers': args.num_workers}
in_trans_fns = prj_conf.test_input_trans_fns \
if hasattr(prj_conf, 'test_input_trans_fns') else None
out_trans_fns = prj_conf.test_output_trans_fns \
if hasattr(prj_conf, 'test_output_trans_fns') else None
if type(prj_conf.test_list) is list:
t_lst = prj_conf.test_list
else:
t_lst = nii_list_tool.read_list_from_text(prj_conf.test_list)
test_set = nii_dset.NII_MergeDataSetLoader(
prj_conf.test_set_name, \
t_lst, \
prj_conf.test_input_dirs,
prj_conf.input_exts,
prj_conf.input_dims,
prj_conf.input_reso,
prj_conf.input_norm,
prj_conf.test_output_dirs,
prj_conf.output_exts,
prj_conf.output_dims,
prj_conf.output_reso,
prj_conf.output_norm,
'./',
params = params,
truncate_seq= None,
min_seq_len = None,
save_mean_std = False,
wav_samp_rate = prj_conf.wav_samp_rate,
way_to_merge = args.way_to_merge_datasets,
global_arg = args,
dset_config = prj_conf,
input_augment_funcs = in_trans_fns,
output_augment_funcs = out_trans_fns)
# initialize model
model = prj_model.Model(test_set.get_in_dim(), \
test_set.get_out_dim(), \
args, prj_conf)
if args.trained_model == "":
print("No model is loaded by ---trained-model for inference")
print("By default, load %s%s" % (args.save_trained_name,
args.save_model_ext))
checkpoint = torch.load("%s%s" % (args.save_trained_name,
args.save_model_ext))
else:
checkpoint = torch.load(args.trained_model)
# do inference and output data
nii_nn_wrapper.f_inference_wrapper(args, model, device, \
test_set, checkpoint)
# done
return
if __name__ == "__main__":
main()
| 7,819 | 35.886792 | 80 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/model.py | #!/usr/bin/env python
"""
model.py for harmonic-plus-noise NSF
version: 1
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
# Building blocks (torch.nn modules + dimension operation)
#
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over BLSTM
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length, dim_out)
We want to keep the length the same
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
        # permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
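# Illustrative usage of BLSTMLayer (a minimal sketch, not part of the
# original recipe; it only demonstrates the expected tensor shapes):
# >>> layer = BLSTMLayer(input_dim=80, output_dim=64)
# >>> x = torch.randn(2, 100, 80)    # (batchsize, length, dim_in)
# >>> y = layer(x)                   # (2, 100, 64), length preserved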
#
# 1D dilated convolution that keep the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper for causal convolution
Input tensor: (batchsize, length, dim_in)
Output tensor: (batchsize, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is applied
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
            # pad on both sides
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0)).squeeze(2)
# tanh(conv1())
# permmute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
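# Illustrative padding arithmetic for Conv1dKeepLength (a hedged sketch,
# not part of the original recipe): with dilation_s=4, kernel_s=3 and
# causal=True, the layer left-pads 4 * (3 - 1) = 8 samples, so the output
# length equals the input length.
# >>> conv = Conv1dKeepLength(64, 64, dilation_s=4, kernel_s=3, causal=True)
# >>> y = conv(torch.randn(1, 100, 64))    # y.shape == (1, 100, 64)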
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
    Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
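# Illustrative usage of MovingAverage (sketch, not in the original recipe):
# >>> smoother = MovingAverage(feature_dim=1, window_len=5)
# >>> y = smoother(torch.randn(1, 100, 1))    # smoothed, same shape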
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
flag_train: whether train the filter coefficients (default: false)
Input data: (batchsize, length, feature_dim)
Output data: (batchsize, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
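# Illustrative usage of TimeInvFIRFilter (sketch; the 3-tap coefficients
# below are arbitrary and only demonstrate the expected 1-D filter_coef):
# >>> coef = torch.tensor([0.5, 0.0, -0.5])
# >>> fir = TimeInvFIRFilter(feature_dim=1, filter_coef=coef)
# >>> y = fir(torch.randn(1, 100, 1))    # causal FIR, length preserved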
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize, length, dim)
    Output tensor: (batchsize, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
        # permute it back to (batchsize=1, length, dim)
# and do two moving average
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
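# Illustrative usage of UpSampleLayer (sketch): frame-level features at a
# 5 ms shift up-sampled to a 16 kHz waveform rate, i.e., by a factor of 80:
# >>> upsamp = UpSampleLayer(feature_dim=64, up_sampling_factor=80, smoothing=True)
# >>> y = upsamp(torch.randn(1, 10, 64))    # (1, 800, 64)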
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size, \
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_size = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_size]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4,
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size,
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
        # compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
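# Illustrative usage of NeuralFilterBlock (sketch, assuming a 1-d signal
# and 64-d context as in the default hn-nsf configuration):
# >>> block = NeuralFilterBlock(signal_size=1, hidden_size=64)
# >>> sig = torch.randn(1, 800, 1)     # e.g., source excitation
# >>> ctx = torch.randn(1, 800, 64)    # up-sampled condition features
# >>> out = block(sig, ctx)            # (1, 800, 1)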
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase: sine[t] = sin(2*pi \sum_{i=1}^{t} rad_i)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
# This will not change F0 of sine because (x-1) * 2*pi = x *2*pi
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) \
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
# voiced segments is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
                # temp_sum stores the accumulation of instantaneous phase
                # within each voiced segment
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx+1] = f0_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
            # noise: for unvoiced regions, the noise std should be similar
            #   to sine_amp (std = self.sine_amp/3 -> max value ~ self.sine_amp);
            #   for voiced regions, std = self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
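# Illustrative usage of SineGen (sketch; a constant 100 Hz F0 is assumed
# here only for demonstration, since F0 should be 0 in unvoiced frames):
# >>> gen = SineGen(16000, harmonic_num=7)
# >>> f0 = torch.ones(1, 16000, 1) * 100
# >>> sine, uv, noise = gen(f0)
# >>> # sine: (1, 16000, 8), uv: (1, 16000, 1), noise: (1, 16000, 8)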
#####
## Model definition
##
## For condition module only provide Spectral feature to Filter block
class CondModule(torch_nn.Module):
""" Conditiona module
Upsample and transform input features
CondModule(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0 = CondModule(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3):
super(CondModule, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.l_blstm = BLSTMLayer(input_dim, self.blstm_s)
self.l_conv1d = Conv1dKeepLength(self.blstm_s, output_dim, 1, \
self.cnn_kernel_s)
self.l_upsamp = UpSampleLayer(self.output_dim, self.up_sample,
True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
spec = self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
f0 = self.l_upsamp_F0(f0)
return spec, f0
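# Illustrative usage of CondModule (sketch, assuming 80-dim mel features
# and frame-level F0 at a 5 ms shift, up-sampled by a factor of 80):
# >>> cond = CondModule(input_dim=80, output_dim=64, up_sample=80)
# >>> spec, f0 = cond(torch.randn(1, 10, 80), torch.randn(1, 10, 1))
# >>> # spec: (1, 800, 64), f0: (1, 800, 1)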
# For source module
class SourceModuleHnNSF(torch_nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshold=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
                   note that the noise amplitude in unvoiced regions is
                   determined by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
    Sine_source, noise_source, uv = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length, 1)
uv (batchsize, length, 1)
"""
    def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshold=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshold)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
        Sine_source, noise_source, uv = SourceModuleHnNSF(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length, 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
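# Illustrative usage of SourceModuleHnNSF (sketch; F0 must already be
# up-sampled to the waveform level, and 100 Hz is assumed only for demo):
# >>> src = SourceModuleHnNSF(16000, harmonic_num=7)
# >>> f0_up = torch.ones(1, 16000, 1) * 100
# >>> sine, noise, uv = src(f0_up)    # each of shape (1, 16000, 1)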
# For Filter module
class FilterModuleHnNSF(torch_nn.Module):
""" Filter for Hn-NSF
FilterModuleHnNSF(signal_size, hidden_size, fir_coef,
block_num = 5,
kernel_size = 3, conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
fir_coef: list of FIR filter coeffs,
(low_pass_1, low_pass_2, high_pass_1, high_pass_2)
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
    output = FilterModuleHnNSF(harmonic_source, noise_source, context, uv)
harmonic_source (batchsize, length, dim=1)
noise_source (batchsize, length, dim=1)
context (batchsize, length, dim)
uv (batchsize, length, dim)
output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, filter_coef, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleHnNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.filter_coef = filter_coef
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# FIR filter groups
# lp_v: filter for voiced region, harmonic component
# lp_u: filter for unvoiced region, harmonic component
# hp_v: filter for voiced region, noise component
# hp_u: filter for unvoiced region, noise component
self.l_fir_lp_v = TimeInvFIRFilter(signal_size, filter_coef[0])
self.l_fir_lp_u = TimeInvFIRFilter(signal_size, filter_coef[1])
self.l_fir_hp_v = TimeInvFIRFilter(signal_size, filter_coef[2])
self.l_fir_hp_u = TimeInvFIRFilter(signal_size, filter_coef[3])
def forward(self, har_component, noi_component, condition_feat, uv):
"""
"""
# harmonic component
for l_har_block in self.l_har_blocks:
har_component = l_har_block(har_component, condition_feat)
        # noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, condition_feat)
# harmonic + noise in time-domain
# assume uv is {0, 1}, produce a weight vector for voiced/unvoiced
# sigmoid is used to avoid {0, 1}, and uv is scaled to {-5, 5}
w_voi = torch.sigmoid((uv - 0.5) * 10)
w_unv = 1.0 - w_voi
har_v = self.l_fir_lp_v(har_component)
har_u = self.l_fir_lp_u(har_component)
noi_v = self.l_fir_hp_v(noi_component)
noi_u = self.l_fir_hp_u(noi_component)
output = (har_v + noi_v) * w_voi + (har_u + noi_u) * w_unv
return output
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, prj_conf, mean_std=None):
super(Model, self).__init__()
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
        # (assume all entries of input_reso share this value)
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_size = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# number of harmonic overtone in source
self.harmonic_num = 7
# fixed filter coefficients
# computed using PM algorithm
# (tool: http://t-filter.engineerjs.com)
#
# low-pass for harmonic-component in voiced region
# 16kHz, pass-band 0-5K, gain 1, ripple 5dB,
# stop-band 7-8k, gain 0, ripple -40dB)
lp_v = [0.08538414199291068, 0.04920229475534168,
-0.1470178606967731, 0.24737764593887432,
0.7103067853166558, 0.24737764593887432,
-0.1470178606967731, 0.04920229475534168,
0.08538414199291068]
        # low-pass for harmonic-component in unvoiced region
# 16kHz, pass-band 0-1K, gain 1, ripple 5dB,
# stop-band 3-8k, gain 0, ripple -40dB)
lp_u = [0.00936455546502, 0.0416254862901, 0.0878313219556,
0.146086321198, 0.192602581136, 0.211221591449,
0.192602581136, 0.146086321198, 0.0878313219556,
0.0416254862901, 0.00936455546502]
#
# high-pass for noise-component in voiced region
# 16kHz, pass-band 7-8K, gain 1, ripple 5dB,
# stop-band 0-5k, gain 0, ripple -40dB)
hp_v = [-0.00936455546502148, 0.04162548629009957,
-0.08783132195564508, 0.1460863211980122,
-0.19260258113649556, 0.21122159144894015,
-0.19260258113649556, 0.1460863211980122,
-0.08783132195564508, 0.04162548629009957,
-0.00936455546502148]
#
# high-pass for noise-component in unvoiced region
# 16kHz, pass-band 3-8K, gain 1, ripple 5dB,
# stop-band 0-1k, gain 0, ripple -40dB)
hp_u = [0.0853841419929, -0.0492022947553, -0.147017860697,
-0.247377645939, 0.710306785317, -0.247377645939,
-0.147017860697, -0.0492022947553, 0.0853841419929]
self.fir_filters = [torch.tensor(lp_v), torch.tensor(lp_u),
torch.tensor(hp_v), torch.tensor(hp_u)]
# the three modules
self.m_condition = CondModule(self.input_dim, self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s = self.cnn_kernel_size)
self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num,
self.sine_amp, self.noise_std)
self.m_filter = FilterModuleHnNSF(self.output_dim,
self.hidden_dim,\
self.fir_filters,
self.filter_block_num, \
self.cnn_kernel_size, \
self.cnn_num_in_block)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.ones([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.ones([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
        # assume x[:, :, -1] is F0; take the raw F0 before normalization
f0 = x[:, :, -1:]
# normalize the data
feat = self.normalize_input(x)
# condition module
# features_for_filter_block, up-sampled F0
cond_feat, f0_upsamped = self.m_condition(feat, f0)
# source module
# harmonic-source, noise-source (for noise branch), uv flag
har_source, noi_source, uv = self.m_source(f0_upsamped)
# filter module (including FIR filtering)
# output signal
output = self.m_filter(har_source, noi_source, cond_feat, uv)
# output
return output.squeeze(-1)
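# Illustrative usage of Model (hedged sketch; SimpleNamespace stands in
# for the real config.py, and args=None works here only because args is
# not used when mean_std is left unset):
# >>> from types import SimpleNamespace
# >>> conf = SimpleNamespace(input_reso=[80, 80], wav_samp_rate=16000)
# >>> net = Model(in_dim=81, out_dim=1, args=None, prj_conf=conf)
# >>> wav = net(torch.randn(1, 20, 81))    # (1, 20 * 80) = (1, 1600)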
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# FFT length
self.fft_n = [512, 128, 2048]
# window type
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculating
self.amp_floor = 0.00001
# loss
self.loss = torch_nn.MSELoss()
return
def _stft(self, signal, fft_p, frame_shift, frame_len):
""" wrapper of torch.stft
Remember to use onesided=True, pad_mode="constant"
Signal (batchsize, length)
Output (batchsize, fft_p/2+1, frame_num, 2)
"""
        # to be compatible with different torch versions
        # (return_complex was introduced in torch 1.7)
        ver = torch.__version__.split('.')
        if ver[0].isnumeric() and ver[1].isnumeric() and \
           (int(ver[0]), int(ver[1])) < (1, 7):
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant")
else:
return torch.stft(
signal, fft_p, frame_shift, frame_len,
window=self.win(frame_len, dtype=signal.dtype,
device=signal.device),
onesided=True, pad_mode="constant", return_complex=False)
def _amp(self, x):
""" _amp(stft)
x_stft: (batchsize, fft_p/2+1, frame_num, 2)
output: (batchsize, fft_p/2+1, frame_num)
        output[x, y, z] = log(x_stft[x, y, z, 0]^2 + x_stft[x, y, z, 1]^2
+ floor)
"""
return torch.log(torch.norm(x, 2, -1).pow(2) + self.amp_floor)
def compute(self, output, target):
""" Loss().compute(output, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# convert from (batchsize=1, length, dim=1) to (1, length)
if target.ndim == 3:
target.squeeze_(-1)
# compute loss
loss = 0
for frame_shift, frame_len, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
x_stft = self._stft(output, fft_p, frame_shift, frame_len)
y_stft = self._stft(target, fft_p, frame_shift, frame_len)
x_sp_amp = self._amp(x_stft)
y_sp_amp = self._amp(y_stft)
loss += self.loss(x_sp_amp, y_sp_amp)
return loss
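# Illustrative usage of Loss (sketch; args is unused by __init__, so None
# is passed here only for demonstration):
# >>> loss_fn = Loss(None)
# >>> l = loss_fn.compute(torch.randn(1, 16000), torch.randn(1, 16000))
# >>> # l is a scalar tensor: the sum of MSEs over three STFT resolutions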
if __name__ == "__main__":
print("Definition of model")
| 32,145 | 39.384422 | 78 | py |
project-NN-Pytorch-scripts | project-NN-Pytorch-scripts-master/project/01-nsf/hn-nsf/config.py | #!/usr/bin/env python
"""
config.py for project-NN-pytorch/projects
Usage:
For training, change Configuration for training stage
For inference, change Configuration for inference stage
"""
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
#########################################################
## Configuration for training stage
#########################################################
# Name of datasets
# after data preparation, trn/val_set_name are used to save statistics
# about the data sets
trn_set_name = 'cmu_all_trn'
val_set_name = 'cmu_all_val'
# for convenience
tmp = '../DATA/cmu-arctic-data-set'
# File lists (text file, one data name per line, without name extension)
# trn_list: list of files for the training set
trn_list = [tmp + '/scp/train.lst']
# val_list: list of files for the validation set; it can be None
val_list = [tmp + '/scp/val.lst']
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Dimensions of input features
# input_dims = [dimension_of_feature_1, dimension_of_feature_2, ...]
input_dims = [80, 1]
# File name extension for input features
# input_exts = [name_extention_of_feature_1, ...]
# Please put ".f0" as the last feature
input_exts = ['.mfbsp', '.f0']
# Temporal resolution for input features
# input_reso = [reso_feature_1, reso_feature_2, ...]
# for waveform modeling, temporal resolution of input acoustic features
# may be = waveform_sampling_rate * frame_shift_of_acoustic_features
# for example, 80 = 16000 Hz * 5 ms
input_reso = [80, 80]
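# For example, with 16 kHz waveforms and a 5 ms frame shift,
# 16000 samples/s * 0.005 s = 80 samples per frame, hence 80 above.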
# Whether input features should be z-normalized
# input_norm = [normalize_feature_1, normalize_feature_2]
input_norm = [True, True]
# Similar configurations for output features
output_dirs = [[tmp + '/wav_16k_norm']]
output_dims = [1]
output_exts = ['.wav']
output_reso = [1]
output_norm = [False]
# Waveform sampling rate
# wav_samp_rate can be None if no waveform data is used
wav_samp_rate = 16000
# Truncating input sequences so that the maximum length = truncate_seq
# When truncate_seq is larger, more GPU mem required
# If you don't want truncating, please truncate_seq = None
truncate_seq = 16000 * 3
# Minimum sequence length
# If sequence length < minimum_len, this sequence is not used for training
# minimum_len can be None
minimum_len = 80 * 50
#########################################################
## Configuration for inference stage
#########################################################
# similar options to training stage
test_set_name = ['cmu_all_test_tiny']
# List of test set data
# for convenience, you may directly load test_set list here
test_list = [['slt_arctic_b0474', 'slt_arctic_b0475', 'slt_arctic_b0476',
'bdl_arctic_b0474', 'bdl_arctic_b0475', 'bdl_arctic_b0476',
'rms_arctic_b0474', 'rms_arctic_b0475', 'rms_arctic_b0476',
'clb_arctic_b0474', 'clb_arctic_b0475', 'clb_arctic_b0476']]
# Directories for input features
# input_dirs = [path_of_feature_1, path_of_feature_2, ..., ]
# we assume train and validation data are put in the same sub-directory
test_input_dirs = [[tmp + '/5ms/melspec', tmp + '/5ms/f0']]
# Directories for output features, which are []
test_output_dirs = [[]]
| 3,430 | 32.31068 | 75 | py |